summaryrefslogtreecommitdiff
path: root/drivers/s390
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 15:20:36 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 15:20:36 -0700
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/s390
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/s390')
-rw-r--r--drivers/s390/Kconfig209
-rw-r--r--drivers/s390/Makefile9
-rw-r--r--drivers/s390/block/Kconfig68
-rw-r--r--drivers/s390/block/Makefile17
-rw-r--r--drivers/s390/block/dasd.c2065
-rw-r--r--drivers/s390/block/dasd_3370_erp.c104
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2742
-rw-r--r--drivers/s390/block/dasd_9336_erp.c61
-rw-r--r--drivers/s390/block/dasd_9343_erp.c22
-rw-r--r--drivers/s390/block/dasd_cmb.c145
-rw-r--r--drivers/s390/block/dasd_devmap.c772
-rw-r--r--drivers/s390/block/dasd_diag.c541
-rw-r--r--drivers/s390/block/dasd_diag.h66
-rw-r--r--drivers/s390/block/dasd_eckd.c1722
-rw-r--r--drivers/s390/block/dasd_eckd.h346
-rw-r--r--drivers/s390/block/dasd_erp.c254
-rw-r--r--drivers/s390/block/dasd_fba.c607
-rw-r--r--drivers/s390/block/dasd_fba.h73
-rw-r--r--drivers/s390/block/dasd_genhd.c185
-rw-r--r--drivers/s390/block/dasd_int.h576
-rw-r--r--drivers/s390/block/dasd_ioctl.c554
-rw-r--r--drivers/s390/block/dasd_proc.c319
-rw-r--r--drivers/s390/block/dcssblk.c775
-rw-r--r--drivers/s390/block/xpram.c539
-rw-r--r--drivers/s390/char/Makefile28
-rw-r--r--drivers/s390/char/con3215.c1192
-rw-r--r--drivers/s390/char/con3270.c638
-rw-r--r--drivers/s390/char/ctrlchar.c75
-rw-r--r--drivers/s390/char/ctrlchar.h20
-rw-r--r--drivers/s390/char/defkeymap.c156
-rw-r--r--drivers/s390/char/defkeymap.map191
-rw-r--r--drivers/s390/char/fs3270.c373
-rw-r--r--drivers/s390/char/keyboard.c519
-rw-r--r--drivers/s390/char/keyboard.h57
-rw-r--r--drivers/s390/char/monreader.c662
-rw-r--r--drivers/s390/char/raw3270.c1335
-rw-r--r--drivers/s390/char/raw3270.h274
-rw-r--r--drivers/s390/char/sclp.c915
-rw-r--r--drivers/s390/char/sclp.h159
-rw-r--r--drivers/s390/char/sclp_con.c252
-rw-r--r--drivers/s390/char/sclp_cpi.c254
-rw-r--r--drivers/s390/char/sclp_quiesce.c99
-rw-r--r--drivers/s390/char/sclp_rw.c471
-rw-r--r--drivers/s390/char/sclp_rw.h96
-rw-r--r--drivers/s390/char/sclp_tty.c813
-rw-r--r--drivers/s390/char/sclp_tty.h71
-rw-r--r--drivers/s390/char/sclp_vt220.c785
-rw-r--r--drivers/s390/char/tape.h384
-rw-r--r--drivers/s390/char/tape_34xx.c1385
-rw-r--r--drivers/s390/char/tape_block.c492
-rw-r--r--drivers/s390/char/tape_char.c492
-rw-r--r--drivers/s390/char/tape_class.c126
-rw-r--r--drivers/s390/char/tape_class.h61
-rw-r--r--drivers/s390/char/tape_core.c1242
-rw-r--r--drivers/s390/char/tape_proc.c145
-rw-r--r--drivers/s390/char/tape_std.c765
-rw-r--r--drivers/s390/char/tape_std.h152
-rw-r--r--drivers/s390/char/tty3270.c1836
-rw-r--r--drivers/s390/char/vmlogrdr.c920
-rw-r--r--drivers/s390/char/vmwatchdog.c292
-rw-r--r--drivers/s390/cio/Makefile10
-rw-r--r--drivers/s390/cio/airq.c87
-rw-r--r--drivers/s390/cio/airq.h10
-rw-r--r--drivers/s390/cio/blacklist.c351
-rw-r--r--drivers/s390/cio/blacklist.h6
-rw-r--r--drivers/s390/cio/ccwgroup.c482
-rw-r--r--drivers/s390/cio/chsc.c1114
-rw-r--r--drivers/s390/cio/chsc.h66
-rw-r--r--drivers/s390/cio/cio.c860
-rw-r--r--drivers/s390/cio/cio.h143
-rw-r--r--drivers/s390/cio/cio_debug.h32
-rw-r--r--drivers/s390/cio/cmf.c1042
-rw-r--r--drivers/s390/cio/css.c575
-rw-r--r--drivers/s390/cio/css.h155
-rw-r--r--drivers/s390/cio/device.c1135
-rw-r--r--drivers/s390/cio/device.h115
-rw-r--r--drivers/s390/cio/device_fsm.c1250
-rw-r--r--drivers/s390/cio/device_id.c355
-rw-r--r--drivers/s390/cio/device_ops.c603
-rw-r--r--drivers/s390/cio/device_pgid.c448
-rw-r--r--drivers/s390/cio/device_status.c385
-rw-r--r--drivers/s390/cio/ioasm.h228
-rw-r--r--drivers/s390/cio/qdio.c3468
-rw-r--r--drivers/s390/cio/qdio.h648
-rw-r--r--drivers/s390/crypto/Makefile6
-rw-r--r--drivers/s390/crypto/z90common.h168
-rw-r--r--drivers/s390/crypto/z90crypt.h258
-rw-r--r--drivers/s390/crypto/z90hardware.c2243
-rw-r--r--drivers/s390/crypto/z90main.c3563
-rw-r--r--drivers/s390/ebcdic.c246
-rw-r--r--drivers/s390/net/Kconfig108
-rw-r--r--drivers/s390/net/Makefile14
-rw-r--r--drivers/s390/net/claw.c4447
-rw-r--r--drivers/s390/net/claw.h335
-rw-r--r--drivers/s390/net/ctcdbug.c83
-rw-r--r--drivers/s390/net/ctcdbug.h123
-rw-r--r--drivers/s390/net/ctcmain.c3304
-rw-r--r--drivers/s390/net/ctctty.c1276
-rw-r--r--drivers/s390/net/ctctty.h37
-rw-r--r--drivers/s390/net/cu3088.c166
-rw-r--r--drivers/s390/net/cu3088.h41
-rw-r--r--drivers/s390/net/fsm.c220
-rw-r--r--drivers/s390/net/fsm.h265
-rw-r--r--drivers/s390/net/iucv.c2567
-rw-r--r--drivers/s390/net/iucv.h849
-rw-r--r--drivers/s390/net/lcs.c2347
-rw-r--r--drivers/s390/net/lcs.h321
-rw-r--r--drivers/s390/net/netiucv.c2149
-rw-r--r--drivers/s390/net/qeth.h1162
-rw-r--r--drivers/s390/net/qeth_eddp.c643
-rw-r--r--drivers/s390/net/qeth_eddp.h85
-rw-r--r--drivers/s390/net/qeth_fs.h163
-rw-r--r--drivers/s390/net/qeth_main.c8236
-rw-r--r--drivers/s390/net/qeth_mpc.c168
-rw-r--r--drivers/s390/net/qeth_mpc.h538
-rw-r--r--drivers/s390/net/qeth_proc.c495
-rw-r--r--drivers/s390/net/qeth_sys.c1788
-rw-r--r--drivers/s390/net/qeth_tso.c285
-rw-r--r--drivers/s390/net/qeth_tso.h58
-rw-r--r--drivers/s390/net/smsgiucv.c180
-rw-r--r--drivers/s390/net/smsgiucv.h10
-rw-r--r--drivers/s390/s390mach.c219
-rw-r--r--drivers/s390/s390mach.h79
-rw-r--r--drivers/s390/scsi/Makefile9
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1977
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c312
-rw-r--r--drivers/s390/scsi/zfcp_def.h1121
-rw-r--r--drivers/s390/scsi/zfcp_erp.c3585
-rw-r--r--drivers/s390/scsi/zfcp_ext.h186
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c5087
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h472
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c868
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c949
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_adapter.c298
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_driver.c135
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_port.c311
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_unit.c179
-rw-r--r--drivers/s390/sysinfo.c347
138 files changed, 99147 insertions, 0 deletions
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
new file mode 100644
index 000000000000..96413c2cd1ad
--- /dev/null
+++ b/drivers/s390/Kconfig
@@ -0,0 +1,209 @@
+config CCW
+ bool
+ default y
+
+source "drivers/block/Kconfig"
+
+source "drivers/md/Kconfig"
+
+
+menu "Character device drivers"
+
+config UNIX98_PTYS
+ bool "Unix98 PTY support"
+ ---help---
+ A pseudo terminal (PTY) is a software device consisting of two
+ halves: a master and a slave. The slave device behaves identical to
+ a physical terminal; the master device is used by a process to
+ read data from and write data to the slave, thereby emulating a
+ terminal. Typical programs for the master side are telnet servers
+ and xterms.
+
+ Linux has traditionally used the BSD-like names /dev/ptyxx for
+ masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
+ has a number of problems. The GNU C library glibc 2.1 and later,
+ however, supports the Unix98 naming standard: in order to acquire a
+ pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
+ terminal is then made available to the process and the pseudo
+ terminal slave can be accessed as /dev/pts/<number>. What was
+ traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
+
+ The entries in /dev/pts/ are created on the fly by a virtual
+ file system; therefore, if you say Y here you should say Y to
+ "/dev/pts file system for Unix98 PTYs" as well.
+
+ If you want to say Y here, you need to have the C library glibc 2.1
+ or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
+ Read the instructions in <file:Documentation/Changes> pertaining to
+ pseudo terminals. It's safe to say N.
+
+config UNIX98_PTY_COUNT
+ int "Maximum number of Unix98 PTYs in use (0-2048)"
+ depends on UNIX98_PTYS
+ default "256"
+ help
+ The maximum number of Unix98 PTYs that can be used at any one time.
+ The default is 256, and should be enough for desktop systems. Server
+ machines which support incoming telnet/rlogin/ssh connections and/or
+ serve several X terminals may want to increase this: every incoming
+ connection and every xterm uses up one PTY.
+
+ When not in use, each additional set of 256 PTYs occupy
+ approximately 8 KB of kernel memory on 32-bit architectures.
+
+source "drivers/char/watchdog/Kconfig"
+
+comment "S/390 character device drivers"
+
+config TN3270
+ tristate "Support for locally attached 3270 terminals"
+ help
+ Include support for IBM 3270 terminals.
+
+config TN3270_TTY
+ tristate "Support for tty input/output on 3270 terminals"
+ depends on TN3270
+ help
+ Include support for using an IBM 3270 terminal as a Linux tty.
+
+config TN3270_FS
+ tristate "Support for fullscreen applications on 3270 terminals"
+ depends on TN3270
+ help
+ Include support for fullscreen applications on an IBM 3270 terminal.
+
+config TN3270_CONSOLE
+ bool "Support for console on 3270 terminal"
+ depends on TN3270=y && TN3270_TTY=y
+ help
+ Include support for using an IBM 3270 terminal as a Linux system
+ console. Available only if 3270 support is compiled in statically.
+
+config TN3215
+ bool "Support for 3215 line mode terminal"
+ help
+ Include support for IBM 3215 line-mode terminals.
+
+config TN3215_CONSOLE
+ bool "Support for console on 3215 line mode terminal"
+ depends on TN3215
+ help
+ Include support for using an IBM 3215 line-mode terminal as a
+ Linux system console.
+
+config CCW_CONSOLE
+ bool
+ depends on TN3215_CONSOLE || TN3270_CONSOLE
+ default y
+
+config SCLP
+ bool "Support for SCLP"
+ help
+ Include support for the SCLP interface to the service element.
+
+config SCLP_TTY
+ bool "Support for SCLP line mode terminal"
+ depends on SCLP
+ help
+ Include support for IBM SCLP line-mode terminals.
+
+config SCLP_CONSOLE
+ bool "Support for console on SCLP line mode terminal"
+ depends on SCLP_TTY
+ help
+ Include support for using an IBM HWC line-mode terminal as the Linux
+ system console.
+
+config SCLP_VT220_TTY
+ bool "Support for SCLP VT220-compatible terminal"
+ depends on SCLP
+ help
+ Include support for an IBM SCLP VT220-compatible terminal.
+
+config SCLP_VT220_CONSOLE
+ bool "Support for console on SCLP VT220-compatible terminal"
+ depends on SCLP_VT220_TTY
+ help
+ Include support for using an IBM SCLP VT220-compatible terminal as a
+ Linux system console.
+
+config SCLP_CPI
+ tristate "Control-Program Identification"
+ depends on SCLP
+ help
+ This option enables the hardware console interface for system
+ identification. This is commonly used for workload management and
+ gives you a nice name for the system on the service element.
+ Please select this option as a module since built-in operation is
+ completely untested.
+ You should only select this option if you know what you are doing,
+ need this feature and intend to run your kernel in LPAR.
+
+config S390_TAPE
+ tristate "S/390 tape device support"
+ help
+ Select this option if you want to access channel-attached tape
+ devices on IBM S/390 or zSeries.
+ If you select this option you will also want to select at
+ least one of the tape interface options and one of the tape
+ hardware options in order to access a tape device.
+ This option is also available as a module. The module will be
+ called tape390 and include all selected interfaces and
+ hardware drivers.
+
+comment "S/390 tape interface support"
+ depends on S390_TAPE
+
+config S390_TAPE_BLOCK
+ bool "Support for tape block devices"
+ depends on S390_TAPE
+ help
+ Select this option if you want to access your channel-attached tape
+ devices using the block device interface. This interface is similar
+ to CD-ROM devices on other platforms. The tapes can only be
+ accessed read-only when using this interface. Have a look at
+ <file:Documentation/s390/TAPE> for further information about creating
+ volumes for and using this interface. It is safe to say "Y" here.
+
+comment "S/390 tape hardware support"
+ depends on S390_TAPE
+
+config S390_TAPE_34XX
+ tristate "Support for 3480/3490 tape hardware"
+ depends on S390_TAPE
+ help
+ Select this option if you want to access IBM 3480/3490 magnetic
+ tape subsystems and 100% compatibles.
+ It is safe to say "Y" here.
+
+
+
+config VMLOGRDR
+ tristate "Support for the z/VM recording system services (VM only)"
+ depends on IUCV
+ help
+ Select this option if you want to be able to receive records collected
+ by the z/VM recording system services, eg. from *LOGREC, *ACCOUNT or
+ *SYMPTOM.
+ This driver depends on the IUCV support driver.
+
+config MONREADER
+ tristate "API for reading z/VM monitor service records"
+ depends on IUCV
+ help
+ Character device driver for reading z/VM monitor service records
+
+endmenu
+
+menu "Cryptographic devices"
+
+config Z90CRYPT
+ tristate "Support for PCI-attached cryptographic adapters"
+ default "m"
+ help
+ Select this option if you want to use a PCI-attached cryptographic
+ adapter like the PCI Cryptographic Accelerator (PCICA) or the PCI
+ Cryptographic Coprocessor (PCICC). This option is also available
+ as a module called z90crypt.ko.
+
+endmenu
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
new file mode 100644
index 000000000000..c99a2fe92fb0
--- /dev/null
+++ b/drivers/s390/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the S/390 specific device drivers
+#
+
+obj-y += s390mach.o sysinfo.o
+obj-y += cio/ block/ char/ crypto/ net/ scsi/
+
+drivers-y += drivers/s390/built-in.o
+
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
new file mode 100644
index 000000000000..dc1c89dbdb8f
--- /dev/null
+++ b/drivers/s390/block/Kconfig
@@ -0,0 +1,68 @@
+if ARCH_S390
+
+comment "S/390 block device drivers"
+ depends on ARCH_S390
+
+config BLK_DEV_XPRAM
+ tristate "XPRAM disk support"
+ depends on ARCH_S390
+ help
+ Select this option if you want to use your expanded storage on S/390
+ or zSeries as a disk. This is useful as a _fast_ swap device if you
+ want to access more than 2G of memory when running in 31 bit mode.
+ This option is also available as a module which will be called
+ xpram. If unsure, say "N".
+
+config DCSSBLK
+ tristate "DCSSBLK support"
+ help
+ Support for dcss block device
+
+config DASD
+ tristate "Support for DASD devices"
+ depends on CCW
+ help
+ Enable this option if you want to access DASDs directly utilizing
+ S/390s channel subsystem commands. This is necessary for running
+ natively on a single image or an LPAR.
+
+config DASD_PROFILE
+ bool "Profiling support for dasd devices"
+ depends on DASD
+ help
+ Enable this option if you want to see profiling information
+ in /proc/dasd/statistics.
+
+config DASD_ECKD
+ tristate "Support for ECKD Disks"
+ depends on DASD
+ help
+ ECKD devices are the most commonly used devices. You should enable
+ this option unless you are very sure to have no ECKD device.
+
+config DASD_FBA
+ tristate "Support for FBA Disks"
+ depends on DASD
+ help
+ Select this option to be able to access FBA devices. It is safe to
+ say "Y".
+
+config DASD_DIAG
+ tristate "Support for DIAG access to Disks"
+ depends on DASD && ARCH_S390X = 'n'
+ help
+ Select this option if you want to use Diagnose250 command to access
+ Disks under VM. If you are not running under VM or unsure what it is,
+ say "N".
+
+config DASD_CMB
+ tristate "Compatibility interface for DASD channel measurement blocks"
+ depends on DASD
+ help
+ This driver provides an additional interface to the channel measurement
+ facility, which is normally accessed through sysfs, with a set of
+ ioctl functions specific to the dasd driver.
+ This is only needed if you want to use applications written for
+ linux-2.4 dasd channel measurement facility interface.
+
+endif
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
new file mode 100644
index 000000000000..58c6780134f7
--- /dev/null
+++ b/drivers/s390/block/Makefile
@@ -0,0 +1,17 @@
+#
+# S/390 block devices
+#
+
+dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o
+dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o
+dasd_diag_mod-objs := dasd_diag.o
+dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
+ dasd_genhd.o dasd_erp.o
+
+obj-$(CONFIG_DASD) += dasd_mod.o
+obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
+obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
+obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
+obj-$(CONFIG_DASD_CMB) += dasd_cmb.o
+obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
+obj-$(CONFIG_DCSSBLK) += dcssblk.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
new file mode 100644
index 000000000000..b755bac6ccbc
--- /dev/null
+++ b/drivers/s390/block/dasd.c
@@ -0,0 +1,2065 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ * $Revision: 1.158 $
+ */
+
+#include <linux/config.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+
+#include <asm/ccwdev.h>
+#include <asm/ebcdic.h>
+#include <asm/idals.h>
+#include <asm/todclk.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd:"
+
+#include "dasd_int.h"
+/*
+ * SECTION: Constant definitions to be used within this file
+ */
+#define DASD_CHANQ_MAX_SIZE 4
+
+/*
+ * SECTION: exported variables of dasd.c
+ */
+debug_info_t *dasd_debug_area;
+struct dasd_discipline *dasd_diag_discipline_pointer;
+
+MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
+MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
+ " Copyright 2000 IBM Corporation");
+MODULE_SUPPORTED_DEVICE("dasd");
+MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
+MODULE_LICENSE("GPL");
+
+/*
+ * SECTION: prototypes for static functions of dasd.c
+ */
+static int dasd_alloc_queue(struct dasd_device * device);
+static void dasd_setup_queue(struct dasd_device * device);
+static void dasd_free_queue(struct dasd_device * device);
+static void dasd_flush_request_queue(struct dasd_device *);
+static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
+static void dasd_flush_ccw_queue(struct dasd_device *, int);
+static void dasd_tasklet(struct dasd_device *);
+static void do_kick_device(void *data);
+
+/*
+ * SECTION: Operations on the device structure.
+ */
+static wait_queue_head_t dasd_init_waitq;
+
+/*
+ * Allocate memory for a new device structure.
+ */
+struct dasd_device *
+dasd_alloc_device(void)
+{
+ struct dasd_device *device;
+
+ /* GFP_ATOMIC: this may run in a context that must not sleep. */
+ device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
+ if (device == NULL)
+ return ERR_PTR(-ENOMEM);
+ memset(device, 0, sizeof (struct dasd_device));
+ /* open_count = 0 means device online but not in use */
+ atomic_set(&device->open_count, -1);
+
+ /* Get two pages for normal block device operations. */
+ device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
+ if (device->ccw_mem == NULL) {
+ kfree(device);
+ return ERR_PTR(-ENOMEM);
+ }
+ /* Get one page for error recovery. */
+ device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
+ if (device->erp_mem == NULL) {
+ /* Unwind in reverse order of allocation. */
+ free_pages((unsigned long) device->ccw_mem, 1);
+ kfree(device);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Seed the per-device chunk allocators with the pages above. */
+ dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
+ dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
+ spin_lock_init(&device->mem_lock);
+ spin_lock_init(&device->request_queue_lock);
+ atomic_set (&device->tasklet_scheduled, 0);
+ tasklet_init(&device->tasklet,
+ (void (*)(unsigned long)) dasd_tasklet,
+ (unsigned long) device);
+ INIT_LIST_HEAD(&device->ccw_queue);
+ init_timer(&device->timer);
+ INIT_WORK(&device->kick_work, do_kick_device, device);
+ /* Fresh devices start in state NEW with no pending transition. */
+ device->state = DASD_STATE_NEW;
+ device->target = DASD_STATE_NEW;
+
+ return device;
+}
+
+/*
+ * Free memory of a device structure.
+ */
+void
+dasd_free_device(struct dasd_device *device)
+{
+ /* kfree(NULL) is a no-op, so no NULL check is needed here. */
+ kfree(device->private);
+ /* Release the erp page and the two ccw pages (order 1). */
+ free_page((unsigned long) device->erp_mem);
+ free_pages((unsigned long) device->ccw_mem, 1);
+ kfree(device);
+}
+
+/*
+ * Make a new device known to the system.
+ */
+static inline int
+dasd_state_new_to_known(struct dasd_device *device)
+{
+ int rc;
+
+ /*
+ * As long as the device is not in state DASD_STATE_NEW we want to
+ * keep the reference count > 0.
+ */
+ dasd_get_device(device);
+
+ rc = dasd_alloc_queue(device);
+ if (rc) {
+ /* Queue allocation failed: drop the reference taken above. */
+ dasd_put_device(device);
+ return rc;
+ }
+
+ device->state = DASD_STATE_KNOWN;
+ return 0;
+}
+
+/*
+ * Let the system forget about a device.
+ */
+static inline void
+dasd_state_known_to_new(struct dasd_device * device)
+{
+ /* Forget the discipline information. */
+ device->discipline = NULL;
+ device->state = DASD_STATE_NEW;
+
+ /* Undo the queue allocation done in dasd_state_new_to_known. */
+ dasd_free_queue(device);
+
+ /* Give up reference we took in dasd_state_new_to_known. */
+ dasd_put_device(device);
+}
+
+/*
+ * Request the irq line for the device.
+ */
+static inline int
+dasd_state_known_to_basic(struct dasd_device * device)
+{
+ int rc;
+
+ /* Allocate and register gendisk structure. */
+ rc = dasd_gendisk_alloc(device);
+ if (rc)
+ return rc;
+
+ /* register 'device' debug area, used for all DBF_DEV_XXX calls */
+ /* NOTE(review): the debug_register result is not checked; a NULL
+  * return would be passed straight to debug_register_view — confirm
+  * the s390 debug facility tolerates that. */
+ device->debug_area = debug_register(device->cdev->dev.bus_id, 0, 2,
+ 8 * sizeof (long));
+ debug_register_view(device->debug_area, &debug_sprintf_view);
+ debug_set_level(device->debug_area, DBF_EMERG);
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
+
+ device->state = DASD_STATE_BASIC;
+ return 0;
+}
+
+/*
+ * Release the irq line for the device. Terminate any running i/o.
+ */
+static inline void
+dasd_state_basic_to_known(struct dasd_device * device)
+{
+ dasd_gendisk_free(device);
+ /* Flush (and terminate) queued ccw requests before the debug area
+  * they may still log into is unregistered. */
+ dasd_flush_ccw_queue(device, 1);
+ DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
+ if (device->debug_area != NULL) {
+ debug_unregister(device->debug_area);
+ device->debug_area = NULL;
+ }
+ device->state = DASD_STATE_KNOWN;
+}
+
+/*
+ * Do the initial analysis. The do_analysis function may return
+ * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
+ * until the discipline decides to continue the startup sequence
+ * by calling the function dasd_change_state. The eckd disciplines
+ * uses this to start a ccw that detects the format. The completion
+ * interrupt for this detection ccw uses the kernel event daemon to
+ * trigger the call to dasd_change_state. All this is done in the
+ * discipline code, see dasd_eckd.c.
+ * After the analysis ccw is done (do_analysis returned 0 or error)
+ * the block device is setup. Either a fake disk is added to allow
+ * formatting or a proper device request queue is created.
+ */
+static inline int
+dasd_state_basic_to_ready(struct dasd_device * device)
+{
+ int rc;
+
+ rc = 0;
+ /* do_analysis may return -EAGAIN, keeping the device in state
+  * BASIC until the discipline finishes asynchronously (see the
+  * comment above this function). */
+ if (device->discipline->do_analysis != NULL)
+ rc = device->discipline->do_analysis(device);
+ if (rc)
+ return rc;
+ dasd_setup_queue(device);
+ device->state = DASD_STATE_READY;
+ /* Drop back to BASIC if the partition scan fails. */
+ if (dasd_scan_partitions(device) != 0)
+ device->state = DASD_STATE_BASIC;
+ return 0;
+}
+
+/*
+ * Remove device from block device layer. Destroy dirty buffers.
+ * Forget format information. Check if the target level is basic
+ * and if it is create fake disk for formatting.
+ */
+static inline void
+dasd_state_ready_to_basic(struct dasd_device * device)
+{
+ dasd_flush_ccw_queue(device, 0);
+ dasd_destroy_partitions(device);
+ dasd_flush_request_queue(device);
+ /* Forget the format information gathered by do_analysis. */
+ device->blocks = 0;
+ device->bp_block = 0;
+ device->s2b_shift = 0;
+ device->state = DASD_STATE_BASIC;
+}
+
+/*
+ * Make the device online and schedule the bottom half to start
+ * the requeueing of requests from the linux request queue to the
+ * ccw queue.
+ */
+static inline int
+dasd_state_ready_to_online(struct dasd_device * device)
+{
+ device->state = DASD_STATE_ONLINE;
+ /* Kick the bottom half to start requeueing requests from the
+  * linux request queue to the ccw queue. */
+ dasd_schedule_bh(device);
+ return 0;
+}
+
+/*
+ * Stop the requeueing of requests again.
+ */
+static inline void
+dasd_state_online_to_ready(struct dasd_device * device)
+{
+ /* Dropping back to READY stops the requeueing of new requests. */
+ device->state = DASD_STATE_READY;
+}
+
+/*
+ * Device startup state changes.
+ */
+static inline int
+dasd_increase_state(struct dasd_device *device)
+{
+ int rc;
+
+ /*
+  * Climb the startup ladder one rung at a time. Each helper
+  * advances device->state on success, so the following test sees
+  * the new state; bail out on the first error.
+  */
+ if (device->state == DASD_STATE_NEW &&
+     device->target >= DASD_STATE_KNOWN) {
+ rc = dasd_state_new_to_known(device);
+ if (rc)
+ return rc;
+ }
+
+ if (device->state == DASD_STATE_KNOWN &&
+     device->target >= DASD_STATE_BASIC) {
+ rc = dasd_state_known_to_basic(device);
+ if (rc)
+ return rc;
+ }
+
+ if (device->state == DASD_STATE_BASIC &&
+     device->target >= DASD_STATE_READY) {
+ rc = dasd_state_basic_to_ready(device);
+ if (rc)
+ return rc;
+ }
+
+ if (device->state == DASD_STATE_READY &&
+     device->target >= DASD_STATE_ONLINE) {
+ rc = dasd_state_ready_to_online(device);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Device shutdown state changes.
+ */
+static inline int
+dasd_decrease_state(struct dasd_device *device)
+{
+ /* Cascade downwards: each transition lowers device->state, so the
+  * following tests pick up where the previous one left off. */
+ if (device->state == DASD_STATE_ONLINE &&
+ device->target <= DASD_STATE_READY)
+ dasd_state_online_to_ready(device);
+
+ if (device->state == DASD_STATE_READY &&
+ device->target <= DASD_STATE_BASIC)
+ dasd_state_ready_to_basic(device);
+
+ if (device->state == DASD_STATE_BASIC &&
+ device->target <= DASD_STATE_KNOWN)
+ dasd_state_basic_to_known(device);
+
+ if (device->state == DASD_STATE_KNOWN &&
+ device->target <= DASD_STATE_NEW)
+ dasd_state_known_to_new(device);
+
+ /* Shutdown transitions cannot fail. */
+ return 0;
+}
+
+/*
+ * This is the main startup/shutdown routine.
+ */
+static void
+dasd_change_state(struct dasd_device *device)
+{
+ int rc;
+
+ if (device->state == device->target)
+ /* Already where we want to go today... */
+ return;
+ if (device->state < device->target)
+ rc = dasd_increase_state(device);
+ else
+ rc = dasd_decrease_state(device);
+ /* -EAGAIN means a transition is still pending (the discipline will
+  * retry via dasd_kick_device); any other error freezes the target
+  * at the state we actually reached. */
+ if (rc && rc != -EAGAIN)
+ device->target = device->state;
+
+ /* Wake waiters in dasd_enable_device once we have settled. */
+ if (device->state == device->target)
+ wake_up(&dasd_init_waitq);
+}
+
+/*
+ * Kick starter for devices that did not complete the startup/shutdown
+ * procedure or were sleeping because of a pending state.
+ * dasd_kick_device will schedule a call to do_kick_device to the kernel
+ * event daemon.
+ */
+static void
+do_kick_device(void *data)
+{
+ struct dasd_device *device = (struct dasd_device *) data;
+
+ /* Run the pending state transition, poke the bottom half, and
+  * drop the reference that dasd_kick_device took for us. */
+ dasd_change_state(device);
+ dasd_schedule_bh(device);
+ dasd_put_device(device);
+}
+
+void
+dasd_kick_device(struct dasd_device *device)
+{
+ /* Reference is dropped again at the end of do_kick_device. */
+ dasd_get_device(device);
+ /* queue call to dasd_kick_device to the kernel event daemon. */
+ schedule_work(&device->kick_work);
+}
+
+/*
+ * Set the target state for a device and starts the state change.
+ */
+void
+dasd_set_target_state(struct dasd_device *device, int target)
+{
+ /* If we are in probeonly mode stop at DASD_STATE_READY. */
+ if (dasd_probeonly && target > DASD_STATE_READY)
+ target = DASD_STATE_READY;
+ if (device->target != target) {
+ /* Already at the new target: wake anyone waiting for it. */
+ if (device->state == target)
+ wake_up(&dasd_init_waitq);
+ device->target = target;
+ }
+ /* Start the transition if we are not there yet. */
+ if (device->state != device->target)
+ dasd_change_state(device);
+}
+
+/*
+ * Enable a device: set its target state to ONLINE and wait until the
+ * state machine has settled.
+ */
+static inline int
+_wait_for_device(struct dasd_device *device)
+{
+ /* wait_event condition: the state machine reached its target. */
+ return device->state == device->target;
+}
+
+void
+dasd_enable_device(struct dasd_device *device)
+{
+ /* Try to bring the device all the way up. */
+ dasd_set_target_state(device, DASD_STATE_ONLINE);
+ if (device->state <= DASD_STATE_KNOWN)
+ /* No discipline for device found. */
+ dasd_set_target_state(device, DASD_STATE_NEW);
+ /* Now wait for the devices to come up. */
+ wait_event(dasd_init_waitq, _wait_for_device(device));
+}
+
+/*
+ * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
+ */
+#ifdef CONFIG_DASD_PROFILE
+
+struct dasd_profile_info_t dasd_global_profile;
+unsigned int dasd_profile_level = DASD_PROFILE_OFF;
+
+/*
+ * Increments counter in global and local profiling structures.
+ */
+#define dasd_profile_counter(value, counter, device) \
+{ \
+ int index; \
+ for (index = 0; index < 31 && value >> (2+index); index++); \
+ dasd_global_profile.counter[index]++; \
+ device->profile.counter[index]++; \
+}
+
+/*
+ * Add profiling information for cqr before execution.
+ */
+static inline void
+dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
+ struct request *req)
+{
+ struct list_head *l;
+ unsigned int counter;
+
+ /* Profiling is opt-in; do nothing unless it is switched on. */
+ if (dasd_profile_level != DASD_PROFILE_ON)
+ return;
+
+ /* count the length of the chanq for statistics */
+ counter = 0;
+ list_for_each(l, &device->ccw_queue)
+ if (++counter >= 31)
+ break;
+ /* Queue lengths are bucketed; everything >= 31 lands in slot 31. */
+ dasd_global_profile.dasd_io_nr_req[counter]++;
+ device->profile.dasd_io_nr_req[counter]++;
+}
+
+/*
+ * Add profiling information for cqr after execution.
+ */
+static inline void
+dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
+ struct request *req)
+{
+ long strtime, irqtime, endtime, tottime; /* in microseconds */
+ long tottimeps, sectors;
+
+ if (dasd_profile_level != DASD_PROFILE_ON)
+ return;
+
+ sectors = req->nr_sectors;
+ /* Skip requests with incomplete timestamps or no payload; the
+  * division by sectors below also requires sectors != 0. */
+ if (!cqr->buildclk || !cqr->startclk ||
+ !cqr->stopclk || !cqr->endclk ||
+ !sectors)
+ return;
+
+ /* NOTE(review): the >> 12 scales raw clock deltas to the
+  * microsecond values declared above — presumably TOD clock
+  * units; confirm against the clock source used for *clk. */
+ strtime = ((cqr->startclk - cqr->buildclk) >> 12);
+ irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
+ endtime = ((cqr->endclk - cqr->stopclk) >> 12);
+ tottime = ((cqr->endclk - cqr->buildclk) >> 12);
+ tottimeps = tottime / sectors;
+
+ /* A zero request counter marks a freshly reset profile; clear the
+  * whole structure before accumulating into it. */
+ if (!dasd_global_profile.dasd_io_reqs)
+ memset(&dasd_global_profile, 0,
+ sizeof (struct dasd_profile_info_t));
+ dasd_global_profile.dasd_io_reqs++;
+ dasd_global_profile.dasd_io_sects += sectors;
+
+ if (!device->profile.dasd_io_reqs)
+ memset(&device->profile, 0,
+ sizeof (struct dasd_profile_info_t));
+ device->profile.dasd_io_reqs++;
+ device->profile.dasd_io_sects += sectors;
+
+ /* Fold each measurement into its global and per-device histogram. */
+ dasd_profile_counter(sectors, dasd_io_secs, device);
+ dasd_profile_counter(tottime, dasd_io_times, device);
+ dasd_profile_counter(tottimeps, dasd_io_timps, device);
+ dasd_profile_counter(strtime, dasd_io_time1, device);
+ dasd_profile_counter(irqtime, dasd_io_time2, device);
+ dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
+ dasd_profile_counter(endtime, dasd_io_time3, device);
+}
+#else
+#define dasd_profile_start(device, cqr, req) do {} while (0)
+#define dasd_profile_end(device, cqr, req) do {} while (0)
+#endif /* CONFIG_DASD_PROFILE */
+
+/*
+ * Allocate memory for a channel program with 'cplength' channel
+ * command words and 'datasize' additional space. There are two
+ * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
+ * memory and 2) dasd_smalloc_request uses the static ccw memory
+ * that gets allocated for each device.
+ */
+struct dasd_ccw_req *
+dasd_kmalloc_request(char *magic, int cplength, int datasize,
+ struct dasd_device * device)
+{
+ struct dasd_ccw_req *cqr;
+
+ /* Sanity checks */
+ if ( magic == NULL || datasize > PAGE_SIZE ||
+ (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
+ BUG();
+
+ cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
+ if (cqr == NULL)
+ return ERR_PTR(-ENOMEM);
+ memset(cqr, 0, sizeof(struct dasd_ccw_req));
+ cqr->cpaddr = NULL;
+ if (cplength > 0) {
+ /* GFP_DMA: the channel program must be 31-bit addressable. */
+ cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
+ GFP_ATOMIC | GFP_DMA);
+ if (cqr->cpaddr == NULL) {
+ kfree(cqr);
+ return ERR_PTR(-ENOMEM);
+ }
+ memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+ }
+ cqr->data = NULL;
+ if (datasize > 0) {
+ cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
+ if (cqr->data == NULL) {
+ /* kfree(NULL) is a no-op, so cpaddr needs no check. */
+ kfree(cqr->cpaddr);
+ kfree(cqr);
+ return ERR_PTR(-ENOMEM);
+ }
+ memset(cqr->data, 0, datasize);
+ }
+ /* Store the magic in EBCDIC for later request validation. */
+ strncpy((char *) &cqr->magic, magic, 4);
+ ASCEBC((char *) &cqr->magic, 4);
+ set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ /* The request holds a reference on the device until it is freed. */
+ dasd_get_device(device);
+ return cqr;
+}
+
+struct dasd_ccw_req *
+dasd_smalloc_request(char *magic, int cplength, int datasize,
+		     struct dasd_device * device)
+{
+	unsigned long flags;
+	struct dasd_ccw_req *cqr;
+	char *data;
+	int size;
+
+	/* Sanity checks */
+	if ( magic == NULL || datasize > PAGE_SIZE ||
+	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
+		BUG();
+
+	/* Round the struct size up to an 8-byte boundary so the ccws
+	 * placed right behind it are doubleword aligned. */
+	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
+	if (cplength > 0)
+		size += cplength * sizeof(struct ccw1);
+	if (datasize > 0)
+		size += datasize;
+	/* Carve one chunk for struct + ccws + data from the per-device
+	 * static ccw memory, guarded by the device memory lock. */
+	spin_lock_irqsave(&device->mem_lock, flags);
+	cqr = (struct dasd_ccw_req *)
+		dasd_alloc_chunk(&device->ccw_chunks, size);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	if (cqr == NULL)
+		return ERR_PTR(-ENOMEM);
+	memset(cqr, 0, sizeof(struct dasd_ccw_req));
+	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
+	cqr->cpaddr = NULL;
+	if (cplength > 0) {
+		cqr->cpaddr = (struct ccw1 *) data;
+		data += cplength*sizeof(struct ccw1);
+		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+	}
+	cqr->data = NULL;
+	if (datasize > 0) {
+		cqr->data = data;
+		memset(cqr->data, 0, datasize);
+	}
+	/* Store the discipline magic in EBCDIC for dasd_check_cqr(). */
+	strncpy((char *) &cqr->magic, magic, 4);
+	ASCEBC((char *) &cqr->magic, 4);
+	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	/* The request holds a reference on the device until freed. */
+	dasd_get_device(device);
+	return cqr;
+}
+
+/*
+ * Free memory of a channel program. This function needs to free all the
+ * idal lists that might have been created by dasd_set_cda and the
+ * struct dasd_ccw_req itself.
+ */
+void
+dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+{
+#ifdef CONFIG_ARCH_S390X
+	struct ccw1 *ccw;
+
+	/* Clear any idals used for the request.
+	 * cpaddr is NULL when the request was allocated with
+	 * cplength == 0, so guard the do/while loop. */
+	ccw = cqr->cpaddr;
+	if (ccw != NULL)
+		do {
+			clear_normalized_cda(ccw);
+		} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
+#endif
+	/* kfree(NULL) is a no-op; no NULL checks needed. */
+	kfree(cqr->cpaddr);
+	kfree(cqr->data);
+	kfree(cqr);
+	/* Drop the device reference taken by dasd_kmalloc_request. */
+	dasd_put_device(device);
+}
+
+void
+dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+{
+	unsigned long flags;
+
+	/* Return the whole chunk (struct + ccws + data) to the
+	 * per-device static ccw memory pool. */
+	spin_lock_irqsave(&device->mem_lock, flags);
+	dasd_free_chunk(&device->ccw_chunks, cqr);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	/* Drop the device reference taken by dasd_smalloc_request. */
+	dasd_put_device(device);
+}
+
+/*
+ * Check discipline magic in cqr.
+ */
+static inline int
+dasd_check_cqr(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+
+	if (cqr == NULL)
+		return -EINVAL;
+	device = cqr->device;
+	/* The magic was stored in EBCDIC at allocation time; it must
+	 * match the ebcname of the discipline the device uses now. */
+	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
+		DEV_MESSAGE(KERN_WARNING, device,
+			    " dasd_ccw_req 0x%08x magic doesn't match"
+			    " discipline 0x%08x",
+			    cqr->magic,
+			    *(unsigned int *) device->discipline->name);
+		return -EINVAL;
+	}
+	return 0;		/* magic matches, request is usable */
+}
+
+/*
+ * Terminate the current i/o and set the request to clear_pending.
+ * Timer keeps device running.
+ * ccw_device_clear can fail if the i/o subsystem
+ * is in a bad mood.
+ */
+int
+dasd_term_IO(struct dasd_ccw_req * cqr)
+{
+	struct dasd_device *device;
+	int retries, rc;
+
+	/* Check the cqr */
+	rc = dasd_check_cqr(cqr);
+	if (rc)
+		return rc;
+	retries = 0;
+	device = (struct dasd_device *) cqr->device;
+	/* Try ccw_device_clear up to 5 times while the request is
+	 * still in i/o; returns the rc of the last attempt. */
+	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
+		rc = ccw_device_clear(device->cdev, (long) cqr);
+		switch (rc) {
+		case 0:	/* termination successful */
+			if (cqr->retries > 0) {
+				/* retry budget left: wait for the clear
+				 * interrupt, then requeue */
+				cqr->retries--;
+				cqr->status = DASD_CQR_CLEAR;
+			} else
+				cqr->status = DASD_CQR_FAILED;
+			cqr->stopclk = get_clock();
+			DBF_DEV_EVENT(DBF_DEBUG, device,
+				      "terminate cqr %p successful",
+				      cqr);
+			break;
+		case -ENODEV:
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "device gone, retry");
+			break;
+		case -EIO:
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "I/O error, retry");
+			break;
+		case -EINVAL:
+		case -EBUSY:
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "device busy, retry later");
+			break;
+		default:
+			DEV_MESSAGE(KERN_ERR, device,
+				    "line %d unknown RC=%d, please "
+				    "report to linux390@de.ibm.com",
+				    __LINE__, rc);
+			BUG();
+			break;
+		}
+		retries++;
+	}
+	/* Let the bottom half pick up the state change. */
+	dasd_schedule_bh(device);
+	return rc;
+}
+
+/*
+ * Start the i/o. This start_IO can fail if the channel is really busy.
+ * In that case set up a timer to start the request later.
+ */
+int
+dasd_start_IO(struct dasd_ccw_req * cqr)
+{
+	struct dasd_device *device;
+	int rc;
+
+	/* Check the cqr */
+	rc = dasd_check_cqr(cqr);
+	if (rc)
+		return rc;
+	device = (struct dasd_device *) cqr->device;
+	/* Retry budget exhausted -> give up on this request. */
+	if (cqr->retries < 0) {
+		DEV_MESSAGE(KERN_DEBUG, device,
+			    "start_IO: request %p (%02x/%i) - no retry left.",
+			    cqr, cqr->status, cqr->retries);
+		cqr->status = DASD_CQR_FAILED;
+		return -EIO;
+	}
+	cqr->startclk = get_clock();
+	cqr->starttime = jiffies;
+	cqr->retries--;
+	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
+			      cqr->lpm, 0);
+	switch (rc) {
+	case 0:
+		cqr->status = DASD_CQR_IN_IO;
+		DBF_DEV_EVENT(DBF_DEBUG, device,
+			      "start_IO: request %p started successful",
+			      cqr);
+		break;
+	case -EBUSY:
+		DBF_DEV_EVENT(DBF_ERR, device, "%s",
+			      "start_IO: device busy, retry later");
+		break;
+	case -ETIMEDOUT:
+		DBF_DEV_EVENT(DBF_ERR, device, "%s",
+			      "start_IO: request timeout, retry later");
+		break;
+	case -EACCES:
+		/* -EACCES indicates that the request used only a
+		 * subset of the available paths and all these
+		 * paths are gone.
+		 * Do a retry with all available paths.
+		 */
+		cqr->lpm = LPM_ANYPATH;
+		DBF_DEV_EVENT(DBF_ERR, device, "%s",
+			      "start_IO: selected paths gone,"
+			      " retry on all paths");
+		break;
+	case -ENODEV:
+	case -EIO:
+		DBF_DEV_EVENT(DBF_ERR, device, "%s",
+			      "start_IO: device gone, retry");
+		break;
+	default:
+		DEV_MESSAGE(KERN_ERR, device,
+			    "line %d unknown RC=%d, please report"
+			    " to linux390@de.ibm.com", __LINE__, rc);
+		BUG();
+		break;
+	}
+	/* Non-zero rc: caller (e.g. __dasd_start_head) retries later. */
+	return rc;
+}
+
+/*
+ * Timeout function for dasd devices. This is used for different purposes
+ * 1) missing interrupt handler for normal operation
+ * 2) delayed start of request where start_IO failed with -EBUSY
+ * 3) timeout for missing state change interrupts
+ * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
+ * DASD_CQR_QUEUED for 2) and 3).
+ */
+static void
+dasd_timeout_device(unsigned long ptr)
+{
+	unsigned long flags;
+	struct dasd_device *device;
+
+	/* Timer callback; 'ptr' is the device set up in dasd_set_timer. */
+	device = (struct dasd_device *) ptr;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	/* re-activate request queue */
+	device->stopped &= ~DASD_STOPPED_PENDING;
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	dasd_schedule_bh(device);
+}
+
+/*
+ * Setup timeout for a device in jiffies.
+ */
+void
+dasd_set_timer(struct dasd_device *device, int expires)
+{
+	/* expires == 0 means: cancel any pending timeout. */
+	if (expires == 0) {
+		if (timer_pending(&device->timer))
+			del_timer(&device->timer);
+		return;
+	}
+	/* If the timer is already pending just move its expiry;
+	 * mod_timer returns nonzero when the timer was still active. */
+	if (timer_pending(&device->timer)) {
+		if (mod_timer(&device->timer, jiffies + expires))
+			return;
+	}
+	/* Timer was not pending (or just expired): (re)arm it. */
+	device->timer.function = dasd_timeout_device;
+	device->timer.data = (unsigned long) device;
+	device->timer.expires = jiffies + expires;
+	add_timer(&device->timer);
+}
+
+/*
+ * Clear timeout for a device.
+ */
+void
+dasd_clear_timer(struct dasd_device *device)
+{
+	/* Cancel the device timeout if one is armed. */
+	if (timer_pending(&device->timer))
+		del_timer(&device->timer);
+}
+
+/*
+ * Called from dasd_int_handler when the i/o subsystem reports -EIO for
+ * a started request (e.g. the subchannel went away): requeue it.
+ */
+static void
+dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
+{
+	struct dasd_ccw_req *cqr;
+	struct dasd_device *device;
+
+	cqr = (struct dasd_ccw_req *) intparm;
+	if (cqr->status != DASD_CQR_IN_IO) {
+		MESSAGE(KERN_DEBUG,
+			"invalid status in handle_killed_request: "
+			"bus_id %s, status %02x",
+			cdev->dev.bus_id, cqr->status);
+		return;
+	}
+
+	/* NOTE(review): dasd_device_from_cdev presumably takes a device
+	 * reference; on this early-return path that reference does not
+	 * appear to be dropped - verify against dasd_devmap.c. */
+	device = (struct dasd_device *) cqr->device;
+	if (device == NULL ||
+	    device != dasd_device_from_cdev(cdev) ||
+	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
+			cdev->dev.bus_id);
+		return;
+	}
+
+	/* Schedule request to be retried. */
+	cqr->status = DASD_CQR_QUEUED;
+
+	dasd_clear_timer(device);
+	dasd_schedule_bh(device);
+	dasd_put_device(device);
+}
+
+/*
+ * A state change pending interrupt arrived: unstop the device and
+ * requeue everything that was in flight so it gets restarted.
+ */
+static void
+dasd_handle_state_change_pending(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	struct list_head *l, *n;
+
+	device->stopped &= ~DASD_STOPPED_PENDING;
+
+	/* restart all 'running' IO on queue */
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, list);
+		if (cqr->status == DASD_CQR_IN_IO) {
+			cqr->status = DASD_CQR_QUEUED;
+		}
+	}
+	dasd_clear_timer(device);
+	dasd_schedule_bh(device);
+}
+
+/*
+ * Interrupt handler for "normal" ssch-io based dasd devices.
+ */
+void
+dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		 struct irb *irb)
+{
+	struct dasd_ccw_req *cqr, *next;
+	struct dasd_device *device;
+	unsigned long long now;
+	int expires;
+	dasd_era_t era;
+	char mask;
+
+	/* The common i/o layer may pass an error code instead of an irb. */
+	if (IS_ERR(irb)) {
+		switch (PTR_ERR(irb)) {
+		case -EIO:
+			dasd_handle_killed_request(cdev, intparm);
+			break;
+		case -ETIMEDOUT:
+			printk(KERN_WARNING"%s(%s): request timed out\n",
+			       __FUNCTION__, cdev->dev.bus_id);
+			//FIXME - dasd uses own timeout interface...
+			break;
+		default:
+			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
+			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
+		}
+		return;
+	}
+
+	now = get_clock();
+
+	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
+		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
+		  (unsigned int) intparm);
+
+	/* first of all check for state change pending interrupt */
+	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+	if ((irb->scsw.dstat & mask) == mask) {
+		device = dasd_device_from_cdev(cdev);
+		if (!IS_ERR(device)) {
+			dasd_handle_state_change_pending(device);
+			dasd_put_device(device);
+		}
+		return;
+	}
+
+	/* intparm is the cqr passed to ccw_device_start in dasd_start_IO */
+	cqr = (struct dasd_ccw_req *) intparm;
+
+	/* check for unsolicited interrupts */
+	if (cqr == NULL) {
+		MESSAGE(KERN_DEBUG,
+			"unsolicited interrupt received: bus_id %s",
+			cdev->dev.bus_id);
+		return;
+	}
+
+	/* Sanity check: the request must still belong to this device. */
+	device = (struct dasd_device *) cqr->device;
+	if (device == NULL ||
+	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
+			cdev->dev.bus_id);
+		return;
+	}
+
+	/* Check for clear pending: answer to dasd_term_IO */
+	if (cqr->status == DASD_CQR_CLEAR &&
+	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
+		cqr->status = DASD_CQR_QUEUED;
+		dasd_clear_timer(device);
+		dasd_schedule_bh(device);
+		return;
+	}
+
+	/* check status - the request might have been killed by dyn detach */
+	if (cqr->status != DASD_CQR_IN_IO) {
+		MESSAGE(KERN_DEBUG,
+			"invalid status: bus_id %s, status %02x",
+			cdev->dev.bus_id, cqr->status);
+		return;
+	}
+	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
+		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
+
+	/* Find out the appropriate era_action. */
+	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) 
+		era = dasd_era_fatal;
+	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+		 irb->scsw.cstat == 0 &&
+		 !irb->esw.esw0.erw.cons)
+		era = dasd_era_none;
+	else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
+	        era = dasd_era_fatal; /* don't recover this request */
+	else if (irb->esw.esw0.erw.cons)
+		era = device->discipline->examine_error(cqr, irb);
+	else 
+		era = dasd_era_recover;
+
+	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
+	expires = 0;
+	if (era == dasd_era_none) {
+		cqr->status = DASD_CQR_DONE;
+		cqr->stopclk = now;
+		/* Start first request on queue if possible -> fast_io. */
+		if (cqr->list.next != &device->ccw_queue) {
+			next = list_entry(cqr->list.next,
+					  struct dasd_ccw_req, list);
+			if ((next->status == DASD_CQR_QUEUED) &&
+			    (!device->stopped)) {
+				if (device->discipline->start_IO(next) == 0)
+					expires = next->expires;
+				else
+					DEV_MESSAGE(KERN_DEBUG, device, "%s",
+						    "Interrupt fastpath "
+						    "failed!");
+			}
+		}
+	} else {		/* error */
+		/* Keep a copy of the irb for the error recovery code. */
+		memcpy(&cqr->irb, irb, sizeof (struct irb));
+#ifdef ERP_DEBUG
+		/* dump sense data */
+		dasd_log_sense(cqr, irb);
+#endif
+		switch (era) {
+		case dasd_era_fatal:
+			cqr->status = DASD_CQR_FAILED;
+			cqr->stopclk = now;
+			break;
+		case dasd_era_recover:
+			cqr->status = DASD_CQR_ERROR;
+			break;
+		default:
+			BUG();
+		}
+	}
+	if (expires != 0)
+		dasd_set_timer(device, expires);
+	else
+		dasd_clear_timer(device);
+	dasd_schedule_bh(device);
+}
+
+/*
+ * posts the buffer_cache about a finalized request
+ */
+static inline void
+dasd_end_request(struct request *req, int uptodate)
+{
+	/* Complete all sectors of the request in one go. */
+	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+		BUG();
+	add_disk_randomness(req->rq_disk);
+	end_that_request_last(req);
+}
+
+/*
+ * Process finished error recovery ccw.
+ */
+static inline void
+__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
+{
+	dasd_erp_fn_t erp_fn;
+
+	if (cqr->status == DASD_CQR_DONE)
+		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
+	else
+		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
+	/* Let the discipline pick the postaction and run it on the cqr. */
+	erp_fn = device->discipline->erp_postaction(cqr);
+	erp_fn(cqr);
+}
+
+/*
+ * Process ccw request queue.
+ */
+/*
+ * Walk the ccw queue, run error recovery for requests in error state
+ * and move requests with final status to 'final_queue'.
+ * Caller must hold the ccwdev lock.
+ */
+static inline void
+__dasd_process_ccw_queue(struct dasd_device * device,
+			 struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	dasd_erp_fn_t erp_fn;
+
+restart:
+	/* Process request with final status. */
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, list);
+		/* Stop list processing at the first non-final request. */
+		if (cqr->status != DASD_CQR_DONE &&
+		    cqr->status != DASD_CQR_FAILED &&
+		    cqr->status != DASD_CQR_ERROR)
+			break;
+		/* Process requests with DASD_CQR_ERROR */
+		if (cqr->status == DASD_CQR_ERROR) {
+			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
+				/* halted -> do not recover, fail it */
+				cqr->status = DASD_CQR_FAILED;
+				cqr->stopclk = get_clock();
+			} else {
+				if (cqr->irb.esw.esw0.erw.cons) {
+					/* sense data available: let the
+					 * discipline pick the erp action */
+					erp_fn = device->discipline->
+						erp_action(cqr);
+					erp_fn(cqr);
+				} else
+					dasd_default_erp_action(cqr);
+			}
+			/* ERP may have changed the queue: rescan from top */
+			goto restart;
+		}
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			__dasd_process_erp(device, cqr);
+			goto restart;
+		}
+
+		/* Rechain finished requests to final queue */
+		cqr->endclk = get_clock();
+		list_move_tail(&cqr->list, final_queue);
+	}
+}
+
+/*
+ * Callback for block device requests: account profiling data, free
+ * the channel program and complete the block layer request.
+ */
+static void
+dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
+{
+	struct request *req;
+	struct dasd_device *device;
+	int status;
+
+	req = (struct request *) data;
+	device = cqr->device;
+	dasd_profile_end(device, cqr, req);
+	/* free_cp returns the success status for the block request */
+	status = cqr->device->discipline->free_cp(cqr,req);
+	spin_lock_irq(&device->request_queue_lock);
+	dasd_end_request(req, status);
+	spin_unlock_irq(&device->request_queue_lock);
+}
+
+
+/*
+ * Fetch requests from the block device queue.
+ */
+/*
+ * Move requests from the block device queue to the ccw queue, building
+ * a channel program for each. Caller must hold request_queue_lock and
+ * the ccwdev lock.
+ */
+static inline void
+__dasd_process_blk_queue(struct dasd_device * device)
+{
+	request_queue_t *queue;
+	struct request *req;
+	struct dasd_ccw_req *cqr;
+	int nr_queued;
+
+	queue = device->request_queue;
+	/* No queue ? Then there is nothing to do. */
+	if (queue == NULL)
+		return;
+
+	/*
+	 * We requeue request from the block device queue to the ccw
+	 * queue only in two states. In state DASD_STATE_READY the
+	 * partition detection is done and we need to requeue requests
+	 * for that. State DASD_STATE_ONLINE is normal block device
+	 * operation.
+	 */
+	if (device->state != DASD_STATE_READY &&
+	    device->state != DASD_STATE_ONLINE)
+		return;
+	nr_queued = 0;
+	/* Now we try to fetch requests from the request queue */
+	list_for_each_entry(cqr, &device->ccw_queue, list)
+		if (cqr->status == DASD_CQR_QUEUED)
+			nr_queued++;
+	/* Keep at most DASD_CHANQ_MAX_SIZE queued requests on the
+	 * ccw queue; leave the rest on the block queue. */
+	while (!blk_queue_plugged(queue) &&
+	       elv_next_request(queue) &&
+		nr_queued < DASD_CHANQ_MAX_SIZE) {
+		req = elv_next_request(queue);
+		/* Reject writes on read-only devices. */
+		if (test_bit(DASD_FLAG_RO, &device->flags) &&
+		    rq_data_dir(req) == WRITE) {
+			DBF_DEV_EVENT(DBF_ERR, device,
+				      "Rejecting write request %p",
+				      req);
+			blkdev_dequeue_request(req);
+			dasd_end_request(req, 0);
+			continue;
+		}
+		/* Device stopped for disconnect with EIO policy:
+		 * fail requests immediately. */
+		if (device->stopped & DASD_STOPPED_DC_EIO) {
+			blkdev_dequeue_request(req);
+			dasd_end_request(req, 0);
+			continue;
+		}
+		cqr = device->discipline->build_cp(device, req);
+		if (IS_ERR(cqr)) {
+			if (PTR_ERR(cqr) == -ENOMEM)
+				break;	/* terminate request queue loop */
+			DBF_DEV_EVENT(DBF_ERR, device,
+				      "CCW creation failed (rc=%ld) "
+				      "on request %p",
+				      PTR_ERR(cqr), req);
+			blkdev_dequeue_request(req);
+			dasd_end_request(req, 0);
+			continue;
+		}
+		cqr->callback = dasd_end_request_cb;
+		cqr->callback_data = (void *) req;
+		cqr->status = DASD_CQR_QUEUED;
+		blkdev_dequeue_request(req);
+		list_add_tail(&cqr->list, &device->ccw_queue);
+		dasd_profile_start(device, cqr, req);
+		nr_queued++;
+	}
+}
+
+/*
+ * Take a look at the first request on the ccw queue and check
+ * if it reached its expire time. If so, terminate the IO.
+ */
+static inline void
+__dasd_check_expire(struct dasd_device * device)
+{
+	struct dasd_ccw_req *cqr;
+
+	if (list_empty(&device->ccw_queue))
+		return;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	/* expires == 0 means: no expiry checking for this request. */
+	if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
+		if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
+			if (device->discipline->term_IO(cqr) != 0)
+				/* Hmpf, try again in 1/10 sec */
+				dasd_set_timer(device, 10);
+		}
+	}
+}
+
+/*
+ * Take a look at the first request on the ccw queue and check
+ * if it needs to be started.
+ */
+static inline void
+__dasd_start_head(struct dasd_device * device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (list_empty(&device->ccw_queue))
+		return;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	/* Only start if the head is queued and the device not stopped. */
+	if ((cqr->status == DASD_CQR_QUEUED) &&
+	    (!device->stopped)) {
+		/* try to start the first I/O that can be started */
+		rc = device->discipline->start_IO(cqr);
+		if (rc == 0)
+			/* arm the expiry timer for the running request */
+			dasd_set_timer(device, cqr->expires);
+		else if (rc == -EACCES) {
+			/* paths gone; start_IO reset lpm, retry via bh */
+			dasd_schedule_bh(device);
+		} else
+			/* Hmpf, try again in 1/2 sec */
+			dasd_set_timer(device, 50);
+	}
+}
+
+/*
+ * Remove requests from the ccw queue.
+ */
+/*
+ * Remove requests from the ccw queue. 'all' != 0 flushes every request,
+ * 'all' == 0 keeps block device requests (dasd_end_request_cb) queued.
+ * Callbacks of flushed requests are invoked without the ccwdev lock.
+ */
+static void
+dasd_flush_ccw_queue(struct dasd_device * device, int all)
+{
+	struct list_head flush_queue;
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	INIT_LIST_HEAD(&flush_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, list);
+		/* Flush all request or only block device requests? */
+		if (all == 0 && cqr->callback == dasd_end_request_cb)
+			continue;
+		if (cqr->status == DASD_CQR_IN_IO)
+			device->discipline->term_IO(cqr);
+		/* Mark every request that is not yet final as failed.
+		 * Note: the original '||' here was a tautology that
+		 * clobbered the status of DONE requests as well. */
+		if (cqr->status != DASD_CQR_DONE &&
+		    cqr->status != DASD_CQR_FAILED) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->stopclk = get_clock();
+		}
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			__dasd_process_erp(device, cqr);
+			continue;
+		}
+		/* Rechain request on device request queue */
+		cqr->endclk = get_clock();
+		list_move_tail(&cqr->list, &flush_queue);
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/* Now call the callback function of flushed requests */
+	list_for_each_safe(l, n, &flush_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, list);
+		if (cqr->callback != NULL)
+			(cqr->callback)(cqr, cqr->callback_data);
+	}
+}
+
+/*
+ * Acquire the device lock and process queues for the device.
+ */
+static void
+dasd_tasklet(struct dasd_device * device)
+{
+	struct list_head final_queue;
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	/* Allow dasd_schedule_bh to schedule us again. */
+	atomic_set (&device->tasklet_scheduled, 0);
+	INIT_LIST_HEAD(&final_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Check expire time of first request on the ccw queue. */
+	__dasd_check_expire(device);
+	/* Finish off requests on ccw queue */
+	__dasd_process_ccw_queue(device, &final_queue);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/* Now call the callback function of requests with final status */
+	list_for_each_safe(l, n, &final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, list);
+		list_del(&cqr->list);
+		if (cqr->callback != NULL)
+			(cqr->callback)(cqr, cqr->callback_data);
+	}
+	/* Lock order: request_queue_lock before the ccwdev lock. */
+	spin_lock_irq(&device->request_queue_lock);
+	spin_lock(get_ccwdev_lock(device->cdev));
+	/* Get new request from the block device request queue */
+	__dasd_process_blk_queue(device);
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_start_head(device);
+	spin_unlock(get_ccwdev_lock(device->cdev));
+	spin_unlock_irq(&device->request_queue_lock);
+	/* Drop the reference taken by dasd_schedule_bh. */
+	dasd_put_device(device);
+}
+
+/*
+ * Schedules a call to dasd_tasklet over the device tasklet.
+ */
+void
+dasd_schedule_bh(struct dasd_device * device)
+{
+	/* Protect against rescheduling. */
+	if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
+		return;
+	/* Reference is dropped at the end of dasd_tasklet. */
+	dasd_get_device(device);
+	tasklet_hi_schedule(&device->tasklet);
+}
+
+/*
+ * Queue a request to the head of the ccw_queue. Start the I/O if
+ * possible.
+ */
+void
+dasd_add_request_head(struct dasd_ccw_req *req)
+{
+	unsigned long flags;
+	struct dasd_device *dev = req->device;
+
+	spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+	req->device = dev;
+	req->status = DASD_CQR_QUEUED;
+	/* Insert at the front of the ccw queue. */
+	list_add(&req->list, &dev->ccw_queue);
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_bh(dev);
+	spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+}
+
+/*
+ * Queue a request to the tail of the ccw_queue. Start the I/O if
+ * possible.
+ */
+void
+dasd_add_request_tail(struct dasd_ccw_req *req)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+
+	device = req->device;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	req->status = DASD_CQR_QUEUED;
+	req->device = device;
+	/* Append behind all other requests on the ccw queue. */
+	list_add_tail(&req->list, &device->ccw_queue);
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+/*
+ * Wakeup callback.
+ */
+/* Request callback used by the dasd_sleep_on* family: 'data' is the
+ * wait queue head the sleeper is blocked on. */
+static void
+dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	wait_queue_head_t *wq = (wait_queue_head_t *) data;
+
+	wake_up(wq);
+}
+
+/* Wait condition for the dasd_sleep_on* family: true once the request
+ * reached a final state. Takes the ccwdev lock to read the status. */
+static inline int
+_wait_for_wakeup(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int rc;
+
+	device = cqr->device;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = cqr->status == DASD_CQR_DONE || cqr->status == DASD_CQR_FAILED;
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Attempts to start a special ccw queue and waits for its completion.
+ */
+int
+dasd_sleep_on(struct dasd_ccw_req * cqr)
+{
+	wait_queue_head_t wait_q;
+	struct dasd_device *device;
+	int rc;
+
+	device = cqr->device;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+
+	/* On-stack wait queue; the callback wakes us when the request
+	 * reaches a final state. */
+	init_waitqueue_head (&wait_q);
+	cqr->callback = dasd_wakeup_cb;
+	cqr->callback_data = (void *) &wait_q;
+	cqr->status = DASD_CQR_QUEUED;
+	list_add_tail(&cqr->list, &device->ccw_queue);
+
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_bh(device);
+
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+	wait_event(wait_q, _wait_for_wakeup(cqr));
+
+	/* Request status is either done or failed. */
+	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
+	return rc;
+}
+
+/*
+ * Attempts to start a special ccw queue and wait interruptible
+ * for its completion.
+ */
+int
+dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
+{
+	wait_queue_head_t wait_q;
+	struct dasd_device *device;
+	int rc, finished;
+
+	device = cqr->device;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+
+	init_waitqueue_head (&wait_q);
+	cqr->callback = dasd_wakeup_cb;
+	cqr->callback_data = (void *) &wait_q;
+	cqr->status = DASD_CQR_QUEUED;
+	list_add_tail(&cqr->list, &device->ccw_queue);
+
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_bh(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+	finished = 0;
+	while (!finished) {
+		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
+		if (rc != -ERESTARTSYS) {
+			/* Request status is either done or failed. */
+			rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
+			break;
+		}
+		/* Interrupted by a signal: try to terminate the request
+		 * and take it off the queue; loop until that worked or
+		 * the request finished on its own. */
+		spin_lock_irq(get_ccwdev_lock(device->cdev));
+		if (cqr->status == DASD_CQR_IN_IO &&
+		    device->discipline->term_IO(cqr) == 0) {
+			list_del(&cqr->list);
+			finished = 1;
+		}
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	}
+	return rc;
+}
+
+/*
+ * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
+ * for eckd devices) the currently running request has to be terminated
+ * and be put back to status queued, before the special request is added
+ * to the head of the queue. Then the special request is waited on normally.
+ */
+/* Terminate the request at the head of the ccw queue (if any) and put
+ * it back to queued state so a special request can be inserted before
+ * it. Caller must hold the ccwdev lock. Returns term_IO's rc. */
+static inline int
+_dasd_term_running_cqr(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (list_empty(&device->ccw_queue))
+		return 0;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	rc = device->discipline->term_IO(cqr);
+	if (rc == 0) {
+		/* termination successful */
+		cqr->status = DASD_CQR_QUEUED;
+		/* reset timestamps so the restart looks like a fresh start */
+		cqr->startclk = cqr->stopclk = 0;
+		cqr->starttime = 0;
+	}
+	return rc;
+}
+
+/* Like dasd_sleep_on, but terminates the currently running request and
+ * adds the special request at the HEAD of the ccw queue. */
+int
+dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
+{
+	wait_queue_head_t wait_q;
+	struct dasd_device *device;
+	int rc;
+
+	device = cqr->device;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = _dasd_term_running_cqr(device);
+	if (rc) {
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+		return rc;
+	}
+
+	init_waitqueue_head (&wait_q);
+	cqr->callback = dasd_wakeup_cb;
+	cqr->callback_data = (void *) &wait_q;
+	cqr->status = DASD_CQR_QUEUED;
+	/* head insertion: run before the terminated request restarts */
+	list_add(&cqr->list, &device->ccw_queue);
+
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_bh(device);
+
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+	wait_event(wait_q, _wait_for_wakeup(cqr));
+
+	/* Request status is either done or failed. */
+	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
+	return rc;
+}
+
+/*
+ * Cancels a request that was started with dasd_sleep_on_req.
+ * This is useful to timeout requests. The request will be
+ * terminated if it is currently in i/o.
+ * Returns 1 if the request has been terminated.
+ */
+int
+dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = cqr->device;
+	unsigned long flags;
+	int rc;
+
+	rc = 0;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	switch (cqr->status) {
+	case DASD_CQR_QUEUED:
+		/* request was not started - just set to failed */
+		cqr->status = DASD_CQR_FAILED;
+		break;
+	case DASD_CQR_IN_IO:
+		/* request in IO - terminate IO and release again */
+		if (device->discipline->term_IO(cqr) != 0)
+			/* what to do if unable to terminate ??????
+			   e.g. not _IN_IO */
+			cqr->status = DASD_CQR_FAILED;
+		cqr->stopclk = get_clock();
+		rc = 1;
+		break;
+	case DASD_CQR_DONE:
+	case DASD_CQR_FAILED:
+		/* already finished - do nothing */
+		break;
+	default:
+		DEV_MESSAGE(KERN_ALERT, device,
+			    "invalid status %02x in request",
+			    cqr->status);
+		BUG();
+
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	/* let the bh run the callbacks / restart the queue */
+	dasd_schedule_bh(device);
+	return rc;
+}
+
+/*
+ * SECTION: Block device operations (request queue, partitions, open, release).
+ */
+
+/*
+ * Dasd request queue function. Called from ll_rw_blk.c
+ */
+static void
+do_dasd_request(request_queue_t * queue)
+{
+	struct dasd_device *device;
+
+	/* Called with request_queue_lock held by the block layer. */
+	device = (struct dasd_device *) queue->queuedata;
+	spin_lock(get_ccwdev_lock(device->cdev));
+	/* Get new request from the block device request queue */
+	__dasd_process_blk_queue(device);
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_start_head(device);
+	spin_unlock(get_ccwdev_lock(device->cdev));
+}
+
+/*
+ * Allocate and initialize request queue and default I/O scheduler.
+ */
+static int
+dasd_alloc_queue(struct dasd_device * device)
+{
+	int rc;
+
+	device->request_queue = blk_init_queue(do_dasd_request,
+					       &device->request_queue_lock);
+	if (device->request_queue == NULL)
+		return -ENOMEM;
+
+	device->request_queue->queuedata = device;
+
+	/* Replace the default elevator with the deadline scheduler. */
+	elevator_exit(device->request_queue->elevator);
+	rc = elevator_init(device->request_queue, "deadline");
+	if (rc) {
+		blk_cleanup_queue(device->request_queue);
+		return rc;
+	}
+	return 0;
+}
+
+/*
+ * Set up size limits of the already allocated request queue.
+ */
+static void
+dasd_setup_queue(struct dasd_device * device)
+{
+	int max;
+
+	blk_queue_hardsect_size(device->request_queue, device->bp_block);
+	/* max sectors per request is limited by the discipline's
+	 * max_blocks, converted from blocks to 512-byte sectors */
+	max = device->discipline->max_blocks << device->s2b_shift;
+	blk_queue_max_sectors(device->request_queue, max);
+	/* -1L into the unsigned limit parameters means "no limit" */
+	blk_queue_max_phys_segments(device->request_queue, -1L);
+	blk_queue_max_hw_segments(device->request_queue, -1L);
+	blk_queue_max_segment_size(device->request_queue, -1L);
+	blk_queue_segment_boundary(device->request_queue, -1L);
+}
+
+/*
+ * Deactivate and free request queue.
+ */
+static void
+dasd_free_queue(struct dasd_device * device)
+{
+	if (device->request_queue) {
+		blk_cleanup_queue(device->request_queue);
+		/* mark as gone for the NULL checks elsewhere */
+		device->request_queue = NULL;
+	}
+}
+
+/*
+ * Flush request on the request queue.
+ */
+/*
+ * Fail all requests still sitting on the block device request queue.
+ */
+static void
+dasd_flush_request_queue(struct dasd_device * device)
+{
+	struct request *req;
+
+	if (!device->request_queue)
+		return;
+
+	spin_lock_irq(&device->request_queue_lock);
+	while (!list_empty(&device->request_queue->queue_head)) {
+		req = elv_next_request(device->request_queue);
+		if (req == NULL)
+			break;
+		/* Dequeue before completing, consistent with all other
+		 * call sites in this file. */
+		blkdev_dequeue_request(req);
+		dasd_end_request(req, 0);
+	}
+	spin_unlock_irq(&device->request_queue_lock);
+}
+
+/*
+ * Block device open: takes a module reference on the discipline and
+ * counts the opener. Fails for offline, probeonly or unrecognized
+ * devices.
+ */
+static int
+dasd_open(struct inode *inp, struct file *filp)
+{
+	struct gendisk *disk = inp->i_bdev->bd_disk;
+	struct dasd_device *device = disk->private_data;
+	int rc;
+
+	/* counted unconditionally; undone on every error path */
+	atomic_inc(&device->open_count);
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+
+	if (!try_module_get(device->discipline->owner)) {
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	if (dasd_probeonly) {
+		DEV_MESSAGE(KERN_INFO, device, "%s",
+			    "No access to device due to probeonly mode");
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (device->state < DASD_STATE_BASIC) {
+		DBF_DEV_EVENT(DBF_ERR, device, " %s",
+			      " Cannot open unrecognized device");
+		rc = -ENODEV;
+		goto out;
+	}
+
+	return 0;
+
+out:
+	module_put(device->discipline->owner);
+unlock:
+	atomic_dec(&device->open_count);
+	return rc;
+}
+
+/*
+ * Block device release: undo the counting and module reference taken
+ * in dasd_open.
+ */
+static int
+dasd_release(struct inode *inp, struct file *filp)
+{
+	struct gendisk *disk = inp->i_bdev->bd_disk;
+	struct dasd_device *device = disk->private_data;
+
+	atomic_dec(&device->open_count);
+	module_put(device->discipline->owner);
+	return 0;
+}
+
+/* Block device operations exported via each dasd gendisk. */
+struct block_device_operations
+dasd_device_operations = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_open,
+	.release	= dasd_release,
+	.ioctl		= dasd_ioctl,
+};
+
+
+/* Module exit: tear down subsystems in reverse order of dasd_init. */
+static void
+dasd_exit(void)
+{
+#ifdef CONFIG_PROC_FS
+	dasd_proc_exit();
+#endif
+	dasd_ioctl_exit();
+	dasd_gendisk_exit();
+	dasd_devmap_exit();
+	devfs_remove("dasd");
+	if (dasd_debug_area != NULL) {
+		debug_unregister(dasd_debug_area);
+		dasd_debug_area = NULL;
+	}
+}
+
+/*
+ * SECTION: common functions for ccw_driver use
+ */
+
+/* initial attempt at a probe function. this can be simplified once
+ * the other detection code is gone */
+int
+dasd_generic_probe (struct ccw_device *cdev,
+		    struct dasd_discipline *discipline)
+{
+	int ret;
+
+	ret = dasd_add_sysfs_files(cdev);
+	if (ret) {
+		/* sysfs failure is reported but the handler is
+		 * installed anyway; ret is passed back to the caller */
+		printk(KERN_WARNING
+		       "dasd_generic_probe: could not add sysfs entries "
+		       "for %s\n", cdev->dev.bus_id);
+	}
+
+	cdev->handler = &dasd_int_handler;
+
+	return ret;
+}
+
+/* this will one day be called from a global not_oper handler.
+ * It is also used by driver_unregister during module unload */
+void
+dasd_generic_remove (struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+
+	dasd_remove_sysfs_files(cdev);
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return;
+	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		/* Already doing offline processing */
+		dasd_put_device(device);
+		return;
+	}
+	/*
+	 * This device is removed unconditionally. Set offline
+	 * flag to prevent dasd_open from opening it while it is
+	 * not quite down yet.
+	 */
+	dasd_set_target_state(device, DASD_STATE_NEW);
+	/* dasd_delete_device destroys the device reference. */
+	dasd_delete_device(device);
+}
+
+/* activate a device. This is called from dasd_{eckd,fba}_probe() when either
+ * the device is detected for the first time and is supposed to be used
+ * or the user has started activation through sysfs */
+int
+dasd_generic_set_online (struct ccw_device *cdev,
+			 struct dasd_discipline *discipline)
+
+{
+	struct dasd_device *device;
+	int rc;
+
+	device = dasd_create_device(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	/* A user request for DIAG access overrides the discipline the
+	 * caller passed in; fail if the DIAG module is not loaded. */
+	if (test_bit(DASD_FLAG_USE_DIAG, &device->flags)) {
+		if (!dasd_diag_discipline_pointer) {
+			printk (KERN_WARNING
+				"dasd_generic couldn't online device %s "
+				"- discipline DIAG not available\n",
+				cdev->dev.bus_id);
+			dasd_delete_device(device);
+			return -ENODEV;
+		}
+		discipline = dasd_diag_discipline_pointer;
+	}
+	device->discipline = discipline;
+
+	rc = discipline->check_device(device);
+	if (rc) {
+		printk (KERN_WARNING
+			"dasd_generic couldn't online device %s "
+			"with discipline %s rc=%i\n",
+			cdev->dev.bus_id, discipline->name, rc);
+		dasd_delete_device(device);
+		return rc;
+	}
+
+	dasd_set_target_state(device, DASD_STATE_ONLINE);
+	if (device->state <= DASD_STATE_KNOWN) {
+		printk (KERN_WARNING
+			"dasd_generic discipline not found for %s\n",
+			cdev->dev.bus_id);
+		rc = -ENODEV;
+		dasd_set_target_state(device, DASD_STATE_NEW);
+		/* dasd_delete_device destroys the device reference, so
+		 * return immediately instead of falling through to the
+		 * wait_event/dasd_put_device below, which would touch
+		 * the already deleted device. */
+		dasd_delete_device(device);
+		return rc;
+	}
+	pr_debug("dasd_generic device %s found\n",
+		 cdev->dev.bus_id);
+
+	/* FIXME: we have to wait for the root device but we don't want
+	 * to wait for each single device but for all at once. */
+	wait_event(dasd_init_waitq, _wait_for_device(device));
+
+	dasd_put_device(device);
+
+	return rc;
+}
+
+/*
+ * Take a device offline on user request.  Fails with -EBUSY while the
+ * block device has any opener other than our own partition scan.
+ */
+int
+dasd_generic_set_offline (struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+	int max_count;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		/* Already doing offline processing */
+		dasd_put_device(device);
+		return 0;
+	}
+	/*
+	 * We must make sure that this device is currently not in use.
+	 * The open_count is increased for every opener, that includes
+	 * the blkdev_get in dasd_scan_partitions. We are only interested
+	 * in the other openers.
+	 */
+	max_count = device->bdev ? 0 : -1;
+	if (atomic_read(&device->open_count) > max_count) {
+		printk (KERN_WARNING "Can't offline dasd device with open"
+			" count = %i.\n",
+			atomic_read(&device->open_count));
+		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+		dasd_put_device(device);
+		return -EBUSY;
+	}
+	dasd_set_target_state(device, DASD_STATE_NEW);
+	/* dasd_delete_device destroys the device reference. */
+	dasd_delete_device(device);
+
+	return 0;
+}
+
+/*
+ * Handle channel path events delivered by the CIO layer.
+ * Returns 1 when the event was handled, 0 when the cdev is not a known
+ * DASD.  The request queue is modified under the ccw device lock.
+ */
+int
+dasd_generic_notify(struct ccw_device *cdev, int event)
+{
+	struct dasd_device *device;
+	struct dasd_ccw_req *cqr;
+	unsigned long flags;
+	int ret;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	ret = 0;
+	switch (event) {
+	case CIO_GONE:
+	case CIO_NO_PATH:
+		if (device->state < DASD_STATE_BASIC)
+			break;
+		/* Device is active. We want to keep it. */
+		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
+			/* Fail the in-flight requests and report I/O
+			 * errors until a path becomes available again. */
+			list_for_each_entry(cqr, &device->ccw_queue, list)
+				if (cqr->status == DASD_CQR_IN_IO)
+					cqr->status = DASD_CQR_FAILED;
+			device->stopped |= DASD_STOPPED_DC_EIO;
+			dasd_schedule_bh(device);
+		} else {
+			/* Re-queue the in-flight requests so they are
+			 * retried once a path is operational again. */
+			list_for_each_entry(cqr, &device->ccw_queue, list)
+				if (cqr->status == DASD_CQR_IN_IO) {
+					cqr->status = DASD_CQR_QUEUED;
+					cqr->retries++;
+				}
+			device->stopped |= DASD_STOPPED_DC_WAIT;
+			dasd_set_timer(device, 0);
+		}
+		ret = 1;
+		break;
+	case CIO_OPER:
+		/* FIXME: add a sanity check. */
+		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
+		dasd_schedule_bh(device);
+		ret = 1;
+		break;
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	dasd_put_device(device);
+	return ret;
+}
+
+/*
+ * Automatically online either all dasd devices (dasd_autodetect) or
+ * all devices specified with dasd= parameters.
+ */
+void
+dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
+{
+	struct device_driver *drv;
+	struct device *d, *dev;
+	struct ccw_device *cdev;
+
+	drv = get_driver(&dasd_discipline_driver->driver);
+	/* Walk the driver's device list under the bus rwsem so that the
+	 * list cannot change while we iterate; each device is pinned
+	 * with get_device() for the duration of the online call. */
+	down_read(&drv->bus->subsys.rwsem);
+	dev = NULL;
+	list_for_each_entry(d, &drv->devices, driver_list) {
+		dev = get_device(d);
+		if (!dev)
+			continue;
+		cdev = to_ccwdev(dev);
+		if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
+			ccw_device_set_online(cdev);
+		put_device(dev);
+	}
+	up_read(&drv->bus->subsys.rwsem);
+	put_driver(drv);
+}
+
+/*
+ * Driver initialization: debug feature, devfs directory, devmap,
+ * gendisk, command line parsing, ioctl and (optionally) proc support.
+ * On any failure dasd_exit() unwinds whatever was already set up.
+ */
+static int __init
+dasd_init(void)
+{
+	int rc;
+
+	init_waitqueue_head(&dasd_init_waitq);
+
+	/* register 'common' DASD debug area, used for all DBF_XXX calls */
+	dasd_debug_area = debug_register("dasd", 0, 2, 8 * sizeof (long));
+	if (dasd_debug_area == NULL) {
+		rc = -ENOMEM;
+		goto failed;
+	}
+	/* NOTE(review): debug_register_view return value is ignored here;
+	 * a failure would only lose the sprintf view, not the area. */
+	debug_register_view(dasd_debug_area, &debug_sprintf_view);
+	debug_set_level(dasd_debug_area, DBF_EMERG);
+
+	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
+
+	dasd_diag_discipline_pointer = NULL;
+
+	rc = devfs_mk_dir("dasd");
+	if (rc)
+		goto failed;
+	rc = dasd_devmap_init();
+	if (rc)
+		goto failed;
+	rc = dasd_gendisk_init();
+	if (rc)
+		goto failed;
+	rc = dasd_parse();
+	if (rc)
+		goto failed;
+	rc = dasd_ioctl_init();
+	if (rc)
+		goto failed;
+#ifdef CONFIG_PROC_FS
+	rc = dasd_proc_init();
+	if (rc)
+		goto failed;
+#endif
+
+	return 0;
+failed:
+	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
+	dasd_exit();
+	return rc;
+}
+
+module_init(dasd_init);
+module_exit(dasd_exit);
+
+/* Symbols used by the discipline modules (dasd_eckd_mod, dasd_fba_mod,
+ * dasd_diag_mod) and by the other dasd core files. */
+EXPORT_SYMBOL(dasd_debug_area);
+EXPORT_SYMBOL(dasd_diag_discipline_pointer);
+
+EXPORT_SYMBOL(dasd_add_request_head);
+EXPORT_SYMBOL(dasd_add_request_tail);
+EXPORT_SYMBOL(dasd_cancel_req);
+EXPORT_SYMBOL(dasd_clear_timer);
+EXPORT_SYMBOL(dasd_enable_device);
+EXPORT_SYMBOL(dasd_int_handler);
+EXPORT_SYMBOL(dasd_kfree_request);
+EXPORT_SYMBOL(dasd_kick_device);
+EXPORT_SYMBOL(dasd_kmalloc_request);
+EXPORT_SYMBOL(dasd_schedule_bh);
+EXPORT_SYMBOL(dasd_set_target_state);
+EXPORT_SYMBOL(dasd_set_timer);
+EXPORT_SYMBOL(dasd_sfree_request);
+EXPORT_SYMBOL(dasd_sleep_on);
+EXPORT_SYMBOL(dasd_sleep_on_immediatly);
+EXPORT_SYMBOL(dasd_sleep_on_interruptible);
+EXPORT_SYMBOL(dasd_smalloc_request);
+EXPORT_SYMBOL(dasd_start_IO);
+EXPORT_SYMBOL(dasd_term_IO);
+
+/* GPL-only interface for the ccw_driver glue of the disciplines. */
+EXPORT_SYMBOL_GPL(dasd_generic_probe);
+EXPORT_SYMBOL_GPL(dasd_generic_remove);
+EXPORT_SYMBOL_GPL(dasd_generic_notify);
+EXPORT_SYMBOL_GPL(dasd_generic_set_online);
+EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
+EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: 1
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_3370_erp.c b/drivers/s390/block/dasd_3370_erp.c
new file mode 100644
index 000000000000..84565c8f584e
--- /dev/null
+++ b/drivers/s390/block/dasd_3370_erp.c
@@ -0,0 +1,104 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_3370_erp.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+ *
+ * $Revision: 1.9 $
+ */
+
+#define PRINTK_HEADER "dasd_erp(3370)"
+
+#include "dasd_int.h"
+
+
+/*
+ * DASD_3370_ERP_EXAMINE
+ *
+ * DESCRIPTION
+ * Checks only for fatal/no/recover error.
+ * A detailed examination of the sense data is done later outside
+ * the interrupt handler.
+ *
+ * The logic is based on the 'IBM 3880 Storage Control Reference' manual
+ * 'Chapter 7. 3370 Sense Data'.
+ *
+ * RETURN VALUES
+ * dasd_era_none no error
+ * dasd_era_fatal for all fatal (unrecoverable errors)
+ * dasd_era_recover for all others.
+ */
+dasd_era_t
+dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
+{
+	char *sense = irb->ecw;
+
+	/* check for successful execution first */
+	if (irb->scsw.cstat == 0x00 &&
+	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		return dasd_era_none;
+	/* Walk the sense bits in order of severity; first match wins. */
+	if (sense[0] & 0x80) {	/* CMD reject */
+		return dasd_era_fatal;
+	}
+	if (sense[0] & 0x40) {	/* Drive offline */
+		return dasd_era_recover;
+	}
+	if (sense[0] & 0x20) {	/* Bus out parity */
+		return dasd_era_recover;
+	}
+	if (sense[0] & 0x10) {	/* equipment check */
+		if (sense[1] & 0x80) {
+			return dasd_era_fatal;
+		}
+		return dasd_era_recover;
+	}
+	if (sense[0] & 0x08) {	/* data check */
+		if (sense[1] & 0x80) {
+			return dasd_era_fatal;
+		}
+		return dasd_era_recover;
+	}
+	if (sense[0] & 0x04) {	/* overrun */
+		if (sense[1] & 0x80) {
+			return dasd_era_fatal;
+		}
+		return dasd_era_recover;
+	}
+	if (sense[1] & 0x40) {	/* invalid blocksize */
+		return dasd_era_fatal;
+	}
+	if (sense[1] & 0x04) {	/* file protected */
+		return dasd_era_recover;
+	}
+	if (sense[1] & 0x01) {	/* operation incomplete */
+		return dasd_era_recover;
+	}
+	if (sense[2] & 0x80) {	/* check data error */
+		return dasd_era_recover;
+	}
+	if (sense[2] & 0x10) {	/* Env. data present */
+		return dasd_era_recover;
+	}
+	/* examine the 24 byte sense data */
+	return dasd_era_recover;
+
+}				/* END dasd_3370_erp_examine */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: 1
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
new file mode 100644
index 000000000000..c143ecb53d9d
--- /dev/null
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -0,0 +1,2742 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_3990_erp.c
+ * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
+ *
+ * $Revision: 1.36 $
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <asm/idals.h>
+#include <asm/todclk.h>
+
+#define PRINTK_HEADER "dasd_erp(3990): "
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+
+/* Payload of a Diagnostic Control (DCTL) channel command; built and
+ * attached to a CCW by dasd_3990_erp_DCTL() below. */
+struct DCTL_data {
+	unsigned char subcommand;  /* e.g Inhibit Write, Enable Write,... */
+	unsigned char modifier;	   /* Subcommand modifier */
+	unsigned short res;	   /* reserved */
+} __attribute__ ((packed));
+
+/*
+ *****************************************************************************
+ * SECTION ERP EXAMINATION
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_EXAMINE_24
+ *
+ * DESCRIPTION
+ *   Checks only for fatal (unrecoverable) error.
+ *   A detailed examination of the sense data is done later outside
+ *   the interrupt handler.
+ *
+ *   Each bit configuration leading to an action code 2 (Exit with
+ *   programming error or unusual condition indication)
+ *   are handled as fatal errors.
+ *
+ *   All other configurations are handled as recoverable errors.
+ *
+ * RETURN VALUES
+ *   dasd_era_fatal	for all fatal (unrecoverable errors)
+ *   dasd_era_recover	for all others.
+ */
+static dasd_era_t
+dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense)
+{
+
+	struct dasd_device *device = cqr->device;
+
+	/* check for 'Command Reject' */
+	if ((sense[0] & SNS0_CMD_REJECT) &&
+	    (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
+
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "EXAMINE 24: Command Reject detected - "
+			    "fatal error");
+
+		return dasd_era_fatal;
+	}
+
+	/* check for 'Invalid Track Format' */
+	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
+	    (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
+
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "EXAMINE 24: Invalid Track Format detected "
+			    "- fatal error");
+
+		return dasd_era_fatal;
+	}
+
+	/* check for 'No Record Found' */
+	if (sense[1] & SNS1_NO_REC_FOUND) {
+
+		/* FIXME: fatal error ?!? */
+		DEV_MESSAGE(KERN_ERR, device,
+			    "EXAMINE 24: No Record Found detected %s",
+			    device->state <= DASD_STATE_BASIC ?
+			    " " : "- fatal error");
+
+		return dasd_era_fatal;
+	}
+
+	/* return recoverable for all others */
+	return dasd_era_recover;
+}				/* END dasd_3990_erp_examine_24 */
+
+/*
+ * DASD_3990_ERP_EXAMINE_32
+ *
+ * DESCRIPTION
+ * Checks only for fatal/no/recoverable error.
+ * A detailed examination of the sense data is done later outside
+ * the interrupt handler.
+ *
+ * RETURN VALUES
+ * dasd_era_none no error
+ * dasd_era_fatal for all fatal (unrecoverable errors)
+ * dasd_era_recover for recoverable others.
+ */
+static dasd_era_t
+dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
+{
+
+	struct dasd_device *device = cqr->device;
+
+	/* sense byte 25 carries the exception classification */
+	switch (sense[25]) {
+	case 0x00:		/* no exception */
+		return dasd_era_none;
+
+	case 0x01:		/* fatal */
+		DEV_MESSAGE(KERN_ERR, device, "%s", "EXAMINE 32: fatal error");
+
+		return dasd_era_fatal;
+
+	default:		/* all other codes: try recovery */
+
+		return dasd_era_recover;
+	}
+
+}				/* end dasd_3990_erp_examine_32 */
+
+/*
+ * DASD_3990_ERP_EXAMINE
+ *
+ * DESCRIPTION
+ * Checks only for fatal/no/recover error.
+ * A detailed examination of the sense data is done later outside
+ * the interrupt handler.
+ *
+ * The logic is based on the 'IBM 3990 Storage Control Reference' manual
+ * 'Chapter 7. Error Recovery Procedures'.
+ *
+ * RETURN VALUES
+ * dasd_era_none no error
+ * dasd_era_fatal for all fatal (unrecoverable errors)
+ * dasd_era_recover for all others.
+ */
+dasd_era_t
+dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
+{
+
+	char *sense = irb->ecw;
+	dasd_era_t era = dasd_era_recover;
+	struct dasd_device *device = cqr->device;
+
+	/* check for successful execution first */
+	if (irb->scsw.cstat == 0x00 &&
+	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		return dasd_era_none;
+
+	/* distinguish between 24 and 32 byte sense data;
+	 * bit 0 of sense byte 27 set means 24-byte format */
+	if (sense[27] & DASD_SENSE_BIT_0) {
+
+		era = dasd_3990_erp_examine_24(cqr, sense);
+
+	} else {
+
+		era = dasd_3990_erp_examine_32(cqr, sense);
+
+	}
+
+	/* log the erp chain if fatal error occurred */
+	if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
+		dasd_log_sense(cqr, irb);
+		dasd_log_ccw(cqr, 0, irb->scsw.cpa);
+	}
+
+	return era;
+
+}				/* END dasd_3990_erp_examine */
+
+/*
+ *****************************************************************************
+ * SECTION ERP HANDLING
+ *****************************************************************************
+ */
+/*
+ *****************************************************************************
+ * 24 and 32 byte sense ERP functions
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_CLEANUP
+ *
+ * DESCRIPTION
+ * Removes the already build but not necessary ERP request and sets
+ * the status of the original cqr / erp to the given (final) status
+ *
+ * PARAMETER
+ * erp request to be blocked
+ * final_status either DASD_CQR_DONE or DASD_CQR_FAILED
+ *
+ * RETURN VALUES
+ * cqr original cqr
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
+{
+	struct dasd_ccw_req *cqr = erp->refers;
+
+	/* erp is freed here and must not be referenced afterwards */
+	dasd_free_erp_request(erp, erp->device);
+	cqr->status = final_status;
+	return cqr;
+
+}				/* end dasd_3990_erp_cleanup */
+
+/*
+ * DASD_3990_ERP_BLOCK_QUEUE
+ *
+ * DESCRIPTION
+ * Block the given device request queue to prevent from further
+ * processing until the started timer has expired or an related
+ * interrupt was received.
+ */
+static void
+dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
+{
+
+	struct dasd_device *device = erp->device;
+
+	DEV_MESSAGE(KERN_INFO, device,
+		    "blocking request queue for %is", expires/HZ);
+
+	/* stop request processing; the erp stays queued and the device
+	 * timer re-drives it after 'expires' jiffies at the latest */
+	device->stopped |= DASD_STOPPED_PENDING;
+	erp->status = DASD_CQR_QUEUED;
+
+	dasd_set_timer(device, expires);
+}
+
+/*
+ * DASD_3990_ERP_INT_REQ
+ *
+ * DESCRIPTION
+ * Handles 'Intervention Required' error.
+ * This means either device offline or not installed.
+ *
+ * PARAMETER
+ * erp current erp
+ * RETURN VALUES
+ * erp modified erp
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
+{
+
+	struct dasd_device *device = erp->device;
+
+	/* first time set initial retry counter and erp_function */
+	/* and retry once without blocking queue */
+	/* (this enables easier enqueing of the cqr) */
+	if (erp->function != dasd_3990_erp_int_req) {
+
+		erp->retries = 256;
+		erp->function = dasd_3990_erp_int_req;
+
+	} else {
+
+		/* issue a message and wait for 'device ready' interrupt */
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "is offline or not installed - "
+			    "INTERVENTION REQUIRED!!");
+
+		/* block the queue for up to 60s awaiting operator action */
+		dasd_3990_erp_block_queue(erp, 60*HZ);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_int_req */
+
+/*
+ * DASD_3990_ERP_ALTERNATE_PATH
+ *
+ * DESCRIPTION
+ * Repeat the operation on a different channel path.
+ * If all alternate paths have been tried, the request is posted with a
+ * permanent error.
+ *
+ * PARAMETER
+ * erp pointer to the current ERP
+ *
+ * RETURN VALUES
+ * erp modified pointer to the ERP
+ */
+static void
+dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
+{
+	struct dasd_device *device = erp->device;
+	__u8 opm;
+
+	/* try alternate valid path */
+	opm = ccw_device_get_path_mask(device->cdev);
+	//FIXME: start with get_opm ?
+	/* mask out the path the failure occurred on (lpum) */
+	if (erp->lpm == 0)
+		erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
+	else
+		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
+
+	if ((erp->lpm & opm) != 0x00) {
+
+		DEV_MESSAGE(KERN_DEBUG, device,
+			    "try alternate lpm=%x (lpum=%x / opm=%x)",
+			    erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
+
+		/* reset status to queued to handle the request again... */
+		if (erp->status > DASD_CQR_QUEUED)
+			erp->status = DASD_CQR_QUEUED;
+		erp->retries = 1;
+	} else {
+		DEV_MESSAGE(KERN_ERR, device,
+			    "No alternate channel path left (lpum=%x / "
+			    "opm=%x) -> permanent error",
+			    erp->irb.esw.esw0.sublog.lpum, opm);
+
+		/* post request with permanent error */
+		if (erp->status > DASD_CQR_QUEUED)
+			erp->status = DASD_CQR_FAILED;
+	}
+}				/* end dasd_3990_erp_alternate_path */
+
+/*
+ * DASD_3990_ERP_DCTL
+ *
+ * DESCRIPTION
+ *   Setup cqr to do the Diagnostic Control (DCTL) command with an
+ *   Inhibit Write subcommand (0x02) and the given modifier.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current (failed) ERP
+ *   modifier		subcommand modifier
+ *
+ * RETURN VALUES
+ *   dctl_cqr		pointer to NEW dctl_cqr
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
+{
+
+	struct dasd_device *device = erp->device;
+	struct DCTL_data *DCTL_data;
+	struct ccw1 *ccw;
+	struct dasd_ccw_req *dctl_cqr;
+
+	dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
+					  sizeof (struct DCTL_data),
+					  erp->device);
+	if (IS_ERR(dctl_cqr)) {
+		/* allocation failed - fail the original erp instead */
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "Unable to allocate DCTL-CQR");
+		erp->status = DASD_CQR_FAILED;
+		return erp;
+	}
+
+	DCTL_data = dctl_cqr->data;
+
+	DCTL_data->subcommand = 0x02;	/* Inhibit Write */
+	DCTL_data->modifier = modifier;
+
+	/* build the single DCTL channel command word */
+	ccw = dctl_cqr->cpaddr;
+	memset(ccw, 0, sizeof (struct ccw1));
+	ccw->cmd_code = CCW_CMD_DCTL;
+	ccw->count = 4;
+	ccw->cda = (__u32)(addr_t) DCTL_data;
+	dctl_cqr->function = dasd_3990_erp_DCTL;
+	dctl_cqr->refers = erp;
+	dctl_cqr->device = erp->device;
+	dctl_cqr->magic = erp->magic;
+	dctl_cqr->expires = 5 * 60 * HZ;
+	dctl_cqr->retries = 2;
+
+	dctl_cqr->buildclk = get_clock();
+
+	dctl_cqr->status = DASD_CQR_FILLED;
+
+	return dctl_cqr;
+
+}				/* end dasd_3990_erp_DCTL */
+
+/*
+ * DASD_3990_ERP_ACTION_1
+ *
+ * DESCRIPTION
+ * Setup ERP to do the ERP action 1 (see Reference manual).
+ * Repeat the operation on a different channel path.
+ * If all alternate paths have been tried, the request is posted with a
+ * permanent error.
+ * Note: duplex handling is not implemented (yet).
+ *
+ * PARAMETER
+ * erp pointer to the current ERP
+ *
+ * RETURN VALUES
+ * erp pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
+{
+
+	erp->function = dasd_3990_erp_action_1;
+
+	/* retry on an alternate path, or fail permanently if none left */
+	dasd_3990_erp_alternate_path(erp);
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_1 */
+
+/*
+ * DASD_3990_ERP_ACTION_4
+ *
+ * DESCRIPTION
+ * Setup ERP to do the ERP action 4 (see Reference manual).
+ * Set the current request to PENDING to block the CQR queue for that device
+ * until the state change interrupt appears.
+ * Use a timer (20 seconds) to retry the cqr if the interrupt is still
+ * missing.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the current ERP
+ *
+ * RETURN VALUES
+ * erp pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->device;
+
+	/* first time set initial retry counter and erp_function    */
+	/* and retry once without waiting for state change pending  */
+	/* interrupt (this enables easier enqueing of the cqr)	    */
+	if (erp->function != dasd_3990_erp_action_4) {
+
+		DEV_MESSAGE(KERN_INFO, device, "%s",
+			    "dasd_3990_erp_action_4: first time retry");
+
+		erp->retries = 256;
+		erp->function = dasd_3990_erp_action_4;
+
+	} else {
+
+		/* sense byte 25 (32-byte sense) selects how to wait */
+		if (sense[25] == 0x1D) {	/* state change pending */
+
+			DEV_MESSAGE(KERN_INFO, device,
+				    "waiting for state change pending "
+				    "interrupt, %d retries left",
+				    erp->retries);
+
+			dasd_3990_erp_block_queue(erp, 30*HZ);
+
+		} else if (sense[25] == 0x1E) {	/* busy */
+			DEV_MESSAGE(KERN_INFO, device,
+				    "busy - redriving request later, "
+				    "%d retries left",
+				    erp->retries);
+			dasd_3990_erp_block_queue(erp, HZ);
+		} else {
+
+			/* no state change pending - retry */
+			DEV_MESSAGE (KERN_INFO, device,
+				     "redriving request immediately, "
+				     "%d retries left",
+				     erp->retries);
+			erp->status = DASD_CQR_QUEUED;
+		}
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_4 */
+
+/*
+ *****************************************************************************
+ * 24 byte sense ERP functions (only)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_ACTION_5
+ *
+ * DESCRIPTION
+ * Setup ERP to do the ERP action 5 (see Reference manual).
+ * NOTE: Further handling is done in xxx_further_erp after the retries.
+ *
+ * PARAMETER
+ * erp pointer to the current ERP
+ *
+ * RETURN VALUES
+ * erp pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
+{
+
+	/* first of all retry */
+	erp->retries = 10;
+	erp->function = dasd_3990_erp_action_5;
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_5 */
+
+/*
+ * DASD_3990_HANDLE_ENV_DATA
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Environmental data present'.
+ * Does a analysis of the sense data (message Format)
+ * and prints the error messages.
+ *
+ * PARAMETER
+ * sense current sense data
+ *
+ * RETURN VALUES
+ * void
+ */
+static void
+dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+ char msg_format = (sense[7] & 0xF0);
+ char msg_no = (sense[7] & 0x0F);
+
+ switch (msg_format) {
+ case 0x00: /* Format 0 - Program or System Checks */
+
+ if (sense[1] & 0x10) { /* check message to operator bit */
+
+ switch (msg_no) {
+ case 0x00: /* No Message */
+ break;
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Invalid Command");
+ break;
+ case 0x02:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Invalid Command "
+ "Sequence");
+ break;
+ case 0x03:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - CCW Count less than "
+ "required");
+ break;
+ case 0x04:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Invalid Parameter");
+ break;
+ case 0x05:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Diagnostic of Sepecial"
+ " Command Violates File Mask");
+ break;
+ case 0x07:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Channel Returned with "
+ "Incorrect retry CCW");
+ break;
+ case 0x08:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Reset Notification");
+ break;
+ case 0x09:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Storage Path Restart");
+ break;
+ case 0x0A:
+ DEV_MESSAGE(KERN_WARNING, device,
+ "FORMAT 0 - Channel requested "
+ "... %02x", sense[8]);
+ break;
+ case 0x0B:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Invalid Defective/"
+ "Alternate Track Pointer");
+ break;
+ case 0x0C:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - DPS Installation "
+ "Check");
+ break;
+ case 0x0E:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Command Invalid on "
+ "Secondary Address");
+ break;
+ case 0x0F:
+ DEV_MESSAGE(KERN_WARNING, device,
+ "FORMAT 0 - Status Not As "
+ "Required: reason %02x", sense[8]);
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Reseved");
+ }
+ } else {
+ switch (msg_no) {
+ case 0x00: /* No Message */
+ break;
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Device Error Source");
+ break;
+ case 0x02:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Reserved");
+ break;
+ case 0x03:
+ DEV_MESSAGE(KERN_WARNING, device,
+ "FORMAT 0 - Device Fenced - "
+ "device = %02x", sense[4]);
+ break;
+ case 0x04:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Data Pinned for "
+ "Device");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 0 - Reserved");
+ }
+ }
+ break;
+
+ case 0x10: /* Format 1 - Device Equipment Checks */
+ switch (msg_no) {
+ case 0x00: /* No Message */
+ break;
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Device Status 1 not as "
+ "expected");
+ break;
+ case 0x03:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Index missing");
+ break;
+ case 0x04:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Interruption cannot be reset");
+ break;
+ case 0x05:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Device did not respond to "
+ "selection");
+ break;
+ case 0x06:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Device check-2 error or Set "
+ "Sector is not complete");
+ break;
+ case 0x07:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Head address does not "
+ "compare");
+ break;
+ case 0x08:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Device status 1 not valid");
+ break;
+ case 0x09:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Device not ready");
+ break;
+ case 0x0A:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Track physical address did "
+ "not compare");
+ break;
+ case 0x0B:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Missing device address bit");
+ break;
+ case 0x0C:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Drive motor switch is off");
+ break;
+ case 0x0D:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Seek incomplete");
+ break;
+ case 0x0E:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Cylinder address did not "
+ "compare");
+ break;
+ case 0x0F:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Offset active cannot be "
+ "reset");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 1 - Reserved");
+ }
+ break;
+
+ case 0x20: /* Format 2 - 3990 Equipment Checks */
+ switch (msg_no) {
+ case 0x08:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 2 - 3990 check-2 error");
+ break;
+ case 0x0E:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 2 - Support facility errors");
+ break;
+ case 0x0F:
+ DEV_MESSAGE(KERN_WARNING, device,
+ "FORMAT 2 - Microcode detected error %02x",
+ sense[8]);
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 2 - Reserved");
+ }
+ break;
+
+ case 0x30: /* Format 3 - 3990 Control Checks */
+ switch (msg_no) {
+ case 0x0F:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 3 - Allegiance terminated");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 3 - Reserved");
+ }
+ break;
+
+ case 0x40: /* Format 4 - Data Checks */
+ switch (msg_no) {
+ case 0x00:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Home address area error");
+ break;
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Count area error");
+ break;
+ case 0x02:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Key area error");
+ break;
+ case 0x03:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Data area error");
+ break;
+ case 0x04:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - No sync byte in home address "
+ "area");
+ break;
+ case 0x05:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - No sync byte in count address "
+ "area");
+ break;
+ case 0x06:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - No sync byte in key area");
+ break;
+ case 0x07:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - No sync byte in data area");
+ break;
+ case 0x08:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Home address area error; "
+ "offset active");
+ break;
+ case 0x09:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Count area error; offset "
+ "active");
+ break;
+ case 0x0A:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Key area error; offset "
+ "active");
+ break;
+ case 0x0B:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Data area error; "
+ "offset active");
+ break;
+ case 0x0C:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - No sync byte in home "
+ "address area; offset active");
+ break;
+ case 0x0D:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - No syn byte in count "
+ "address area; offset active");
+ break;
+ case 0x0E:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - No sync byte in key area; "
+ "offset active");
+ break;
+ case 0x0F:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - No syn byte in data area; "
+ "offset active");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 4 - Reserved");
+ }
+ break;
+
+ case 0x50: /* Format 5 - Data Check with displacement information */
+ switch (msg_no) {
+ case 0x00:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Data Check in the "
+ "home address area");
+ break;
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Data Check in the count area");
+ break;
+ case 0x02:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Data Check in the key area");
+ break;
+ case 0x03:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Data Check in the data area");
+ break;
+ case 0x08:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Data Check in the "
+ "home address area; offset active");
+ break;
+ case 0x09:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Data Check in the count area; "
+ "offset active");
+ break;
+ case 0x0A:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Data Check in the key area; "
+ "offset active");
+ break;
+ case 0x0B:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Data Check in the data area; "
+ "offset active");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 5 - Reserved");
+ }
+ break;
+
+ case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */
+ switch (msg_no) {
+ case 0x00:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Overrun on channel A");
+ break;
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Overrun on channel B");
+ break;
+ case 0x02:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Overrun on channel C");
+ break;
+ case 0x03:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Overrun on channel D");
+ break;
+ case 0x04:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Overrun on channel E");
+ break;
+ case 0x05:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Overrun on channel F");
+ break;
+ case 0x06:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Overrun on channel G");
+ break;
+ case 0x07:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Overrun on channel H");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 6 - Reserved");
+ }
+ break;
+
+ case 0x70: /* Format 7 - Device Connection Control Checks */
+ switch (msg_no) {
+ case 0x00:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - RCC initiated by a connection "
+ "check alert");
+ break;
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - RCC 1 sequence not "
+ "successful");
+ break;
+ case 0x02:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - RCC 1 and RCC 2 sequences not "
+ "successful");
+ break;
+ case 0x03:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - Invalid tag-in during "
+ "selection sequence");
+ break;
+ case 0x04:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - extra RCC required");
+ break;
+ case 0x05:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - Invalid DCC selection "
+ "response or timeout");
+ break;
+ case 0x06:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - Missing end operation; device "
+ "transfer complete");
+ break;
+ case 0x07:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - Missing end operation; device "
+ "transfer incomplete");
+ break;
+ case 0x08:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - Invalid tag-in for an "
+ "immediate command sequence");
+ break;
+ case 0x09:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - Invalid tag-in for an "
+ "extended command sequence");
+ break;
+ case 0x0A:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - 3990 microcode time out when "
+ "stopping selection");
+ break;
+ case 0x0B:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - No response to selection "
+ "after a poll interruption");
+ break;
+ case 0x0C:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - Permanent path error (DASD "
+ "controller not available)");
+ break;
+ case 0x0D:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - DASD controller not available"
+ " on disconnected command chain");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 7 - Reserved");
+ }
+ break;
+
+ case 0x80: /* Format 8 - Additional Device Equipment Checks */
+ switch (msg_no) {
+ case 0x00: /* No Message */
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - Error correction code "
+ "hardware fault");
+ break;
+ case 0x03:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - Unexpected end operation "
+ "response code");
+ break;
+ case 0x04:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - End operation with transfer "
+ "count not zero");
+ break;
+ case 0x05:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - End operation with transfer "
+ "count zero");
+ break;
+ case 0x06:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - DPS checks after a system "
+ "reset or selective reset");
+ break;
+ case 0x07:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - DPS cannot be filled");
+ break;
+ case 0x08:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - Short busy time-out during "
+ "device selection");
+ break;
+ case 0x09:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - DASD controller failed to "
+ "set or reset the long busy latch");
+ break;
+ case 0x0A:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - No interruption from device "
+ "during a command chain");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 8 - Reserved");
+ }
+ break;
+
+ case 0x90: /* Format 9 - Device Read, Write, and Seek Checks */
+ switch (msg_no) {
+ case 0x00:
+ break; /* No Message */
+ case 0x06:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 9 - Device check-2 error");
+ break;
+ case 0x07:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 9 - Head address did not compare");
+ break;
+ case 0x0A:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 9 - Track physical address did "
+ "not compare while oriented");
+ break;
+ case 0x0E:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 9 - Cylinder address did not "
+ "compare");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT 9 - Reserved");
+ }
+ break;
+
+ case 0xF0: /* Format F - Cache Storage Checks */
+ switch (msg_no) {
+ case 0x00:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Operation Terminated");
+ break;
+ case 0x01:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Subsystem Processing Error");
+ break;
+ case 0x02:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Cache or nonvolatile storage "
+ "equipment failure");
+ break;
+ case 0x04:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Caching terminated");
+ break;
+ case 0x06:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Cache fast write access not "
+ "authorized");
+ break;
+ case 0x07:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Track format incorrect");
+ break;
+ case 0x09:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Caching reinitiated");
+ break;
+ case 0x0A:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Nonvolatile storage "
+ "terminated");
+ break;
+ case 0x0B:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Volume is suspended duplex");
+ break;
+ case 0x0C:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Subsystem status connot be "
+ "determined");
+ break;
+ case 0x0D:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - Caching status reset to "
+ "default");
+ break;
+ case 0x0E:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT F - DASD Fast Write inhibited");
+ break;
+ default:
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "FORMAT D - Reserved");
+ }
+ break;
+
+ default: /* unknown message format - should not happen */
+ DEV_MESSAGE (KERN_WARNING, device,
+ "unknown message format %02x",
+ msg_format);
+ break;
+ } /* end switch message format */
+
+} /* end dasd_3990_handle_env_data */
+
+/*
+ * DASD_3990_ERP_COM_REJ
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Command Reject' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * sense current sense data
+ *
+ * RETURN VALUES
+ * erp 'new' erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+
+ erp->function = dasd_3990_erp_com_rej;
+
+ /* env data present (ACTION 10 - retry should work) */
+ if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Command Reject - environmental data present");
+
+ dasd_3990_handle_env_data(erp, sense);
+
+ erp->retries = 5;
+
+ } else {
+ /* fatal error - set status to FAILED */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "Command Reject - Fatal error");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_com_rej */
+
+/*
+ * DASD_3990_ERP_BUS_OUT
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Bus Out Parity Check' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
+{
+
+ struct dasd_device *device = erp->device;
+
+ /* first time set initial retry counter and erp_function */
+ /* and retry once without blocking queue */
+ /* (this enables easier enqueing of the cqr) */
+ if (erp->function != dasd_3990_erp_bus_out) {
+ erp->retries = 256;
+ erp->function = dasd_3990_erp_bus_out;
+
+ } else {
+
+ /* issue a message and wait for 'device ready' interrupt */
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "bus out parity error or BOPC requested by "
+ "channel");
+
+ dasd_3990_erp_block_queue(erp, 60*HZ);
+
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_bus_out */
+
+/*
+ * DASD_3990_ERP_EQUIP_CHECK
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Equipment Check' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+
+ erp->function = dasd_3990_erp_equip_check;
+
+ if (sense[1] & SNS1_WRITE_INHIBITED) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Write inhibited path encountered");
+
+ /* vary path offline */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "Path should be varied off-line. "
+ "This is not implemented yet \n - please report "
+ "to linux390@de.ibm.com");
+
+ erp = dasd_3990_erp_action_1(erp);
+
+ } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Equipment Check - " "environmental data present");
+
+ dasd_3990_handle_env_data(erp, sense);
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+
+ } else if (sense[1] & SNS1_PERM_ERR) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Equipment Check - retry exhausted or "
+ "undesirable");
+
+ erp = dasd_3990_erp_action_1(erp);
+
+ } else {
+ /* all other equipment checks - Action 5 */
+ /* rest is done when retries == 0 */
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Equipment check or processing error");
+
+ erp = dasd_3990_erp_action_5(erp);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_equip_check */
+
+/*
+ * DASD_3990_ERP_DATA_CHECK
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Data Check' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+
+ erp->function = dasd_3990_erp_data_check;
+
+ if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */
+
+ /* issue message that the data has been corrected */
+ DEV_MESSAGE(KERN_EMERG, device, "%s",
+ "Data recovered during retry with PCI "
+ "fetch mode active");
+
+ /* not possible to handle this situation in Linux */
+ panic("No way to inform application about the possibly "
+ "incorrect data");
+
+ } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Uncorrectable data check recovered secondary "
+ "addr of duplex pair");
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+
+ } else if (sense[1] & SNS1_PERM_ERR) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Uncorrectable data check with internal "
+ "retry exhausted");
+
+ erp = dasd_3990_erp_action_1(erp);
+
+ } else {
+ /* all other data checks */
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Uncorrectable data check with retry count "
+ "exhausted...");
+
+ erp = dasd_3990_erp_action_5(erp);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_data_check */
+
+/*
+ * DASD_3990_ERP_OVERRUN
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Overrun' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+
+ erp->function = dasd_3990_erp_overrun;
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Overrun - service overrun or overrun"
+ " error requested by channel");
+
+ erp = dasd_3990_erp_action_5(erp);
+
+ return erp;
+
+} /* end dasd_3990_erp_overrun */
+
+/*
+ * DASD_3990_ERP_INV_FORMAT
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Invalid Track Format' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+
+ erp->function = dasd_3990_erp_inv_format;
+
+ if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Track format error when destaging or "
+ "staging data");
+
+ dasd_3990_handle_env_data(erp, sense);
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+
+ } else {
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "Invalid Track Format - Fatal error should have "
+ "been handled within the interrupt handler");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_inv_format */
+
+/*
+ * DASD_3990_ERP_EOC
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'End-of-Cylinder' error.
+ *
+ * PARAMETER
+ * erp already added default erp
+ * RETURN VALUES
+ * erp pointer to original (failed) cqr.
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+ struct dasd_device *device = default_erp->device;
+
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "End-of-Cylinder - must never happen");
+
+ /* implement action 7 - BUG */
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+
+} /* end dasd_3990_erp_EOC */
+
+/*
+ * DASD_3990_ERP_ENV_DATA
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Environmental-Data Present' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+
+ erp->function = dasd_3990_erp_env_data;
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s", "Environmental data present");
+
+ dasd_3990_handle_env_data(erp, sense);
+
+ /* don't retry on disabled interface */
+ if (sense[7] != 0x0F) {
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+ } else {
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_IN_IO);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_env_data */
+
+/*
+ * DASD_3990_ERP_NO_REC
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'No Record Found' error.
+ *
+ * PARAMETER
+ * erp already added default ERP
+ *
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+ struct dasd_device *device = default_erp->device;
+
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "No Record Found - Fatal error should "
+ "have been handled within the interrupt handler");
+
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+
+} /* end dasd_3990_erp_no_rec */
+
+/*
+ * DASD_3990_ERP_FILE_PROT
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'File Protected' error.
+ * Note: Seek related recovery is not implemented because
+ * wee don't use the seek command yet.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+{
+
+ struct dasd_device *device = erp->device;
+
+ DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected");
+
+ return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+
+} /* end dasd_3990_erp_file_prot */
+
+/*
+ * DASD_3990_ERP_INSPECT_24
+ *
+ * DESCRIPTION
+ * Does a detailed inspection of the 24 byte sense data
+ * and sets up a related error recovery action.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ * erp pointer to the (addtitional) ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_ccw_req *erp_filled = NULL;
+
+ /* Check sense for .... */
+ /* 'Command Reject' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_CMD_REJECT)) {
+ erp_filled = dasd_3990_erp_com_rej(erp, sense);
+ }
+ /* 'Intervention Required' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_INTERVENTION_REQ)) {
+ erp_filled = dasd_3990_erp_int_req(erp);
+ }
+ /* 'Bus Out Parity Check' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_BUS_OUT_CHECK)) {
+ erp_filled = dasd_3990_erp_bus_out(erp);
+ }
+ /* 'Equipment Check' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_EQUIPMENT_CHECK)) {
+ erp_filled = dasd_3990_erp_equip_check(erp, sense);
+ }
+ /* 'Data Check' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_DATA_CHECK)) {
+ erp_filled = dasd_3990_erp_data_check(erp, sense);
+ }
+ /* 'Overrun' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_OVERRUN)) {
+ erp_filled = dasd_3990_erp_overrun(erp, sense);
+ }
+ /* 'Invalid Track Format' */
+ if ((erp_filled == NULL) && (sense[1] & SNS1_INV_TRACK_FORMAT)) {
+ erp_filled = dasd_3990_erp_inv_format(erp, sense);
+ }
+ /* 'End-of-Cylinder' */
+ if ((erp_filled == NULL) && (sense[1] & SNS1_EOC)) {
+ erp_filled = dasd_3990_erp_EOC(erp, sense);
+ }
+ /* 'Environmental Data' */
+ if ((erp_filled == NULL) && (sense[2] & SNS2_ENV_DATA_PRESENT)) {
+ erp_filled = dasd_3990_erp_env_data(erp, sense);
+ }
+ /* 'No Record Found' */
+ if ((erp_filled == NULL) && (sense[1] & SNS1_NO_REC_FOUND)) {
+ erp_filled = dasd_3990_erp_no_rec(erp, sense);
+ }
+ /* 'File Protected' */
+ if ((erp_filled == NULL) && (sense[1] & SNS1_FILE_PROTECTED)) {
+ erp_filled = dasd_3990_erp_file_prot(erp);
+ }
+ /* other (unknown) error - do default ERP */
+ if (erp_filled == NULL) {
+
+ erp_filled = erp;
+ }
+
+ return erp_filled;
+
+} /* END dasd_3990_erp_inspect_24 */
+
+/*
+ *****************************************************************************
+ * 32 byte sense ERP functions (only)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERPACTION_10_32
+ *
+ * DESCRIPTION
+ * Handles 32 byte 'Action 10' of Single Program Action Codes.
+ * Just retry and if retry doesn't work, return with error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * sense current sense data
+ * RETURN VALUES
+ * erp modified erp_head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+
+ erp->retries = 256;
+ erp->function = dasd_3990_erp_action_10_32;
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s", "Perform logging requested");
+
+ return erp;
+
+} /* end dasd_3990_erp_action_10_32 */
+
+/*
+ * DASD_3990_ERP_ACTION_1B_32
+ *
+ * DESCRIPTION
+ * Handles 32 byte 'Action 1B' of Single Program Action Codes.
+ * A write operation could not be finished because of an unexpected
+ * condition.
+ * The already created 'default erp' is used to get the link to
+ * the erp chain, but it can not be used for this recovery
+ * action because it contains no DE/LO data space.
+ *
+ * PARAMETER
+ * default_erp already added default erp.
+ * sense current sense data
+ *
+ * RETURN VALUES
+ * erp new erp or
+ * default_erp in case of imprecise ending or error
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+ struct dasd_device *device = default_erp->device;
+ __u32 cpa = 0;
+ struct dasd_ccw_req *cqr;
+ struct dasd_ccw_req *erp;
+ struct DE_eckd_data *DE_data;
+ char *LO_data; /* LO_eckd_data_t */
+ struct ccw1 *ccw;
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Write not finished because of unexpected condition");
+
+ default_erp->function = dasd_3990_erp_action_1B_32;
+
+ /* determine the original cqr */
+ cqr = default_erp;
+
+ while (cqr->refers != NULL) {
+ cqr = cqr->refers;
+ }
+
+ /* for imprecise ending just do default erp */
+ if (sense[1] & 0x01) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Imprecise ending is set - just retry");
+
+ return default_erp;
+ }
+
+ /* determine the address of the CCW to be restarted */
+ /* Imprecise ending is not set -> addr from IRB-SCSW */
+ cpa = default_erp->refers->irb.scsw.cpa;
+
+ if (cpa == 0) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Unable to determine address of the CCW "
+ "to be restarted");
+
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+ }
+
+ /* Build new ERP request including DE/LO */
+ erp = dasd_alloc_erp_request((char *) &cqr->magic,
+ 2 + 1,/* DE/LO + TIC */
+ sizeof (struct DE_eckd_data) +
+ sizeof (struct LO_eckd_data), device);
+
+ if (IS_ERR(erp)) {
+ DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP");
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+ }
+
+ /* use original DE */
+ DE_data = erp->data;
+ memcpy(DE_data, cqr->data, sizeof (struct DE_eckd_data));
+
+ /* create LO */
+ LO_data = erp->data + sizeof (struct DE_eckd_data);
+
+ if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "BUG - this should not happen");
+
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+ }
+
+ if ((sense[7] & 0x3F) == 0x01) {
+ /* operation code is WRITE DATA -> data area orientation */
+ LO_data[0] = 0x81;
+
+ } else if ((sense[7] & 0x3F) == 0x03) {
+ /* operation code is FORMAT WRITE -> index orientation */
+ LO_data[0] = 0xC3;
+
+ } else {
+ LO_data[0] = sense[7]; /* operation */
+ }
+
+ LO_data[1] = sense[8]; /* auxiliary */
+ LO_data[2] = sense[9];
+ LO_data[3] = sense[3]; /* count */
+ LO_data[4] = sense[29]; /* seek_addr.cyl */
+ LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
+ LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
+
+ memcpy(&(LO_data[8]), &(sense[11]), 8);
+
+ /* create DE ccw */
+ ccw = erp->cpaddr;
+ memset(ccw, 0, sizeof (struct ccw1));
+ ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+ ccw->flags = CCW_FLAG_CC;
+ ccw->count = 16;
+ ccw->cda = (__u32)(addr_t) DE_data;
+
+ /* create LO ccw */
+ ccw++;
+ memset(ccw, 0, sizeof (struct ccw1));
+ ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+ ccw->flags = CCW_FLAG_CC;
+ ccw->count = 16;
+ ccw->cda = (__u32)(addr_t) LO_data;
+
+ /* TIC to the failed ccw */
+ ccw++;
+ ccw->cmd_code = CCW_CMD_TIC;
+ ccw->cda = cpa;
+
+ /* fill erp related fields */
+ erp->function = dasd_3990_erp_action_1B_32;
+ erp->refers = default_erp->refers;
+ erp->device = device;
+ erp->magic = default_erp->magic;
+ erp->expires = 0;
+ erp->retries = 256;
+ erp->buildclk = get_clock();
+ erp->status = DASD_CQR_FILLED;
+
+ /* remove the default erp */
+ dasd_free_erp_request(default_erp, device);
+
+ return erp;
+
+} /* end dasd_3990_erp_action_1B_32 */
+
+/*
+ * DASD_3990_UPDATE_1B
+ *
+ * DESCRIPTION
+ * Handles the update to the 32 byte 'Action 1B' of Single Program
+ * Action Codes in case the first action was not successful.
+ * The already created 'previous_erp' is the currently not successful
+ * ERP.
+ *
+ * PARAMETER
+ * previous_erp already created previous erp.
+ * sense current sense data
+ * RETURN VALUES
+ * erp modified erp
+ */
+static struct dasd_ccw_req *
+dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
+{
+
+ struct dasd_device *device = previous_erp->device;
+ __u32 cpa = 0;
+ struct dasd_ccw_req *cqr;
+ struct dasd_ccw_req *erp;
+ char *LO_data; /* struct LO_eckd_data */
+ struct ccw1 *ccw;
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Write not finished because of unexpected condition"
+ " - follow on");
+
+ /* determine the original cqr */
+ cqr = previous_erp;
+
+ while (cqr->refers != NULL) {
+ cqr = cqr->refers;
+ }
+
+ /* for imprecise ending just do default erp */
+ if (sense[1] & 0x01) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Imprecise ending is set - just retry");
+
+ previous_erp->status = DASD_CQR_QUEUED;
+
+ return previous_erp;
+ }
+
+ /* determine the address of the CCW to be restarted */
+ /* Imprecise ending is not set -> addr from IRB-SCSW */
+ cpa = previous_erp->irb.scsw.cpa;
+
+ if (cpa == 0) {
+
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Unable to determine address of the CCW "
+ "to be restarted");
+
+ previous_erp->status = DASD_CQR_FAILED;
+
+ return previous_erp;
+ }
+
+ erp = previous_erp;
+
+ /* update the LO with the new returned sense data */
+ LO_data = erp->data + sizeof (struct DE_eckd_data);
+
+ if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "BUG - this should not happen");
+
+ previous_erp->status = DASD_CQR_FAILED;
+
+ return previous_erp;
+ }
+
+ if ((sense[7] & 0x3F) == 0x01) {
+ /* operation code is WRITE DATA -> data area orientation */
+ LO_data[0] = 0x81;
+
+ } else if ((sense[7] & 0x3F) == 0x03) {
+ /* operation code is FORMAT WRITE -> index orientation */
+ LO_data[0] = 0xC3;
+
+ } else {
+ LO_data[0] = sense[7]; /* operation */
+ }
+
+ LO_data[1] = sense[8]; /* auxiliary */
+ LO_data[2] = sense[9];
+ LO_data[3] = sense[3]; /* count */
+ LO_data[4] = sense[29]; /* seek_addr.cyl */
+ LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
+ LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
+
+ memcpy(&(LO_data[8]), &(sense[11]), 8);
+
+ /* TIC to the failed ccw */
+ ccw = erp->cpaddr; /* addr of DE ccw */
+ ccw++; /* addr of LE ccw */
+ ccw++; /* addr of TIC ccw */
+ ccw->cda = cpa;
+
+ erp->status = DASD_CQR_QUEUED;
+
+ return erp;
+
+} /* end dasd_3990_update_1B */
+
+/*
+ * DASD_3990_ERP_COMPOUND_RETRY
+ *
+ * DESCRIPTION
+ * Handles the compound ERP action retry code.
+ * NOTE: At least one retry is done even if zero is specified
+ * by the sense data. This makes enqueueing of the request
+ * easier.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ * erp modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
+{
+
+ switch (sense[25] & 0x03) {
+ case 0x00: /* no not retry */
+ erp->retries = 1;
+ break;
+
+ case 0x01: /* retry 2 times */
+ erp->retries = 2;
+ break;
+
+ case 0x02: /* retry 10 times */
+ erp->retries = 10;
+ break;
+
+ case 0x03: /* retry 256 times */
+ erp->retries = 256;
+ break;
+
+ default:
+ BUG();
+ }
+
+ erp->function = dasd_3990_erp_compound_retry;
+
+} /* end dasd_3990_erp_compound_retry */
+
+/*
+ * DASD_3990_ERP_COMPOUND_PATH
+ *
+ * DESCRIPTION
+ * Handles the compound ERP action for retry on alternate
+ * channel path.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ * erp modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
+{
+
+ if (sense[25] & DASD_SENSE_BIT_3) {
+ dasd_3990_erp_alternate_path(erp);
+
+ if (erp->status == DASD_CQR_FAILED) {
+ /* reset the lpm and the status to be able to
+ * try further actions. */
+
+ erp->lpm = 0;
+
+ erp->status = DASD_CQR_ERROR;
+
+ }
+ }
+
+ erp->function = dasd_3990_erp_compound_path;
+
+} /* end dasd_3990_erp_compound_path */
+
+/*
+ * DASD_3990_ERP_COMPOUND_CODE
+ *
+ * DESCRIPTION
+ * Handles the compound ERP action for retry code.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ * erp NEW ERP pointer
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
+{
+
+ if (sense[25] & DASD_SENSE_BIT_2) {
+
+ switch (sense[28]) {
+ case 0x17:
+ /* issue a Diagnostic Control command with an
+ * Inhibit Write subcommand and controler modifier */
+ erp = dasd_3990_erp_DCTL(erp, 0x20);
+ break;
+
+ case 0x25:
+ /* wait for 5 seconds and retry again */
+ erp->retries = 1;
+
+ dasd_3990_erp_block_queue (erp, 5*HZ);
+ break;
+
+ default:
+ /* should not happen - continue */
+ break;
+ }
+ }
+
+ erp->function = dasd_3990_erp_compound_code;
+
+ return erp;
+
+} /* end dasd_3990_erp_compound_code */
+
+/*
+ * DASD_3990_ERP_COMPOUND_CONFIG
+ *
+ * DESCRIPTION
+ * Handles the compound ERP action for configruation
+ * dependent error.
+ * Note: duplex handling is not implemented (yet).
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ * erp modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
+{
+
+ if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
+
+ /* set to suspended duplex state then restart */
+ struct dasd_device *device = erp->device;
+
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "Set device to suspended duplex state should be "
+ "done!\n"
+ "This is not implemented yet (for compound ERP)"
+ " - please report to linux390@de.ibm.com");
+
+ }
+
+ erp->function = dasd_3990_erp_compound_config;
+
+} /* end dasd_3990_erp_compound_config */
+
+/*
+ * DASD_3990_ERP_COMPOUND
+ *
+ * DESCRIPTION
+ * Does the further compound program action if
+ * compound retry was not successful.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the current (failed) ERP
+ *
+ * RETURN VALUES
+ * erp (additional) ERP pointer
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
+{
+
+ if ((erp->function == dasd_3990_erp_compound_retry) &&
+ (erp->status == DASD_CQR_ERROR)) {
+
+ dasd_3990_erp_compound_path(erp, sense);
+ }
+
+ if ((erp->function == dasd_3990_erp_compound_path) &&
+ (erp->status == DASD_CQR_ERROR)) {
+
+ erp = dasd_3990_erp_compound_code(erp, sense);
+ }
+
+ if ((erp->function == dasd_3990_erp_compound_code) &&
+ (erp->status == DASD_CQR_ERROR)) {
+
+ dasd_3990_erp_compound_config(erp, sense);
+ }
+
+ /* if no compound action ERP specified, the request failed */
+ if (erp->status == DASD_CQR_ERROR) {
+
+ erp->status = DASD_CQR_FAILED;
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_compound */
+
+/*
+ * DASD_3990_ERP_INSPECT_32
+ *
+ * DESCRIPTION
+ * Does a detailed inspection of the 32 byte sense data
+ * and sets up a related error recovery action.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ * erp_filled pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->device;
+
+ erp->function = dasd_3990_erp_inspect_32;
+
+ if (sense[25] & DASD_SENSE_BIT_0) {
+
+ /* compound program action codes (byte25 bit 0 == '1') */
+ dasd_3990_erp_compound_retry(erp, sense);
+
+ } else {
+
+ /* single program action codes (byte25 bit 0 == '0') */
+ switch (sense[25]) {
+
+ case 0x00: /* success - use default ERP for retries */
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "ERP called for successful request"
+ " - just retry");
+ break;
+
+ case 0x01: /* fatal error */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "Fatal error should have been "
+ "handled within the interrupt handler");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ break;
+
+ case 0x02: /* intervention required */
+ case 0x03: /* intervention required during dual copy */
+ erp = dasd_3990_erp_int_req(erp);
+ break;
+
+ case 0x0F: /* length mismatch during update write command */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "update write command error - should not "
+ "happen;\n"
+ "Please send this message together with "
+ "the above sense data to linux390@de."
+ "ibm.com");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ break;
+
+ case 0x10: /* logging required for other channel program */
+ erp = dasd_3990_erp_action_10_32(erp, sense);
+ break;
+
+ case 0x15: /* next track outside defined extend */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "next track outside defined extend - "
+ "should not happen;\n"
+ "Please send this message together with "
+ "the above sense data to linux390@de."
+ "ibm.com");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ break;
+
+ case 0x1B: /* unexpected condition during write */
+
+ erp = dasd_3990_erp_action_1B_32(erp, sense);
+ break;
+
+ case 0x1C: /* invalid data */
+ DEV_MESSAGE(KERN_EMERG, device, "%s",
+ "Data recovered during retry with PCI "
+ "fetch mode active");
+
+ /* not possible to handle this situation in Linux */
+ panic
+ ("Invalid data - No way to inform application "
+ "about the possibly incorrect data");
+ break;
+
+ case 0x1D: /* state-change pending */
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "A State change pending condition exists "
+ "for the subsystem or device");
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+ break;
+
+ case 0x1E: /* busy */
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "Busy condition exists "
+ "for the subsystem or device");
+ erp = dasd_3990_erp_action_4(erp, sense);
+ break;
+
+ default: /* all others errors - default erp */
+ break;
+ }
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_inspect_32 */
+
+/*
+ *****************************************************************************
+ * main ERP control functions (24 and 32 byte sense)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_INSPECT
+ *
+ * DESCRIPTION
+ * Does a detailed inspection for sense data by calling either
+ * the 24-byte or the 32-byte inspection routine.
+ *
+ * PARAMETER
+ * erp pointer to the currently created default ERP
+ * RETURN VALUES
+ * erp_new contents were possibly modified
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
+{
+	/* The sense bytes live in the 'refers' record of the newly
+	 * set up ERP, i.e. in the request that actually failed. */
+	char *sense = erp->refers->irb.ecw;
+
+	/* Byte 27 bit 0 set means 24 byte sense data,
+	 * otherwise the device presented 32 byte sense data. */
+	if (sense[27] & DASD_SENSE_BIT_0)
+		return dasd_3990_erp_inspect_24(erp, sense);
+
+	return dasd_3990_erp_inspect_32(erp, sense);
+}
+
+/*
+ * DASD_3990_ERP_ADD_ERP
+ *
+ * DESCRIPTION
+ * This function adds an additional request block (ERP) to the head of
+ * the given cqr (or erp).
+ * This erp is initialized as a default erp (retry TIC)
+ *
+ * PARAMETER
+ * cqr head of the current ERP-chain (or single cqr if
+ * first error)
+ * RETURN VALUES
+ * erp pointer to new ERP-chain head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
+{
+
+ struct dasd_device *device = cqr->device;
+ struct ccw1 *ccw;
+
+ /* allocate additional request block */
+ struct dasd_ccw_req *erp;
+
+ erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, cqr->device);
+ if (IS_ERR(erp)) {
+ /* allocation failed: fail the request only when it has no
+ * retries left, otherwise re-arm the device timer and let the
+ * caller retry the allocation later */
+ if (cqr->retries <= 0) {
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "Unable to allocate ERP request");
+ cqr->status = DASD_CQR_FAILED;
+ cqr->stopclk = get_clock ();
+ } else {
+ DEV_MESSAGE (KERN_ERR, device,
+ "Unable to allocate ERP request "
+ "(%i retries left)",
+ cqr->retries);
+ dasd_set_timer(device, (HZ << 3));
+ }
+ return cqr;
+ }
+
+ /* initialize request with default TIC to current ERP/CQR */
+ /* channel program: NOOP (command chained) followed by a TIC that
+ * branches back to the channel program of the failed request */
+ ccw = erp->cpaddr;
+ ccw->cmd_code = CCW_CMD_NOOP;
+ ccw->flags = CCW_FLAG_CC;
+ ccw++;
+ ccw->cmd_code = CCW_CMD_TIC;
+ ccw->cda = (long)(cqr->cpaddr);
+ erp->function = dasd_3990_erp_add_erp;
+ erp->refers = cqr; /* chain new ERP in front of the failed request */
+ erp->device = cqr->device;
+ erp->magic = cqr->magic;
+ erp->expires = 0;
+ erp->retries = 256;
+ erp->buildclk = get_clock();
+
+ erp->status = DASD_CQR_FILLED;
+
+ return erp;
+}
+
+/*
+ * DASD_3990_ERP_ADDITIONAL_ERP
+ *
+ * DESCRIPTION
+ * An additional ERP is needed to handle the current error.
+ * Add ERP to the head of the ERP-chain containing the ERP processing
+ * determined based on the sense data.
+ *
+ * PARAMETER
+ * cqr head of the current ERP-chain (or single cqr if
+ * first error)
+ *
+ * RETURN VALUES
+ * erp pointer to new ERP-chain head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
+{
+	struct dasd_ccw_req *erp;
+
+	/* add erp and initialize with default TIC */
+	erp = dasd_3990_erp_add_erp(cqr);
+
+	/* inspect sense to determine a specific ERP, but only if the
+	 * default ERP was actually allocated (add_erp returns the
+	 * original cqr when allocation fails) */
+	if (erp != cqr)
+		erp = dasd_3990_erp_inspect(erp);
+
+	return erp;
+
+} /* end dasd_3990_erp_additional_erp */
+
+/*
+ * DASD_3990_ERP_ERROR_MATCH
+ *
+ * DESCRIPTION
+ * Check if the device status of the given cqr is the same.
+ * This means that the failed CCW and the relevant sense data
+ * must match.
+ * I don't distinguish between 24 and 32 byte sense because in case of
+ * 24 byte sense byte 25 and 27 is set as well.
+ *
+ * PARAMETER
+ * cqr1 first cqr, which will be compared with the
+ * cqr2 second cqr.
+ *
+ * RETURN VALUES
+ * match 'boolean' for match found
+ * returns 1 if match found, otherwise 0.
+ */
+static int
+dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
+{
+
+ /* check failed CCW */
+ /* NOTE(review): the CCW address comparison is deliberately left
+ * without effect - the 'return 0' is commented out, so a differing
+ * CPA does NOT prevent a match. Confirm this is intentional. */
+ if (cqr1->irb.scsw.cpa != cqr2->irb.scsw.cpa) {
+ // return 0; /* CCW doesn't match */
+ }
+
+ /* check sense data; byte 0-2,25,27 */
+ if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) &&
+ (cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) &&
+ (cqr1->irb.ecw[25] == cqr2->irb.ecw[25]))) {
+
+ return 0; /* sense doesn't match */
+ }
+
+ return 1; /* match */
+
+} /* end dasd_3990_erp_error_match */
+
+/*
+ * DASD_3990_ERP_IN_ERP
+ *
+ * DESCRIPTION
+ * check if the current error already happened before.
+ * quick exit if current cqr is not an ERP (cqr->refers=NULL)
+ *
+ * PARAMETER
+ * cqr failed cqr (either original cqr or already an erp)
+ *
+ * RETURN VALUES
+ * erp erp-pointer to the already defined error
+ * recovery procedure OR
+ * NULL if a 'new' error occurred.
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
+{
+
+ struct dasd_ccw_req *erp_head = cqr, /* save erp chain head */
+ *erp_match = NULL; /* candidate matching erp */
+ int match = 0; /* 'boolean' for matching error found */
+
+ if (cqr->refers == NULL) { /* return if not in erp */
+ return NULL;
+ }
+
+ /* check the erp/cqr chain for current error */
+ /* walk down the refers chain, always comparing the chain head's
+ * error against each older request's error */
+ do {
+ match = dasd_3990_erp_error_match(erp_head, cqr->refers);
+ erp_match = cqr; /* save possible matching erp */
+ cqr = cqr->refers; /* check next erp/cqr in queue */
+
+ } while ((cqr->refers != NULL) && (!match));
+
+ if (!match) {
+ return NULL; /* no match was found */
+ }
+
+ return erp_match; /* return address of matching erp */
+
+} /* END dasd_3990_erp_in_erp */
+
+/*
+ * DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
+ *
+ * DESCRIPTION
+ * No retry is left for the current ERP. Check what has to be done
+ * with the ERP.
+ * - do further defined ERP action or
+ * - wait for interrupt or
+ * - exit with permanent error
+ *
+ * PARAMETER
+ * erp ERP which is in progress with no retry left
+ *
+ * RETURN VALUES
+ * erp modified/additional ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
+{
+
+ struct dasd_device *device = erp->device;
+ char *sense = erp->irb.ecw;
+
+ /* check for 24 byte sense ERP */
+ if ((erp->function == dasd_3990_erp_bus_out) ||
+ (erp->function == dasd_3990_erp_action_1) ||
+ (erp->function == dasd_3990_erp_action_4)) {
+
+ erp = dasd_3990_erp_action_1(erp);
+
+ } else if (erp->function == dasd_3990_erp_action_5) {
+
+ /* retries have not been successful */
+ /* prepare erp for retry on different channel path */
+ erp = dasd_3990_erp_action_1(erp);
+
+ if (!(sense[2] & DASD_SENSE_BIT_0)) {
+
+ /* issue a Diagnostic Control command with an
+ * Inhibit Write subcommand */
+
+ /* the modifier byte selects which unit the DCTL
+ * is directed at */
+ switch (sense[25]) {
+ case 0x17:
+ case 0x57:{ /* controller */
+ erp = dasd_3990_erp_DCTL(erp, 0x20);
+ break;
+ }
+ case 0x18:
+ case 0x58:{ /* channel path */
+ erp = dasd_3990_erp_DCTL(erp, 0x40);
+ break;
+ }
+ case 0x19:
+ case 0x59:{ /* storage director */
+ erp = dasd_3990_erp_DCTL(erp, 0x80);
+ break;
+ }
+ default:
+ /* unknown modifier - no DCTL issued,
+ * erp stays unchanged */
+ DEV_MESSAGE(KERN_DEBUG, device,
+ "invalid subcommand modifier 0x%x "
+ "for Diagnostic Control Command",
+ sense[25]);
+ }
+ }
+
+ /* check for 32 byte sense ERP */
+ } else if ((erp->function == dasd_3990_erp_compound_retry) ||
+ (erp->function == dasd_3990_erp_compound_path) ||
+ (erp->function == dasd_3990_erp_compound_code) ||
+ (erp->function == dasd_3990_erp_compound_config)) {
+
+ erp = dasd_3990_erp_compound(erp, sense);
+
+ } else {
+ /* No retry left and no additional special handling */
+ /*necessary */
+ DEV_MESSAGE(KERN_ERR, device,
+ "no retries left for erp %p - "
+ "set status to FAILED", erp);
+
+ erp->status = DASD_CQR_FAILED;
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_further_erp */
+
+/*
+ * DASD_3990_ERP_HANDLE_MATCH_ERP
+ *
+ * DESCRIPTION
+ * An error occurred again and an ERP has been detected which is already
+ * used to handle this error (e.g. retries).
+ * All prior ERP's are assumed to be successful and therefore removed
+ * from queue.
+ * If retry counter of matching erp is already 0, it is checked if further
+ * action is needed (besides retry) or if the ERP has failed.
+ *
+ * PARAMETER
+ * erp_head first ERP in ERP-chain
+ * erp ERP that handles the actual error.
+ * (matching erp)
+ *
+ * RETURN VALUES
+ * erp modified/additional ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+ struct dasd_ccw_req *erp)
+{
+
+ struct dasd_device *device = erp_head->device;
+ struct dasd_ccw_req *erp_done = erp_head; /* finished req */
+ struct dasd_ccw_req *erp_free = NULL; /* req to be freed */
+
+ /* loop over successful ERPs and remove them from chanq */
+ /* every request between erp_head and the matching erp is
+ * considered done; dequeue and free each one */
+ while (erp_done != erp) {
+
+ if (erp_done == NULL) /* end of chain reached */
+ panic(PRINTK_HEADER "Programming error in ERP! The "
+ "original request was lost\n");
+
+ /* remove the request from the device queue */
+ list_del(&erp_done->list);
+
+ erp_free = erp_done;
+ erp_done = erp_done->refers;
+
+ /* free the finished erp request */
+ dasd_free_erp_request(erp_free, erp_free->device);
+
+ } /* end while */
+
+ if (erp->retries > 0) {
+
+ char *sense = erp->refers->irb.ecw;
+
+ /* check for special retries */
+ /* some ERP functions need to re-examine the sense data
+ * before the retry is started */
+ if (erp->function == dasd_3990_erp_action_4) {
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+
+ } else if (erp->function == dasd_3990_erp_action_1B_32) {
+
+ erp = dasd_3990_update_1B(erp, sense);
+
+ } else if (erp->function == dasd_3990_erp_int_req) {
+
+ erp = dasd_3990_erp_int_req(erp);
+
+ } else {
+ /* simple retry */
+ DEV_MESSAGE(KERN_DEBUG, device,
+ "%i retries left for erp %p",
+ erp->retries, erp);
+
+ /* handle the request again... */
+ erp->status = DASD_CQR_QUEUED;
+ }
+
+ } else {
+ /* no retry left - check for further necessary action */
+ /* if no further actions, handle rest as permanent error */
+ erp = dasd_3990_erp_further_erp(erp);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_handle_match_erp */
+
+/*
+ * DASD_3990_ERP_ACTION
+ *
+ * DESCRIPTION
+ * control routine for 3990 erp actions.
+ * Has to be called with the queue lock (namely the s390_irq_lock) acquired.
+ *
+ * PARAMETER
+ * cqr failed cqr (either original cqr or already an erp)
+ *
+ * RETURN VALUES
+ * erp erp-pointer to the head of the ERP action chain.
+ * This means:
+ * - either a ptr to an additional ERP cqr or
+ * - the original given cqr (whose status might
+ * be modified)
+ */
+struct dasd_ccw_req *
+dasd_3990_erp_action(struct dasd_ccw_req * cqr)
+{
+
+ struct dasd_ccw_req *erp = NULL;
+ struct dasd_device *device = cqr->device;
+ __u32 cpa = cqr->irb.scsw.cpa;
+
+#ifdef ERP_DEBUG
+ /* print current erp_chain */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "ERP chain at BEGINNING of ERP-ACTION");
+ {
+ struct dasd_ccw_req *temp_erp = NULL;
+
+ for (temp_erp = cqr;
+ temp_erp != NULL; temp_erp = temp_erp->refers) {
+
+ DEV_MESSAGE(KERN_ERR, device,
+ " erp %p (%02x) refers to %p",
+ temp_erp, temp_erp->status,
+ temp_erp->refers);
+ }
+ }
+#endif /* ERP_DEBUG */
+
+ /* double-check if current erp/cqr was successful */
+ if ((cqr->irb.scsw.cstat == 0x00) &&
+ (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
+
+ DEV_MESSAGE(KERN_DEBUG, device,
+ "ERP called for successful request %p"
+ " - NO ERP necessary", cqr);
+
+ cqr->status = DASD_CQR_DONE;
+
+ return cqr;
+ }
+ /* check if sense data are available */
+ /* NOTE(review): if irb.ecw is an embedded array (not a pointer)
+ * this condition can never be true - confirm against struct irb */
+ if (!cqr->irb.ecw) {
+ DEV_MESSAGE(KERN_DEBUG, device,
+ "ERP called witout sense data avail ..."
+ "request %p - NO ERP possible", cqr);
+
+ cqr->status = DASD_CQR_FAILED;
+
+ return cqr;
+
+ }
+
+ /* check if error happened before */
+ erp = dasd_3990_erp_in_erp(cqr);
+
+ if (erp == NULL) {
+ /* no matching erp found - set up erp */
+ erp = dasd_3990_erp_additional_erp(cqr);
+ } else {
+ /* matching erp found - set all leading erp's to DONE */
+ erp = dasd_3990_erp_handle_match_erp(cqr, erp);
+ }
+
+#ifdef ERP_DEBUG
+ /* print current erp_chain */
+ DEV_MESSAGE(KERN_ERR, device, "%s", "ERP chain at END of ERP-ACTION");
+ {
+ struct dasd_ccw_req *temp_erp = NULL;
+ for (temp_erp = erp;
+ temp_erp != NULL; temp_erp = temp_erp->refers) {
+
+ DEV_MESSAGE(KERN_ERR, device,
+ " erp %p (%02x) refers to %p",
+ temp_erp, temp_erp->status,
+ temp_erp->refers);
+ }
+ }
+#endif /* ERP_DEBUG */
+
+ /* log the failed CCW of the original request on permanent error */
+ if (erp->status == DASD_CQR_FAILED)
+ dasd_log_ccw(erp, 1, cpa);
+
+ /* enqueue added ERP request */
+ if (erp->status == DASD_CQR_FILLED) {
+ erp->status = DASD_CQR_QUEUED;
+ list_add(&erp->list, &device->ccw_queue);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_action */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: 1
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_9336_erp.c b/drivers/s390/block/dasd_9336_erp.c
new file mode 100644
index 000000000000..01e87170a3a2
--- /dev/null
+++ b/drivers/s390/block/dasd_9336_erp.c
@@ -0,0 +1,61 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_9336_erp.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+ *
+ * $Revision: 1.8 $
+ */
+
+#define PRINTK_HEADER "dasd_erp(9336)"
+
+#include "dasd_int.h"
+
+
+/*
+ * DASD_9336_ERP_EXAMINE
+ *
+ * DESCRIPTION
+ * Checks only for fatal/no/recover error.
+ * A detailed examination of the sense data is done later outside
+ * the interrupt handler.
+ *
+ * The logic is based on the 'IBM 3880 Storage Control Reference' manual
+ * 'Chapter 7. 9336 Sense Data'.
+ *
+ * RETURN VALUES
+ * dasd_era_none no error
+ * dasd_era_fatal for all fatal (unrecoverable errors)
+ * dasd_era_recover for all others.
+ */
+dasd_era_t
+dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
+{
+	/* channel end + device end with clean subchannel status
+	 * means the request completed without error */
+	if (irb->scsw.cstat != 0x00 ||
+	    irb->scsw.dstat != (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		return dasd_era_recover;
+
+	return dasd_era_none;
+
+} /* END dasd_9336_erp_examine */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: 1
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_9343_erp.c b/drivers/s390/block/dasd_9343_erp.c
new file mode 100644
index 000000000000..2a23b74faf3f
--- /dev/null
+++ b/drivers/s390/block/dasd_9343_erp.c
@@ -0,0 +1,22 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_9343_erp.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+ *
+ * $Revision: 1.13 $
+ */
+
+#define PRINTK_HEADER "dasd_erp(9343)"
+
+#include "dasd_int.h"
+
+dasd_era_t
+dasd_9343_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
+{
+	/* clean subchannel status with channel end + device end
+	 * -> no error, everything else is treated as recoverable */
+	return (irb->scsw.cstat == 0x00 &&
+		irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) ?
+		dasd_era_none : dasd_era_recover;
+}
diff --git a/drivers/s390/block/dasd_cmb.c b/drivers/s390/block/dasd_cmb.c
new file mode 100644
index 000000000000..ed1ab474c0c6
--- /dev/null
+++ b/drivers/s390/block/dasd_cmb.c
@@ -0,0 +1,145 @@
+/*
+ * linux/drivers/s390/block/dasd_cmb.c ($Revision: 1.6 $)
+ *
+ * Linux on zSeries Channel Measurement Facility support
+ * (dasd device driver interface)
+ *
+ * Copyright 2000,2003 IBM Corporation
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/ioctl32.h>
+#include <linux/module.h>
+#include <asm/ccwdev.h>
+#include <asm/cmb.h>
+
+#include "dasd_int.h"
+
+static int
+dasd_ioctl_cmf_enable(struct block_device *bdev, int no, long args)
+{
+	/* enable channel measurement on the dasd behind this block device */
+	struct dasd_device *device = bdev->bd_disk->private_data;
+
+	return device ? enable_cmf(device->cdev) : -EINVAL;
+}
+
+static int
+dasd_ioctl_cmf_disable(struct block_device *bdev, int no, long args)
+{
+	/* disable channel measurement on the dasd behind this block device */
+	struct dasd_device *device = bdev->bd_disk->private_data;
+
+	return device ? disable_cmf(device->cdev) : -EINVAL;
+}
+
+static int
+dasd_ioctl_readall_cmb(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+ struct cmbdata __user *udata;
+ struct cmbdata data;
+ size_t size;
+ int ret;
+
+ device = bdev->bd_disk->private_data;
+ if (!device)
+ return -EINVAL;
+ udata = (void __user *) args;
+ /* user buffer size is encoded in the ioctl number */
+ size = _IOC_SIZE(no);
+
+ if (!access_ok(VERIFY_WRITE, udata, size))
+ return -EFAULT;
+ ret = cmf_readall(device->cdev, &data);
+ if (ret)
+ return ret;
+ /* copy at most the user-supplied size, never more than cmbdata */
+ if (copy_to_user(udata, &data, min(size, sizeof(*udata))))
+ return -EFAULT;
+ return 0;
+}
+
+/* module initialization below here. dasd already provides a mechanism
+ * to dynamically register ioctl functions, so we simply use this. */
+static inline int
+ioctl_reg(unsigned int no, dasd_ioctl_fn_t handler)
+{
+ int ret;
+ /* register the dasd ioctl handler first ... */
+ ret = dasd_ioctl_no_register(THIS_MODULE, no, handler);
+#ifdef CONFIG_COMPAT
+ if (ret)
+ return ret;
+
+ /* ... then the 32 bit compat conversion; roll back the dasd
+ * registration if the compat registration fails */
+ ret = register_ioctl32_conversion(no, NULL);
+ if (ret)
+ dasd_ioctl_no_unregister(THIS_MODULE, no, handler);
+#endif
+ return ret;
+}
+
+static inline void
+ioctl_unreg(unsigned int no, dasd_ioctl_fn_t handler)
+{
+	/* drop the dasd ioctl and, when compat support is built in,
+	 * its 32 bit conversion entry */
+	dasd_ioctl_no_unregister(THIS_MODULE, no, handler);
+#ifdef CONFIG_COMPAT
+	unregister_ioctl32_conversion(no);
+#endif
+}
+
+static void
+dasd_cmf_exit(void)
+{
+ /* unregister all three cmf ioctls; also used as error cleanup
+ * by dasd_cmf_init */
+ ioctl_unreg(BIODASDCMFENABLE, dasd_ioctl_cmf_enable);
+ ioctl_unreg(BIODASDCMFDISABLE, dasd_ioctl_cmf_disable);
+ ioctl_unreg(BIODASDREADALLCMB, dasd_ioctl_readall_cmb);
+}
+
+static int __init
+dasd_cmf_init(void)
+{
+ int ret;
+ /* register the three cmf ioctls; on any failure undo via
+ * dasd_cmf_exit */
+ ret = ioctl_reg (BIODASDCMFENABLE, dasd_ioctl_cmf_enable);
+ if (ret)
+ goto err;
+ ret = ioctl_reg (BIODASDCMFDISABLE, dasd_ioctl_cmf_disable);
+ if (ret)
+ goto err;
+ ret = ioctl_reg (BIODASDREADALLCMB, dasd_ioctl_readall_cmb);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ /* NOTE(review): this also unregisters ioctls that were never
+ * registered on the failing path - presumably the unregister
+ * functions tolerate that; confirm */
+ dasd_cmf_exit();
+
+ return ret;
+}
+
+/* module entry/exit points and metadata */
+module_init(dasd_cmf_init);
+module_exit(dasd_cmf_exit);
+
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("channel measurement facility interface for dasd\n"
+ "Copyright 2003 IBM Corporation\n");
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
new file mode 100644
index 000000000000..ad1841a96c87
--- /dev/null
+++ b/drivers/s390/block/dasd_devmap.c
@@ -0,0 +1,772 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_devmap.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ * Device mapping and dasd= parameter parsing functions. All devmap
+ * functions may not be called from interrupt context. In particular
+ * dasd_get_device is a no-no from interrupt context.
+ *
+ * $Revision: 1.37 $
+ */
+
+#include <linux/config.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_devmap:"
+
+#include "dasd_int.h"
+
+kmem_cache_t *dasd_page_cache;
+EXPORT_SYMBOL(dasd_page_cache);
+
+/*
+ * dasd_devmap_t is used to store the features and the relation
+ * between device number and device index. To find a dasd_devmap_t
+ * that corresponds to a device number of a device index each
+ * dasd_devmap_t is added to two linked lists, one to search by
+ * the device number and one to search by the device index. As
+ * soon as big minor numbers are available the device index list
+ * can be removed since the device number will then be identical
+ * to the device index.
+ */
+struct dasd_devmap {
+ struct list_head list; /* chain link in dasd_hashlists[] */
+ char bus_id[BUS_ID_SIZE]; /* ccw bus id this entry maps */
+ unsigned int devindex; /* assigned from dasd_max_devindex in add order */
+ unsigned short features; /* DASD_FEATURE_* flags from dasd= parameter */
+ struct dasd_device *device; /* attached device, NULL if none */
+};
+
+/*
+ * Parameter parsing functions for dasd= parameter. The syntax is:
+ * <devno> : (0x)?[0-9a-fA-F]+
+ * <busid> : [0-0a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
+ * <feature> : ro
+ * <feature_list> : \(<feature>(:<feature>)*\)
+ * <devno-range> : <devno>(-<devno>)?<feature_list>?
+ * <busid-range> : <busid>(-<busid>)?<feature_list>?
+ * <devices> : <devno-range>|<busid-range>
+ * <dasd_module> : dasd_diag_mod|dasd_eckd_mod|dasd_fba_mod
+ *
+ * <dasd> : autodetect|probeonly|<devices>(,<devices>)*
+ */
+
+int dasd_probeonly = 0; /* is true, when probeonly mode is active */
+int dasd_autodetect = 0; /* is true, when autodetection is active */
+
+/*
+ * char *dasd[] is intended to hold the ranges supplied by the dasd= statement
+ * it is named 'dasd' to directly be filled by insmod with the comma separated
+ * strings when running as a module.
+ */
+static char *dasd[256];
+/*
+ * Single spinlock to protect devmap structures and lists.
+ */
+static DEFINE_SPINLOCK(dasd_devmap_lock);
+
+/*
+ * Hash lists for devmap structures.
+ */
+static struct list_head dasd_hashlists[256];
+int dasd_max_devindex;
+
+static struct dasd_devmap *dasd_add_busid(char *, int);
+
+static inline int
+dasd_hash_busid(char *bus_id)
+{
+	/* simple byte-sum hash over the bus id, folded to one byte */
+	int sum = 0;
+	int idx;
+
+	for (idx = 0; idx < BUS_ID_SIZE && bus_id[idx]; idx++)
+		sum += bus_id[idx];
+	return sum & 0xff;
+}
+
+#ifndef MODULE
+/*
+ * The parameter parsing functions for builtin-drivers are called
+ * before kmalloc works. Store the pointers to the parameters strings
+ * into dasd[] for later processing.
+ */
+static int __init
+dasd_call_setup(char *str)
+{
+ /* collect up to 256 'dasd=' strings for later parsing by
+ * dasd_parse; called before kmalloc works, so only the
+ * pointers are stored */
+ static int count = 0;
+
+ if (count < 256)
+ dasd[count++] = str;
+ return 1;
+}
+
+__setup ("dasd=", dasd_call_setup);
+#endif /* #ifndef MODULE */
+
+/*
+ * Read a device busid/devno from a string.
+ */
+static inline int
+dasd_busid(char **str, int *id0, int *id1, int *devno)
+{
+ int val, old_style;
+
+ /* check for leading '0x' */
+ old_style = 0;
+ if ((*str)[0] == '0' && (*str)[1] == 'x') {
+ *str += 2;
+ old_style = 1;
+ }
+ if (!isxdigit((*str)[0])) /* We require at least one hex digit */
+ return -EINVAL;
+ val = simple_strtoul(*str, str, 16);
+ /* old style devno: '0x' prefix, or no '.' follows the number */
+ if (old_style || (*str)[0] != '.') {
+ *id0 = *id1 = 0;
+ if (val < 0 || val > 0xffff)
+ return -EINVAL;
+ *devno = val;
+ return 0;
+ }
+ /* New style x.y.z busid */
+ if (val < 0 || val > 0xff)
+ return -EINVAL;
+ *id0 = val;
+ (*str)++; /* skip the first '.' */
+ if (!isxdigit((*str)[0])) /* We require at least one hex digit */
+ return -EINVAL;
+ val = simple_strtoul(*str, str, 16);
+ if (val < 0 || val > 0xff || (*str)++[0] != '.')
+ return -EINVAL;
+ *id1 = val;
+ if (!isxdigit((*str)[0])) /* We require at least one hex digit */
+ return -EINVAL;
+ val = simple_strtoul(*str, str, 16);
+ if (val < 0 || val > 0xffff)
+ return -EINVAL;
+ *devno = val;
+ return 0;
+}
+
+/*
+ * Read colon separated list of dasd features. Currently there is
+ * only one: "ro" for read-only devices. The default feature set
+ * is empty (value 0).
+ */
+static inline int
+dasd_feature_list(char *str, char **endp)
+{
+ int features, len, rc;
+
+ rc = 0;
+ /* no '(' -> no feature list, return the default feature set */
+ if (*str != '(') {
+ *endp = str;
+ return DASD_FEATURE_DEFAULT;
+ }
+ str++;
+ features = 0;
+
+ while (1) {
+ /* measure the next token up to ':' or ')' */
+ for (len = 0;
+ str[len] && str[len] != ':' && str[len] != ')'; len++);
+ if (len == 2 && !strncmp(str, "ro", 2))
+ features |= DASD_FEATURE_READONLY;
+ else if (len == 4 && !strncmp(str, "diag", 4))
+ features |= DASD_FEATURE_USEDIAG;
+ else {
+ /* unknown token: warn, keep parsing, fail at end */
+ MESSAGE(KERN_WARNING,
+ "unsupported feature: %*s, "
+ "ignoring setting", len, str);
+ rc = -EINVAL;
+ }
+ str += len;
+ if (*str != ':')
+ break;
+ str++;
+ }
+ if (*str != ')') {
+ MESSAGE(KERN_WARNING, "%s",
+ "missing ')' in dasd parameter string\n");
+ rc = -EINVAL;
+ } else
+ str++;
+ *endp = str;
+ if (rc != 0)
+ return rc;
+ return features;
+}
+
+/*
+ * Try to match the first element on the comma separated parse string
+ * with one of the known keywords. If a keyword is found, take the appropriate
+ * action and return a pointer to the residual string. If the first element
+ * could not be matched to any keyword then return an error code.
+ */
+static char *
+dasd_parse_keyword( char *parsestring ) {
+
+ char *nextcomma, *residual_str;
+ int length;
+
+ /* isolate the first comma separated element */
+ nextcomma = strchr(parsestring,',');
+ if (nextcomma) {
+ length = nextcomma - parsestring;
+ residual_str = nextcomma + 1;
+ } else {
+ length = strlen(parsestring);
+ residual_str = parsestring + length;
+ }
+ if (strncmp ("autodetect", parsestring, length) == 0) {
+ dasd_autodetect = 1;
+ MESSAGE (KERN_INFO, "%s",
+ "turning to autodetection mode");
+ return residual_str;
+ }
+ if (strncmp ("probeonly", parsestring, length) == 0) {
+ dasd_probeonly = 1;
+ MESSAGE(KERN_INFO, "%s",
+ "turning to probeonly mode");
+ return residual_str;
+ }
+ if (strncmp ("fixedbuffers", parsestring, length) == 0) {
+ /* create the slab cache only once */
+ if (dasd_page_cache)
+ return residual_str;
+ dasd_page_cache =
+ kmem_cache_create("dasd_page_cache", PAGE_SIZE, 0,
+ SLAB_CACHE_DMA, NULL, NULL );
+ if (!dasd_page_cache)
+ MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
+ "fixed buffer mode disabled.");
+ else
+ MESSAGE (KERN_INFO, "%s",
+ "turning on fixed buffer mode");
+ return residual_str;
+ }
+ /* no keyword matched - caller will try to parse a device range */
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Try to interpret the first element on the comma separated parse string
+ * as a device number or a range of devices. If the interpretation is
+ * successful, create the matching dasd_devmap entries and return a pointer
+ * to the residual string.
+ * If interpretation fails or in case of an error, return an error code.
+ */
+static char *
+dasd_parse_range( char *parsestring ) {
+
+ struct dasd_devmap *devmap;
+ int from, from_id0, from_id1;
+ int to, to_id0, to_id1;
+ int features, rc;
+ char bus_id[BUS_ID_SIZE+1], *str;
+
+ str = parsestring;
+ rc = dasd_busid(&str, &from_id0, &from_id1, &from);
+ if (rc == 0) {
+ /* single busid: the range end equals the range start */
+ to = from;
+ to_id0 = from_id0;
+ to_id1 = from_id1;
+ if (*str == '-') {
+ str++;
+ rc = dasd_busid(&str, &to_id0, &to_id1, &to);
+ }
+ }
+ /* both ends of a range must share css/chpid ids and be ordered */
+ if (rc == 0 &&
+ (from_id0 != to_id0 || from_id1 != to_id1 || from > to))
+ rc = -EINVAL;
+ if (rc) {
+ MESSAGE(KERN_ERR, "Invalid device range %s", parsestring);
+ return ERR_PTR(rc);
+ }
+ features = dasd_feature_list(str, &str);
+ if (features < 0)
+ return ERR_PTR(-EINVAL);
+ /* create a devmap for every device number in the range */
+ while (from <= to) {
+ sprintf(bus_id, "%01x.%01x.%04x",
+ from_id0, from_id1, from++);
+ devmap = dasd_add_busid(bus_id, features);
+ if (IS_ERR(devmap))
+ return (char *)devmap;
+ }
+ if (*str == ',')
+ return str + 1;
+ if (*str == '\0')
+ return str;
+ MESSAGE(KERN_WARNING,
+ "junk at end of dasd parameter string: %s\n", str);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline char *
+dasd_parse_next_element( char *parsestring ) {
+	char *residual_str;
+
+	/* try the known keywords first; on failure fall back to
+	 * parsing a device number range */
+	residual_str = dasd_parse_keyword(parsestring);
+	if (IS_ERR(residual_str))
+		residual_str = dasd_parse_range(parsestring);
+	return residual_str;
+}
+
+/*
+ * Parse parameters stored in dasd[]
+ * The 'dasd=...' parameter allows to specify a comma separated list of
+ * keywords and device ranges. When the dasd driver is built into the kernel,
+ * the complete list will be stored as one element of the dasd[] array.
+ * When the dasd driver is built as a module, then the list is broken into
+ * its elements and each dasd[] entry contains one element.
+ */
+int
+dasd_parse(void)
+{
+ int rc, i;
+ char *parsestring;
+
+ rc = 0;
+ /* dasd[] holds at most 256 parameter strings */
+ for (i = 0; i < 256; i++) {
+ if (dasd[i] == NULL)
+ break;
+ parsestring = dasd[i];
+ /* loop over the comma separated list in the parsestring */
+ while (*parsestring) {
+ parsestring = dasd_parse_next_element(parsestring);
+ if(IS_ERR(parsestring)) {
+ rc = PTR_ERR(parsestring);
+ break;
+ }
+ }
+ /* stop at the first invalid element */
+ if (rc) {
+ DBF_EVENT(DBF_ALERT, "%s", "invalid range found");
+ break;
+ }
+ }
+ return rc;
+}
+
+/*
+ * Add a devmap for the device specified by busid. It is possible that
+ * the devmap already exists (dasd= parameter). The order of the devices
+ * added through this function will define the kdevs for the individual
+ * devices.
+ */
+static struct dasd_devmap *
+dasd_add_busid(char *bus_id, int features)
+{
+	struct dasd_devmap *devmap, *new, *tmp;
+	int hash;
+
+	/* Allocate up front, outside the spinlock; freed below if the
+	 * bus_id turns out to exist already. No cast needed on kmalloc
+	 * in C. */
+	new = kmalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+	spin_lock(&dasd_devmap_lock);
+	devmap = NULL;
+	hash = dasd_hash_busid(bus_id);
+	list_for_each_entry(tmp, &dasd_hashlists[hash], list)
+		if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
+			devmap = tmp;
+			break;
+		}
+	if (!devmap) {
+		/* This bus_id is new. */
+		new->devindex = dasd_max_devindex++;
+		strncpy(new->bus_id, bus_id, BUS_ID_SIZE);
+		new->features = features;
+		new->device = NULL;
+		list_add(&new->list, &dasd_hashlists[hash]);
+		devmap = new;
+		new = NULL;
+	}
+	spin_unlock(&dasd_devmap_lock);
+	/* kfree(NULL) is a no-op, no guard needed */
+	kfree(new);
+	return devmap;
+}
+
+/*
+ * Find devmap for device with given bus_id.
+ */
+static struct dasd_devmap *
+dasd_find_busid(char *bus_id)
+{
+ struct dasd_devmap *devmap, *tmp;
+ int hash;
+
+ spin_lock(&dasd_devmap_lock);
+ /* default to -ENODEV if no entry with this bus_id exists */
+ devmap = ERR_PTR(-ENODEV);
+ hash = dasd_hash_busid(bus_id);
+ list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
+ if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
+ devmap = tmp;
+ break;
+ }
+ }
+ spin_unlock(&dasd_devmap_lock);
+ return devmap;
+}
+
+/*
+ * Check if busid has been added to the list of dasd ranges.
+ */
+int
+dasd_busid_known(char *bus_id)
+{
+	/* 0 if a devmap exists for bus_id, -ENOENT otherwise */
+	if (IS_ERR(dasd_find_busid(bus_id)))
+		return -ENOENT;
+	return 0;
+}
+
+/*
+ * Forget all about the device numbers added so far.
+ * This may only be called at module unload or system shutdown.
+ */
+static void
+dasd_forget_ranges(void)
+{
+	struct dasd_devmap *devmap, *n;
+	int i;
+
+	spin_lock(&dasd_devmap_lock);
+	/* walk every hash chain and free all devmap entries */
+	for (i = 0; i < 256; i++) {
+		list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) {
+			/* a devmap still bound to a device at unload
+			 * time is a programming error */
+			BUG_ON(devmap->device != NULL);
+			list_del(&devmap->list);
+			kfree(devmap);
+		}
+	}
+	spin_unlock(&dasd_devmap_lock);
+}
+
+/*
+ * Find the device struct by its device index.
+ */
+struct dasd_device *
+dasd_device_from_devindex(int devindex)
+{
+ struct dasd_devmap *devmap, *tmp;
+ struct dasd_device *device;
+ int i;
+
+ spin_lock(&dasd_devmap_lock);
+ devmap = NULL;
+ /* Scan all 256 hash chains; the outer loop ends once found. */
+ for (i = 0; (i < 256) && !devmap; i++)
+ list_for_each_entry(tmp, &dasd_hashlists[i], list)
+ if (tmp->devindex == devindex) {
+ /* Found the devmap for the device. */
+ devmap = tmp;
+ break;
+ }
+ if (devmap && devmap->device) {
+ device = devmap->device;
+ /* Take a reference while still under the lock. */
+ dasd_get_device(device);
+ } else
+ device = ERR_PTR(-ENODEV);
+ spin_unlock(&dasd_devmap_lock);
+ return device;
+}
+
+/*
+ * Return devmap for cdev. If no devmap exists yet, create one and
+ * connect it to the cdev.
+ */
+static struct dasd_devmap *
+dasd_devmap_from_cdev(struct ccw_device *cdev)
+{
+ struct dasd_devmap *devmap;
+
+ /* Fast path: bus_id already registered. */
+ devmap = dasd_find_busid(cdev->dev.bus_id);
+ if (!IS_ERR(devmap))
+ return devmap;
+ /* Otherwise register it now with the default feature set. */
+ return dasd_add_busid(cdev->dev.bus_id, DASD_FEATURE_DEFAULT);
+}
+
+/*
+ * Create a dasd device structure for cdev.
+ */
+struct dasd_device *
+dasd_create_device(struct ccw_device *cdev)
+{
+ struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ int rc;
+
+ /* Find (or create) the devmap and link it to the ccw device. */
+ devmap = dasd_devmap_from_cdev(cdev);
+ if (IS_ERR(devmap))
+ return (void *) devmap;
+ cdev->dev.driver_data = devmap;
+
+ device = dasd_alloc_device();
+ if (IS_ERR(device))
+ return device;
+ /* Two references: one held by the devmap, one for the caller. */
+ atomic_set(&device->ref_count, 2);
+
+ spin_lock(&dasd_devmap_lock);
+ if (!devmap->device) {
+ devmap->device = device;
+ device->devindex = devmap->devindex;
+ /* Propagate the persistent feature bits into device flags. */
+ if (devmap->features & DASD_FEATURE_READONLY)
+ set_bit(DASD_FLAG_RO, &device->flags);
+ else
+ clear_bit(DASD_FLAG_RO, &device->flags);
+ if (devmap->features & DASD_FEATURE_USEDIAG)
+ set_bit(DASD_FLAG_USE_DIAG, &device->flags);
+ else
+ clear_bit(DASD_FLAG_USE_DIAG, &device->flags);
+ /* Hold the ccw device while the dasd device exists. */
+ get_device(&cdev->dev);
+ device->cdev = cdev;
+ rc = 0;
+ } else
+ /* Someone else was faster. */
+ rc = -EBUSY;
+ spin_unlock(&dasd_devmap_lock);
+
+ if (rc) {
+ dasd_free_device(device);
+ return ERR_PTR(rc);
+ }
+ return device;
+}
+
+/*
+ * Wait queue for dasd_delete_device waits.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
+
+/*
+ * Remove a dasd device structure. The passed referenced
+ * is destroyed.
+ */
+void
+dasd_delete_device(struct dasd_device *device)
+{
+ struct ccw_device *cdev;
+ struct dasd_devmap *devmap;
+
+ /* First remove device pointer from devmap. */
+ devmap = dasd_find_busid(device->cdev->dev.bus_id);
+ if (IS_ERR(devmap))
+ BUG();
+ spin_lock(&dasd_devmap_lock);
+ if (devmap->device != device) {
+ /* Not the registered device; just drop the caller's ref. */
+ spin_unlock(&dasd_devmap_lock);
+ dasd_put_device(device);
+ return;
+ }
+ devmap->device = NULL;
+ spin_unlock(&dasd_devmap_lock);
+
+ /* Drop ref_count by 2, one for the devmap reference and
+ * one for the passed reference. */
+ atomic_sub(2, &device->ref_count);
+
+ /* Wait for reference counter to drop to zero.
+  * Woken from dasd_put_device_wake when the last ref is put. */
+ wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
+
+ /* Disconnect dasd_device structure from ccw_device structure. */
+ cdev = device->cdev;
+ device->cdev = NULL;
+
+ /* Disconnect dasd_devmap structure from ccw_device structure. */
+ cdev->dev.driver_data = NULL;
+
+ /* Put ccw_device structure. */
+ put_device(&cdev->dev);
+
+ /* Now the device structure can be freed. */
+ dasd_free_device(device);
+}
+
+/*
+ * Reference counter dropped to zero. Wake up waiter
+ * in dasd_delete_device.
+ */
+void
+dasd_put_device_wake(struct dasd_device *device)
+{
+ /* Last reference gone; unblock dasd_delete_device's wait_event. */
+ wake_up(&dasd_delete_wq);
+}
+
+/*
+ * Return dasd_device structure associated with cdev.
+ */
+struct dasd_device *
+dasd_device_from_cdev(struct ccw_device *cdev)
+{
+ struct dasd_devmap *devmap;
+ struct dasd_device *device;
+
+ spin_lock(&dasd_devmap_lock);
+ devmap = cdev->dev.driver_data;
+ if (devmap == NULL || devmap->device == NULL) {
+ spin_unlock(&dasd_devmap_lock);
+ return ERR_PTR(-ENODEV);
+ }
+ device = devmap->device;
+ /* Grab a reference before dropping the lock. */
+ dasd_get_device(device);
+ spin_unlock(&dasd_devmap_lock);
+ return device;
+}
+
+/*
+ * SECTION: files in sysfs
+ */
+
+/*
+ * readonly controls the readonly status of a dasd
+ */
+static ssize_t
+dasd_ro_show(struct device *dev, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int ro_flag;
+
+ devmap = dasd_find_busid(dev->bus_id);
+ if (!IS_ERR(devmap))
+ ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0;
+ else
+ ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0;
+ /* Literal format string (same "0\n"/"1\n" output as before);
+  * avoids passing a computed expression as the format. */
+ return snprintf(buf, PAGE_SIZE, "%d\n", ro_flag);
+}
+
+static ssize_t
+dasd_ro_store(struct device *dev, const char *buf, size_t count)
+{
+ struct dasd_devmap *devmap;
+ int ro_flag;
+
+ /* Creates the devmap if the device was never seen before. */
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+ /* Only the first character matters: '1' -> read-only. */
+ ro_flag = buf[0] == '1';
+ spin_lock(&dasd_devmap_lock);
+ if (ro_flag)
+ devmap->features |= DASD_FEATURE_READONLY;
+ else
+ devmap->features &= ~DASD_FEATURE_READONLY;
+ /* If the device is online, update its live state as well. */
+ if (devmap->device) {
+ if (devmap->device->gdp)
+ set_disk_ro(devmap->device->gdp, ro_flag);
+ if (ro_flag)
+ set_bit(DASD_FLAG_RO, &devmap->device->flags);
+ else
+ clear_bit(DASD_FLAG_RO, &devmap->device->flags);
+ }
+ spin_unlock(&dasd_devmap_lock);
+ return count;
+}
+
+static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
+
+/*
+ * use_diag controls whether the driver should use diag rather than ssch
+ * to talk to the device
+ */
+static ssize_t
+dasd_use_diag_show(struct device *dev, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int use_diag;
+
+ devmap = dasd_find_busid(dev->bus_id);
+ if (!IS_ERR(devmap))
+ use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
+ else
+ use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
+ /* Bounded write with a literal format; consistent with
+  * dasd_ro_show (was an unbounded sprintf). Output unchanged. */
+ return snprintf(buf, PAGE_SIZE, "%d\n", use_diag);
+}
+
+static ssize_t
+dasd_use_diag_store(struct device *dev, const char *buf, size_t count)
+{
+ struct dasd_devmap *devmap;
+ ssize_t rc;
+ int use_diag;
+
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+ /* Only the first character matters: '1' -> use diag. */
+ use_diag = buf[0] == '1';
+ spin_lock(&dasd_devmap_lock);
+ /* Changing diag discipline flag is only allowed in offline state. */
+ rc = count;
+ if (!devmap->device) {
+ if (use_diag)
+ devmap->features |= DASD_FEATURE_USEDIAG;
+ else
+ devmap->features &= ~DASD_FEATURE_USEDIAG;
+ } else
+ rc = -EPERM;
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+}
+
+static
+DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
+
+static ssize_t
+dasd_discipline_show(struct device *dev, char *buf)
+{
+ struct dasd_devmap *devmap;
+ char *name;
+
+ spin_lock(&dasd_devmap_lock);
+ /* "none" until a discipline is attached to an online device. */
+ name = "none";
+ devmap = dev->driver_data;
+ if (devmap != NULL && devmap->device != NULL &&
+     devmap->device->discipline != NULL)
+ name = devmap->device->discipline->name;
+ spin_unlock(&dasd_devmap_lock);
+ return snprintf(buf, PAGE_SIZE, "%s\n", name);
+}
+
+static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
+
+/* Per-device sysfs attributes, registered as one group in
+ * dasd_add_sysfs_files. */
+static struct attribute * dasd_attrs[] = {
+ &dev_attr_readonly.attr,
+ &dev_attr_discipline.attr,
+ &dev_attr_use_diag.attr,
+ NULL,
+};
+
+static struct attribute_group dasd_attr_group = {
+ .attrs = dasd_attrs,
+};
+
+/* Create the dasd attribute group under the ccw device's kobject. */
+int
+dasd_add_sysfs_files(struct ccw_device *cdev)
+{
+ return sysfs_create_group(&cdev->dev.kobj, &dasd_attr_group);
+}
+
+/* Counterpart of dasd_add_sysfs_files. */
+void
+dasd_remove_sysfs_files(struct ccw_device *cdev)
+{
+ sysfs_remove_group(&cdev->dev.kobj, &dasd_attr_group);
+}
+
+
+int
+dasd_devmap_init(void)
+{
+ int i;
+
+ /* Start with no device indices handed out and empty hash chains. */
+ dasd_max_devindex = 0;
+ for (i = 0; i < 256; i++)
+ INIT_LIST_HEAD(&dasd_hashlists[i]);
+ return 0;
+}
+
+void
+dasd_devmap_exit(void)
+{
+ /* Free all devmaps; only safe at module unload/shutdown. */
+ dasd_forget_ranges();
+}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
new file mode 100644
index 000000000000..127699830fa1
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.c
@@ -0,0 +1,541 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_diag.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Based on.......: linux/drivers/s390/block/mdisk.c
+ * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ * $Revision: 1.42 $
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h> /* HDIO_GETGEO */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/dasd.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/s390_ext.h>
+#include <asm/todclk.h>
+
+#include "dasd_int.h"
+#include "dasd_diag.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif /* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(diag):"
+
+MODULE_LICENSE("GPL");
+
+struct dasd_discipline dasd_diag_discipline;
+
+/* Per-device state for the DIAG discipline. */
+struct dasd_diag_private {
+ struct dasd_diag_characteristics rdc_data; /* from diag210 */
+ struct dasd_diag_rw_io iob; /* parameter block for RW_BIO */
+ struct dasd_diag_init_io iib; /* parameter block for INIT/TERM_BIO */
+ unsigned int pt_block; /* label block position (1=FBA, 2=ECKD) */
+};
+
+/* Per-request data: one dasd_diag_bio per block, appended in-line. */
+struct dasd_diag_req {
+ int block_count;
+ struct dasd_diag_bio bio[0];
+};
+
+/*
+ * Issue DIAG X'250' (VM block I/O) with the given parameter block.
+ * rc is preset to 3 so a faulting diag (caught by the __ex_table
+ * entry from label 0 to 1) yields 3; otherwise the condition code
+ * is extracted via ipm/srl and combined with the status in r1.
+ * NOTE(review): exact rc encoding (cc vs. r1 bits) follows the VM
+ * DIAG 250 interface — confirm against the CP programming services
+ * documentation.
+ */
+static __inline__ int
+dia250(void *iob, int cmd)
+{
+ int rc;
+
+ __asm__ __volatile__(" lhi %0,3\n"
+ " lr 0,%2\n"
+ " diag 0,%1,0x250\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ " or %0,1\n"
+ "1:\n"
+#ifndef CONFIG_ARCH_S390X
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,1b\n"
+ ".previous\n"
+#else
+ ".section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 0b,1b\n"
+ ".previous\n"
+#endif
+ : "=&d" (rc)
+ : "d" (cmd), "d" ((void *) __pa(iob))
+ : "0", "1", "cc");
+ return rc;
+}
+
+/*
+ * Initialize a DIAG block-I/O environment for the device: declares
+ * blocksize and the block range [0, size] starting at 'offset'.
+ * Returns the low two bits of the dia250 result (0 on success).
+ */
+static __inline__ int
+mdsk_init_io(struct dasd_device * device, int blocksize, int offset, int size)
+{
+ struct dasd_diag_private *private;
+ struct dasd_diag_init_io *iib;
+ int rc;
+
+ private = (struct dasd_diag_private *) device->private;
+ iib = &private->iib;
+ memset(iib, 0, sizeof (struct dasd_diag_init_io));
+
+ iib->dev_nr = _ccw_device_get_device_number(device->cdev);
+ iib->block_size = blocksize;
+ iib->offset = offset;
+ iib->start_block = 0;
+ iib->end_block = size;
+
+ rc = dia250(iib, INIT_BIO);
+
+ return rc & 3;
+}
+
+/*
+ * Terminate the DIAG block-I/O environment for the device,
+ * cancelling outstanding operations. Returns rc & 3 like
+ * mdsk_init_io.
+ */
+static __inline__ int
+mdsk_term_io(struct dasd_device * device)
+{
+ struct dasd_diag_private *private;
+ struct dasd_diag_init_io *iib;
+ int rc;
+
+ private = (struct dasd_diag_private *) device->private;
+ iib = &private->iib;
+ memset(iib, 0, sizeof (struct dasd_diag_init_io));
+ iib->dev_nr = _ccw_device_get_device_number(device->cdev);
+ rc = dia250(iib, TERM_BIO);
+ return rc & 3;
+}
+
+/*
+ * Start a prepared request: fill the per-device RW_BIO parameter
+ * block and fire dia250. The cqr pointer is passed as the interrupt
+ * parameter so dasd_ext_handler can find the request on completion.
+ */
+static int
+dasd_start_diag(struct dasd_ccw_req * cqr)
+{
+ struct dasd_device *device;
+ struct dasd_diag_private *private;
+ struct dasd_diag_req *dreq;
+ int rc;
+
+ device = cqr->device;
+ private = (struct dasd_diag_private *) device->private;
+ dreq = (struct dasd_diag_req *) cqr->data;
+
+ private->iob.dev_nr = _ccw_device_get_device_number(device->cdev);
+ private->iob.key = 0;
+ private->iob.flags = 2; /* do asynchronous io */
+ private->iob.block_count = dreq->block_count;
+ private->iob.interrupt_params = (u32)(addr_t) cqr;
+ private->iob.bio_list = __pa(dreq->bio);
+
+ cqr->startclk = get_clock();
+
+ rc = dia250(&private->iob, RW_BIO);
+ if (rc > 8) {
+ /* Hard failure; the request never started. */
+ DEV_MESSAGE(KERN_WARNING, device, "dia250 returned CC %d", rc);
+ cqr->status = DASD_CQR_ERROR;
+ } else if (rc == 0) {
+ /* Completed synchronously. */
+ cqr->status = DASD_CQR_DONE;
+ dasd_schedule_bh(device);
+ } else {
+ /* Started; completion arrives via external interrupt. */
+ cqr->status = DASD_CQR_IN_IO;
+ rc = 0;
+ }
+ return rc;
+}
+
+/*
+ * External interrupt handler for DIAG 250 completions (code 0x2603).
+ * Recovers the cqr from the interrupt parameter, marks it done or
+ * failed based on the status byte, and opportunistically starts the
+ * next queued request (interrupt fastpath).
+ */
+static void
+dasd_ext_handler(struct pt_regs *regs, __u16 code)
+{
+ struct dasd_ccw_req *cqr, *next;
+ struct dasd_device *device;
+ unsigned long long expires;
+ unsigned long flags;
+ char status;
+ int ip;
+
+ /*
+ * Get the external interruption subcode. VM stores
+ * this in the 'cpu address' field associated with
+ * the external interrupt. For diag 250 the subcode
+ * needs to be 3.
+ */
+ if ((S390_lowcore.cpu_addr & 0xff00) != 0x0300)
+ return;
+ status = *((char *) &S390_lowcore.ext_params + 5);
+ ip = S390_lowcore.ext_params;
+
+ if (!ip) { /* no intparm: unsolicited interrupt */
+ MESSAGE(KERN_DEBUG, "%s", "caught unsolicited interrupt");
+ return;
+ }
+ cqr = (struct dasd_ccw_req *)(addr_t) ip;
+ device = (struct dasd_device *) cqr->device;
+ /* Sanity check: the cqr must belong to this discipline. */
+ if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+ DEV_MESSAGE(KERN_WARNING, device,
+ " magic number of dasd_ccw_req 0x%08X doesn't"
+ " match discipline 0x%08X",
+ cqr->magic, *(int *) (&device->discipline->name));
+ return;
+ }
+
+ /* get irq lock to modify request queue */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+
+ cqr->stopclk = get_clock();
+
+ expires = 0;
+ if (status == 0) {
+ cqr->status = DASD_CQR_DONE;
+ /* Start first request on queue if possible -> fast_io. */
+ if (!list_empty(&device->ccw_queue)) {
+ next = list_entry(device->ccw_queue.next,
+ struct dasd_ccw_req, list);
+ if (next->status == DASD_CQR_QUEUED) {
+ if (dasd_start_diag(next) == 0)
+ expires = next->expires;
+ else
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "Interrupt fastpath "
+ "failed!");
+ }
+ }
+ } else
+ cqr->status = DASD_CQR_FAILED;
+
+ /* Re-arm or clear the timeout timer for the device. */
+ if (expires != 0)
+ dasd_set_timer(device, expires);
+ else
+ dasd_clear_timer(device);
+ dasd_schedule_bh(device);
+
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+/*
+ * Probe and initialize a device for the DIAG discipline: read the
+ * virtual device characteristics (diag210), locate the label block,
+ * and determine the formatted blocksize by trial reads.
+ * Returns 0 on success, -ENOMEM/-ENOTSUPP/-EMEDIUMTYPE otherwise.
+ */
+static int
+dasd_diag_check_device(struct dasd_device *device)
+{
+ struct dasd_diag_private *private;
+ struct dasd_diag_characteristics *rdc_data;
+ struct dasd_diag_bio bio;
+ long *label;
+ int sb, bsize;
+ int rc;
+
+ private = (struct dasd_diag_private *) device->private;
+ if (private == NULL) {
+ private = kmalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
+ if (private == NULL) {
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "memory allocation failed for private data");
+ return -ENOMEM;
+ }
+ /* NOTE(review): 'private' stays attached on later error
+  * paths; presumably freed by the device teardown code. */
+ device->private = (void *) private;
+ }
+ /* Read Device Characteristics */
+ rdc_data = (void *) &(private->rdc_data);
+ rdc_data->dev_nr = _ccw_device_get_device_number(device->cdev);
+ rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
+
+ rc = diag210((struct diag210 *) rdc_data);
+ if (rc)
+ return -ENOTSUPP;
+
+ /* Figure out position of label block */
+ switch (private->rdc_data.vdev_class) {
+ case DEV_CLASS_FBA:
+ private->pt_block = 1;
+ break;
+ case DEV_CLASS_ECKD:
+ private->pt_block = 2;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ DBF_DEV_EVENT(DBF_INFO, device,
+ "%04X: %04X on real %04X/%02X",
+ rdc_data->dev_nr,
+ rdc_data->vdev_type,
+ rdc_data->rdev_type, rdc_data->rdev_model);
+
+ /* terminate all outstanding operations */
+ mdsk_term_io(device);
+
+ /* figure out blocksize of device */
+ label = (long *) get_zeroed_page(GFP_KERNEL);
+ if (label == NULL) {
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "No memory to allocate initialization request");
+ return -ENOMEM;
+ }
+ /* try all sizes - needed for ECKD devices */
+ for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
+ mdsk_init_io(device, bsize, 0, 64);
+ memset(&bio, 0, sizeof (struct dasd_diag_bio));
+ bio.type = MDSK_READ_REQ;
+ bio.block_number = private->pt_block + 1;
+ bio.buffer = __pa(label);
+ memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
+ private->iob.dev_nr = rdc_data->dev_nr;
+ private->iob.key = 0;
+ private->iob.flags = 0; /* do synchronous io */
+ private->iob.block_count = 1;
+ private->iob.interrupt_params = 0;
+ private->iob.bio_list = __pa(&bio);
+ /* A successful read means we guessed the blocksize. */
+ if (dia250(&private->iob, RW_BIO) == 0)
+ break;
+ mdsk_term_io(device);
+ }
+ /* 0xc3d4e2f1 is "CMS1" in EBCDIC - a CMS-formatted label. */
+ if (bsize <= PAGE_SIZE && label[0] == 0xc3d4e2f1) {
+ /* get formatted blocksize from label block */
+ bsize = (int) label[3];
+ device->blocks = label[7];
+ device->bp_block = bsize;
+ device->s2b_shift = 0; /* bits to shift 512 to get a block */
+ for (sb = 512; sb < bsize; sb = sb << 1)
+ device->s2b_shift++;
+
+ DEV_MESSAGE(KERN_INFO, device,
+ "capacity (%dkB blks): %ldkB",
+ (device->bp_block >> 10),
+ (device->blocks << device->s2b_shift) >> 1);
+ rc = 0;
+ } else {
+ if (bsize > PAGE_SIZE)
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "DIAG access failed");
+ else
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "volume is not CMS formatted");
+ rc = -EMEDIUMTYPE;
+ }
+ free_page((long) label);
+ return rc;
+}
+
+/*
+ * Report an artificial geometry (16 heads, 128 sectors of 512 bytes
+ * per track) for HDIO_GETGEO; DIAG devices have no real geometry.
+ */
+static int
+dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
+{
+ if (dasd_check_blocksize(device->bp_block) != 0)
+ return -EINVAL;
+ geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
+ geo->heads = 16;
+ geo->sectors = 128 >> device->s2b_shift;
+ return 0;
+}
+
+/* DIAG errors are not recoverable: always classify as fatal. */
+static dasd_era_t
+dasd_diag_examine_error(struct dasd_ccw_req * cqr, struct irb * stat)
+{
+ return dasd_era_fatal;
+}
+
+/* No discipline-specific recovery; use the generic default ERP. */
+static dasd_erp_fn_t
+dasd_diag_erp_action(struct dasd_ccw_req * cqr)
+{
+ return dasd_default_erp_action;
+}
+
+/* Likewise, the generic default post-ERP action suffices. */
+static dasd_erp_fn_t
+dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
+{
+ return dasd_default_erp_postaction;
+}
+
+/*
+ * Translate a block-layer request into a dasd_ccw_req carrying one
+ * dasd_diag_bio per device block. Rejects partial-block segments
+ * and requests whose segment count disagrees with the sector range.
+ */
+static struct dasd_ccw_req *
+dasd_diag_build_cp(struct dasd_device * device, struct request *req)
+{
+ struct dasd_ccw_req *cqr;
+ struct dasd_diag_req *dreq;
+ struct dasd_diag_bio *dbio;
+ struct bio *bio;
+ struct bio_vec *bv;
+ char *dst;
+ int count, datasize;
+ sector_t recid, first_rec, last_rec;
+ unsigned blksize, off;
+ unsigned char rw_cmd;
+ int i;
+
+ if (rq_data_dir(req) == READ)
+ rw_cmd = MDSK_READ_REQ;
+ else if (rq_data_dir(req) == WRITE)
+ rw_cmd = MDSK_WRITE_REQ;
+ else
+ return ERR_PTR(-EINVAL);
+ blksize = device->bp_block;
+ /* Calculate record id of first and last block. */
+ first_rec = req->sector >> device->s2b_shift;
+ last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
+ /* Check struct bio and count the number of blocks for the request. */
+ count = 0;
+ rq_for_each_bio(bio, req) {
+ bio_for_each_segment(bv, bio, i) {
+ if (bv->bv_len & (blksize - 1))
+ /* Fba can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
+ }
+ }
+ /* Paranoia. */
+ if (count != last_rec - first_rec + 1)
+ return ERR_PTR(-EINVAL);
+ /* Build the request */
+ datasize = sizeof(struct dasd_diag_req) +
+ count*sizeof(struct dasd_diag_bio);
+ cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
+ datasize, device);
+ if (IS_ERR(cqr))
+ return cqr;
+
+ dreq = (struct dasd_diag_req *) cqr->data;
+ dreq->block_count = count;
+ dbio = dreq->bio;
+ recid = first_rec;
+ /* One diag bio per device block; DIAG block numbers are 1-based. */
+ rq_for_each_bio(bio, req) {
+ bio_for_each_segment(bv, bio, i) {
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += blksize) {
+ memset(dbio, 0, sizeof (struct dasd_diag_bio));
+ dbio->type = rw_cmd;
+ dbio->block_number = recid + 1;
+ dbio->buffer = __pa(dst);
+ dbio++;
+ dst += blksize;
+ recid++;
+ }
+ }
+ }
+ cqr->buildclk = get_clock();
+ cqr->device = device;
+ cqr->expires = 50 * HZ; /* 50 seconds */
+ cqr->status = DASD_CQR_FILLED;
+ return cqr;
+}
+
+static int
+dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+ int done;
+
+ /* Capture the completion state before the request is freed. */
+ done = (cqr->status == DASD_CQR_DONE);
+ dasd_sfree_request(cqr, cqr->device);
+ return done;
+}
+
+/* Fill the ioctl information block from the discipline's private data. */
+static int
+dasd_diag_fill_info(struct dasd_device * device,
+     struct dasd_information2_t * info)
+{
+ struct dasd_diag_private *private;
+
+ private = (struct dasd_diag_private *) device->private;
+ info->label_block = private->pt_block;
+ info->FBA_layout = 1;
+ info->format = DASD_FORMAT_LDL;
+ info->characteristics_size = sizeof (struct dasd_diag_characteristics);
+ /* Reuse the local instead of re-casting device->private again. */
+ memcpy(info->characteristics, &private->rdc_data,
+        sizeof (struct dasd_diag_characteristics));
+ info->confdata_size = 0;
+ return 0;
+}
+
+/* DIAG completions carry no sense data; just log that fact. */
+static void
+dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+       struct irb *stat)
+{
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+      "dump sense not available for DIAG data");
+}
+
+/*
+ * max_blocks is dependent on the amount of storage that is available
+ * in the static io buffer for each device. Currently each device has
+ * 8192 bytes (=2 pages). dasd diag is only relevant for 31 bit.
+ * The struct dasd_ccw_req has 96 bytes, the struct dasd_diag_req has
+ * 8 bytes and the struct dasd_diag_bio for each block has 16 bytes.
+ * That makes:
+ * (8192 - 96 - 8) / 16 = 505.5 blocks at maximum.
+ * We want to fit two into the available memory so that we can immediately
+ * start the next request if one finishes off. That makes 252.75 blocks
+ * for one request. Give a little safety and the result is 240.
+ */
+struct dasd_discipline dasd_diag_discipline = {
+ .owner = THIS_MODULE,
+ .name = "DIAG",
+ .ebcname = "DIAG", /* converted in place by ASCEBC at init */
+ .max_blocks = 240, /* see the sizing rationale above */
+ .check_device = dasd_diag_check_device,
+ .fill_geometry = dasd_diag_fill_geometry,
+ .start_IO = dasd_start_diag,
+ .examine_error = dasd_diag_examine_error,
+ .erp_action = dasd_diag_erp_action,
+ .erp_postaction = dasd_diag_erp_postaction,
+ .build_cp = dasd_diag_build_cp,
+ .free_cp = dasd_diag_free_cp,
+ .dump_sense = dasd_diag_dump_sense,
+ .fill_info = dasd_diag_fill_info,
+};
+
+/*
+ * Module init: DIAG only works under z/VM. Registers the external
+ * interrupt handler for code 0x2603 and publishes the discipline.
+ */
+static int __init
+dasd_diag_init(void)
+{
+ if (!MACHINE_IS_VM) {
+ MESSAGE_LOG(KERN_INFO,
+      "Machine is not VM: %s "
+      "discipline not initializing",
+      dasd_diag_discipline.name);
+ return -EINVAL;
+ }
+ /* Convert the discipline name to EBCDIC for cqr magic checks. */
+ ASCEBC(dasd_diag_discipline.ebcname, 4);
+
+ /* NOTE(review): enables a control-register-0 interrupt subclass
+  * for the DIAG completion interrupts - confirm bit meaning. */
+ ctl_set_bit(0, 9);
+ register_external_interrupt(0x2603, dasd_ext_handler);
+ dasd_diag_discipline_pointer = &dasd_diag_discipline;
+ return 0;
+}
+
+/* Module exit: undo dasd_diag_init in reverse order. */
+static void __exit
+dasd_diag_cleanup(void)
+{
+ if (!MACHINE_IS_VM) {
+ MESSAGE_LOG(KERN_INFO,
+      "Machine is not VM: %s "
+      "discipline not cleaned",
+      dasd_diag_discipline.name);
+ return;
+ }
+ unregister_external_interrupt(0x2603, dasd_ext_handler);
+ ctl_clear_bit(0, 9);
+ dasd_diag_discipline_pointer = NULL;
+}
+
+module_init(dasd_diag_init);
+module_exit(dasd_diag_cleanup);
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: 1
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
new file mode 100644
index 000000000000..a0c38e303979
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.h
@@ -0,0 +1,66 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_diag.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Based on.......: linux/drivers/s390/block/mdisk.h
+ * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ * $Revision: 1.6 $
+ */
+
+/* dasd_diag_bio.type values */
+#define MDSK_WRITE_REQ 0x01
+#define MDSK_READ_REQ 0x02
+
+/* dia250 subfunction codes */
+#define INIT_BIO 0x00
+#define RW_BIO 0x01
+#define TERM_BIO 0x02
+
+/* dasd_diag_characteristics.vdev_class values handled by the driver */
+#define DEV_CLASS_FBA 0x01
+#define DEV_CLASS_ECKD 0x04
+
+/* Device characteristics as returned by diag210. */
+struct dasd_diag_characteristics {
+ u16 dev_nr; /* virtual device number (input) */
+ u16 rdc_len; /* length of this block (input) */
+ u8 vdev_class;
+ u8 vdev_type;
+ u8 vdev_status;
+ u8 vdev_flags;
+ u8 rdev_class;
+ u8 rdev_type;
+ u8 rdev_model;
+ u8 rdev_features;
+} __attribute__ ((packed, aligned(4)));
+
+/* One block transfer descriptor for DIAG 250. */
+struct dasd_diag_bio {
+ u8 type; /* MDSK_READ_REQ or MDSK_WRITE_REQ */
+ u8 status; /* filled in by CP */
+ u16 spare1;
+ u32 block_number; /* 1-based device block number */
+ u32 alet;
+ u32 buffer; /* real address of the data buffer */
+} __attribute__ ((packed, aligned(8)));
+
+/* Parameter block for INIT_BIO / TERM_BIO. */
+struct dasd_diag_init_io {
+ u16 dev_nr;
+ u16 spare1[11];
+ u32 block_size;
+ u32 offset;
+ u32 start_block;
+ u32 end_block;
+ u32 spare2[6];
+} __attribute__ ((packed, aligned(8)));
+
+/* Parameter block for RW_BIO. */
+struct dasd_diag_rw_io {
+ u16 dev_nr;
+ u16 spare1[11];
+ u8 key;
+ u8 flags; /* 2 = asynchronous, 0 = synchronous */
+ u16 spare2;
+ u32 block_count;
+ u32 alet;
+ u32 bio_list; /* real address of dasd_diag_bio array */
+ u32 interrupt_params; /* echoed in ext_params on completion */
+ u32 spare3[5];
+} __attribute__ ((packed, aligned(8)));
+
+
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
new file mode 100644
index 000000000000..838aedf78a56
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.c
@@ -0,0 +1,1722 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_eckd.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ * $Revision: 1.69 $
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h> /* HDIO_GETGEO */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/todclk.h>
+#include <asm/uaccess.h>
+#include <asm/ccwdev.h>
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif /* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eckd):"
+
+#define ECKD_C0(i) (i->home_bytes)
+#define ECKD_F(i) (i->formula)
+#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
+ (i->factors.f_0x02.f1))
+#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
+ (i->factors.f_0x02.f2))
+#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
+ (i->factors.f_0x02.f3))
+#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
+#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
+#define ECKD_F6(i) (i->factor6)
+#define ECKD_F7(i) (i->factor7)
+#define ECKD_F8(i) (i->factor8)
+
+MODULE_LICENSE("GPL");
+
+static struct dasd_discipline dasd_eckd_discipline;
+
+/* Per-device state for the ECKD discipline. */
+struct dasd_eckd_private {
+ struct dasd_eckd_characteristics rdc_data; /* read device char. */
+ struct dasd_eckd_confdata conf_data; /* configuration data */
+ struct dasd_eckd_path path_data;
+ struct eckd_count count_area[5];
+ int init_cqr_status;
+ int uses_cdl; /* compatible disk layout in use */
+ struct attrib_data_t attrib; /* e.g. cache operations */
+};
+
+/* The ccw bus type uses this table to find devices that it sends to
+ * dasd_eckd_probe */
+/* Control-unit/device-type pairs served by this discipline.
+ * C99 designated initializers replace the obsolete GNU 'field:' form. */
+static struct ccw_device_id dasd_eckd_ids[] = {
+ { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
+ { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
+ { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+ { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
+ { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
+ { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
+ { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
+ { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
+ { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
+ { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
+
+static struct ccw_driver dasd_eckd_driver; /* see below */
+
+/* initial attempt at a probe function. this can be simplified once
+ * the other detection code is gone */
+static int
+dasd_eckd_probe (struct ccw_device *cdev)
+{
+ int rc;
+
+ rc = dasd_generic_probe (cdev, &dasd_eckd_discipline);
+ if (rc == 0)
+ /* Request path grouping and allow forced online. */
+ ccw_device_set_options(cdev,
+          CCWDEV_DO_PATHGROUP | CCWDEV_ALLOW_FORCE);
+ return rc;
+}
+
+/* Thin wrapper binding the generic set_online to this discipline. */
+static int
+dasd_eckd_set_online(struct ccw_device *cdev)
+{
+ return dasd_generic_set_online (cdev, &dasd_eckd_discipline);
+}
+
+/* ccw bus driver; most callbacks are the generic dasd ones. */
+static struct ccw_driver dasd_eckd_driver = {
+ .name = "dasd-eckd",
+ .owner = THIS_MODULE,
+ .ids = dasd_eckd_ids,
+ .probe = dasd_eckd_probe,
+ .remove = dasd_generic_remove,
+ .set_offline = dasd_generic_set_offline,
+ .set_online = dasd_eckd_set_online,
+ .notify = dasd_generic_notify,
+};
+
+static const int sizes_trk0[] = { 28, 148, 84 };
+#define LABEL_SIZE 140
+
+static inline unsigned int
+round_up_multiple(unsigned int no, unsigned int mult)
+{
+ unsigned int rem = no % mult;
+
+ /* Already a multiple -> unchanged, else round up to the next one. */
+ if (rem == 0)
+ return no;
+ return no - rem + mult;
+}
+
+/* Ceiling division, written so d1 + (d2 - 1) cannot wrap for
+ * large d1 (the previous form could overflow unsigned int). */
+static inline unsigned int
+ceil_quot(unsigned int d1, unsigned int d2)
+{
+ return d1 / d2 + (d1 % d2 != 0);
+}
+
+/*
+ * Compute track bytes consumed by one record with key length kl and
+ * data length dl, using the control unit's capacity formula (0x01 or
+ * 0x02) and factors from the read-device-characteristics data.
+ * Returns 0 for an unknown formula.
+ */
+static inline int
+bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
+{
+ unsigned int fl1, fl2, int1, int2;
+ int bpr;
+
+ switch (rdc->formula) {
+ case 0x01:
+ fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
+ fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
+    ECKD_F1(rdc));
+ bpr = fl1 + fl2;
+ break;
+ case 0x02:
+ int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
+ int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
+ fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
+    ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
+    ECKD_F1(rdc));
+ fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
+    ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
+    ECKD_F1(rdc));
+ bpr = fl1 + fl2;
+ break;
+ default:
+ bpr = 0;
+ break;
+ }
+ return bpr;
+}
+
+/*
+ * Extract the bytes-per-track value from the 3-byte field in the
+ * characteristics data. NOTE(review): reads 4 bytes through an
+ * unsigned int cast and shifts off the low byte - relies on
+ * big-endian layout (true on s390) and tolerates the aliasing cast.
+ */
+static inline unsigned int
+bytes_per_track(struct dasd_eckd_characteristics *rdc)
+{
+ return *(unsigned int *) (rdc->byte_per_track) >> 8;
+}
+
+/*
+ * Records per track for a given key/data length, per device type.
+ * The constants are the device geometry formulas for 3380, 3390 and
+ * 9345 drives. Returns 0 for an unknown device type.
+ */
+static inline unsigned int
+recs_per_track(struct dasd_eckd_characteristics * rdc,
+        unsigned int kl, unsigned int dl)
+{
+ int dn, kn;
+
+ switch (rdc->dev_type) {
+ case 0x3380:
+ if (kl)
+ return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
+         ceil_quot(dl + 12, 32));
+ else
+ return 1499 / (15 + ceil_quot(dl + 12, 32));
+ case 0x3390:
+ dn = ceil_quot(dl + 6, 232) + 1;
+ if (kl) {
+ kn = ceil_quot(kl + 6, 232) + 1;
+ return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
+         9 + ceil_quot(dl + 6 * dn, 34));
+ } else
+ return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
+ case 0x9345:
+ dn = ceil_quot(dl + 6, 232) + 1;
+ if (kl) {
+ kn = ceil_quot(kl + 6, 232) + 1;
+ return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
+         ceil_quot(dl + 6 * dn, 34));
+ } else
+ return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
+ }
+ return 0;
+}
+
+static inline void
+check_XRC (struct ccw1 *de_ccw,
+    struct DE_eckd_data *data,
+    struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+
+ private = (struct dasd_eckd_private *) device->private;
+
+ /* Nothing to do unless the control unit supports XRC. */
+ if (!private->rdc_data.facilities.XRC_supported)
+ return;
+
+ /* Switch on 'Time Stamp Valid' (0x08) and
+  * 'Extended Parameter' (0x02) - needed for XRC support. */
+ data->ga_extended |= 0x08 | 0x02;
+ data->ep_sys_time = get_clock ();
+
+ de_ccw->count = sizeof (struct DE_eckd_data);
+ de_ccw->flags |= CCW_FLAG_SLI;
+} /* end check_XRC */
+
+/*
+ * Build a Define Extent CCW covering tracks [trk, totrk], setting
+ * permission mask, cache attributes and XRC parameters according to
+ * the I/O command that will follow.
+ */
+static inline void
+define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
+       int totrk, int cmd, struct dasd_device * device)
+{
+ struct dasd_eckd_private *private;
+ struct ch_t geo, beg, end;
+
+ private = (struct dasd_eckd_private *) device->private;
+
+ ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+ ccw->flags = 0;
+ ccw->count = 16;
+ ccw->cda = (__u32) __pa(data);
+
+ memset(data, 0, sizeof (struct DE_eckd_data));
+ switch (cmd) {
+ case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+ case DASD_ECKD_CCW_READ_RECORD_ZERO:
+ case DASD_ECKD_CCW_READ:
+ case DASD_ECKD_CCW_READ_MT:
+ case DASD_ECKD_CCW_READ_CKD:
+ case DASD_ECKD_CCW_READ_CKD_MT:
+ case DASD_ECKD_CCW_READ_KD:
+ case DASD_ECKD_CCW_READ_KD_MT:
+ case DASD_ECKD_CCW_READ_COUNT:
+ /* Read-only permission. */
+ data->mask.perm = 0x1;
+ data->attributes.operation = private->attrib.operation;
+ break;
+ case DASD_ECKD_CCW_WRITE:
+ case DASD_ECKD_CCW_WRITE_MT:
+ case DASD_ECKD_CCW_WRITE_KD:
+ case DASD_ECKD_CCW_WRITE_KD_MT:
+ data->mask.perm = 0x02;
+ data->attributes.operation = private->attrib.operation;
+ check_XRC (ccw, data, device);
+ break;
+ case DASD_ECKD_CCW_WRITE_CKD:
+ case DASD_ECKD_CCW_WRITE_CKD_MT:
+ data->attributes.operation = DASD_BYPASS_CACHE;
+ check_XRC (ccw, data, device);
+ break;
+ case DASD_ECKD_CCW_ERASE:
+ case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+ case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+ /* Destructive operations need write + authorization. */
+ data->mask.perm = 0x3;
+ data->mask.auth = 0x1;
+ data->attributes.operation = DASD_BYPASS_CACHE;
+ check_XRC (ccw, data, device);
+ break;
+ default:
+ DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
+ break;
+ }
+
+ data->attributes.mode = 0x3; /* ECKD */
+
+ /* Regular Data Format Mode on newer control units, except on
+  * the CDL tracks 0/1 which use a special layout. */
+ if ((private->rdc_data.cu_type == 0x2105 ||
+      private->rdc_data.cu_type == 0x2107 ||
+      private->rdc_data.cu_type == 0x1750)
+     && !(private->uses_cdl && trk < 2))
+ data->ga_extended |= 0x40; /* Regular Data Format Mode */
+
+ /* Convert linear track numbers to cylinder/head addresses. */
+ geo.cyl = private->rdc_data.no_cyl;
+ geo.head = private->rdc_data.trk_per_cyl;
+ beg.cyl = trk / geo.head;
+ beg.head = trk % geo.head;
+ end.cyl = totrk / geo.head;
+ end.head = totrk % geo.head;
+
+ /* check for sequential prestage - enhance cylinder range */
+ if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
+     data->attributes.operation == DASD_SEQ_ACCESS) {
+
+ if (end.cyl + private->attrib.nr_cyl < geo.cyl)
+ end.cyl += private->attrib.nr_cyl;
+ else
+ end.cyl = (geo.cyl - 1);
+ }
+
+ data->beg_ext.cyl = beg.cyl;
+ data->beg_ext.head = beg.head;
+ data->end_ext.cyl = end.cyl;
+ data->end_ext.head = end.head;
+}
+
+/*
+ * Build a Locate Record (LO) CCW and its 16-byte parameter block for
+ * 'no_rec' records of length 'reclen', starting at record 'rec_on_trk'
+ * on track 'trk'.  The orientation/operation bits are derived from the
+ * command 'cmd' that follows the locate record in the channel program.
+ */
+static inline void
+locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
+	      int rec_on_trk, int no_rec, int cmd,
+	      struct dasd_device * device, int reclen)
+{
+	struct dasd_eckd_private *private;
+	int sector;
+	int dn, d;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	DBF_DEV_EVENT(DBF_INFO, device,
+		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
+		      trk, rec_on_trk, no_rec, cmd, reclen);
+
+	/* Fill in the Locate Record CCW itself. */
+	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+	ccw->flags = 0;
+	ccw->count = 16;
+	ccw->cda = (__u32) __pa(data);
+
+	memset(data, 0, sizeof (struct LO_eckd_data));
+	sector = 0;
+	if (rec_on_trk) {
+		/* Estimate the angular sector position of the record from
+		 * the record length; the constants are specific to the
+		 * 3390 / 3380 track geometries. */
+		switch (private->rdc_data.dev_type) {
+		case 0x3390:
+			dn = ceil_quot(reclen + 6, 232);
+			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
+			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+			break;
+		case 0x3380:
+			d = 7 + ceil_quot(reclen + 12, 32);
+			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+			break;
+		}
+	}
+	data->sector = sector;
+	data->count = no_rec;
+	/* Operation/orientation codes depend on the following command;
+	 * record zero operations address one extra record (data->count++). */
+	switch (cmd) {
+	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x03;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_WRITE:
+	case DASD_ECKD_CCW_WRITE_MT:
+	case DASD_ECKD_CCW_WRITE_KD:
+	case DASD_ECKD_CCW_WRITE_KD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x01;
+		break;
+	case DASD_ECKD_CCW_WRITE_CKD:
+	case DASD_ECKD_CCW_WRITE_CKD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_READ:
+	case DASD_ECKD_CCW_READ_MT:
+	case DASD_ECKD_CCW_READ_KD:
+	case DASD_ECKD_CCW_READ_KD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_READ_CKD:
+	case DASD_ECKD_CCW_READ_CKD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_READ_COUNT:
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_ERASE:
+		data->length = reclen;
+		data->auxiliary.last_bytes_used = 0x1;
+		data->operation.operation = 0x0b;
+		break;
+	default:
+		DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
+	}
+	/* Seek and search argument: cylinder/head of 'trk' plus the
+	 * record number to search for. */
+	data->seek_addr.cyl = data->search_arg.cyl =
+		trk / private->rdc_data.trk_per_cyl;
+	data->seek_addr.head = data->search_arg.head =
+		trk % private->rdc_data.trk_per_cyl;
+	data->search_arg.record = rec_on_trk;
+}
+
+/*
+ * Returns 1 if the block is one of the special blocks that needs
+ * to get read/written with the KD variant of the command.
+ * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
+ * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
+ * Luckily the KD variants differ only by one bit (0x08) from the
+ * normal variant. So don't wonder about code like:
+ * if (dasd_eckd_cdl_special(blk_per_trk, recid))
+ *	ccw->cmd_code |= 0x8;
+ */
+static inline int
+dasd_eckd_cdl_special(int blk_per_trk, int recid)
+{
+	/* Records 0-2 on track 0 and all of track 1 are special. */
+	return recid < 3 ||
+	       (recid >= blk_per_trk && recid < 2 * blk_per_trk);
+}
+
+/*
+ * Returns the record size for the special blocks of the cdl format.
+ * Only returns something useful if dasd_eckd_cdl_special is true
+ * for the recid.
+ */
+static inline int
+dasd_eckd_cdl_reclen(int recid)
+{
+	/* First three records on track 0 have fixed sizes, the rest of
+	 * the special records are label sized. */
+	return (recid < 3) ? sizes_trk0[recid] : LABEL_SIZE;
+}
+
+/*
+ * Read the configuration data on every operational channel path of the
+ * device.  The first valid record is kept in private->conf_data; byte
+ * 242 of each record classifies the path into the non-preferred (npm)
+ * or preferred (ppm) path mask.
+ * Returns 0 on success (paths without data are skipped) or a negative
+ * errno if reading the configuration data failed hard.
+ */
+static int
+dasd_eckd_read_conf(struct dasd_device *device)
+{
+	void *conf_data;
+	int conf_len, conf_data_saved;
+	int rc;
+	__u8 lpm;
+	struct dasd_eckd_private *private;
+	struct dasd_eckd_path *path_data;
+
+	private = (struct dasd_eckd_private *) device->private;
+	path_data = (struct dasd_eckd_path *) &private->path_data;
+	path_data->opm = ccw_device_get_path_mask(device->cdev);
+	conf_data_saved = 0;
+
+	/* get configuration data per operational path */
+	for (lpm = 0x80; lpm; lpm>>= 1) {
+		if (lpm & path_data->opm){
+			rc = read_conf_data_lpm(device->cdev, &conf_data,
+						&conf_len, lpm);
+			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
+				MESSAGE(KERN_WARNING,
+					"Read configuration data returned "
+					"error %d", rc);
+				return rc;
+			}
+			if (conf_data == NULL) {
+				MESSAGE(KERN_WARNING, "%s", "No configuration "
+					"data retrieved");
+				continue;	/* no error */
+			}
+			if (conf_len != sizeof (struct dasd_eckd_confdata)) {
+				MESSAGE(KERN_WARNING,
+					"sizes of configuration data mismatch "
+					"%d (read) vs %ld (expected)",
+					conf_len,
+					sizeof (struct dasd_eckd_confdata));
+				kfree(conf_data);
+				continue;	/* no error */
+			}
+			/* save first valid configuration data */
+			if (!conf_data_saved){
+				memcpy(&private->conf_data, conf_data,
+				       sizeof (struct dasd_eckd_confdata));
+				conf_data_saved++;
+			}
+			/* classify path from byte 242 of the record */
+			switch (((char *)conf_data)[242] & 0x07){
+			case 0x02:
+				path_data->npm |= lpm;
+				break;
+			case 0x03:
+				path_data->ppm |= lpm;
+				break;
+			}
+			kfree(conf_data);
+		}
+	}
+	return 0;
+}
+
+
+/*
+ * Per-device setup for the ECKD discipline: allocate the discipline
+ * private data on first use, read the device characteristics (RDC)
+ * into it and read the configuration data for all paths.
+ * Returns 0 on success, -ENOMEM on allocation failure or a negative
+ * errno from the channel I/O.
+ */
+static int
+dasd_eckd_check_characteristics(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	void *rdc_data;
+	int rc;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (private == NULL) {
+		/* GFP_DMA: the characteristics buffer is a channel I/O
+		 * target and must be 31-bit addressable. */
+		private = kmalloc(sizeof(struct dasd_eckd_private),
+				  GFP_KERNEL | GFP_DMA);
+		if (private == NULL) {
+			DEV_MESSAGE(KERN_WARNING, device, "%s",
+				    "memory allocation failed for private "
+				    "data");
+			return -ENOMEM;
+		}
+		memset(private, 0, sizeof(struct dasd_eckd_private));
+		device->private = (void *) private;
+	}
+	/* Invalidate status of initial analysis. */
+	private->init_cqr_status = -1;
+	/* Set default cache operations. */
+	private->attrib.operation = DASD_NORMAL_CACHE;
+	private->attrib.nr_cyl = 0;
+
+	/* Read Device Characteristics (64 bytes) */
+	rdc_data = (void *) &(private->rdc_data);
+	rc = read_dev_chars(device->cdev, &rdc_data, 64);
+	if (rc) {
+		DEV_MESSAGE(KERN_WARNING, device,
+			    "Read device characteristics returned error %d",
+			    rc);
+		return rc;
+	}
+
+	DEV_MESSAGE(KERN_INFO, device,
+		    "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
+		    private->rdc_data.dev_type,
+		    private->rdc_data.dev_model,
+		    private->rdc_data.cu_type,
+		    private->rdc_data.cu_model.model,
+		    private->rdc_data.no_cyl,
+		    private->rdc_data.trk_per_cyl,
+		    private->rdc_data.sec_per_trk);
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf (device);
+	return rc;
+
+}
+
+/*
+ * Build the channel program for the initial volume analysis:
+ * one Define Extent over tracks 0-2, then read the count fields of the
+ * first 4 records on track 0 and of the first record on track 2.
+ * The count data ends up in private->count_area[0..4] and is evaluated
+ * later by dasd_eckd_end_analysis.
+ */
+static struct dasd_ccw_req *
+dasd_eckd_analysis_ccw(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	struct eckd_count *count_data;
+	struct LO_eckd_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int cplength, datasize;
+	int i;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	/* 1x DE + 2x LO + 5x read count = 8 ccws */
+	cplength = 8;
+	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
+	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+				   cplength, datasize, device);
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* Define extent for the first 3 tracks. */
+	define_extent(ccw++, cqr->data, 0, 2,
+		      DASD_ECKD_CCW_READ_COUNT, device);
+	LO_data = cqr->data + sizeof (struct DE_eckd_data);
+	/* Locate record for the first 4 records on track 0. */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	locate_record(ccw++, LO_data++, 0, 0, 4,
+		      DASD_ECKD_CCW_READ_COUNT, device, 0);
+
+	/* Read the count fields into count_area[0..3]. */
+	count_data = private->count_area;
+	for (i = 0; i < 4; i++) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+		ccw->flags = 0;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) count_data;
+		ccw++;
+		count_data++;
+	}
+
+	/* Locate record for the first record on track 2. */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	locate_record(ccw++, LO_data++, 2, 0, 1,
+		      DASD_ECKD_CCW_READ_COUNT, device, 0);
+	/* Read count ccw (fills count_area[4]). */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+	ccw->flags = 0;
+	ccw->count = 8;
+	ccw->cda = (__u32)(addr_t) count_data;
+
+	cqr->device = device;
+	cqr->retries = 0;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+/*
+ * Completion callback of the init_analysis request.  Records the final
+ * request status in the discipline private data, frees the request and
+ * kicks the device so the startup sequence continues (which will reach
+ * dasd_eckd_do_analysis again unless the device was marked for
+ * deletion in the meantime).
+ */
+static void
+dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
+{
+	struct dasd_device *device = init_cqr->device;
+	struct dasd_eckd_private *private =
+		(struct dasd_eckd_private *) device->private;
+
+	/* Remember the outcome before the request is freed. */
+	private->init_cqr_status = init_cqr->status;
+	dasd_sfree_request(init_cqr, device);
+	dasd_kick_device(device);
+}
+
+/*
+ * Allocate and enqueue the initial analysis channel program.  The
+ * request completes asynchronously via dasd_eckd_analysis_callback,
+ * so -EAGAIN is returned to signal "analysis in progress".
+ * Returns a negative errno if the request could not be built.
+ */
+static int
+dasd_eckd_start_analysis(struct dasd_device *device)
+{
+	struct dasd_ccw_req *init_cqr;
+
+	init_cqr = dasd_eckd_analysis_ccw(device);
+	if (IS_ERR(init_cqr))
+		return PTR_ERR(init_cqr);
+	init_cqr->callback = dasd_eckd_analysis_callback;
+	init_cqr->callback_data = NULL;
+	init_cqr->expires = 5*HZ;	/* give the analysis 5 seconds */
+	dasd_add_request_head(init_cqr);
+	return -EAGAIN;
+}
+
+/*
+ * Evaluate the count data gathered by the initial analysis channel
+ * program: detect the disk layout (compatible disk layout vs. plain
+ * linux disk layout), determine the volume blocksize and derive
+ * s2b_shift and the total number of blocks.
+ * Returns 0 on success or -EMEDIUMTYPE for unformatted/incompatible
+ * volumes.
+ */
+static int
+dasd_eckd_end_analysis(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	struct eckd_count *count_area;
+	unsigned int sb, blk_per_trk;
+	int status, i;
+
+	private = (struct dasd_eckd_private *) device->private;
+	status = private->init_cqr_status;
+	private->init_cqr_status = -1;
+	if (status != DASD_CQR_DONE) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "volume analysis returned unformatted disk");
+		return -EMEDIUMTYPE;
+	}
+
+	private->uses_cdl = 1;
+	/* Calculate number of blocks/records per track. */
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
+	/* Check Track 0 for Compatible Disk Layout */
+	count_area = NULL;
+	for (i = 0; i < 3; i++) {
+		/* CDL: records 0-2 of track 0 have key length 4 and the
+		 * well-known record sizes minus 4. */
+		if (private->count_area[i].kl != 4 ||
+		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
+			private->uses_cdl = 0;
+			break;
+		}
+	}
+	if (i == 3)
+		/* use the record read from track 2 for the blocksize */
+		count_area = &private->count_area[4];
+
+	if (private->uses_cdl == 0) {
+		/* LDL: all sampled records must share key length 0 and
+		 * the same data length. */
+		for (i = 0; i < 5; i++) {
+			if ((private->count_area[i].kl != 0) ||
+			    (private->count_area[i].dl !=
+			     private->count_area[0].dl))
+				break;
+		}
+		if (i == 5)
+			count_area = &private->count_area[0];
+	} else {
+		if (private->count_area[3].record == 1)
+			DEV_MESSAGE(KERN_WARNING, device, "%s",
+				    "Trk 0: no records after VTOC!");
+	}
+	if (count_area != NULL && count_area->kl == 0) {
+		/* we found nothing violating our disk layout */
+		if (dasd_check_blocksize(count_area->dl) == 0)
+			device->bp_block = count_area->dl;
+	}
+	if (device->bp_block == 0) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "Volume has incompatible disk layout");
+		return -EMEDIUMTYPE;
+	}
+	device->s2b_shift = 0;	/* bits to shift 512 to get a block */
+	for (sb = 512; sb < device->bp_block; sb = sb << 1)
+		device->s2b_shift++;
+
+	/* recompute with the now known blocksize */
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
+	device->blocks = (private->rdc_data.no_cyl *
+			  private->rdc_data.trk_per_cyl *
+			  blk_per_trk);
+
+	DEV_MESSAGE(KERN_INFO, device,
+		    "(%dkB blks): %dkB at %dkB/trk %s",
+		    (device->bp_block >> 10),
+		    ((private->rdc_data.no_cyl *
+		      private->rdc_data.trk_per_cyl *
+		      blk_per_trk * (device->bp_block >> 9)) >> 1),
+		    ((blk_per_trk * device->bp_block) >> 10),
+		    private->uses_cdl ?
+		    "compatible disk layout" : "linux disk layout");
+
+	return 0;
+}
+
+/*
+ * Volume analysis entry point: start the asynchronous analysis if no
+ * result is available yet, otherwise evaluate the collected data.
+ */
+static int
+dasd_eckd_do_analysis(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private =
+		(struct dasd_eckd_private *) device->private;
+
+	/* A negative status means no analysis has completed yet. */
+	if (private->init_cqr_status < 0)
+		return dasd_eckd_start_analysis(device);
+	return dasd_eckd_end_analysis(device);
+}
+
+/*
+ * Fill the hd_geometry structure from the device characteristics.
+ * Sectors per track can only be reported once a valid blocksize is
+ * known.  Always returns 0.
+ */
+static int
+dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
+{
+	struct dasd_eckd_private *private =
+		(struct dasd_eckd_private *) device->private;
+
+	geo->cylinders = private->rdc_data.no_cyl;
+	geo->heads = private->rdc_data.trk_per_cyl;
+	if (dasd_check_blocksize(device->bp_block) == 0)
+		geo->sectors = recs_per_track(&private->rdc_data,
+					      0, device->bp_block);
+	return 0;
+}
+
+/*
+ * Build the channel program that formats track fdata->start_unit.
+ * fdata->blksize is the record size and fdata->intensity selects the
+ * format variant (see the bit list below).  stop_unit is only used for
+ * sanity checking here.
+ * Returns the ccw request or ERR_PTR(-EINVAL) for bad parameters.
+ */
+static struct dasd_ccw_req *
+dasd_eckd_format_device(struct dasd_device * device,
+			struct format_data_t * fdata)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_ccw_req *fcp;
+	struct eckd_count *ect;
+	struct ccw1 *ccw;
+	void *data;
+	int rpt, cyl, head;
+	int cplength, datasize;
+	int i;
+
+	private = (struct dasd_eckd_private *) device->private;
+	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
+	cyl = fdata->start_unit / private->rdc_data.trk_per_cyl;
+	head = fdata->start_unit % private->rdc_data.trk_per_cyl;
+
+	/* Sanity checks. */
+	if (fdata->start_unit >=
+	    (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) {
+		DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!",
+			    fdata->start_unit);
+		return ERR_PTR(-EINVAL);
+	}
+	if (fdata->start_unit > fdata->stop_unit) {
+		DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.",
+			    fdata->start_unit);
+		return ERR_PTR(-EINVAL);
+	}
+	if (dasd_check_blocksize(fdata->blksize) != 0) {
+		DEV_MESSAGE(KERN_WARNING, device,
+			    "Invalid blocksize %d...terminating!",
+			    fdata->blksize);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * fdata->intensity is a bit string that tells us what to do:
+	 *   Bit 0: write record zero
+	 *   Bit 1: write home address, currently not supported
+	 *   Bit 2: invalidate tracks
+	 *   Bit 3: use OS/390 compatible disk layout (cdl)
+	 * Only some bit combinations do make sense.
+	 */
+	switch (fdata->intensity) {
+	case 0x00:	/* Normal format */
+	case 0x08:	/* Normal format, use cdl. */
+		cplength = 2 + rpt;
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			rpt * sizeof(struct eckd_count);
+		break;
+	case 0x01:	/* Write record zero and format track. */
+	case 0x09:	/* Write record zero and format track, use cdl. */
+		cplength = 3 + rpt;
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			sizeof(struct eckd_count) +
+			rpt * sizeof(struct eckd_count);
+		break;
+	case 0x04:	/* Invalidate track. */
+	case 0x0c:	/* Invalidate track, use cdl. */
+		cplength = 3;
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			sizeof(struct eckd_count);
+		break;
+	default:
+		DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.",
+			    fdata->intensity);
+		return ERR_PTR(-EINVAL);
+	}
+	/* Allocate the format ccw request. */
+	fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
+				   cplength, datasize, device);
+	if (IS_ERR(fcp))
+		return fcp;
+
+	data = fcp->data;
+	ccw = fcp->cpaddr;
+
+	/* Build define extent + locate record for the chosen variant
+	 * (the cdl bit 0x08 does not change the channel program shape). */
+	switch (fdata->intensity & ~0x08) {
+	case 0x00:	/* Normal format. */
+		define_extent(ccw++, (struct DE_eckd_data *) data,
+			      fdata->start_unit, fdata->start_unit,
+			      DASD_ECKD_CCW_WRITE_CKD, device);
+		data += sizeof(struct DE_eckd_data);
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, rpt,
+			      DASD_ECKD_CCW_WRITE_CKD, device,
+			      fdata->blksize);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	case 0x01:	/* Write record zero + format track. */
+		define_extent(ccw++, (struct DE_eckd_data *) data,
+			      fdata->start_unit, fdata->start_unit,
+			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+			      device);
+		data += sizeof(struct DE_eckd_data);
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, rpt + 1,
+			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
+			      device->bp_block);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	case 0x04:	/* Invalidate track. */
+		define_extent(ccw++, (struct DE_eckd_data *) data,
+			      fdata->start_unit, fdata->start_unit,
+			      DASD_ECKD_CCW_WRITE_CKD, device);
+		data += sizeof(struct DE_eckd_data);
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, 1,
+			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	}
+	if (fdata->intensity & 0x01) {	/* write record zero */
+		ect = (struct eckd_count *) data;
+		data += sizeof(struct eckd_count);
+		ect->cyl = cyl;
+		ect->head = head;
+		ect->record = 0;
+		ect->kl = 0;
+		ect->dl = 8;
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
+		ccw->flags = CCW_FLAG_SLI;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) ect;
+		ccw++;
+	}
+	if ((fdata->intensity & ~0x08) & 0x04) {	/* erase track */
+		/* a single invalid record marks the track as erased */
+		ect = (struct eckd_count *) data;
+		data += sizeof(struct eckd_count);
+		ect->cyl = cyl;
+		ect->head = head;
+		ect->record = 1;
+		ect->kl = 0;
+		ect->dl = 0;
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+		ccw->flags = CCW_FLAG_SLI;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) ect;
+	} else {	/* write remaining records */
+		for (i = 0; i < rpt; i++) {
+			ect = (struct eckd_count *) data;
+			data += sizeof(struct eckd_count);
+			ect->cyl = cyl;
+			ect->head = head;
+			ect->record = i + 1;
+			ect->kl = 0;
+			ect->dl = fdata->blksize;
+			/* Check for special tracks 0-1 when formatting CDL */
+			if ((fdata->intensity & 0x08) &&
+			    fdata->start_unit == 0) {
+				if (i < 3) {
+					ect->kl = 4;
+					ect->dl = sizes_trk0[i] - 4;
+				}
+			}
+			if ((fdata->intensity & 0x08) &&
+			    fdata->start_unit == 1) {
+				ect->kl = 44;
+				ect->dl = LABEL_SIZE - 44;
+			}
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+			ccw->flags = CCW_FLAG_SLI;
+			ccw->count = 8;
+			ccw->cda = (__u32)(addr_t) ect;
+			ccw++;
+		}
+	}
+	fcp->device = device;
+	fcp->retries = 2;	/* set retry counter to enable ERP */
+	fcp->buildclk = get_clock();
+	fcp->status = DASD_CQR_FILLED;
+	return fcp;
+}
+
+/*
+ * Classify an I/O error: pick the error recovery examine routine that
+ * matches the control unit type of the device the request ran on.
+ * Clean channel end + device end status means no recovery is needed.
+ */
+static dasd_era_t
+dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
+{
+	struct dasd_device *device = (struct dasd_device *) cqr->device;
+
+	if (irb->scsw.cstat == 0x00 &&
+	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		return dasd_era_none;
+
+	switch (device->cdev->id.cu_type) {
+	case 0x3990:
+	case 0x2105:
+	case 0x2107:
+	case 0x1750:
+		return dasd_3990_erp_examine(cqr, irb);
+	case 0x9343:
+		return dasd_9343_erp_examine(cqr, irb);
+	case 0x3880:
+	default:
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "default (unknown CU type) - RECOVERABLE return");
+		return dasd_era_recover;
+	}
+}
+
+/*
+ * Select the error recovery action routine for the control unit type
+ * of the device the failed request belongs to.
+ */
+static dasd_erp_fn_t
+dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
+{
+	struct dasd_device *device = (struct dasd_device *) cqr->device;
+
+	switch (device->cdev->id.cu_type) {
+	case 0x3990:
+	case 0x2105:
+	case 0x2107:
+	case 0x1750:
+		return dasd_3990_erp_action;
+	case 0x9343:
+	case 0x3880:
+	default:
+		return dasd_default_erp_action;
+	}
+}
+
+/*
+ * ECKD needs no special post-recovery processing; always use the
+ * default postaction routine.
+ */
+static dasd_erp_fn_t
+dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
+{
+	return dasd_default_erp_postaction;
+}
+
+/*
+ * Translate a block layer request into an ECKD channel program:
+ * one Define Extent over the affected tracks, Locate Record CCWs and
+ * one read/write CCW per block.  The special CDL blocks on tracks 0/1
+ * each get their own locate record and the KD command variant; data
+ * may be bounced through dasd_page_cache pages when available.
+ * Returns the ccw request or ERR_PTR(-EINVAL) for requests that are
+ * not full-block sized/aligned.
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
+{
+	struct dasd_eckd_private *private;
+	unsigned long *idaws;
+	struct LO_eckd_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct bio *bio;
+	struct bio_vec *bv;
+	char *dst;
+	unsigned int blksize, blk_per_trk, off;
+	int count, cidaw, cplength, datasize;
+	sector_t recid, first_rec, last_rec;
+	sector_t first_trk, last_trk;
+	unsigned int first_offs, last_offs;
+	unsigned char cmd, rcmd;
+	int i;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (rq_data_dir(req) == READ)
+		cmd = DASD_ECKD_CCW_READ_MT;
+	else if (rq_data_dir(req) == WRITE)
+		cmd = DASD_ECKD_CCW_WRITE_MT;
+	else
+		return ERR_PTR(-EINVAL);
+	/* Calculate number of blocks/records per track. */
+	blksize = device->bp_block;
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+	/* Calculate record id of first and last block. */
+	first_rec = first_trk = req->sector >> device->s2b_shift;
+	first_offs = sector_div(first_trk, blk_per_trk);
+	last_rec = last_trk =
+		(req->sector + req->nr_sectors - 1) >> device->s2b_shift;
+	last_offs = sector_div(last_trk, blk_per_trk);
+	/* Check struct bio and count the number of blocks for the request. */
+	count = 0;
+	cidaw = 0;
+	rq_for_each_bio(bio, req) {
+		bio_for_each_segment(bv, bio, i) {
+			if (bv->bv_len & (blksize - 1))
+				/* Eckd can only do full blocks. */
+				return ERR_PTR(-EINVAL);
+			count += bv->bv_len >> (device->s2b_shift + 9);
+#if defined(CONFIG_ARCH_S390X)
+			/* 64 bit: buffers above 2G need indirect
+			 * addressing words for the channel program. */
+			if (idal_is_needed (page_address(bv->bv_page),
+					    bv->bv_len))
+				cidaw += bv->bv_len >> (device->s2b_shift + 9);
+#endif
+		}
+	}
+	/* Paranoia. */
+	if (count != last_rec - first_rec + 1)
+		return ERR_PTR(-EINVAL);
+	/* 1x define extent + 1x locate record + number of blocks */
+	cplength = 2 + count;
+	/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
+	datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) +
+		cidaw * sizeof(unsigned long);
+	/* Find out the number of additional locate record ccws for cdl. */
+	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
+		if (last_rec >= 2*blk_per_trk)
+			count = 2*blk_per_trk - first_rec;
+		cplength += count;
+		datasize += count*sizeof(struct LO_eckd_data);
+	}
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+				   cplength, datasize, device);
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* First ccw is define extent. */
+	define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device);
+	/* Build locate_record+read/write/ccws. */
+	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
+	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
+	recid = first_rec;
+	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
+		/* Only standard blocks so there is just one locate record. */
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
+			      last_rec - recid + 1, cmd, device, blksize);
+	}
+	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		if (dasd_page_cache) {
+			/* bounce the data through a DMA-able cache page;
+			 * best effort - fall back to the original page */
+			char *copy = kmem_cache_alloc(dasd_page_cache,
+						      SLAB_DMA | __GFP_NOWARN);
+			if (copy && rq_data_dir(req) == WRITE)
+				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+			if (copy)
+				dst = copy + bv->bv_offset;
+		}
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			sector_t trkid = recid;
+			unsigned int recoffs = sector_div(trkid, blk_per_trk);
+			rcmd = cmd;
+			count = blksize;
+			/* Locate record for cdl special block ? */
+			if (private->uses_cdl && recid < 2*blk_per_trk) {
+				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
+					rcmd |= 0x8;	/* KD variant */
+					count = dasd_eckd_cdl_reclen(recid);
+					if (count < blksize)
+						memset(dst + count, 0xe5,
+						       blksize - count);
+				}
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw++, LO_data++,
+					      trkid, recoffs + 1,
+					      1, rcmd, device, count);
+			}
+			/* Locate record for standard blocks ? */
+			if (private->uses_cdl && recid == 2*blk_per_trk) {
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw++, LO_data++,
+					      trkid, recoffs + 1,
+					      last_rec - recid + 1,
+					      cmd, device, count);
+			}
+			/* Read/write ccw. */
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = rcmd;
+			ccw->count = count;
+			if (idal_is_needed(dst, blksize)) {
+				ccw->cda = (__u32)(addr_t) idaws;
+				ccw->flags = CCW_FLAG_IDA;
+				idaws = idal_create_words(idaws, dst, blksize);
+			} else {
+				ccw->cda = (__u32)(addr_t) dst;
+				ccw->flags = 0;
+			}
+			ccw++;
+			dst += blksize;
+			recid++;
+		}
+	}
+	cqr->device = device;
+	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
+	cqr->lpm = private->path_data.ppm;
+	cqr->retries = 256;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+/*
+ * Release a channel program built by dasd_eckd_build_cp.  When the
+ * data was bounced through dasd_page_cache pages, walk the CCW chain
+ * in parallel with the request's bio segments, copy read data back to
+ * the original pages and free the cache pages.
+ * Returns 1 if the request completed successfully (DASD_CQR_DONE),
+ * 0 otherwise.
+ */
+static int
+dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+	struct dasd_eckd_private *private;
+	struct ccw1 *ccw;
+	struct bio *bio;
+	struct bio_vec *bv;
+	char *dst, *cda;
+	unsigned int blksize, blk_per_trk, off;
+	sector_t recid;
+	int i, status;
+
+	if (!dasd_page_cache)
+		goto out;
+	private = (struct dasd_eckd_private *) cqr->device->private;
+	blksize = cqr->device->bp_block;
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+	recid = req->sector >> cqr->device->s2b_shift;
+	ccw = cqr->cpaddr;
+	/* Skip over define extent & locate record. */
+	ccw++;
+	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
+		ccw++;
+	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			/* Skip locate record. */
+			if (private->uses_cdl && recid <= 2*blk_per_trk)
+				ccw++;
+			/* dst != NULL only for the first block of each
+			 * segment; the cache page covers the segment. */
+			if (dst) {
+				if (ccw->flags & CCW_FLAG_IDA)
+					cda = *((char **)((addr_t) ccw->cda));
+				else
+					cda = (char *)((addr_t) ccw->cda);
+				if (dst != cda) {
+					if (rq_data_dir(req) == READ)
+						memcpy(dst, cda, bv->bv_len);
+					kmem_cache_free(dasd_page_cache,
+					    (void *)((addr_t)cda & PAGE_MASK));
+				}
+				dst = NULL;
+			}
+			ccw++;
+			recid++;
+		}
+	}
+out:
+	status = cqr->status == DASD_CQR_DONE;
+	dasd_sfree_request(cqr, cqr->device);
+	return status;
+}
+
+/*
+ * Fill the dasd_information2_t structure with the device
+ * characteristics and configuration data of this device.
+ * Always returns 0.
+ */
+static int
+dasd_eckd_fill_info(struct dasd_device * device,
+		    struct dasd_information2_t * info)
+{
+	struct dasd_eckd_private *private =
+		(struct dasd_eckd_private *) device->private;
+	int cdl = private->uses_cdl;
+
+	/* The volume label of an ECKD volume lives in block 2. */
+	info->label_block = 2;
+	info->FBA_layout = cdl ? 0 : 1;
+	info->format = cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
+	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
+	memcpy(info->characteristics, &private->rdc_data,
+	       sizeof(struct dasd_eckd_characteristics));
+	info->confdata_size = sizeof (struct dasd_eckd_confdata);
+	memcpy(info->configuration_data, &private->conf_data,
+	       sizeof (struct dasd_eckd_confdata));
+	return 0;
+}
+
+/*
+ * SECTION: ioctl functions for eckd devices.
+ */
+
+/*
+ * Release device ioctl.
+ * Builds a channel program to release a previously reserved
+ * (see dasd_eckd_reserve) device.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+dasd_eckd_release(struct block_device *bdev, int no, long args)
+{
+	struct dasd_device *device;
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	device = bdev->bd_disk->private_data;
+	if (device == NULL)
+		return -ENODEV;
+
+	/* single-CCW channel program: release command */
+	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+				   1, 32, device);
+	if (IS_ERR(cqr)) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "Could not allocate initialization request");
+		return PTR_ERR(cqr);
+	}
+	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE;
+	cqr->cpaddr->flags |= CCW_FLAG_SLI;
+	cqr->cpaddr->count = 32;
+	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+	cqr->device = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 0;
+	cqr->expires = 2 * HZ;	/* terminate if no interrupt within 2s */
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+
+	dasd_sfree_request(cqr, cqr->device);
+	return rc;
+}
+
+/*
+ * Reserve device ioctl.
+ * Options are set to 'synchronous wait for interrupt' and
+ * 'timeout the request'. This leads to a terminate IO if
+ * the interrupt is outstanding for a certain time.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+dasd_eckd_reserve(struct block_device *bdev, int no, long args)
+{
+	struct dasd_device *device;
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	device = bdev->bd_disk->private_data;
+	if (device == NULL)
+		return -ENODEV;
+
+	/* single-CCW channel program: reserve command */
+	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+				   1, 32, device);
+	if (IS_ERR(cqr)) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "Could not allocate initialization request");
+		return PTR_ERR(cqr);
+	}
+	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE;
+	cqr->cpaddr->flags |= CCW_FLAG_SLI;
+	cqr->cpaddr->count = 32;
+	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+	cqr->device = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 0;
+	cqr->expires = 2 * HZ;	/* terminate if no interrupt within 2s */
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+
+	dasd_sfree_request(cqr, cqr->device);
+	return rc;
+}
+
+/*
+ * Steal lock ioctl - unconditional reserve device.
+ * Builds a channel program to break a device's reservation.
+ * (unconditional reserve)
+ * Returns 0 on success or a negative errno.
+ */
+static int
+dasd_eckd_steal_lock(struct block_device *bdev, int no, long args)
+{
+	struct dasd_device *device;
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	device = bdev->bd_disk->private_data;
+	if (device == NULL)
+		return -ENODEV;
+
+	/* single-CCW channel program: steal lock command */
+	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+				   1, 32, device);
+	if (IS_ERR(cqr)) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "Could not allocate initialization request");
+		return PTR_ERR(cqr);
+	}
+	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK;
+	cqr->cpaddr->flags |= CCW_FLAG_SLI;
+	cqr->cpaddr->count = 32;
+	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+	cqr->device = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 0;
+	cqr->expires = 2 * HZ;	/* terminate if no interrupt within 2s */
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+
+	dasd_sfree_request(cqr, cqr->device);
+	return rc;
+}
+
+/*
+ * Read performance statistics ioctl.
+ * Issues a PSF (Prepare for Read Subsystem Data) + RSSD channel
+ * program and copies the retrieved performance statistics to the
+ * user buffer at 'args'.
+ * Returns 0 on success, -EFAULT if the copy-out fails or a negative
+ * errno from request allocation / execution.
+ */
+static int
+dasd_eckd_performance(struct block_device *bdev, int no, long args)
+{
+	struct dasd_device *device;
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_rssd_perf_stats_t *stats;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	device = bdev->bd_disk->private_data;
+	if (device == NULL)
+		return -ENODEV;
+
+	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+				   1 /* PSF */  + 1 /* RSSD */ ,
+				   (sizeof (struct dasd_psf_prssd_data) +
+				    sizeof (struct dasd_rssd_perf_stats_t)),
+				   device);
+	if (IS_ERR(cqr)) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "Could not allocate initialization request");
+		return PTR_ERR(cqr);
+	}
+	cqr->device = device;
+	cqr->retries = 0;
+	cqr->expires = 10 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = 0x01;	/* Performance Statistics */
+	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof (struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - Performance Statistics */
+	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+	memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t));
+
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof (struct dasd_rssd_perf_stats_t);
+	ccw->cda = (__u32)(addr_t) stats;
+
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	rc = dasd_sleep_on(cqr);
+	if (rc == 0) {
+		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+		/* copy_to_user returns the number of bytes NOT copied,
+		 * not an errno; map any failure to -EFAULT. */
+		if (copy_to_user((long __user *) args, (long *) stats,
+				 sizeof(struct dasd_rssd_perf_stats_t)))
+			rc = -EFAULT;
+	}
+	dasd_sfree_request(cqr, cqr->device);
+	return rc;
+}
+
+/*
+ * Get attributes (cache operations)
+ * Returnes the cache attributes used in Define Extend (DE).
+ */
+static int
+dasd_eckd_get_attrib (struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+ struct dasd_eckd_private *private;
+ struct attrib_data_t attrib;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!args)
+ return -EINVAL;
+
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+
+ private = (struct dasd_eckd_private *) device->private;
+ attrib = private->attrib;
+
+ rc = copy_to_user((long __user *) args, (long *) &attrib,
+ sizeof (struct attrib_data_t));
+
+ return rc;
+}
+
+/*
+ * Set attributes (cache operations)
+ * Stores the attributes for cache operation to be used in Define Extend (DE).
+ */
+static int
+dasd_eckd_set_attrib(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+ struct dasd_eckd_private *private;
+ struct attrib_data_t attrib;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!args)
+ return -EINVAL;
+
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+
+ if (copy_from_user(&attrib, (void __user *) args,
+ sizeof (struct attrib_data_t))) {
+ return -EFAULT;
+ }
+ private = (struct dasd_eckd_private *) device->private;
+ private->attrib = attrib;
+
+ DEV_MESSAGE(KERN_INFO, device,
+ "cache operation mode set to %x (%i cylinder prestage)",
+ private->attrib.operation, private->attrib.nr_cyl);
+ return 0;
+}
+
+/*
+ * Print sense data and related channel program.
+ * Parts are printed because printk buffer is only 1024 bytes.
+ */
static void
dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
		     struct irb *irb)
{
	char *page;
	struct ccw1 *act, *end, *last;
	int len, sl, sct, count;

	/* Scratch page used to assemble each log chunk before printing. */
	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DEV_MESSAGE(KERN_ERR, device, " %s",
			    "No memory to dump sense data");
		return;
	}
	/* Overall I/O status: channel/device status and failing CCW address. */
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      device->cdev->dev.bus_id);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
		       irb->scsw.cstat, irb->scsw.dstat);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       device->cdev->dev.bus_id,
		       (void *) (addr_t) irb->scsw.cpa);
	if (irb->esw.esw0.erw.cons) {
		/* Sense data is valid: dump the 32 sense bytes, 8 per line. */
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		/* Byte 27 bit 0 distinguishes 24 from 32 byte sense format. */
		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " 24 Byte: %x MSG %x, "
				       "%s MSGb to SYSOP\n",
				       irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				       irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " 32 Byte: Format: %x "
				       "Exception class %x\n",
				       irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " SORRY - NO VALID SENSE AVAILABLE\n");
	}
	MESSAGE_LOG(KERN_ERR, "%s",
		    page + sizeof(KERN_ERR PRINTK_HEADER));

	/* dump the Channel Program */
	/* print first CCWs (maximum 8) */
	act = req->cpaddr;
	/* Walk the chaining flags to find the last CCW of the program. */
	for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
	end = min(act + 8, last);
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " Related CP in req: %p\n", req);
	while (act <= end) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		/* Dump at most 32 bytes of the CCW's data area. */
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	MESSAGE_LOG(KERN_ERR, "%s",
		    page + sizeof(KERN_ERR PRINTK_HEADER));

	/* print failing CCW area */
	len = 0;
	/* Skip ahead (with a "......" marker) if the failing CCW is further
	 * down the program than what was printed above. */
	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
		act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
	}
	end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
	while (act <= end) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}

	/* print last CCWs */
	if (act < last - 2) {
		act = last - 2;
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
	}
	while (act <= last) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	if (len > 0)
		MESSAGE_LOG(KERN_ERR, "%s",
			    page + sizeof(KERN_ERR PRINTK_HEADER));
	free_page((unsigned long) page);
}
+
+/*
+ * max_blocks is dependent on the amount of storage that is available
+ * in the static io buffer for each device. Currently each device has
+ * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
+ * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
+ * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
+ * addition we have one define extent ccw + 16 bytes of data and one
+ * locate record ccw + 16 bytes of data. That makes:
+ * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
+ * We want to fit two into the available memory so that we can immediately
+ * start the next request if one finishes off. That makes 249.5 blocks
+ * for one request. Give a little safety and the result is 240.
+ */
/* Discipline operations vector for ECKD devices (see dasd_int.h). */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",	/* converted to EBCDIC in dasd_eckd_init() */
	.max_blocks = 240,	/* derived in the comment above */
	.check_device = dasd_eckd_check_characteristics,
	.do_analysis = dasd_eckd_do_analysis,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.format_device = dasd_eckd_format_device,
	.examine_error = dasd_eckd_examine_error,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.build_cp = dasd_eckd_build_cp,
	.free_cp = dasd_eckd_free_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.fill_info = dasd_eckd_fill_info,
};
+
+static int __init
+dasd_eckd_init(void)
+{
+ int ret;
+
+ dasd_ioctl_no_register(THIS_MODULE, BIODASDGATTR,
+ dasd_eckd_get_attrib);
+ dasd_ioctl_no_register(THIS_MODULE, BIODASDSATTR,
+ dasd_eckd_set_attrib);
+ dasd_ioctl_no_register(THIS_MODULE, BIODASDPSRD,
+ dasd_eckd_performance);
+ dasd_ioctl_no_register(THIS_MODULE, BIODASDRLSE,
+ dasd_eckd_release);
+ dasd_ioctl_no_register(THIS_MODULE, BIODASDRSRV,
+ dasd_eckd_reserve);
+ dasd_ioctl_no_register(THIS_MODULE, BIODASDSLCK,
+ dasd_eckd_steal_lock);
+
+ ASCEBC(dasd_eckd_discipline.ebcname, 4);
+
+ ret = ccw_driver_register(&dasd_eckd_driver);
+ if (ret) {
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR,
+ dasd_eckd_get_attrib);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR,
+ dasd_eckd_set_attrib);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD,
+ dasd_eckd_performance);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE,
+ dasd_eckd_release);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV,
+ dasd_eckd_reserve);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK,
+ dasd_eckd_steal_lock);
+ return ret;
+ }
+
+ dasd_generic_auto_online(&dasd_eckd_driver);
+ return 0;
+}
+
+static void __exit
+dasd_eckd_cleanup(void)
+{
+ ccw_driver_unregister(&dasd_eckd_driver);
+
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR,
+ dasd_eckd_get_attrib);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR,
+ dasd_eckd_set_attrib);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD,
+ dasd_eckd_performance);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE,
+ dasd_eckd_release);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV,
+ dasd_eckd_reserve);
+ dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK,
+ dasd_eckd_steal_lock);
+}
+
/* Module entry/exit hooks for the ECKD discipline driver. */
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: 1
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
new file mode 100644
index 000000000000..b6888c68b224
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.h
@@ -0,0 +1,346 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_eckd.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ * $Revision: 1.10 $
+ */
+
+#ifndef DASD_ECKD_H
+#define DASD_ECKD_H
+
+/*****************************************************************************
+ * SECTION: CCW Definitions
+ ****************************************************************************/
/* ECKD channel command words (CCW opcodes). */
#define DASD_ECKD_CCW_WRITE		 0x05
#define DASD_ECKD_CCW_READ		 0x06
#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
#define DASD_ECKD_CCW_READ_HOME_ADDRESS	 0x0a
#define DASD_ECKD_CCW_WRITE_KD		 0x0d
#define DASD_ECKD_CCW_READ_KD		 0x0e
#define DASD_ECKD_CCW_ERASE		 0x11
#define DASD_ECKD_CCW_READ_COUNT	 0x12
#define DASD_ECKD_CCW_SLCK		 0x14
#define DASD_ECKD_CCW_WRITE_RECORD_ZERO	 0x15
#define DASD_ECKD_CCW_READ_RECORD_ZERO	 0x16
#define DASD_ECKD_CCW_WRITE_CKD		 0x1d
#define DASD_ECKD_CCW_READ_CKD		 0x1e
#define DASD_ECKD_CCW_PSF		 0x27	/* Perform Subsystem Function */
#define DASD_ECKD_CCW_RSSD		 0x3e	/* Read Subsystem Data */
#define DASD_ECKD_CCW_LOCATE_RECORD	 0x47
#define DASD_ECKD_CCW_DEFINE_EXTENT	 0x63
#define DASD_ECKD_CCW_WRITE_MT		 0x85	/* MT = multi track */
#define DASD_ECKD_CCW_READ_MT		 0x86
#define DASD_ECKD_CCW_WRITE_KD_MT	 0x8d
#define DASD_ECKD_CCW_READ_KD_MT	 0x8e
#define DASD_ECKD_CCW_RELEASE		 0x94
#define DASD_ECKD_CCW_READ_CKD_MT	 0x9e
#define DASD_ECKD_CCW_WRITE_CKD_MT	 0x9d
#define DASD_ECKD_CCW_RESERVE		 0xB4
+
+/*
+ *Perform Subsystem Function / Sub-Orders
+ */
+#define PSF_ORDER_PRSSD 0x18
+
+/*****************************************************************************
+ * SECTION: Type Definitions
+ ****************************************************************************/
+
/* Count field of an ECKD record (cylinder, head, record, key/data length). */
struct eckd_count {
	__u16 cyl;
	__u16 head;
	__u8 record;
	__u8 kl;	/* key length */
	__u16 dl;	/* data length */
} __attribute__ ((packed));

/* Cylinder/head (track) address. */
struct ch_t {
	__u16 cyl;
	__u16 head;
} __attribute__ ((packed));

/* Cylinder/head/sector address. */
struct chs_t {
	__u16 cyl;
	__u16 head;
	__u32 sector;
} __attribute__ ((packed));

/* Cylinder/head/record address. */
struct chr_t {
	__u16 cyl;
	__u16 head;
	__u8 record;
} __attribute__ ((packed));

/* Device geometry as cylinders/heads/sectors. */
struct geom_t {
	__u16 cyl;
	__u16 head;
	__u32 sector;
} __attribute__ ((packed));

/* Home address area of a track (hardware-defined layout). */
struct eckd_home {
	__u8 skip_control[14];
	__u16 cell_number;
	__u8 physical_addr[3];
	__u8 flag;
	struct ch_t track_addr;
	__u8 reserved;
	__u8 key_length;
	__u8 reserved2[2];
} __attribute__ ((packed));
+
/*
 * Parameter block for the Define Extent (DE) CCW.  Layout is dictated by
 * the hardware -- do not reorder or resize fields.
 */
struct DE_eckd_data {
	struct {
		unsigned char perm:2;	/* Permissions on this extent */
		unsigned char reserved:1;
		unsigned char seek:2;	/* Seek control */
		unsigned char auth:2;	/* Access authorization */
		unsigned char pci:1;	/* PCI Fetch mode */
	} __attribute__ ((packed)) mask;
	struct {
		unsigned char mode:2;	/* Architecture mode */
		unsigned char ckd:1;	/* CKD Conversion */
		unsigned char operation:3;	/* Operation mode */
		unsigned char cfw:1;	/* Cache fast write */
		unsigned char dfw:1;	/* DASD fast write */
	} __attribute__ ((packed)) attributes;
	__u16 blk_size;		/* Blocksize */
	__u16 fast_write_id;
	__u8 ga_additional;	/* Global Attributes Additional */
	__u8 ga_extended;	/* Global Attributes Extended	*/
	struct ch_t beg_ext;	/* first track of the extent */
	struct ch_t end_ext;	/* last track of the extent */
	unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
	__u8 ep_format;        /* Extended Parameter format byte       */
	__u8 ep_prio;          /* Extended Parameter priority I/O byte */
	__u8 ep_reserved[6];   /* Extended Parameter Reserved          */
} __attribute__ ((packed));
+
/*
 * Parameter block for the Locate Record (LO) CCW.  Hardware-defined
 * layout -- do not reorder or resize fields.
 */
struct LO_eckd_data {
	struct {
		unsigned char orientation:2;
		unsigned char operation:6;
	} __attribute__ ((packed)) operation;
	struct {
		unsigned char last_bytes_used:1;
		unsigned char reserved:6;
		unsigned char read_count_suffix:1;
	} __attribute__ ((packed)) auxiliary;
	__u8 unused;
	__u8 count;		/* number of records in the domain */
	struct ch_t seek_addr;
	struct chr_t search_arg;
	__u8 sector;
	__u16 length;
} __attribute__ ((packed));
+
/*
 * Device characteristics as returned by Read Device Characteristics.
 * Hardware-defined layout -- do not reorder or resize fields.
 */
struct dasd_eckd_characteristics {
	__u16 cu_type;		/* control unit type */
	struct {
		unsigned char support:2;
		unsigned char async:1;
		unsigned char reserved:1;
		unsigned char cache_info:1;
		unsigned char model:3;
	} __attribute__ ((packed)) cu_model;
	__u16 dev_type;
	__u8 dev_model;
	struct {
		unsigned char mult_burst:1;
		unsigned char RT_in_LR:1;
		unsigned char reserved1:1;
		unsigned char RD_IN_LR:1;
		unsigned char reserved2:4;
		unsigned char reserved3:8;
		unsigned char defect_wr:1;
		unsigned char XRC_supported:1;
		unsigned char reserved4:1;
		unsigned char striping:1;
		unsigned char reserved5:4;
		unsigned char cfw:1;	/* cache fast write */
		unsigned char reserved6:2;
		unsigned char cache:1;
		unsigned char dual_copy:1;
		unsigned char dfw:1;	/* DASD fast write */
		unsigned char reset_alleg:1;
		unsigned char sense_down:1;
	} __attribute__ ((packed)) facilities;
	__u8 dev_class;
	__u8 unit_type;
	__u16 no_cyl;		/* number of cylinders */
	__u16 trk_per_cyl;	/* tracks per cylinder */
	__u8 sec_per_trk;	/* sectors per track */
	__u8 byte_per_track[3];
	__u16 home_bytes;
	__u8 formula;		/* selects which member of factors applies */
	union {
		struct {
			__u8 f1;
			__u16 f2;
			__u16 f3;
		} __attribute__ ((packed)) f_0x01;
		struct {
			__u8 f1;
			__u8 f2;
			__u8 f3;
			__u8 f4;
			__u8 f5;
		} __attribute__ ((packed)) f_0x02;
	} __attribute__ ((packed)) factors;
	__u16 first_alt_trk;	/* alternate tracks */
	__u16 no_alt_trk;
	__u16 first_dia_trk;	/* diagnostic tracks */
	__u16 no_dia_trk;
	__u16 first_sup_trk;
	__u16 no_sup_trk;
	__u8 MDR_ID;
	__u8 OBR_ID;
	__u8 director;
	__u8 rd_trk_set;
	__u16 max_rec_zero;
	__u8 reserved1;
	__u8 RWANY_in_LR;
	__u8 factor6;
	__u8 factor7;
	__u8 factor8;
	__u8 reserved2[3];
	__u8 reserved3[10];
} __attribute__ ((packed));
+
/*
 * Configuration data record: four node element descriptors (ned1..ned4),
 * three raw descriptors (ned5..ned7) and one node element qualifier (neq).
 * Presumably returned by Read Configuration Data -- verify against the
 * code that fills it in.  Hardware-defined layout; do not change.
 */
struct dasd_eckd_confdata {
	/* ned1: device NED */
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char token_id:1;
			unsigned char sno_valid:1;
			unsigned char subst_sno:1;
			unsigned char recNED:1;
			unsigned char emuNED:1;
			unsigned char reserved:1;
		} __attribute__ ((packed)) flags;
		__u8 descriptor;
		__u8 dev_class;
		__u8 reserved;
		unsigned char dev_type[6];
		unsigned char dev_model[3];
		unsigned char HDA_manufacturer[3];
		unsigned char HDA_location[2];
		unsigned char HDA_seqno[12];
		__u16 ID;
	} __attribute__ ((packed)) ned1;
	/* ned2: DASD NED */
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char token_id:1;
			unsigned char sno_valid:1;
			unsigned char subst_sno:1;
			unsigned char recNED:1;
			unsigned char emuNED:1;
			unsigned char reserved:1;
		} __attribute__ ((packed)) flags;
		__u8 descriptor;
		__u8 reserved[2];
		unsigned char dev_type[6];
		unsigned char dev_model[3];
		unsigned char DASD_manufacturer[3];
		unsigned char DASD_location[2];
		unsigned char DASD_seqno[12];
		__u16 ID;
	} __attribute__ ((packed)) ned2;
	/* ned3: controller NED */
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char token_id:1;
			unsigned char sno_valid:1;
			unsigned char subst_sno:1;
			unsigned char recNED:1;
			unsigned char emuNED:1;
			unsigned char reserved:1;
		} __attribute__ ((packed)) flags;
		__u8 descriptor;
		__u8 reserved[2];
		unsigned char cont_type[6];
		unsigned char cont_model[3];
		unsigned char cont_manufacturer[3];
		unsigned char cont_location[2];
		unsigned char cont_seqno[12];
		__u16 ID;
	} __attribute__ ((packed)) ned3;
	/* ned4: second controller NED (model bytes unused) */
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char token_id:1;
			unsigned char sno_valid:1;
			unsigned char subst_sno:1;
			unsigned char recNED:1;
			unsigned char emuNED:1;
			unsigned char reserved:1;
		} __attribute__ ((packed)) flags;
		__u8 descriptor;
		__u8 reserved[2];
		unsigned char cont_type[6];
		unsigned char empty[3];
		unsigned char cont_manufacturer[3];
		unsigned char cont_location[2];
		unsigned char cont_seqno[12];
		__u16 ID;
	} __attribute__ ((packed)) ned4;
	unsigned char ned5[32];
	unsigned char ned6[32];
	unsigned char ned7[32];
	/* neq: node element qualifier (path/interface description) */
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char reserved:6;
		} __attribute__ ((packed)) flags;
		__u8 selector;
		__u16 interfaceID;
		__u32 reserved;
		__u16 subsystemID;
		struct {
			unsigned char sp0:1;
			unsigned char sp1:1;
			unsigned char reserved:5;
			unsigned char scluster:1;
		} __attribute__ ((packed)) spathID;
		__u8 unit_address;
		__u8 dev_ID;
		__u8 dev_address;
		__u8 adapterID;
		__u16 link_address;
		struct {
			unsigned char parallel:1;
			unsigned char escon:1;
			unsigned char reserved:1;
			unsigned char ficon:1;
			unsigned char reserved2:4;
		} __attribute__ ((packed)) protocol_type;
		struct {
			unsigned char PID_in_236:1;
			unsigned char reserved:7;
		} __attribute__ ((packed)) format_flags;
		__u8 log_dev_address;
		unsigned char reserved2[12];
	} __attribute__ ((packed)) neq;
} __attribute__ ((packed));
+
/* Channel path masks -- opm/ppm/npm presumably operational/preferred/
 * non-preferred path mask; confirm against the code that sets them. */
struct dasd_eckd_path {
	__u8 opm;
	__u8 ppm;
	__u8 npm;
};
+
+/*
+ * Perform Subsystem Function - Prepare for Read Subsystem Data
+ */
struct dasd_psf_prssd_data {
	unsigned char order;		/* PSF_ORDER_PRSSD */
	unsigned char flags;
	unsigned char reserved[4];
	unsigned char suborder;		/* 0x01 = performance statistics */
	unsigned char varies[9];
} __attribute__ ((packed));
+
+#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
new file mode 100644
index 000000000000..7cb98d25f341
--- /dev/null
+++ b/drivers/s390/block/dasd_erp.c
@@ -0,0 +1,254 @@
+/*
 * File...........: linux/drivers/s390/block/dasd_erp.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ * $Revision: 1.14 $
+ */
+
+#include <linux/config.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_erp:"
+
+#include "dasd_int.h"
+
/*
 * Allocate an ERP request (CCW area of @cplength entries plus @datasize
 * bytes of data) from the device's dedicated erp_chunks pool, so that ERP
 * never depends on a kmalloc succeeding.  Takes a device reference that
 * dasd_free_erp_request() releases.  Returns ERR_PTR(-ENOMEM) if the
 * pool is exhausted.
 */
struct dasd_ccw_req *
dasd_alloc_erp_request(char *magic, int cplength, int datasize,
		       struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	if ( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	/* Round the request header up to an 8 byte boundary so the CCW
	 * area that follows it is doubleword aligned. */
	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->erp_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	/* Carve the CCW area and the data area out of the chunk. */
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
 		memset(cqr->data, 0, datasize);
	}
	/* The magic is stored in EBCDIC. */
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
+
/*
 * Return an ERP request to the device's erp_chunks pool and drop the
 * device reference -- presumably the one taken by dasd_get_device() in
 * dasd_alloc_erp_request(); confirm they always pair up.
 */
void
dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->erp_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	atomic_dec(&device->ref_count);
}
+
+
+/*
+ * dasd_default_erp_action just retries the current cqr
+ */
+struct dasd_ccw_req *
+dasd_default_erp_action(struct dasd_ccw_req * cqr)
+{
+ struct dasd_device *device;
+
+ device = cqr->device;
+
+ /* just retry - there is nothing to save ... I got no sense data.... */
+ if (cqr->retries > 0) {
+ DEV_MESSAGE (KERN_DEBUG, device,
+ "default ERP called (%i retries left)",
+ cqr->retries);
+ cqr->lpm = LPM_ANYPATH;
+ cqr->status = DASD_CQR_QUEUED;
+ } else {
+ DEV_MESSAGE (KERN_WARNING, device, "%s",
+ "default ERP called (NO retry left)");
+ cqr->status = DASD_CQR_FAILED;
+ cqr->stopclk = get_clock ();
+ }
+ return cqr;
+} /* end dasd_default_erp_action */
+
+/*
+ * DESCRIPTION
+ * Frees all ERPs of the current ERP Chain and set the status
+ * of the original CQR either to DASD_CQR_DONE if ERP was successful
+ * or to DASD_CQR_FAILED if ERP was NOT successful.
+ * NOTE: This function is only called if no discipline postaction
+ * is available
+ *
+ * PARAMETER
+ * erp current erp_head
+ *
+ * RETURN VALUES
+ * cqr pointer to the original CQR
+ */
struct dasd_ccw_req *
dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int success;

	/* Must only be called on an ERP request (one that refers to
	 * another request and has an ERP function set). */
	if (cqr->refers == NULL || cqr->function == NULL)
		BUG();

	device = cqr->device;
	/* Remember the outcome of the head of the chain before freeing it. */
	success = cqr->status == DASD_CQR_DONE;

	/* free all ERPs - but NOT the original cqr */
	while (cqr->refers != NULL) {
		struct dasd_ccw_req *refers;

		refers = cqr->refers;
		/* remove the request from the device queue */
		list_del(&cqr->list);
		/* free the finished erp request */
		dasd_free_erp_request(cqr, device);
		cqr = refers;
	}

	/* set corresponding status to original cqr */
	if (success)
		cqr->status = DASD_CQR_DONE;
	else {
		cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
	}

	return cqr;

} /* end default_erp_postaction */
+
+/*
+ * Print the hex dump of the memory used by a request. This includes
+ * all error recovery ccws that have been chained in from of the
+ * real request.
+ */
+static inline void
+hex_dump_memory(struct dasd_device *device, void *data, int len)
+{
+ int *pint;
+
+ pint = (int *) data;
+ while (len > 0) {
+ DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x",
+ pint, pint[0], pint[1], pint[2], pint[3]);
+ pint += 4;
+ len -= 16;
+ }
+}
+
+void
+dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+ struct dasd_device *device;
+
+ device = cqr->device;
+ /* dump sense data */
+ if (device->discipline && device->discipline->dump_sense)
+ device->discipline->dump_sense(device, cqr, irb);
+}
+
/*
 * Dump the whole ERP chain of @cqr: for each request in the chain log
 * the request header and (parts of) its channel program, plus the area
 * around the failing CCW at @cpa for the head request itself.
 */
void
dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
{
	struct dasd_device *device;
	struct dasd_ccw_req *lcqr;
	struct ccw1 *ccw;
	int cplength;

	device = cqr->device;
	/* log the channel program */
	for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) {
		DEV_MESSAGE(KERN_ERR, device,
			    "(%s) ERP chain report for req: %p",
			    caller == 0 ? "EXAMINE" : "ACTION", lcqr);
		hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req));

		/* Count the CCWs by following the chaining flags. */
		cplength = 1;
		ccw = lcqr->cpaddr;
		while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC))
			cplength++;

		if (cplength > 40) {	/* log only parts of the CP */
			DEV_MESSAGE(KERN_ERR, device, "%s",
				    "Start of channel program:");
			hex_dump_memory(device, lcqr->cpaddr,
					40*sizeof(struct ccw1));

			DEV_MESSAGE(KERN_ERR, device, "%s",
				    "End of channel program:");
			hex_dump_memory(device, lcqr->cpaddr + cplength - 10,
					10*sizeof(struct ccw1));
		} else {	/* log the whole CP */
			DEV_MESSAGE(KERN_ERR, device, "%s",
				    "Channel program (complete):");
			hex_dump_memory(device, lcqr->cpaddr,
					cplength*sizeof(struct ccw1));
		}

		if (lcqr != cqr)
			continue;

		/*
		 * Log bytes around the failed CCW, but only if we did
		 * not log the whole CP or the CCW is outside the
		 * logged CP.
		 */
		if (cplength > 40 ||
		    ((addr_t) cpa < (addr_t) lcqr->cpaddr &&
		     (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {

			DEV_MESSAGE(KERN_ERR, device,
				    "Failed CCW (%p) (area):",
				    (void *) (long) cpa);
			hex_dump_memory(device, cqr->cpaddr - 10,
					20*sizeof(struct ccw1));
		}
	}

} /* end log_erp_chain */
+
/* ERP helpers used by the discipline modules (dasd_eckd, dasd_fba). */
EXPORT_SYMBOL(dasd_default_erp_action);
EXPORT_SYMBOL(dasd_default_erp_postaction);
EXPORT_SYMBOL(dasd_alloc_erp_request);
EXPORT_SYMBOL(dasd_free_erp_request);
EXPORT_SYMBOL(dasd_log_sense);
EXPORT_SYMBOL(dasd_log_ccw);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
new file mode 100644
index 000000000000..7963ae343eef
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.c
@@ -0,0 +1,607 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_fba.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ * $Revision: 1.39 $
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <asm/debug.h>
+
+#include <linux/slab.h>
+#include <linux/hdreg.h> /* HDIO_GETGEO */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/todclk.h>
+#include <asm/ccwdev.h>
+
+#include "dasd_int.h"
+#include "dasd_fba.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif /* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(fba):"
+
+#define DASD_FBA_CCW_WRITE 0x41
+#define DASD_FBA_CCW_READ 0x42
+#define DASD_FBA_CCW_LOCATE 0x43
+#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
+
+MODULE_LICENSE("GPL");
+
+static struct dasd_discipline dasd_fba_discipline;
+
/* Per-device private data: cached Read Device Characteristics result. */
struct dasd_fba_private {
	struct dasd_fba_characteristics rdc_data;
};
+
+static struct ccw_device_id dasd_fba_ids[] = {
+ { CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), driver_info: 0x1},
+ { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), driver_info: 0x2},
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
+
+static struct ccw_driver dasd_fba_driver; /* see below */
+static int
+dasd_fba_probe(struct ccw_device *cdev)
+{
+ int ret;
+
+ ret = dasd_generic_probe (cdev, &dasd_fba_discipline);
+ if (ret)
+ return ret;
+ ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+ return 0;
+}
+
/* set_online callback: delegate to the generic code with our discipline. */
static int
dasd_fba_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online (cdev, &dasd_fba_discipline);
}
+
/* ccw bus driver for FBA devices; mostly generic DASD callbacks. */
static struct ccw_driver dasd_fba_driver = {
	.name        = "dasd-fba",
	.owner       = THIS_MODULE,
	.ids         = dasd_fba_ids,
	.probe       = dasd_fba_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_fba_set_online,
	.notify      = dasd_generic_notify,
};
+
+static inline void
+define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
+ int blksize, int beg, int nr)
+{
+ ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
+ ccw->flags = 0;
+ ccw->count = 16;
+ ccw->cda = (__u32) __pa(data);
+ memset(data, 0, sizeof (struct DE_fba_data));
+ if (rw == WRITE)
+ (data->mask).perm = 0x0;
+ else if (rw == READ)
+ (data->mask).perm = 0x1;
+ else
+ data->mask.perm = 0x2;
+ data->blk_size = blksize;
+ data->ext_loc = beg;
+ data->ext_end = nr - 1;
+}
+
+static inline void
+locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
+ int block_nr, int block_ct)
+{
+ ccw->cmd_code = DASD_FBA_CCW_LOCATE;
+ ccw->flags = 0;
+ ccw->count = 8;
+ ccw->cda = (__u32) __pa(data);
+ memset(data, 0, sizeof (struct LO_fba_data));
+ if (rw == WRITE)
+ data->operation.cmd = 0x5;
+ else if (rw == READ)
+ data->operation.cmd = 0x6;
+ else
+ data->operation.cmd = 0x8;
+ data->blk_nr = block_nr;
+ data->blk_ct = block_ct;
+}
+
/*
 * check_device callback: allocate the per-device private area (if not
 * done yet), read the device characteristics into it and log a summary.
 * Returns 0 on success or a negative error code.
 */
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
	struct dasd_fba_private *private;
	struct ccw_device *cdev = device->cdev;
	void *rdc_data;
	int rc;

	private = (struct dasd_fba_private *) device->private;
	if (private == NULL) {
		private = kmalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
		if (private == NULL) {
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "memory allocation failed for private "
				    "data");
			return -ENOMEM;
		}
		device->private = (void *) private;
	}
	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	rc = read_dev_chars(device->cdev, &rdc_data, 32);
	if (rc) {
		DEV_MESSAGE(KERN_WARNING, device,
			    "Read device characteristics returned error %d",
			    rc);
		return rc;
	}

	/* Log device/CU type and capacity in MB derived from block count
	 * and block size. */
	DEV_MESSAGE(KERN_INFO, device,
		    "%04X/%02X(CU:%04X/%02X) %dMB at(%d B/blk)",
		    cdev->id.dev_type,
		    cdev->id.dev_model,
		    cdev->id.cu_type,
		    cdev->id.cu_model,
		    ((private->rdc_data.blk_bdsa *
		      (private->rdc_data.blk_size >> 9)) >> 11),
		    private->rdc_data.blk_size);
	return 0;
}
+
+static int
+dasd_fba_do_analysis(struct dasd_device *device)
+{
+ struct dasd_fba_private *private;
+ int sb, rc;
+
+ private = (struct dasd_fba_private *) device->private;
+ rc = dasd_check_blocksize(private->rdc_data.blk_size);
+ if (rc) {
+ DEV_MESSAGE(KERN_INFO, device, "unknown blocksize %d",
+ private->rdc_data.blk_size);
+ return rc;
+ }
+ device->blocks = private->rdc_data.blk_bdsa;
+ device->bp_block = private->rdc_data.blk_size;
+ device->s2b_shift = 0; /* bits to shift 512 to get a block */
+ for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
+ device->s2b_shift++;
+ return 0;
+}
+
+static int
+dasd_fba_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
+{
+ if (dasd_check_blocksize(device->bp_block) != 0)
+ return -EINVAL;
+ geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
+ geo->heads = 16;
+ geo->sectors = 128 >> device->s2b_shift;
+ return 0;
+}
+
+static dasd_era_t
+dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
+{
+ struct dasd_device *device;
+ struct ccw_device *cdev;
+
+ device = (struct dasd_device *) cqr->device;
+ if (irb->scsw.cstat == 0x00 &&
+ irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return dasd_era_none;
+
+ cdev = device->cdev;
+ switch (cdev->id.dev_type) {
+ case 0x3370:
+ return dasd_3370_erp_examine(cqr, irb);
+ case 0x9336:
+ return dasd_9336_erp_examine(cqr, irb);
+ default:
+ return dasd_era_recover;
+ }
+}
+
/* erp_action callback: FBA has no special ERP, always use the default. */
static dasd_erp_fn_t
dasd_fba_erp_action(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_action;
}
+
+static dasd_erp_fn_t
+dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
+{
+ if (cqr->function == dasd_default_erp_action)
+ return dasd_default_erp_postaction;
+
+ DEV_MESSAGE(KERN_WARNING, cqr->device, "unknown ERP action %p",
+ cqr->function);
+ return NULL;
+}
+
+/*
+ * Build the channel program for a block device read/write request.
+ * The chain consists of one define extent ccw, locate record ccw(s)
+ * and one data transfer ccw per block.  Devices that support data
+ * chaining need only a single locate record for the whole request;
+ * all others get a locate record in front of every data ccw.
+ * Returns the prepared request or an ERR_PTR value.
+ */
+static struct dasd_ccw_req *
+dasd_fba_build_cp(struct dasd_device * device, struct request *req)
+{
+	struct dasd_fba_private *private;
+	unsigned long *idaws;
+	struct LO_fba_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct bio *bio;
+	struct bio_vec *bv;
+	char *dst;
+	int count, cidaw, cplength, datasize;
+	sector_t recid, first_rec, last_rec;
+	unsigned int blksize, off;
+	unsigned char cmd;
+	int i;
+
+	private = (struct dasd_fba_private *) device->private;
+	if (rq_data_dir(req) == READ) {
+		cmd = DASD_FBA_CCW_READ;
+	} else if (rq_data_dir(req) == WRITE) {
+		cmd = DASD_FBA_CCW_WRITE;
+	} else
+		return ERR_PTR(-EINVAL);
+	blksize = device->bp_block;
+	/* Calculate record id of first and last block. */
+	first_rec = req->sector >> device->s2b_shift;
+	last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
+	/* Check struct bio and count the number of blocks for the request. */
+	count = 0;
+	cidaw = 0;
+	rq_for_each_bio(bio, req) {
+		bio_for_each_segment(bv, bio, i) {
+			if (bv->bv_len & (blksize - 1))
+				/* Fba can only do full blocks. */
+				return ERR_PTR(-EINVAL);
+			count += bv->bv_len >> (device->s2b_shift + 9);
+#if defined(CONFIG_ARCH_S390X)
+			if (idal_is_needed (page_address(bv->bv_page),
+					    bv->bv_len))
+				cidaw += bv->bv_len / blksize;
+#endif
+		}
+	}
+	/* Paranoia. */
+	if (count != last_rec - first_rec + 1)
+		return ERR_PTR(-EINVAL);
+	/* 1x define extent + 1x locate record + number of blocks */
+	cplength = 2 + count;
+	/* 1x define extent + 1x locate record */
+	datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
+		cidaw * sizeof(unsigned long);
+	/*
+	 * Find out number of additional locate record ccws if the device
+	 * can't do data chaining.
+	 */
+	if (private->rdc_data.mode.bits.data_chain == 0) {
+		cplength += count - 1;
+		datasize += (count - 1)*sizeof(struct LO_fba_data);
+	}
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(dasd_fba_discipline.name,
+				   cplength, datasize, device);
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* First ccw is define extent. */
+	define_extent(ccw++, cqr->data, rq_data_dir(req),
+		      device->bp_block, req->sector, req->nr_sectors);
+	/* Build locate_record + read/write ccws. */
+	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
+	LO_data = (struct LO_fba_data *) (idaws + cidaw);
+	/* Locate record for all blocks for smart devices. */
+	if (private->rdc_data.mode.bits.data_chain != 0) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
+	}
+	recid = first_rec;
+	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		if (dasd_page_cache) {
+			/*
+			 * Route the i/o through a bounce buffer from
+			 * dasd_page_cache; writes are copied into it here,
+			 * reads are copied back in dasd_fba_free_cp.  On
+			 * allocation failure fall back to the bio page.
+			 */
+			char *copy = kmem_cache_alloc(dasd_page_cache,
+						      SLAB_DMA | __GFP_NOWARN);
+			if (copy && rq_data_dir(req) == WRITE)
+				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+			if (copy)
+				dst = copy + bv->bv_offset;
+		}
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			/* Locate record for stupid devices. */
+			if (private->rdc_data.mode.bits.data_chain == 0) {
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw, LO_data++,
+					      rq_data_dir(req),
+					      recid - first_rec, 1);
+				ccw->flags = CCW_FLAG_CC;
+				ccw++;
+			} else {
+				/* Data-chain all but the first block. */
+				if (recid > first_rec)
+					ccw[-1].flags |= CCW_FLAG_DC;
+				else
+					ccw[-1].flags |= CCW_FLAG_CC;
+			}
+			ccw->cmd_code = cmd;
+			ccw->count = device->bp_block;
+			if (idal_is_needed(dst, blksize)) {
+				ccw->cda = (__u32)(addr_t) idaws;
+				ccw->flags = CCW_FLAG_IDA;
+				idaws = idal_create_words(idaws, dst, blksize);
+			} else {
+				ccw->cda = (__u32)(addr_t) dst;
+				ccw->flags = 0;
+			}
+			ccw++;
+			dst += blksize;
+			recid++;
+		}
+	}
+	cqr->device = device;
+	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+/*
+ * Release a channel program built by dasd_fba_build_cp.  If bounce
+ * buffers from dasd_page_cache were used, copy read data back into
+ * the bio pages and free the buffers.  Returns 1 if the request
+ * completed successfully (DASD_CQR_DONE), 0 otherwise.
+ */
+static int
+dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+	struct dasd_fba_private *private;
+	struct ccw1 *ccw;
+	struct bio *bio;
+	struct bio_vec *bv;
+	char *dst, *cda;
+	unsigned int blksize, off;
+	int i, status;
+
+	if (!dasd_page_cache)
+		goto out;
+	private = (struct dasd_fba_private *) cqr->device->private;
+	blksize = cqr->device->bp_block;
+	ccw = cqr->cpaddr;
+	/* Skip over define extent & locate record. */
+	ccw++;
+	if (private->rdc_data.mode.bits.data_chain != 0)
+		ccw++;
+	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			/* Skip locate record. */
+			if (private->rdc_data.mode.bits.data_chain == 0)
+				ccw++;
+			/* Only the segment's first block is examined. */
+			if (dst) {
+				if (ccw->flags & CCW_FLAG_IDA)
+					cda = *((char **)((addr_t) ccw->cda));
+				else
+					cda = (char *)((addr_t) ccw->cda);
+				if (dst != cda) {
+					/* A bounce buffer was used. */
+					if (rq_data_dir(req) == READ)
+						memcpy(dst, cda, bv->bv_len);
+					kmem_cache_free(dasd_page_cache,
+					    (void *)((addr_t)cda & PAGE_MASK));
+				}
+				dst = NULL;
+			}
+			ccw++;
+		}
+	}
+out:
+	status = cqr->status == DASD_CQR_DONE;
+	dasd_sfree_request(cqr, cqr->device);
+	return status;
+}
+
+/*
+ * Fill the dasd_information2_t structure for this device: label in
+ * block 1, FBA layout, LDL format flag and a verbatim copy of the
+ * device characteristics.  No configuration data is available.
+ */
+static int
+dasd_fba_fill_info(struct dasd_device * device,
+		   struct dasd_information2_t * info)
+{
+	info->label_block = 1;
+	info->FBA_layout = 1;
+	info->format = DASD_FORMAT_LDL;
+	info->characteristics_size = sizeof(struct dasd_fba_characteristics);
+	memcpy(info->characteristics,
+	       &((struct dasd_fba_private *) device->private)->rdc_data,
+	       sizeof (struct dasd_fba_characteristics));
+	info->confdata_size = 0;
+	return 0;
+}
+
+/*
+ * Log sense data and the channel program of a failed request: the
+ * 32 bytes of sense data (if present), the first ccws of the chain,
+ * the area around the failing ccw and the last ccws, each with up to
+ * 32 bytes of its data area.
+ */
+static void
+dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+		    struct irb *irb)
+{
+	char *page;
+	struct ccw1 *act, *end, *last;
+	int len, sl, sct, count;
+
+	/* Scratch page used to assemble the log messages. */
+	page = (char *) get_zeroed_page(GFP_ATOMIC);
+	if (page == NULL) {
+		DEV_MESSAGE(KERN_ERR, device, " %s",
+			    "No memory to dump sense data");
+		return;
+	}
+	len = sprintf(page, KERN_ERR PRINTK_HEADER
+		      " I/O status report for device %s:\n",
+		      device->cdev->dev.bus_id);
+	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
+		       irb->scsw.cstat, irb->scsw.dstat);
+	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+		       " device %s: Failing CCW: %p\n",
+		       device->cdev->dev.bus_id,
+		       (void *) (addr_t) irb->scsw.cpa);
+	/* Dump the sense bytes when the irb carries concurrent sense. */
+	if (irb->esw.esw0.erw.cons) {
+		for (sl = 0; sl < 4; sl++) {
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+				       " Sense(hex) %2d-%2d:",
+				       (8 * sl), ((8 * sl) + 7));
+
+			for (sct = 0; sct < 8; sct++) {
+				len += sprintf(page + len, " %02x",
+					       irb->ecw[8 * sl + sct]);
+			}
+			len += sprintf(page + len, "\n");
+		}
+	} else {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " SORRY - NO VALID SENSE AVAILABLE\n");
+	}
+	MESSAGE_LOG(KERN_ERR, "%s",
+		    page + sizeof(KERN_ERR PRINTK_HEADER));
+
+	/* dump the Channel Program */
+	/* print first CCWs (maximum 8) */
+	act = req->cpaddr;
+	/* Last ccw of the chain = first one without a chaining flag. */
+	for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+	end = min(act + 8, last);
+	len = sprintf(page, KERN_ERR PRINTK_HEADER
+		      " Related CP in req: %p\n", req);
+	while (act <= end) {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+	MESSAGE_LOG(KERN_ERR, "%s",
+		    page + sizeof(KERN_ERR PRINTK_HEADER));
+
+
+	/* print failing CCW area */
+	len = 0;
+	/* Print an ellipsis when skipping ahead to the failing ccw. */
+	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
+		act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+	}
+	end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
+	while (act <= end) {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+
+	/* print last CCWs */
+	if (act < last - 2) {
+		act = last - 2;
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+	}
+	while (act <= last) {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+	if (len > 0)
+		MESSAGE_LOG(KERN_ERR, "%s",
+			    page + sizeof(KERN_ERR PRINTK_HEADER));
+	free_page((unsigned long) page);
+}
+
+/*
+ * max_blocks is dependent on the amount of storage that is available
+ * in the static io buffer for each device. Currently each device has
+ * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
+ * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
+ * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
+ * addition we have one define extent ccw + 16 bytes of data and a
+ * locate record ccw for each block (stupid devices!) + 16 bytes of data.
+ * That makes:
+ * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
+ * We want to fit two into the available memory so that we can immediately
+ * start the next request if one finishes off. That makes 100.1 blocks
+ * for one request. Give a little safety and the result is 96.
+ */
+/* Discipline operations vector for FBA devices. */
+static struct dasd_discipline dasd_fba_discipline = {
+	.owner = THIS_MODULE,
+	.name = "FBA ",
+	.ebcname = "FBA ",	/* converted to EBCDIC in dasd_fba_init */
+	.max_blocks = 96,	/* see the size calculation above */
+	.check_device = dasd_fba_check_characteristics,
+	.do_analysis = dasd_fba_do_analysis,
+	.fill_geometry = dasd_fba_fill_geometry,
+	.start_IO = dasd_start_IO,
+	.term_IO = dasd_term_IO,
+	.examine_error = dasd_fba_examine_error,
+	.erp_action = dasd_fba_erp_action,
+	.erp_postaction = dasd_fba_erp_postaction,
+	.build_cp = dasd_fba_build_cp,
+	.free_cp = dasd_fba_free_cp,
+	.dump_sense = dasd_fba_dump_sense,
+	.fill_info = dasd_fba_fill_info,
+};
+
+/*
+ * Module initialization: translate the discipline name to EBCDIC,
+ * register the ccw driver and trigger automatic online processing.
+ */
+static int __init
+dasd_fba_init(void)
+{
+	int ret;
+
+	ASCEBC(dasd_fba_discipline.ebcname, 4);
+
+	ret = ccw_driver_register(&dasd_fba_driver);
+	if (ret)
+		return ret;
+
+	dasd_generic_auto_online(&dasd_fba_driver);
+	return 0;
+}
+
+/* Module exit: unregister the ccw driver. */
+static void __exit
+dasd_fba_cleanup(void)
+{
+	ccw_driver_unregister(&dasd_fba_driver);
+}
+
+module_init(dasd_fba_init);
+module_exit(dasd_fba_cleanup);
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: 1
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
new file mode 100644
index 000000000000..624f0402ee22
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.h
@@ -0,0 +1,73 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_fba.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ * $Revision: 1.6 $
+ */
+
+#ifndef DASD_FBA_H
+#define DASD_FBA_H
+
+/* Define extent parameter block for FBA channel programs. */
+struct DE_fba_data {
+	struct {
+		unsigned char perm:2;	/* Permissions on this extent */
+		unsigned char zero:2;	/* Must be zero */
+		unsigned char da:1;	/* usually zero */
+		unsigned char diag:1;	/* allow diagnose */
+		unsigned char zero2:2;	/* zero */
+	} __attribute__ ((packed)) mask;
+	__u8 zero;		/* Must be zero */
+	__u16 blk_size;		/* Blocksize */
+	__u32 ext_loc;		/* Extent locator */
+	__u32 ext_beg;		/* logical number of block 0 in extent */
+	__u32 ext_end;		/* logical number of last block in extent */
+} __attribute__ ((packed));
+
+/* Locate record parameter block for FBA channel programs. */
+struct LO_fba_data {
+	struct {
+		unsigned char zero:4;
+		unsigned char cmd:4;
+	} __attribute__ ((packed)) operation;
+	__u8 auxiliary;
+	__u16 blk_ct;		/* number of blocks */
+	__u32 blk_nr;		/* first block number */
+} __attribute__ ((packed));
+
+/*
+ * FBA device characteristics (copied from the device and kept in
+ * dasd_fba_private.rdc_data; also exported via dasd_fba_fill_info).
+ */
+struct dasd_fba_characteristics {
+	union {
+		__u8 c;
+		struct {
+			unsigned char reserved:1;
+			unsigned char overrunnable:1;
+			unsigned char burst_byte:1;
+			unsigned char data_chain:1;	/* device can data-chain ccws */
+			unsigned char zeros:4;
+		} __attribute__ ((packed)) bits;
+	} __attribute__ ((packed)) mode;
+	union {
+		__u8 c;
+		struct {
+			unsigned char zero0:1;
+			unsigned char removable:1;
+			unsigned char shared:1;
+			unsigned char zero1:1;
+			unsigned char mam:1;
+			unsigned char zeros:3;
+		} __attribute__ ((packed)) bits;
+	} __attribute__ ((packed)) features;
+	__u8 dev_class;
+	__u8 unit_type;
+	__u16 blk_size;
+	__u32 blk_per_cycl;
+	__u32 blk_per_bound;
+	__u32 blk_bdsa;
+	__u32 reserved0;
+	__u16 reserved1;
+	__u16 blk_ce;
+	__u32 reserved2;
+	__u16 reserved3;
+} __attribute__ ((packed));
+
+#endif /* DASD_FBA_H */
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
new file mode 100644
index 000000000000..1d52db406b2e
--- /dev/null
+++ b/drivers/s390/block/dasd_genhd.c
@@ -0,0 +1,185 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_genhd.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ * gendisk related functions for the dasd driver.
+ *
+ * $Revision: 1.48 $
+ */
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_gendisk:"
+
+#include "dasd_int.h"
+
+/*
+ * Allocate and register gendisk structure for device.
+ * Returns 0 on success, -EBUSY if the minor number space is exhausted
+ * and -ENOMEM if the gendisk could not be allocated.
+ */
+int
+dasd_gendisk_alloc(struct dasd_device *device)
+{
+	struct gendisk *gdp;
+	int len;
+
+	/* Make sure the minor for this device exists. */
+	if (device->devindex >= DASD_PER_MAJOR)
+		return -EBUSY;
+
+	gdp = alloc_disk(1 << DASD_PARTN_BITS);
+	if (!gdp)
+		return -ENOMEM;
+
+	/* Initialize gendisk structure. */
+	gdp->major = DASD_MAJOR;
+	gdp->first_minor = device->devindex << DASD_PARTN_BITS;
+	gdp->fops = &dasd_device_operations;
+	gdp->driverfs_dev = &device->cdev->dev;
+
+	/*
+	 * Set device name.
+	 *   dasda - dasdz : 26 devices
+	 *   dasdaa - dasdzz : 676 devices, added up = 702
+	 *   dasdaaa - dasdzzz : 17576 devices, added up = 18278
+	 *   dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
+	 */
+	len = sprintf(gdp->disk_name, "dasd");
+	if (device->devindex > 25) {
+		if (device->devindex > 701) {
+			if (device->devindex > 18277)
+				len += sprintf(gdp->disk_name + len, "%c",
+					       'a'+(((device->devindex-18278)
+						     /17576)%26));
+			len += sprintf(gdp->disk_name + len, "%c",
+				       'a'+(((device->devindex-702)/676)%26));
+		}
+		len += sprintf(gdp->disk_name + len, "%c",
+			       'a'+(((device->devindex-26)/26)%26));
+	}
+	len += sprintf(gdp->disk_name + len, "%c", 'a'+(device->devindex%26));
+
+	sprintf(gdp->devfs_name, "dasd/%s", device->cdev->dev.bus_id);
+
+	if (test_bit(DASD_FLAG_RO, &device->flags))
+		set_disk_ro(gdp, 1);
+	gdp->private_data = device;
+	gdp->queue = device->request_queue;
+	device->gdp = gdp;
+	/* Capacity stays zero until dasd_scan_partitions sets it. */
+	set_capacity(device->gdp, 0);
+	add_disk(device->gdp);
+	return 0;
+}
+
+/*
+ * Unregister and free the gendisk structure for a device.  The queue
+ * pointer is detached before the gendisk reference is dropped, and
+ * device->gdp is cleared so stale accesses are caught.
+ */
+void
+dasd_gendisk_free(struct dasd_device *device)
+{
+	del_gendisk(device->gdp);
+	/* Use NULL rather than the integer literal 0 for pointers. */
+	device->gdp->queue = NULL;
+	put_disk(device->gdp);
+	device->gdp = NULL;
+}
+
+/*
+ * Trigger a partition detection: make the disk capacity known, open
+ * the block device and rescan the partition table via the BLKRRPART
+ * ioctl.  Returns 0 on success, -ENODEV if the device could not be
+ * opened.
+ */
+int
+dasd_scan_partitions(struct dasd_device * device)
+{
+	struct block_device *bdev;
+
+	/* Make the disk known. */
+	set_capacity(device->gdp, device->blocks << device->s2b_shift);
+	bdev = bdget_disk(device->gdp, 0);
+	if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0)
+		return -ENODEV;
+	/*
+	 * See fs/partition/check.c:register_disk,rescan_partitions
+	 * Can't call rescan_partitions directly. Use ioctl.
+	 */
+	ioctl_by_bdev(bdev, BLKRRPART, 0);
+	/*
+	 * Since the matching blkdev_put call to the blkdev_get in
+	 * this function is not called before dasd_destroy_partitions
+	 * the offline open_count limit needs to be increased from
+	 * 0 to 1. This is done by setting device->bdev (see
+	 * dasd_generic_set_offline). As long as the partition
+	 * detection is running no offline should be allowed. That
+	 * is why the assignment to device->bdev is done AFTER
+	 * the BLKRRPART ioctl.
+	 */
+	device->bdev = bdev;
+	return 0;
+}
+
+/*
+ * Remove all inodes in the system for a device, delete the
+ * partitions and make device unusable by setting its size to zero.
+ */
+void
+dasd_destroy_partitions(struct dasd_device * device)
+{
+	/* The two structs have 168/176 byte on 31/64 bit. */
+	struct blkpg_partition bpart;
+	struct blkpg_ioctl_arg barg;
+	struct block_device *bdev;
+
+	/*
+	 * Get the bdev pointer from the device structure and clear
+	 * device->bdev to lower the offline open_count limit again.
+	 * (NULL, not 0: bdev is a pointer member.)
+	 */
+	bdev = device->bdev;
+	device->bdev = NULL;
+
+	/*
+	 * See fs/partition/check.c:delete_partition
+	 * Can't call delete_partitions directly. Use ioctl.
+	 * The ioctl also does locking and invalidation.
+	 */
+	memset(&bpart, 0, sizeof(struct blkpg_partition));
+	memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
+	barg.data = &bpart;
+	barg.op = BLKPG_DEL_PARTITION;
+	/* Delete partitions from the highest minor downwards. */
+	for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
+		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
+
+	invalidate_partition(device->gdp, 0);
+	/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
+	blkdev_put(bdev);
+	set_capacity(device->gdp, 0);
+}
+
+/*
+ * Register the static dasd block major (94).  Returns 0 on success or
+ * the error code from register_blkdev.
+ */
+int
+dasd_gendisk_init(void)
+{
+	int rc;
+
+	/* Register to static dasd major 94 */
+	rc = register_blkdev(DASD_MAJOR, "dasd");
+	if (rc != 0) {
+		MESSAGE(KERN_WARNING,
+			"Couldn't register successfully to "
+			"major no %d", DASD_MAJOR);
+		return rc;
+	}
+	return 0;
+}
+
+/* Undo dasd_gendisk_init: release the static dasd block major. */
+void
+dasd_gendisk_exit(void)
+{
+	unregister_blkdev(DASD_MAJOR, "dasd");
+}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
new file mode 100644
index 000000000000..4586e0ecc526
--- /dev/null
+++ b/drivers/s390/block/dasd_int.h
@@ -0,0 +1,576 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_int.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ * $Revision: 1.63 $
+ */
+
+#ifndef DASD_INT_H
+#define DASD_INT_H
+
+#ifdef __KERNEL__
+
+/* erp debugging in dasd.c and dasd_3990_erp.c */
+#define ERP_DEBUG
+
+
+/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
+#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
+#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
+
+/*
+ * States a dasd device can have:
+ * new: the dasd_device structure is allocated.
+ * known: the discipline for the device is identified.
+ * basic: the device can do basic i/o.
+ * accept: the device is analysed (format is known).
+ * ready: partition detection is done and the device can do block io.
+ * online: the device accepts requests from the block device queue.
+ *
+ * Things to do for startup state transitions:
+ * new -> known: find discipline for the device and create devfs entries.
+ * known -> basic: request irq line for the device.
+ * basic -> ready: do the initial analysis, e.g. format detection,
+ * do block device setup and detect partitions.
+ * ready -> online: schedule the device tasklet.
+ * Things to do for shutdown state transitions:
+ * online -> ready: just set the new device state.
+ * ready -> basic: flush requests from the block device layer, clear
+ * partition information and reset format information.
+ * basic -> known: terminate all requests and free irq.
+ * known -> new: remove devfs entries and forget discipline.
+ */
+
+#define DASD_STATE_NEW 0
+#define DASD_STATE_KNOWN 1
+#define DASD_STATE_BASIC 2
+#define DASD_STATE_READY 3
+#define DASD_STATE_ONLINE 4
+
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/interrupt.h>
+#include <asm/ccwdev.h>
+#include <linux/workqueue.h>
+#include <asm/debug.h>
+#include <asm/dasd.h>
+#include <asm/idals.h>
+
+/*
+ * SECTION: Type definitions
+ */
+struct dasd_device;
+
+typedef int (*dasd_ioctl_fn_t) (struct block_device *bdev, int no, long args);
+
+/* Maps a registered ioctl number to its handler function. */
+struct dasd_ioctl {
+	struct list_head list;
+	struct module *owner;
+	int no;			/* ioctl command number */
+	dasd_ioctl_fn_t handler;
+};
+
+/* Error recovery action classification returned by examine_error. */
+typedef enum {
+	dasd_era_fatal = -1,	/* no chance to recover */
+	dasd_era_none = 0,	/* don't recover, everything alright */
+	dasd_era_msg = 1,	/* don't recover, just report... */
+	dasd_era_recover = 2	/* recovery action recommended */
+} dasd_era_t;
+
+/* BIT DEFINITIONS FOR SENSE DATA */
+#define DASD_SENSE_BIT_0 0x80
+#define DASD_SENSE_BIT_1 0x40
+#define DASD_SENSE_BIT_2 0x20
+#define DASD_SENSE_BIT_3 0x10
+
+/*
+ * SECTION: MACROs for klogd and s390 debug feature (dbf)
+ */
+#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
+do { \
+ debug_sprintf_event(d_device->debug_area, \
+ d_level, \
+ d_str "\n", \
+ d_data); \
+} while(0)
+
+#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
+do { \
+ debug_sprintf_exception(d_device->debug_area, \
+ d_level, \
+ d_str "\n", \
+ d_data); \
+} while(0)
+
+#define DBF_EVENT(d_level, d_str, d_data...)\
+do { \
+ debug_sprintf_event(dasd_debug_area, \
+ d_level,\
+ d_str "\n", \
+ d_data); \
+} while(0)
+
+#define DBF_EXC(d_level, d_str, d_data...)\
+do { \
+ debug_sprintf_exception(dasd_debug_area, \
+ d_level,\
+ d_str "\n", \
+ d_data); \
+} while(0)
+
+/* definition of dbf debug levels */
+#define DBF_EMERG 0 /* system is unusable */
+#define DBF_ALERT 1 /* action must be taken immediately */
+#define DBF_CRIT 2 /* critical conditions */
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARNING 4 /* warning conditions */
+#define DBF_NOTICE 5 /* normal but significant condition */
+#define DBF_INFO 6 /* informational */
+#define DBF_DEBUG 6 /* debug-level messages */
+
+/* messages to be written via klogd and dbf */
+#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
+do { \
+ printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
+ d_device->cdev->dev.bus_id, d_args); \
+ DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
+} while(0)
+
+#define MESSAGE(d_loglevel,d_string,d_args...)\
+do { \
+ printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
+ DBF_EVENT(DBF_ALERT, d_string, d_args); \
+} while(0)
+
+/* messages to be written via klogd only */
+#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
+do { \
+ printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
+ d_device->cdev->dev.bus_id, d_args); \
+} while(0)
+
+#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
+do { \
+ printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
+} while(0)
+
+/* A single queued channel program request and its bookkeeping state. */
+struct dasd_ccw_req {
+	unsigned int magic;		/* Eye catcher */
+	struct list_head list;		/* list_head for request queueing. */
+
+	/* Where to execute what... */
+	struct dasd_device *device;	/* device the request is for */
+	struct ccw1 *cpaddr;		/* address of channel program */
+	char status;			/* status of this request */
+	short retries;			/* A retry counter */
+	unsigned long flags;		/* flags of this request */
+
+	/* ... and how */
+	unsigned long starttime;	/* jiffies time of request start */
+	int expires;			/* expiration period in jiffies */
+	char lpm;			/* logical path mask */
+	void *data;			/* pointer to data area */
+
+	/* these are important for recovering erroneous requests */
+	struct irb irb;			/* device status in case of an error */
+	struct dasd_ccw_req *refers;	/* ERP-chain queueing. */
+	void *function; 		/* originating ERP action */
+
+	/* these are for statistics only */
+	unsigned long long buildclk;	/* TOD-clock of request generation */
+	unsigned long long startclk;	/* TOD-clock of request start */
+	unsigned long long stopclk;	/* TOD-clock of request interrupt */
+	unsigned long long endclk;	/* TOD-clock of request termination */
+
+	/* Callback that is called after reaching final status. */
+	void (*callback)(struct dasd_ccw_req *, void *data);
+	void *callback_data;		/* passed as second callback argument */
+};
+
+/*
+ * dasd_ccw_req -> status can be:
+ */
+#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
+#define DASD_CQR_QUEUED 0x01 /* request is queued to be processed */
+#define DASD_CQR_IN_IO 0x02 /* request is currently in IO */
+#define DASD_CQR_DONE 0x03 /* request is completed successfully */
+#define DASD_CQR_ERROR 0x04 /* request is completed with error */
+#define DASD_CQR_FAILED 0x05 /* request is finally failed */
+#define DASD_CQR_CLEAR 0x06 /* request is clear pending */
+
+/* per dasd_ccw_req flags */
+#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
+
+/* Signature for error recovery functions. */
+typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
+
+/*
+ * the struct dasd_discipline is
+ * sth like a table of virtual functions, if you think of dasd_eckd
+ * inheriting dasd...
+ * no, currently we are not planning to reimplement the driver in C++
+ */
+struct dasd_discipline {
+	struct module *owner;
+	char ebcname[8];	/* a name used for tagging and printks */
+	char name[8];		/* a name used for tagging and printks */
+	int max_blocks;		/* maximum number of blocks to be chained */
+
+	struct list_head list;	/* used for list of disciplines */
+
+	/*
+	 * Device recognition functions. check_device is used to verify
+	 * the sense data and the information returned by read device
+	 * characteristics. It returns 0 if the discipline can be used
+	 * for the device in question.
+	 * do_analysis is used in the step from device state "basic" to
+	 * state "accept". It returns 0 if the device can be made ready,
+	 * it returns -EMEDIUMTYPE if the device can't be made ready or
+	 * -EAGAIN if do_analysis started a ccw that needs to complete
+	 * before the analysis may be repeated.
+	 */
+	int (*check_device)(struct dasd_device *);
+	int (*do_analysis) (struct dasd_device *);
+
+	/*
+	 * Device operation functions. build_cp creates a ccw chain for
+	 * a block device request, start_io starts the request and
+	 * term_IO cancels it (e.g. in case of a timeout). format_device
+	 * returns a ccw chain to be used to format the device.
+	 * free_cp releases a chain built by build_cp and returns whether
+	 * the request completed successfully.
+	 */
+	struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
+					  struct request *);
+	int (*start_IO) (struct dasd_ccw_req *);
+	int (*term_IO) (struct dasd_ccw_req *);
+	struct dasd_ccw_req *(*format_device) (struct dasd_device *,
+					       struct format_data_t *);
+	int (*free_cp) (struct dasd_ccw_req *, struct request *);
+	/*
+	 * Error recovery functions. examine_error() returns a value that
+	 * indicates what to do for an error condition. If examine_error()
+	 * returns 'dasd_era_recover' erp_action() is called to create a
+	 * special error recovery ccw. erp_postaction() is called after
+	 * an error recovery ccw has finished its execution. dump_sense
+	 * is called for every error condition to print the sense data
+	 * to the console.
+	 */
+	dasd_era_t(*examine_error) (struct dasd_ccw_req *, struct irb *);
+	dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
+	dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
+	void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
+			    struct irb *);
+
+	/* i/o control functions. */
+	int (*fill_geometry) (struct dasd_device *, struct hd_geometry *);
+	int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
+};
+
+extern struct dasd_discipline *dasd_diag_discipline_pointer;
+
+/* Run-time state of a single dasd device. */
+struct dasd_device {
+	/* Block device stuff. */
+	struct gendisk *gdp;
+	request_queue_t *request_queue;
+	spinlock_t request_queue_lock;
+	struct block_device *bdev;
+	unsigned int devindex;		/* index used for device naming */
+	unsigned long blocks;		/* size of volume in blocks */
+	unsigned int bp_block;		/* bytes per block */
+	unsigned int s2b_shift;		/* log2 (bp_block/512) */
+	unsigned long flags;		/* per device flags */
+
+	/* Device discipline stuff. */
+	struct dasd_discipline *discipline;
+	char *private;			/* discipline private data */
+
+	/* Device state and target state. */
+	int state, target;
+	int stopped;		/* device (ccw_device_start) was stopped */
+
+	/* Open and reference count. */
+	atomic_t ref_count;
+	atomic_t open_count;
+
+	/* ccw queue and memory for static ccw/erp buffers. */
+	struct list_head ccw_queue;
+	spinlock_t mem_lock;
+	void *ccw_mem;
+	void *erp_mem;
+	struct list_head ccw_chunks;
+	struct list_head erp_chunks;
+
+	atomic_t tasklet_scheduled;
+	struct tasklet_struct tasklet;
+	struct work_struct kick_work;
+	struct timer_list timer;
+
+	debug_info_t *debug_area;	/* s390 debug feature log */
+
+	struct ccw_device *cdev;	/* underlying channel device */
+
+#ifdef CONFIG_DASD_PROFILE
+	struct dasd_profile_info_t profile;
+#endif
+};
+
+/* reasons why device (ccw_device_start) was stopped */
+#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
+#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
+#define DASD_STOPPED_PENDING 4 /* long busy */
+#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
+#define DASD_STOPPED_DC_EIO 16 /* disconnected, return -EIO */
+
+/* per device flags */
+#define DASD_FLAG_RO 0 /* device is read-only */
+#define DASD_FLAG_USE_DIAG	1	/* use diag discipline */
+#define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */
+#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
+
+void dasd_put_device_wake(struct dasd_device *);
+
+/*
+ * Reference count inliners
+ */
+/* Take a reference on the device. */
+static inline void
+dasd_get_device(struct dasd_device *device)
+{
+	atomic_inc(&device->ref_count);
+}
+
+/*
+ * Drop a reference; the last put calls dasd_put_device_wake to notify
+ * anyone waiting for the device to become unused.
+ */
+static inline void
+dasd_put_device(struct dasd_device *device)
+{
+	if (atomic_dec_return(&device->ref_count) == 0)
+		dasd_put_device_wake(device);
+}
+
+/*
+ * The static memory in ccw_mem and erp_mem is managed by a sorted
+ * list of free memory chunks.
+ */
+struct dasd_mchunk
+{
+	struct list_head list;	/* link in the address-sorted free list */
+	unsigned long size;	/* usable bytes following this header */
+} __attribute__ ((aligned(8)));
+
+/*
+ * Initialize a chunk list: the whole memory area <mem> of <size>
+ * bytes becomes one free chunk (minus the space for its header).
+ */
+static inline void
+dasd_init_chunklist(struct list_head *chunk_list, void *mem,
+		    unsigned long size)
+{
+	struct dasd_mchunk *chunk;
+
+	INIT_LIST_HEAD(chunk_list);
+	chunk = (struct dasd_mchunk *) mem;
+	chunk->size = size - sizeof(struct dasd_mchunk);
+	list_add(&chunk->list, chunk_list);
+}
+
+/*
+ * First-fit allocation of <size> bytes from the chunk list.  A chunk
+ * large enough to be split hands out its tail end; an (almost) exact
+ * fit is removed from the list.  Returns NULL when no free chunk is
+ * large enough.
+ */
+static inline void *
+dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
+{
+	struct dasd_mchunk *chunk, *tmp;
+
+	/* Round the size up to a multiple of 8 bytes. */
+	size = (size + 7L) & -8L;
+	list_for_each_entry(chunk, chunk_list, list) {
+		if (chunk->size < size)
+			continue;
+		if (chunk->size > size + sizeof(struct dasd_mchunk)) {
+			/* Split: carve the new chunk from the tail. */
+			char *endaddr = (char *) (chunk + 1) + chunk->size;
+			tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
+			tmp->size = size;
+			chunk->size -= size + sizeof(struct dasd_mchunk);
+			chunk = tmp;
+		} else
+			list_del(&chunk->list);
+		return (void *) (chunk + 1);
+	}
+	return NULL;
+}
+
+/*
+ * Return a chunk to the address-sorted free list, merging it with
+ * adjacent free chunks where possible.
+ */
+static inline void
+dasd_free_chunk(struct list_head *chunk_list, void *mem)
+{
+	struct dasd_mchunk *chunk, *tmp;
+	struct list_head *p, *left;
+
+	chunk = (struct dasd_mchunk *)
+		((char *) mem - sizeof(struct dasd_mchunk));
+	/* Find out the left neighbour in chunk_list. */
+	left = chunk_list;
+	list_for_each(p, chunk_list) {
+		if (list_entry(p, struct dasd_mchunk, list) > chunk)
+			break;
+		left = p;
+	}
+	/* Try to merge with right neighbour = next element from left. */
+	if (left->next != chunk_list) {
+		tmp = list_entry(left->next, struct dasd_mchunk, list);
+		if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
+			list_del(&tmp->list);
+			chunk->size += tmp->size + sizeof(struct dasd_mchunk);
+		}
+	}
+	/* Try to merge with left neighbour. */
+	if (left != chunk_list) {
+		tmp = list_entry(left, struct dasd_mchunk, list);
+		if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
+			tmp->size += chunk->size + sizeof(struct dasd_mchunk);
+			return;
+		}
+	}
+	/* No merge possible: insert between left and its successor. */
+	__list_add(&chunk->list, left, left->next);
+}
+
+/*
+ * Check if bsize is in { 512, 1024, 2048, 4096 }.
+ * Returns 0 for a valid block size, -EMEDIUMTYPE otherwise.
+ */
+static inline int
+dasd_check_blocksize(int bsize)
+{
+	/* Must be a power of two within [512, 4096]. */
+	if (bsize < 512 || bsize > 4096 || (bsize & (bsize - 1)) != 0)
+		return -EMEDIUMTYPE;
+	return 0;
+}
+
+/* externals in dasd.c */
+#define DASD_PROFILE_ON 1
+#define DASD_PROFILE_OFF 0
+
+extern debug_info_t *dasd_debug_area;
+extern struct dasd_profile_info_t dasd_global_profile;
+extern unsigned int dasd_profile_level;
+extern struct block_device_operations dasd_device_operations;
+
+extern kmem_cache_t *dasd_page_cache;
+
+struct dasd_ccw_req *
+dasd_kmalloc_request(char *, int, int, struct dasd_device *);
+struct dasd_ccw_req *
+dasd_smalloc_request(char *, int, int, struct dasd_device *);
+void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
+
+static inline int
+dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
+{
+ return set_normalized_cda(ccw, cda);
+}
+
+struct dasd_device *dasd_alloc_device(void);
+void dasd_free_device(struct dasd_device *);
+
+void dasd_enable_device(struct dasd_device *);
+void dasd_set_target_state(struct dasd_device *, int);
+void dasd_kick_device(struct dasd_device *);
+
+void dasd_add_request_head(struct dasd_ccw_req *);
+void dasd_add_request_tail(struct dasd_ccw_req *);
+int dasd_start_IO(struct dasd_ccw_req *);
+int dasd_term_IO(struct dasd_ccw_req *);
+void dasd_schedule_bh(struct dasd_device *);
+int dasd_sleep_on(struct dasd_ccw_req *);
+int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
+int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
+void dasd_set_timer(struct dasd_device *, int);
+void dasd_clear_timer(struct dasd_device *);
+int dasd_cancel_req(struct dasd_ccw_req *);
+int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
+void dasd_generic_remove (struct ccw_device *cdev);
+int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
+int dasd_generic_set_offline (struct ccw_device *cdev);
+int dasd_generic_notify(struct ccw_device *, int);
+void dasd_generic_auto_online (struct ccw_driver *);
+
+/* externals in dasd_devmap.c */
+extern int dasd_max_devindex;
+extern int dasd_probeonly;
+extern int dasd_autodetect;
+
+int dasd_devmap_init(void);
+void dasd_devmap_exit(void);
+
+struct dasd_device *dasd_create_device(struct ccw_device *);
+void dasd_delete_device(struct dasd_device *);
+
+int dasd_add_sysfs_files(struct ccw_device *);
+void dasd_remove_sysfs_files(struct ccw_device *);
+
+struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
+struct dasd_device *dasd_device_from_devindex(int);
+
+int dasd_parse(void);
+int dasd_busid_known(char *);
+
+/* externals in dasd_gendisk.c */
+int dasd_gendisk_init(void);
+void dasd_gendisk_exit(void);
+int dasd_gendisk_alloc(struct dasd_device *);
+void dasd_gendisk_free(struct dasd_device *);
+int dasd_scan_partitions(struct dasd_device *);
+void dasd_destroy_partitions(struct dasd_device *);
+
+/* externals in dasd_ioctl.c */
+int dasd_ioctl_init(void);
+void dasd_ioctl_exit(void);
+int dasd_ioctl_no_register(struct module *, int, dasd_ioctl_fn_t);
+int dasd_ioctl_no_unregister(struct module *, int, dasd_ioctl_fn_t);
+int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+/* externals in dasd_proc.c */
+int dasd_proc_init(void);
+void dasd_proc_exit(void);
+
+/* externals in dasd_erp.c */
+struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
+ struct dasd_device *);
+void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
+void dasd_log_ccw(struct dasd_ccw_req *, int, __u32);
+
+/* externals in dasd_3370_erp.c */
+dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
+
+/* externals in dasd_3990_erp.c */
+dasd_era_t dasd_3990_erp_examine(struct dasd_ccw_req *, struct irb *);
+struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
+
+/* externals in dasd_9336_erp.c */
+dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *);
+
+/* externals in dasd_9343_erp.c */
+dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *);
+struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *);
+
+#endif /* __KERNEL__ */
+
+#endif /* DASD_H */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: 1
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
new file mode 100644
index 000000000000..f1892baa3b18
--- /dev/null
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -0,0 +1,554 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_ioctl.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ * i/o controls for the dasd driver.
+ */
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+
+#include <asm/ccwdev.h>
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_ioctl:"
+
+#include "dasd_int.h"
+
+/*
+ * SECTION: ioctl functions.
+ */
+static struct list_head dasd_ioctl_list = LIST_HEAD_INIT(dasd_ioctl_list);
+
+/*
+ * Find the registered ioctl with number no; NULL if none is registered.
+ */
+static struct dasd_ioctl *
+dasd_find_ioctl(int no)
+{
+	struct dasd_ioctl *ioctl;
+
+	list_for_each_entry (ioctl, &dasd_ioctl_list, list)
+		if (ioctl->no == no)
+			return ioctl;
+	return NULL;
+}
+
+/*
+ * Register ioctl with number no.
+ */
+int
+dasd_ioctl_no_register(struct module *owner, int no, dasd_ioctl_fn_t handler)
+{
+ struct dasd_ioctl *new;
+ if (dasd_find_ioctl(no))
+ return -EBUSY;
+ new = kmalloc(sizeof (struct dasd_ioctl), GFP_KERNEL);
+ if (new == NULL)
+ return -ENOMEM;
+ new->owner = owner;
+ new->no = no;
+ new->handler = handler;
+ list_add(&new->list, &dasd_ioctl_list);
+ return 0;
+}
+
+/*
+ * Deregister ioctl with number no.
+ */
+int
+dasd_ioctl_no_unregister(struct module *owner, int no, dasd_ioctl_fn_t handler)
+{
+ struct dasd_ioctl *old = dasd_find_ioctl(no);
+ if (old == NULL)
+ return -ENOENT;
+ if (old->no != no || old->handler != handler || owner != old->owner)
+ return -EINVAL;
+ list_del(&old->list);
+ kfree(old);
+ return 0;
+}
+
+int
+dasd_ioctl(struct inode *inp, struct file *filp,
+	   unsigned int no, unsigned long data)
+{
+	struct block_device *bdev = inp->i_bdev;
+	struct dasd_device *device = bdev->bd_disk->private_data;
+	struct dasd_ioctl *ioctl;
+	const char *dir;
+	int rc;
+
+	if ((_IOC_DIR(no) != _IOC_NONE) && (data == 0)) {
+		PRINT_DEBUG("empty data ptr");
+		return -EINVAL;
+	}
+	dir = _IOC_DIR (no) == _IOC_NONE ? "0" :
+		_IOC_DIR (no) == _IOC_READ ? "r" :
+		_IOC_DIR (no) == _IOC_WRITE ? "w" :
+		_IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? "rw" : "u";
+	DBF_DEV_EVENT(DBF_DEBUG, device,
+		      "ioctl 0x%08x %s'0x%x'%d(%d) with data %8lx", no,
+		      dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data);
+	/* Dispatch no to its registered handler, pinning the owning module. */
+	list_for_each_entry(ioctl, &dasd_ioctl_list, list) {
+		if (ioctl->no == no) {
+			/* Skip the handler if its owning module is going away. */
+			if (!try_module_get(ioctl->owner))
+				continue;
+			rc = ioctl->handler(bdev, no, data);
+			module_put(ioctl->owner);
+			return rc;
+		}
+	}
+	/* No handler registered for this ioctl number. */
+	DBF_DEV_EVENT(DBF_INFO, device,
+		      "unknown ioctl 0x%08x=%s'0x%x'%d(%d) data %8lx", no,
+		      dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data);
+	return -EINVAL;
+}
+
+static int
+dasd_ioctl_api_version(struct block_device *bdev, int no, long args)
+{
+ int ver = DASD_API_VERSION;
+ return put_user(ver, (int __user *) args);
+}
+
+/*
+ * Enable device.
+ * used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection
+ */
+static int
+dasd_ioctl_enable(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+ dasd_enable_device(device);
+ /* Formatting the dasd device can change the capacity. */
+ down(&bdev->bd_sem);
+ i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9);
+ up(&bdev->bd_sem);
+ return 0;
+}
+
+/*
+ * Disable device.
+ * Used by dasdfmt. Disable I/O operations but allow ioctls.
+ */
+static int
+dasd_ioctl_disable(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+ /*
+ * Man this is sick. We don't do a real disable but only downgrade
+ * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
+ * BIODASDDISABLE to disable accesses to the device via the block
+ * device layer but it still wants to do i/o on the device by
+ * using the BIODASDFMT ioctl. Therefore the correct state for the
+ * device is DASD_STATE_BASIC that allows to do basic i/o.
+ */
+ dasd_set_target_state(device, DASD_STATE_BASIC);
+ /*
+ * Set i_size to zero, since read, write, etc. check against this
+ * value.
+ */
+ down(&bdev->bd_sem);
+ i_size_write(bdev->bd_inode, 0);
+ up(&bdev->bd_sem);
+ return 0;
+}
+
+/*
+ * Quiesce device.
+ */
+static int
+dasd_ioctl_quiesce(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+
+ if (!capable (CAP_SYS_ADMIN))
+ return -EACCES;
+
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+
+ DEV_MESSAGE (KERN_DEBUG, device, "%s",
+ "Quiesce IO on device");
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ device->stopped |= DASD_STOPPED_QUIESCE;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return 0;
+}
+
+
+/*
+ * Resume device: clear the quiesce flag and restart queued I/O.
+ */
+static int
+dasd_ioctl_resume(struct block_device *bdev, int no, long args)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+
+	if (!capable (CAP_SYS_ADMIN))
+		return -EACCES;
+
+	device = bdev->bd_disk->private_data;
+	if (device == NULL)
+		return -ENODEV;
+
+	DEV_MESSAGE (KERN_DEBUG, device, "%s",
+		     "resume IO on device");
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	device->stopped &= ~DASD_STOPPED_QUIESCE;
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	dasd_schedule_bh (device);
+	return 0;
+}
+
+/*
+ * performs formatting of _device_ according to _fdata_
+ * Note: The discipline's format_function is assumed to deliver formatting
+ * commands to format a single unit of the device. In terms of the ECKD
+ * devices this means CCWs are generated to format a single track.
+ */
+static int
+dasd_format(struct dasd_device * device, struct format_data_t * fdata)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+
+ if (device->discipline->format_device == NULL)
+ return -EPERM;
+
+ if (device->state != DASD_STATE_BASIC) {
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "dasd_format: device is not disabled! ");
+ return -EBUSY;
+ }
+
+ DBF_DEV_EVENT(DBF_NOTICE, device,
+ "formatting units %d to %d (%d B blocks) flags %d",
+ fdata->start_unit,
+ fdata->stop_unit, fdata->blksize, fdata->intensity);
+
+ /* Since dasdfmt keeps the device open after it was disabled,
+ * there still exists an inode for this device.
+ * We must update i_blkbits, otherwise we might get errors when
+ * enabling the device later.
+ */
+ if (fdata->start_unit == 0) {
+ struct block_device *bdev = bdget_disk(device->gdp, 0);
+ bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
+ bdput(bdev);
+ }
+
+ while (fdata->start_unit <= fdata->stop_unit) {
+ cqr = device->discipline->format_device(device, fdata);
+ if (IS_ERR(cqr))
+ return PTR_ERR(cqr);
+ rc = dasd_sleep_on_interruptible(cqr);
+ dasd_sfree_request(cqr, cqr->device);
+ if (rc) {
+ if (rc != -ERESTARTSYS)
+ DEV_MESSAGE(KERN_ERR, device,
+ " Formatting of unit %d failed "
+ "with rc = %d",
+ fdata->start_unit, rc);
+ return rc;
+ }
+ fdata->start_unit++;
+ }
+ return 0;
+}
+
+/*
+ * Format device.
+ */
+static int
+dasd_ioctl_format(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+ struct format_data_t fdata;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!args)
+ return -EINVAL;
+ /* fdata == NULL is no longer a valid arg to dasd_format ! */
+ device = bdev->bd_disk->private_data;
+
+ if (device == NULL)
+ return -ENODEV;
+ if (test_bit(DASD_FLAG_RO, &device->flags))
+ return -EROFS;
+ if (copy_from_user(&fdata, (void __user *) args,
+ sizeof (struct format_data_t)))
+ return -EFAULT;
+ if (bdev != bdev->bd_contains) {
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "Cannot low-level format a partition");
+ return -EINVAL;
+ }
+ return dasd_format(device, &fdata);
+}
+
+#ifdef CONFIG_DASD_PROFILE
+/*
+ * Reset device profile information
+ */
+static int
+dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+
+ memset(&device->profile, 0, sizeof (struct dasd_profile_info_t));
+ return 0;
+}
+
+/*
+ * Return device profile information
+ */
+static int
+dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+
+ if (copy_to_user((long __user *) args, (long *) &device->profile,
+ sizeof (struct dasd_profile_info_t)))
+ return -EFAULT;
+ return 0;
+}
+#else
+static int
+dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args)
+{
+ return -ENOSYS;
+}
+
+static int
+dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
+{
+ return -ENOSYS;
+}
+#endif
+
+/*
+ * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
+ */
+static int
+dasd_ioctl_information(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+ struct dasd_information2_t *dasd_info;
+ unsigned long flags;
+ int rc;
+ struct ccw_device *cdev;
+
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+
+ if (!device->discipline->fill_info)
+ return -EINVAL;
+
+ dasd_info = kmalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
+ if (dasd_info == NULL)
+ return -ENOMEM;
+
+ rc = device->discipline->fill_info(device, dasd_info);
+ if (rc) {
+ kfree(dasd_info);
+ return rc;
+ }
+
+ cdev = device->cdev;
+
+ dasd_info->devno = _ccw_device_get_device_number(device->cdev);
+ dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev);
+ dasd_info->cu_type = cdev->id.cu_type;
+ dasd_info->cu_model = cdev->id.cu_model;
+ dasd_info->dev_type = cdev->id.dev_type;
+ dasd_info->dev_model = cdev->id.dev_model;
+ dasd_info->open_count = atomic_read(&device->open_count);
+ dasd_info->status = device->state;
+
+ /*
+ * check if device is really formatted
+ * LDL / CDL was returned by 'fill_info'
+ */
+ if ((device->state < DASD_STATE_READY) ||
+ (dasd_check_blocksize(device->bp_block)))
+ dasd_info->format = DASD_FORMAT_NONE;
+
+ dasd_info->features |= test_bit(DASD_FLAG_RO, &device->flags) ?
+ DASD_FEATURE_READONLY : DASD_FEATURE_DEFAULT;
+
+ if (device->discipline)
+ memcpy(dasd_info->type, device->discipline->name, 4);
+ else
+ memcpy(dasd_info->type, "none", 4);
+ dasd_info->req_queue_len = 0;
+ dasd_info->chanq_len = 0;
+ if (device->request_queue->request_fn) {
+ struct list_head *l;
+#ifdef DASD_EXTENDED_PROFILING
+ {
+ struct list_head *l;
+ spin_lock_irqsave(&device->lock, flags);
+ list_for_each(l, &device->request_queue->queue_head)
+ dasd_info->req_queue_len++;
+ spin_unlock_irqrestore(&device->lock, flags);
+ }
+#endif /* DASD_EXTENDED_PROFILING */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ list_for_each(l, &device->ccw_queue)
+ dasd_info->chanq_len++;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+ flags);
+ }
+
+ rc = 0;
+ if (copy_to_user((long __user *) args, (long *) dasd_info,
+ ((no == (unsigned int) BIODASDINFO2) ?
+ sizeof (struct dasd_information2_t) :
+ sizeof (struct dasd_information_t))))
+ rc = -EFAULT;
+ kfree(dasd_info);
+ return rc;
+}
+
+/*
+ * Set read only
+ */
+static int
+dasd_ioctl_set_ro(struct block_device *bdev, int no, long args)
+{
+ struct dasd_device *device;
+ int intval;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (bdev != bdev->bd_contains)
+ // ro setting is not allowed for partitions
+ return -EINVAL;
+ if (get_user(intval, (int __user *) args))
+ return -EFAULT;
+ device = bdev->bd_disk->private_data;
+ if (device == NULL)
+ return -ENODEV;
+ set_disk_ro(bdev->bd_disk, intval);
+ if (intval)
+ set_bit(DASD_FLAG_RO, &device->flags);
+ else
+ clear_bit(DASD_FLAG_RO, &device->flags);
+ return 0;
+}
+
+/*
+ * Return disk geometry (HDIO_GETGEO).
+ */
+static int
+dasd_ioctl_getgeo(struct block_device *bdev, int no, long args)
+{
+	struct hd_geometry geo = { 0, };
+	struct dasd_device *device;
+
+	device = bdev->bd_disk->private_data;
+	if (device == NULL)
+		return -ENODEV;
+	/* Geometry can only be reported if the discipline provides it. */
+	if (device->discipline == NULL ||
+	    device->discipline->fill_geometry == NULL)
+		return -EINVAL;
+
+	/* geo is already zero-initialized by its declaration above. */
+	device->discipline->fill_geometry(device, &geo);
+	geo.start = get_start_sect(bdev) >> device->s2b_shift;
+	if (copy_to_user((struct hd_geometry __user *) args, &geo,
+			 sizeof (struct hd_geometry)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * List of static ioctls.
+ */
+static struct { int no; dasd_ioctl_fn_t fn; } dasd_ioctls[] =
+{
+ { BIODASDDISABLE, dasd_ioctl_disable },
+ { BIODASDENABLE, dasd_ioctl_enable },
+ { BIODASDQUIESCE, dasd_ioctl_quiesce },
+ { BIODASDRESUME, dasd_ioctl_resume },
+ { BIODASDFMT, dasd_ioctl_format },
+ { BIODASDINFO, dasd_ioctl_information },
+ { BIODASDINFO2, dasd_ioctl_information },
+ { BIODASDPRRD, dasd_ioctl_read_profile },
+ { BIODASDPRRST, dasd_ioctl_reset_profile },
+ { BLKROSET, dasd_ioctl_set_ro },
+ { DASDAPIVER, dasd_ioctl_api_version },
+ { HDIO_GETGEO, dasd_ioctl_getgeo },
+ { -1, NULL }
+};
+
+int
+dasd_ioctl_init(void)
+{
+ int i;
+
+ for (i = 0; dasd_ioctls[i].no != -1; i++)
+ dasd_ioctl_no_register(NULL, dasd_ioctls[i].no,
+ dasd_ioctls[i].fn);
+ return 0;
+
+}
+
+void
+dasd_ioctl_exit(void)
+{
+ int i;
+
+ for (i = 0; dasd_ioctls[i].no != -1; i++)
+ dasd_ioctl_no_unregister(NULL, dasd_ioctls[i].no,
+ dasd_ioctls[i].fn);
+
+}
+
+EXPORT_SYMBOL(dasd_ioctl_no_register);
+EXPORT_SYMBOL(dasd_ioctl_no_unregister);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
new file mode 100644
index 000000000000..353d41118c62
--- /dev/null
+++ b/drivers/s390/block/dasd_proc.c
@@ -0,0 +1,319 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_proc.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002
+ *
+ * /proc interface for the dasd driver.
+ *
+ * $Revision: 1.30 $
+ */
+
+#include <linux/config.h>
+#include <linux/ctype.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#include <asm/debug.h>
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_proc:"
+
+#include "dasd_int.h"
+
+static struct proc_dir_entry *dasd_proc_root_entry = NULL;
+static struct proc_dir_entry *dasd_devices_entry = NULL;
+static struct proc_dir_entry *dasd_statistics_entry = NULL;
+
+static inline char *
+dasd_get_user_string(const char __user *user_buf, size_t user_len)
+{
+ char *buffer;
+
+ buffer = kmalloc(user_len + 1, GFP_KERNEL);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_user(buffer, user_buf, user_len) != 0) {
+ kfree(buffer);
+ return ERR_PTR(-EFAULT);
+ }
+ /* got the string, now strip linefeed. */
+ if (buffer[user_len - 1] == '\n')
+ buffer[user_len - 1] = 0;
+ else
+ buffer[user_len] = 0;
+ return buffer;
+}
+
+static int
+dasd_devices_show(struct seq_file *m, void *v)
+{
+ struct dasd_device *device;
+ char *substr;
+
+ device = dasd_device_from_devindex((unsigned long) v - 1);
+ if (IS_ERR(device))
+ return 0;
+ /* Print device number. */
+ seq_printf(m, "%s", device->cdev->dev.bus_id);
+ /* Print discipline string. */
+ if (device != NULL && device->discipline != NULL)
+ seq_printf(m, "(%s)", device->discipline->name);
+ else
+ seq_printf(m, "(none)");
+ /* Print kdev. */
+ if (device->gdp)
+ seq_printf(m, " at (%3d:%6d)",
+ device->gdp->major, device->gdp->first_minor);
+ else
+ seq_printf(m, " at (???:??????)");
+ /* Print device name. */
+ if (device->gdp)
+ seq_printf(m, " is %-8s", device->gdp->disk_name);
+ else
+ seq_printf(m, " is ????????");
+ /* Print devices features. */
+ substr = test_bit(DASD_FLAG_RO, &device->flags) ? "(ro)" : " ";
+ seq_printf(m, "%4s: ", substr);
+ /* Print device status information. */
+ switch ((device != NULL) ? device->state : -1) {
+ case -1:
+ seq_printf(m, "unknown");
+ break;
+ case DASD_STATE_NEW:
+ seq_printf(m, "new");
+ break;
+ case DASD_STATE_KNOWN:
+ seq_printf(m, "detected");
+ break;
+ case DASD_STATE_BASIC:
+ seq_printf(m, "basic");
+ break;
+ case DASD_STATE_READY:
+ case DASD_STATE_ONLINE:
+ seq_printf(m, "active ");
+ if (dasd_check_blocksize(device->bp_block))
+ seq_printf(m, "n/f ");
+ else
+ seq_printf(m,
+ "at blocksize: %d, %ld blocks, %ld MB",
+ device->bp_block, device->blocks,
+ ((device->bp_block >> 9) *
+ device->blocks) >> 11);
+ break;
+ default:
+ seq_printf(m, "no stat");
+ break;
+ }
+ dasd_put_device(device);
+ if (dasd_probeonly)
+ seq_printf(m, "(probeonly)");
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
+{
+ if (*pos >= dasd_max_devindex)
+ return NULL;
+ return (void *)((unsigned long) *pos + 1);
+}
+
+static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return dasd_devices_start(m, pos);
+}
+
+static void dasd_devices_stop(struct seq_file *m, void *v)
+{
+}
+
+static struct seq_operations dasd_devices_seq_ops = {
+ .start = dasd_devices_start,
+ .next = dasd_devices_next,
+ .stop = dasd_devices_stop,
+ .show = dasd_devices_show,
+};
+
+static int dasd_devices_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &dasd_devices_seq_ops);
+}
+
+static struct file_operations dasd_devices_file_ops = {
+ .open = dasd_devices_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static inline int
+dasd_calc_metrics(char *page, char **start, off_t off,
+ int count, int *eof, int len)
+{
+ len = (len > off) ? len - off : 0;
+ if (len > count)
+ len = count;
+ if (len < count)
+ *eof = 1;
+ *start = page + off;
+ return len;
+}
+
+static inline char *
+dasd_statistics_array(char *str, int *array, int shift)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ str += sprintf(str, "%7d ", array[i] >> shift);
+ if (i == 15)
+ str += sprintf(str, "\n");
+ }
+ str += sprintf(str,"\n");
+ return str;
+}
+
+static int
+dasd_statistics_read(char *page, char **start, off_t off,
+		     int count, int *eof, void *data)
+{
+	unsigned long len;
+#ifdef CONFIG_DASD_PROFILE
+	struct dasd_profile_info_t *prof;
+	char *str;
+	int shift;
+
+	/* Profiling disabled: tell the reader how to switch it on. */
+	if (dasd_profile_level == DASD_PROFILE_OFF) {
+		len = sprintf(page, "Statistics are off - they might be "
+			      "switched on using 'echo set on > "
+			      "/proc/dasd/statistics'\n");
+		return dasd_calc_metrics(page, start, off, count, eof, len);
+	}
+
+	prof = &dasd_global_profile;
+	/* prevent counter 'overflow' on output: scale down to <= 7 digits */
+	for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++);
+
+	str = page;
+	str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
+	str += sprintf(str, "with %d sectors(512B each)\n",
+		       prof->dasd_io_sects);
+	str += sprintf(str,
+		       "   __<4	   ___8	   __16	   __32	   __64	   _128	"
+		       "   _256	   _512	   __1k	   __2k	   __4k	   __8k	"
+		       "   _16k	   _32k	   _64k	   128k\n");
+	str += sprintf(str,
+		       "   _256	   _512	   __1M	   __2M	   __4M	   __8M	"
+		       "   _16M	   _32M	   _64M	   128M	   256M	   512M	"
+		       "   __1G	   __2G	   __4G " "   _>4G\n");
+
+	str += sprintf(str, "Histogram of sizes (512B secs)\n");
+	str = dasd_statistics_array(str, prof->dasd_io_secs, shift);
+	str += sprintf(str, "Histogram of I/O times (microseconds)\n");
+	str = dasd_statistics_array(str, prof->dasd_io_times, shift);
+	str += sprintf(str, "Histogram of I/O times per sector\n");
+	str = dasd_statistics_array(str, prof->dasd_io_timps, shift);
+	str += sprintf(str, "Histogram of I/O time till ssch\n");
+	str = dasd_statistics_array(str, prof->dasd_io_time1, shift);
+	str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
+	str = dasd_statistics_array(str, prof->dasd_io_time2, shift);
+	str += sprintf(str, "Histogram of I/O time between ssch "
+		       "and irq per sector\n");
+	str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift);
+	str += sprintf(str, "Histogram of I/O time between irq and end\n");
+	str = dasd_statistics_array(str, prof->dasd_io_time3, shift);
+	str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
+	str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift);
+	len = str - page;
+#else
+	len = sprintf(page, "Statistics are not activated in this kernel\n");
+#endif
+	return dasd_calc_metrics(page, start, off, count, eof, len);
+}
+
+static int
+dasd_statistics_write(struct file *file, const char __user *user_buf,
+		      unsigned long user_len, void *data)
+{
+#ifdef CONFIG_DASD_PROFILE
+	char *buffer, *str;
+
+	if (user_len > 65536)
+		user_len = 65536;
+	buffer = dasd_get_user_string(user_buf, user_len);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+	MESSAGE_LOG(KERN_INFO, "/proc/dasd/statistics: '%s'", buffer);
+
+	/* check for valid verbs */
+	for (str = buffer; isspace(*str); str++);
+	if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
+		/* 'set xxx' was given */
+		for (str = str + 4; isspace(*str); str++);
+		if (strcmp(str, "on") == 0) {
+			/* switch on statistics profiling */
+			dasd_profile_level = DASD_PROFILE_ON;
+			MESSAGE(KERN_INFO, "%s", "Statistics switched on");
+		} else if (strcmp(str, "off") == 0) {
+			/* switch off and reset statistics profiling */
+			memset(&dasd_global_profile,
+			       0, sizeof (struct dasd_profile_info_t));
+			dasd_profile_level = DASD_PROFILE_OFF;
+			MESSAGE(KERN_INFO, "%s", "Statistics switched off");
+		} else
+			goto out_error;
+	} else if (strncmp(str, "reset", 5) == 0) {
+		/* reset the statistics */
+		memset(&dasd_global_profile, 0,
+		       sizeof (struct dasd_profile_info_t));
+		MESSAGE(KERN_INFO, "%s", "Statistics reset");
+	} else
+		goto out_error;
+	kfree(buffer);
+	return user_len;
+out_error:
+	MESSAGE(KERN_WARNING, "%s",
+		"/proc/dasd/statistics: only 'set on', 'set off' "
+		"and 'reset' are supported verbs");
+	kfree(buffer);
+	return -EINVAL;
+#else
+	MESSAGE(KERN_WARNING, "%s",
+		"/proc/dasd/statistics: is not activated in this kernel");
+	return user_len;
+#endif /* CONFIG_DASD_PROFILE */
+}
+
+int
+dasd_proc_init(void)
+{
+ dasd_proc_root_entry = proc_mkdir("dasd", &proc_root);
+ dasd_proc_root_entry->owner = THIS_MODULE;
+ dasd_devices_entry = create_proc_entry("devices",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ dasd_proc_root_entry);
+ dasd_devices_entry->proc_fops = &dasd_devices_file_ops;
+ dasd_devices_entry->owner = THIS_MODULE;
+ dasd_statistics_entry = create_proc_entry("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ dasd_proc_root_entry);
+ dasd_statistics_entry->read_proc = dasd_statistics_read;
+ dasd_statistics_entry->write_proc = dasd_statistics_write;
+ dasd_statistics_entry->owner = THIS_MODULE;
+ return 0;
+}
+
+void
+dasd_proc_exit(void)
+{
+ remove_proc_entry("devices", dasd_proc_root_entry);
+ remove_proc_entry("statistics", dasd_proc_root_entry);
+ remove_proc_entry("dasd", &proc_root);
+}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
new file mode 100644
index 000000000000..a66b17b65296
--- /dev/null
+++ b/drivers/s390/block/dcssblk.c
@@ -0,0 +1,775 @@
+/*
+ * dcssblk.c -- the S/390 block driver for dcss memory
+ *
+ * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <asm/extmem.h>
+#include <asm/io.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <asm/ccwdev.h> // for s390_root_dev_(un)register()
+
+//#define DCSSBLK_DEBUG /* Debug messages on/off */
+#define DCSSBLK_NAME "dcssblk"
+#define DCSSBLK_MINORS_PER_DISK 1
+#define DCSSBLK_PARM_LEN 400
+
+#ifdef DCSSBLK_DEBUG
+#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
+#else
+#define PRINT_DEBUG(x...) do {} while (0)
+#endif
+#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
+#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
+#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
+
+
+static int dcssblk_open(struct inode *inode, struct file *filp);
+static int dcssblk_release(struct inode *inode, struct file *filp);
+static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
+
+static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
+
+static int dcssblk_major;
+static struct block_device_operations dcssblk_devops = {
+ .owner = THIS_MODULE,
+ .open = dcssblk_open,
+ .release = dcssblk_release,
+};
+
+static ssize_t dcssblk_add_store(struct device * dev, const char * buf,
+ size_t count);
+static ssize_t dcssblk_remove_store(struct device * dev, const char * buf,
+ size_t count);
+static ssize_t dcssblk_save_store(struct device * dev, const char * buf,
+ size_t count);
+static ssize_t dcssblk_save_show(struct device *dev, char *buf);
+static ssize_t dcssblk_shared_store(struct device * dev, const char * buf,
+ size_t count);
+static ssize_t dcssblk_shared_show(struct device *dev, char *buf);
+
+static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
+static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
+static DEVICE_ATTR(save, S_IWUSR | S_IRUGO, dcssblk_save_show,
+ dcssblk_save_store);
+static DEVICE_ATTR(shared, S_IWUSR | S_IRUGO, dcssblk_shared_show,
+ dcssblk_shared_store);
+
+static struct device *dcssblk_root_dev;
+
+/*
+ * Per-segment state.  The embedded struct device controls the
+ * lifetime: dcssblk_release_segment() frees the whole structure.
+ */
+struct dcssblk_dev_info {
+	struct list_head lh;		/* link in dcssblk_devices */
+	struct device dev;		/* embedded driver-model device */
+	char segment_name[BUS_ID_SIZE];	/* upper-cased DCSS name */
+	atomic_t use_count;		/* number of current openers */
+	struct gendisk *gd;
+	unsigned long start;		/* segment start, from segment_load() */
+	unsigned long end;		/* segment end, from segment_load() */
+	int segment_type;		/* SEG_TYPE_* returned by segment_load() */
+	unsigned char save_pending;	/* save deferred until device is idle */
+	unsigned char is_shared;	/* loaded SEGMENT_SHARED? */
+	struct request_queue *dcssblk_queue;
+};
+
+static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices);
+static struct rw_semaphore dcssblk_devices_sem;
+
+/*
+ * Driver-model release callback for a segment device: frees the
+ * containing dcssblk_dev_info and drops the module reference that
+ * was taken when the device was added.
+ */
+static void
+dcssblk_release_segment(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	PRINT_DEBUG("segment release fn called for %s\n", dev->bus_id);
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	kfree(dev_info);
+	module_put(THIS_MODULE);
+}
+
+/*
+ * Assign the lowest minor number not used by any enqueued device to
+ * dev_info->gd.  Must be called with down_write(&dcssblk_devices_sem)
+ * held, and the device has to be added to the list before the
+ * semaphore is released.  Returns 0 on success, -EINVAL for a NULL
+ * dev_info, -EBUSY when every minor is taken.
+ */
+static inline int
+dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
+{
+	struct dcssblk_dev_info *entry;
+	int minor, found;
+
+	if (dev_info == NULL)
+		return -EINVAL;
+	for (minor = 0; minor < (1<<MINORBITS); minor++) {
+		/* is this minor already claimed by another device? */
+		found = 0;
+		list_for_each_entry(entry, &dcssblk_devices, lh)
+			if (entry->gd->first_minor == minor)
+				found++;
+		if (!found)
+			break;	/* got an unused minor */
+	}
+	if (found)
+		return -EBUSY;
+	dev_info->gd->first_minor = minor;
+	return 0;
+}
+
+/*
+ * Look up the dcssblk_dev_info registered under the given segment
+ * name; returns NULL if no entry matches.  The caller must hold
+ * down_read(&dcssblk_devices_sem).
+ */
+static struct dcssblk_dev_info *
+dcssblk_get_device_by_name(char *name)
+{
+	struct dcssblk_dev_info *entry;
+
+	list_for_each_entry(entry, &dcssblk_devices, lh)
+		if (strcmp(entry->segment_name, name) == 0)
+			return entry;
+	return NULL;
+}
+
+/*
+ * print appropriate error message for segment_load()/segment_type()
+ * return code
+ * @rc:       negative errno delivered by the extmem layer
+ * @seg_name: name of the segment the operation was attempted on
+ */
+static void
+dcssblk_segment_warn(int rc, char* seg_name)
+{
+	switch (rc) {
+	case -ENOENT:
+		PRINT_WARN("cannot load/query segment %s, does not exist\n",
+			   seg_name);
+		break;
+	case -ENOSYS:
+		PRINT_WARN("cannot load/query segment %s, not running on VM\n",
+			   seg_name);
+		break;
+	case -EIO:
+		PRINT_WARN("cannot load/query segment %s, hardware error\n",
+			   seg_name);
+		break;
+	case -ENOTSUPP:
+		PRINT_WARN("cannot load/query segment %s, is a multi-part "
+			   "segment\n", seg_name);
+		break;
+	case -ENOSPC:
+		PRINT_WARN("cannot load/query segment %s, overlaps with "
+			   "storage\n", seg_name);
+		break;
+	case -EBUSY:
+		PRINT_WARN("cannot load/query segment %s, overlaps with "
+			   "already loaded dcss\n", seg_name);
+		break;
+	case -EPERM:
+		PRINT_WARN("cannot load/query segment %s, already loaded in "
+			   "incompatible mode\n", seg_name);
+		break;
+	case -ENOMEM:
+		PRINT_WARN("cannot load/query segment %s, out of memory\n",
+			   seg_name);
+		break;
+	case -ERANGE:
+		PRINT_WARN("cannot load/query segment %s, exceeds kernel "
+			   "mapping range\n", seg_name);
+		break;
+	default:
+		PRINT_WARN("cannot load/query segment %s, return value %i\n",
+			   seg_name, rc);
+		break;
+	}
+}
+
+/*
+ * device attribute for switching shared/nonshared (exclusive)
+ * operation (show + store)
+ */
+static ssize_t
+dcssblk_shared_show(struct device *dev, char *buf)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	if (dev_info->is_shared)
+		return sprintf(buf, "1\n");
+	return sprintf(buf, "0\n");
+}
+
+/*
+ * Store handler for the "shared" attribute: writing "1" reloads the
+ * segment in shared mode, "0" in exclusive mode.  Fails with -EBUSY
+ * while the device is open, -EINVAL for any other input or for an
+ * SC-type segment requested exclusive.  If the reload itself fails
+ * with -EIO/-ENOENT the segment is gone, so the whole block device
+ * is torn down (removeseg path).
+ */
+static ssize_t
+dcssblk_shared_store(struct device *dev, const char *inbuf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+	int rc;
+
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
+		PRINT_WARN("Invalid value, must be 0 or 1\n");
+		return -EINVAL;
+	}
+	down_write(&dcssblk_devices_sem);
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	if (atomic_read(&dev_info->use_count)) {
+		PRINT_ERR("share: segment %s is busy!\n",
+			  dev_info->segment_name);
+		rc = -EBUSY;
+		goto out;
+	}
+	if (inbuf[0] == '1') {
+		// reload segment in shared mode
+		rc = segment_modify_shared(dev_info->segment_name,
+					   SEGMENT_SHARED);
+		if (rc < 0) {
+			BUG_ON(rc == -EINVAL);
+			if (rc == -EIO || rc == -ENOENT)
+				goto removeseg;
+		} else {
+			dev_info->is_shared = 1;
+			/* read-only segment types stay write-protected */
+			switch (dev_info->segment_type) {
+				case SEG_TYPE_SR:
+				case SEG_TYPE_ER:
+				case SEG_TYPE_SC:
+					set_disk_ro(dev_info->gd,1);
+			}
+		}
+	} else if (inbuf[0] == '0') {
+		// reload segment in exclusive mode
+		if (dev_info->segment_type == SEG_TYPE_SC) {
+			PRINT_ERR("Segment type SC (%s) cannot be loaded in "
+				  "non-shared mode\n", dev_info->segment_name);
+			rc = -EINVAL;
+			goto out;
+		}
+		rc = segment_modify_shared(dev_info->segment_name,
+					   SEGMENT_EXCLUSIVE);
+		if (rc < 0) {
+			BUG_ON(rc == -EINVAL);
+			if (rc == -EIO || rc == -ENOENT)
+				goto removeseg;
+		} else {
+			dev_info->is_shared = 0;
+			set_disk_ro(dev_info->gd, 0);
+		}
+	} else {
+		PRINT_WARN("Invalid value, must be 0 or 1\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = count;
+	goto out;
+
+removeseg:
+	PRINT_ERR("Could not reload segment %s, removing it now!\n",
+			dev_info->segment_name);
+	list_del(&dev_info->lh);
+
+	del_gendisk(dev_info->gd);
+	blk_put_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	device_unregister(dev);
+	put_device(dev);
+out:
+	up_write(&dcssblk_devices_sem);
+	return rc;
+}
+
+/*
+ * device attribute for save operation on current copy
+ * of the segment. If the segment is busy, saving will
+ * become pending until it gets released, which can be
+ * undone by storing a non-true value to this entry.
+ * (show + store)
+ */
+static ssize_t
+dcssblk_save_show(struct device *dev, char *buf)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	if (dev_info->save_pending)
+		return sprintf(buf, "1\n");
+	return sprintf(buf, "0\n");
+}
+
+/*
+ * Store handler for the "save" attribute: writing "1" saves the
+ * segment immediately if the device is idle, otherwise it marks the
+ * save as pending so dcssblk_release() performs it once the last
+ * opener is gone; "0" cancels a pending save.  Anything else is
+ * rejected with -EINVAL.
+ */
+static ssize_t
+dcssblk_save_store(struct device *dev, const char *inbuf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
+		PRINT_WARN("Invalid value, must be 0 or 1\n");
+		return -EINVAL;
+	}
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+
+	down_write(&dcssblk_devices_sem);
+	if (inbuf[0] == '1') {
+		if (atomic_read(&dev_info->use_count) == 0) {
+			// device is idle => we save immediately
+			PRINT_INFO("Saving segment %s\n",
+				   dev_info->segment_name);
+			segment_save(dev_info->segment_name);
+		} else {
+			// device is busy => we save it when it becomes
+			// idle in dcssblk_release
+			PRINT_INFO("Segment %s is currently busy, it will "
+				   "be saved when it becomes idle...\n",
+				   dev_info->segment_name);
+			dev_info->save_pending = 1;
+		}
+	} else if (inbuf[0] == '0') {
+		if (dev_info->save_pending) {
+			// device is busy & the user wants to undo his save
+			// request
+			dev_info->save_pending = 0;
+			PRINT_INFO("Pending save for segment %s deactivated\n",
+					dev_info->segment_name);
+		}
+	} else {
+		up_write(&dcssblk_devices_sem);
+		PRINT_WARN("Invalid value, must be 0 or 1\n");
+		return -EINVAL;
+	}
+	up_write(&dcssblk_devices_sem);
+	return count;
+}
+
+/*
+ * device attribute for adding devices
+ *
+ * Parses a segment name (1..8 chars, upper-cased) from buf, loads the
+ * segment in shared mode, allocates a gendisk and request queue,
+ * assigns a free minor, registers the driver-model device and its
+ * "shared"/"save" attributes and finally adds the disk.  Returns
+ * count on success or a negative errno; on failure everything
+ * acquired up to that point is rolled back via the labelled exits.
+ */
+static ssize_t
+dcssblk_add_store(struct device *dev, const char *buf, size_t count)
+{
+	int rc, i;
+	struct dcssblk_dev_info *dev_info;
+	char *local_buf;
+	unsigned long seg_byte_size;
+
+	dev_info = NULL;
+	/* only the dcssblk root device carries the "add" attribute */
+	if (dev != dcssblk_root_dev) {
+		rc = -EINVAL;
+		goto out_nobuf;
+	}
+	local_buf = kmalloc(count + 1, GFP_KERNEL);
+	if (local_buf == NULL) {
+		rc = -ENOMEM;
+		goto out_nobuf;
+	}
+	/*
+	 * parse input
+	 */
+	for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
+		local_buf[i] = toupper(buf[i]);
+	}
+	local_buf[i] = '\0';
+	if ((i == 0) || (i > 8)) {
+		rc = -ENAMETOOLONG;
+		goto out;
+	}
+	/*
+	 * already loaded?
+	 */
+	down_read(&dcssblk_devices_sem);
+	dev_info = dcssblk_get_device_by_name(local_buf);
+	up_read(&dcssblk_devices_sem);
+	if (dev_info != NULL) {
+		PRINT_WARN("Segment %s already loaded!\n", local_buf);
+		rc = -EEXIST;
+		goto out;
+	}
+	/*
+	 * get a struct dcssblk_dev_info
+	 */
+	dev_info = kmalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL);
+	if (dev_info == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	memset(dev_info, 0, sizeof(struct dcssblk_dev_info));
+
+	strcpy(dev_info->segment_name, local_buf);
+	strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE);
+	dev_info->dev.release = dcssblk_release_segment;
+	INIT_LIST_HEAD(&dev_info->lh);
+
+	dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
+	if (dev_info->gd == NULL) {
+		rc = -ENOMEM;
+		goto free_dev_info;
+	}
+	dev_info->gd->major = dcssblk_major;
+	dev_info->gd->fops = &dcssblk_devops;
+	/* NOTE(review): blk_alloc_queue() can return NULL; the result
+	 * is not checked here, which would oops below — verify */
+	dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
+	dev_info->gd->queue = dev_info->dcssblk_queue;
+	dev_info->gd->private_data = dev_info;
+	dev_info->gd->driverfs_dev = &dev_info->dev;
+	/*
+	 * load the segment
+	 */
+	rc = segment_load(local_buf, SEGMENT_SHARED,
+				&dev_info->start, &dev_info->end);
+	if (rc < 0) {
+		dcssblk_segment_warn(rc, dev_info->segment_name);
+		goto dealloc_gendisk;
+	}
+	seg_byte_size = (dev_info->end - dev_info->start + 1);
+	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
+	PRINT_INFO("Loaded segment %s, size = %lu Byte, "
+		   "capacity = %lu (512 Byte) sectors\n", local_buf,
+		   seg_byte_size, seg_byte_size >> 9);
+
+	dev_info->segment_type = rc;
+	dev_info->save_pending = 0;
+	dev_info->is_shared = 1;
+	dev_info->dev.parent = dcssblk_root_dev;
+
+	/*
+	 * get minor, add to list
+	 */
+	down_write(&dcssblk_devices_sem);
+	rc = dcssblk_assign_free_minor(dev_info);
+	if (rc) {
+		up_write(&dcssblk_devices_sem);
+		PRINT_ERR("No free minor number available! "
+			  "Unloading segment...\n");
+		goto unload_seg;
+	}
+	sprintf(dev_info->gd->disk_name, "dcssblk%d",
+		dev_info->gd->first_minor);
+	list_add_tail(&dev_info->lh, &dcssblk_devices);
+
+	if (!try_module_get(THIS_MODULE)) {
+		rc = -ENODEV;
+		goto list_del;
+	}
+	/*
+	 * register the device
+	 */
+	rc = device_register(&dev_info->dev);
+	if (rc) {
+		PRINT_ERR("Segment %s could not be registered RC=%d\n",
+				local_buf, rc);
+		module_put(THIS_MODULE);
+		goto list_del;
+	}
+	get_device(&dev_info->dev);
+	rc = device_create_file(&dev_info->dev, &dev_attr_shared);
+	if (rc)
+		goto unregister_dev;
+	rc = device_create_file(&dev_info->dev, &dev_attr_save);
+	if (rc)
+		goto unregister_dev;
+
+	add_disk(dev_info->gd);
+
+	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
+	blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
+
+	/* read-only segment types get a write-protected disk */
+	switch (dev_info->segment_type) {
+		case SEG_TYPE_SR:
+		case SEG_TYPE_ER:
+		case SEG_TYPE_SC:
+			set_disk_ro(dev_info->gd,1);
+			break;
+		default:
+			set_disk_ro(dev_info->gd,0);
+			break;
+	}
+	PRINT_DEBUG("Segment %s loaded successfully\n", local_buf);
+	up_write(&dcssblk_devices_sem);
+	rc = count;
+	goto out;
+
+unregister_dev:
+	PRINT_ERR("device_create_file() failed!\n");
+	list_del(&dev_info->lh);
+	blk_put_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	device_unregister(&dev_info->dev);
+	segment_unload(dev_info->segment_name);
+	put_device(&dev_info->dev);
+	up_write(&dcssblk_devices_sem);
+	goto out;
+list_del:
+	list_del(&dev_info->lh);
+	up_write(&dcssblk_devices_sem);
+unload_seg:
+	segment_unload(local_buf);
+dealloc_gendisk:
+	blk_put_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+free_dev_info:
+	kfree(dev_info);
+out:
+	kfree(local_buf);
+out_nobuf:
+	return rc;
+}
+
+/*
+ * device attribute for removing devices
+ *
+ * Parses a segment name from buf and, if the segment is loaded and
+ * not currently open, removes the disk, unregisters the device and
+ * unloads the segment.  Returns count on success, -ENODEV if the
+ * segment is not loaded, -EBUSY if it is still open.
+ */
+static ssize_t
+dcssblk_remove_store(struct device *dev, const char *buf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+	int rc, i;
+	char *local_buf;
+
+	/* only the dcssblk root device carries the "remove" attribute */
+	if (dev != dcssblk_root_dev) {
+		return -EINVAL;
+	}
+	local_buf = kmalloc(count + 1, GFP_KERNEL);
+	if (local_buf == NULL) {
+		return -ENOMEM;
+	}
+	/*
+	 * parse input
+	 */
+	for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
+		local_buf[i] = toupper(buf[i]);
+	}
+	local_buf[i] = '\0';
+	if ((i == 0) || (i > 8)) {
+		rc = -ENAMETOOLONG;
+		goto out_buf;
+	}
+
+	down_write(&dcssblk_devices_sem);
+	dev_info = dcssblk_get_device_by_name(local_buf);
+	if (dev_info == NULL) {
+		up_write(&dcssblk_devices_sem);
+		PRINT_WARN("Segment %s is not loaded!\n", local_buf);
+		rc = -ENODEV;
+		goto out_buf;
+	}
+	if (atomic_read(&dev_info->use_count) != 0) {
+		up_write(&dcssblk_devices_sem);
+		PRINT_WARN("Segment %s is in use!\n", local_buf);
+		rc = -EBUSY;
+		goto out_buf;
+	}
+	list_del(&dev_info->lh);
+
+	del_gendisk(dev_info->gd);
+	blk_put_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	device_unregister(&dev_info->dev);
+	segment_unload(dev_info->segment_name);
+	PRINT_DEBUG("Segment %s unloaded successfully\n",
+			dev_info->segment_name);
+	/* drop the reference taken in dcssblk_add_store() */
+	put_device(&dev_info->dev);
+	up_write(&dcssblk_devices_sem);
+
+	rc = count;
+out_buf:
+	kfree(local_buf);
+	return rc;
+}
+
+/*
+ * Block device open: count the opener and force a 4kB block size.
+ * Returns -ENODEV if no private data is attached to the gendisk.
+ */
+static int
+dcssblk_open(struct inode *inode, struct file *filp)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	dev_info = inode->i_bdev->bd_disk->private_data;
+	if (dev_info == NULL)
+		return -ENODEV;
+	atomic_inc(&dev_info->use_count);
+	inode->i_bdev->bd_block_size = 4096;
+	return 0;
+}
+
+/*
+ * Block device release: drop the use count and, if this was the last
+ * opener and a save was requested while the device was busy, save
+ * the segment now.
+ */
+static int
+dcssblk_release(struct inode *inode, struct file *filp)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	dev_info = inode->i_bdev->bd_disk->private_data;
+	if (dev_info == NULL)
+		return -ENODEV;
+	down_write(&dcssblk_devices_sem);
+	if (atomic_dec_and_test(&dev_info->use_count) &&
+	    dev_info->save_pending) {
+		PRINT_INFO("Segment %s became idle and is being saved now\n",
+			    dev_info->segment_name);
+		segment_save(dev_info->segment_name);
+		dev_info->save_pending = 0;
+	}
+	up_write(&dcssblk_devices_sem);
+	return 0;
+}
+
+/*
+ * Serve a bio for the dcssblk device.  The DCSS segment is directly
+ * addressable memory, so requests are satisfied with memcpy; only
+ * page-aligned, page-sized transfers within the segment are accepted,
+ * everything else fails the bio.
+ */
+static int
+dcssblk_make_request(request_queue_t *q, struct bio *bio)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct bio_vec *bvec;
+	unsigned long index;
+	unsigned long page_addr;
+	unsigned long source_addr;
+	unsigned long bytes_done;
+	int i;
+
+	bytes_done = 0;
+	dev_info = bio->bi_bdev->bd_disk->private_data;
+	if (dev_info == NULL)
+		goto fail;
+	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+		/* Request is not page-aligned. */
+		goto fail;
+	if (((bio->bi_size >> 9) + bio->bi_sector)
+	    > get_capacity(bio->bi_bdev->bd_disk)) {
+		/* Request beyond end of DCSS segment. */
+		goto fail;
+	}
+	index = (bio->bi_sector >> 3);
+	bio_for_each_segment(bvec, bio, i) {
+		page_addr = (unsigned long)
+			page_address(bvec->bv_page) + bvec->bv_offset;
+		source_addr = dev_info->start + (index<<12) + bytes_done;
+		/* parenthesize the whole condition inside unlikely();
+		 * the original applied unlikely() only to the first
+		 * subterm, defeating the branch hint */
+		if (unlikely((page_addr & 4095) != 0 ||
+			     (bvec->bv_len & 4095) != 0))
+			// More paranoia.
+			goto fail;
+		if (bio_data_dir(bio) == READ) {
+			memcpy((void*)page_addr, (void*)source_addr,
+			       bvec->bv_len);
+		} else {
+			memcpy((void*)source_addr, (void*)page_addr,
+			       bvec->bv_len);
+		}
+		bytes_done += bvec->bv_len;
+	}
+	bio_endio(bio, bytes_done, 0);
+	return 0;
+fail:
+	bio_io_error(bio, bytes_done);
+	return 0;
+}
+
+/*
+ * Process the "segments=" module/kernel parameter: for each
+ * comma-separated name (at most 8 chars) add the segment via
+ * dcssblk_add_store(); a "(local)" suffix after the name switches
+ * the freshly added segment to exclusive (non-shared) mode by
+ * writing "0" to its shared attribute.
+ */
+static void
+dcssblk_check_params(void)
+{
+	int rc, i, j, k;
+	char buf[9];
+	struct dcssblk_dev_info *dev_info;
+
+	for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
+	     i++) {
+		/* copy one name (up to ',', '(' or end) into buf */
+		for (j = i; (dcssblk_segments[j] != ',')  &&
+			    (dcssblk_segments[j] != '\0') &&
+			    (dcssblk_segments[j] != '(')  &&
+			    (j - i) < 8; j++)
+		{
+			buf[j-i] = dcssblk_segments[j];
+		}
+		buf[j-i] = '\0';
+		rc = dcssblk_add_store(dcssblk_root_dev, buf, j-i);
+		if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
+			for (k = 0; buf[k] != '\0'; k++)
+				buf[k] = toupper(buf[k]);
+			if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
+				down_read(&dcssblk_devices_sem);
+				dev_info = dcssblk_get_device_by_name(buf);
+				up_read(&dcssblk_devices_sem);
+				if (dev_info)
+					dcssblk_shared_store(&dev_info->dev,
+							     "0\n", 2);
+			}
+		}
+		/* skip the rest of this entry up to the next ',' */
+		while ((dcssblk_segments[j] != ',') &&
+		       (dcssblk_segments[j] != '\0'))
+		{
+			j++;
+		}
+		if (dcssblk_segments[j] == '\0')
+			break;
+		i = j;
+	}
+}
+
+/*
+ * The init/exit functions.
+ */
+static void __exit
+dcssblk_exit(void)
+{
+	int rc;
+
+	PRINT_DEBUG("DCSSBLOCK EXIT...\n");
+	/* drop the root device, then release the dynamic major */
+	s390_root_dev_unregister(dcssblk_root_dev);
+	rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
+	if (rc)
+		PRINT_ERR("unregister_blkdev() failed!\n");
+	PRINT_DEBUG("...finished!\n");
+}
+
+/*
+ * Driver initialization: register the dcssblk root device, create
+ * its "add" and "remove" attributes, obtain a dynamic block major
+ * and add any segments given on the "segments=" parameter.  Each
+ * failure path unregisters the root device again.
+ */
+static int __init
+dcssblk_init(void)
+{
+	int rc;
+
+	PRINT_DEBUG("DCSSBLOCK INIT...\n");
+	dcssblk_root_dev = s390_root_dev_register("dcssblk");
+	if (IS_ERR(dcssblk_root_dev)) {
+		PRINT_ERR("device_register() failed!\n");
+		return PTR_ERR(dcssblk_root_dev);
+	}
+	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
+	if (rc) {
+		PRINT_ERR("device_create_file(add) failed!\n");
+		s390_root_dev_unregister(dcssblk_root_dev);
+		return rc;
+	}
+	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
+	if (rc) {
+		PRINT_ERR("device_create_file(remove) failed!\n");
+		s390_root_dev_unregister(dcssblk_root_dev);
+		return rc;
+	}
+	/* major 0 requests a dynamically assigned major number */
+	rc = register_blkdev(0, DCSSBLK_NAME);
+	if (rc < 0) {
+		PRINT_ERR("Can't get dynamic major!\n");
+		s390_root_dev_unregister(dcssblk_root_dev);
+		return rc;
+	}
+	dcssblk_major = rc;
+	init_rwsem(&dcssblk_devices_sem);
+
+	dcssblk_check_params();
+
+	PRINT_DEBUG("...finished!\n");
+	return 0;
+}
+
+module_init(dcssblk_init);
+module_exit(dcssblk_exit);
+
+module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
+MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
+ "comma-separated list, each name max. 8 chars.\n"
+ "Adding \"(local)\" to segment name equals echoing 0 to "
+ "/sys/devices/dcssblk/<segment name>/shared after loading "
+ "the segment - \n"
+ "e.g. segments=\"mydcss1,mydcss2,mydcss3(local)\"");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
new file mode 100644
index 000000000000..d428c909b8a0
--- /dev/null
+++ b/drivers/s390/block/xpram.c
@@ -0,0 +1,539 @@
+/*
+ * Xpram.c -- the S/390 expanded memory RAM-disk
+ *
+ * significant parts of this code are based on
+ * the sbull device driver presented in
+ * A. Rubini: Linux Device Drivers
+ *
+ * Author of XPRAM specific coding: Reinhard Buendgen
+ * buendgen@de.ibm.com
+ * Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * External interfaces:
+ * Interfaces to linux kernel
+ * xpram_setup: read kernel parameters
+ * Device specific file operations
+ * xpram_iotcl
+ * xpram_open
+ *
+ * "ad-hoc" partitioning:
+ * the expanded memory can be partitioned among several devices
+ * (with different minors). The partitioning set up can be
+ * set by kernel or module parameters (int devs & int sizes[])
+ *
+ * Potential future improvements:
+ * generic hard disk support to replace ad-hoc partitioning
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h> /* isdigit, isxdigit */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/hdreg.h> /* HDIO_GETGEO */
+#include <linux/sysdev.h>
+#include <linux/bio.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/uaccess.h>
+
+#define XPRAM_NAME "xpram"
+#define XPRAM_DEVS 1 /* one partition */
+#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
+
+#define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x)
+#define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x)
+#define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x)
+#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x)
+
+
+/* sysdev class/device so xpram appears in the system device tree */
+static struct sysdev_class xpram_sysclass = {
+	set_kset_name("xpram"),
+};
+
+static struct sys_device xpram_sys_device = {
+	.id	= 0,
+	.cls	= &xpram_sysclass,
+};
+
+/* one "partition" of the expanded storage */
+typedef struct {
+	unsigned int	size;		/* size of xpram segment in pages */
+	unsigned int	offset;		/* start page of xpram segment */
+} xpram_device_t;
+
+static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
+static unsigned int xpram_sizes[XPRAM_MAX_DEVS];	/* partition sizes in kB */
+static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
+static unsigned int xpram_pages;	/* total expanded storage in pages */
+static int xpram_devs;			/* number of configured partitions */
+
+/*
+ * Parameter parsing functions.
+ */
+static int devs = XPRAM_DEVS;
+static unsigned int sizes[XPRAM_MAX_DEVS];
+
+module_param(devs, int, 0);
+module_param_array(sizes, int, NULL, 0);
+
+MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
+ "the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
+MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
+ "the defaults are 0s \n" \
+ "All devices with size 0 equally partition the "
+ "remaining space on the expanded strorage not "
+ "claimed by explicit sizes\n");
+MODULE_LICENSE("GPL");
+
+#ifndef MODULE
+/*
+ * Parses the kernel parameters given in the kernel parameter line.
+ * The expected format is
+ * <number_of_partitions>[","<partition_size>]*
+ * where
+ * devices is a positive integer that initializes xpram_devs
+ * each size is a non-negative integer possibly followed by a
+ * magnitude (k,K,m,M,g,G), the list of sizes initialises
+ * xpram_sizes
+ *
+ * Arguments
+ * str: substring of kernel parameter line that contains xprams
+ * kernel parameters.
+ *
+ * Result 0 on success, -EINVAL else -- only for Version > 2.3
+ *
+ * Side effects
+ * the global variabls devs is set to the value of
+ * <number_of_partitions> and sizes[i] is set to the i-th
+ * partition size (if provided). A parsing error of a value
+ * results in this value being set to -EINVAL.
+ */
+static int __init xpram_setup (char *str)
+{
+	char *cp;
+	int i;
+
+	devs = simple_strtoul(str, &cp, 10);
+	if (cp <= str || devs > XPRAM_MAX_DEVS)
+		return 0;	/* no number parsed or too many devices */
+	/* parse up to devs comma-separated sizes (kept in kB) with an
+	 * optional k/K, m/M or g/G magnitude suffix */
+	for (i = 0; (i < devs) && (*cp++ == ','); i++) {
+		sizes[i] = simple_strtoul(cp, &cp, 10);
+		if (*cp == 'g' || *cp == 'G') {
+			sizes[i] <<= 20;	/* GB -> kB */
+			cp++;
+		} else if (*cp == 'm' || *cp == 'M') {
+			sizes[i] <<= 10;	/* MB -> kB */
+			cp++;
+		} else if (*cp == 'k' || *cp == 'K')
+			cp++;
+		while (isspace(*cp)) cp++;
+	}
+	if (*cp == ',' && i >= devs)
+		PRINT_WARN("partition sizes list has too many entries.\n");
+	else if (*cp != 0)
+		PRINT_WARN("ignored '%s' at end of parameter string.\n", cp);
+	return 1;
+}
+
+__setup("xpram_parts=", xpram_setup);
+#endif
+
+/*
+ * Copy expanded memory page (4kB) into main memory
+ * Arguments
+ *	 page_addr:	address of target page
+ *	 xpage_index:	index of expandeded memory page
+ * Return value
+ *	 0:		if operation succeeds
+ *	 -EIO:		if pgin failed
+ *	 -ENXIO:	if xpram has vanished
+ */
+static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
+{
+	int cc;
+
+	/* issue the PGIN instruction; if it traps (e.g. no expanded
+	 * storage) the __ex_table entry resumes at label 1 with the
+	 * preloaded condition-code value 2 */
+	__asm__ __volatile__ (
+		"   lhi	%0,2\n"  /* return unused cc 2 if pgin traps */
+		"   .insn rre,0xb22e0000,%1,%2\n"  /* pgin %1,%2 */
+                "0: ipm	%0\n"
+		"   srl	%0,28\n"
+		"1:\n"
+#ifndef CONFIG_ARCH_S390X
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+		"   .long  0b,1b\n"
+		".previous"
+#else
+                ".section __ex_table,\"a\"\n"
+                "   .align 8\n"
+                "   .quad  0b,1b\n"
+                ".previous"
+#endif
+		: "=&d" (cc)
+		: "a" (__pa(page_addr)), "a" (xpage_index)
+		: "cc" );
+	if (cc == 3)
+		return -ENXIO;
+	if (cc == 2) {
+		PRINT_ERR("expanded storage lost!\n");
+		return -ENXIO;
+	}
+	if (cc == 1) {
+		PRINT_ERR("page in failed for page index %u.\n",
+			  xpage_index);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * Copy a 4kB page of main memory to an expanded memory page
+ * Arguments
+ *	 page_addr:	address of source page
+ *	 xpage_index:	index of expandeded memory page
+ * Return value
+ *	 0:		if operation succeeds
+ *	 -EIO:		if pgout failed
+ *	 -ENXIO:	if xpram has vanished
+ */
+static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
+{
+	int cc;
+
+	/* issue the PGOUT instruction; a trap resumes at label 1 with
+	 * the preloaded condition-code value 2 (see __ex_table entry) */
+	__asm__ __volatile__ (
+		"   lhi	%0,2\n"  /* return unused cc 2 if pgout traps */
+		"   .insn rre,0xb22f0000,%1,%2\n"  /* pgout %1,%2 */
+                "0: ipm	%0\n"
+		"   srl	%0,28\n"
+		"1:\n"
+#ifndef CONFIG_ARCH_S390X
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+		"   .long  0b,1b\n"
+		".previous"
+#else
+                ".section __ex_table,\"a\"\n"
+                "   .align 8\n"
+                "   .quad  0b,1b\n"
+                ".previous"
+#endif
+		: "=&d" (cc)
+		: "a" (__pa(page_addr)), "a" (xpage_index)
+		: "cc" );
+	if (cc == 3)
+		return -ENXIO;
+	if (cc == 2) {
+		PRINT_ERR("expanded storage lost!\n");
+		return -ENXIO;
+	}
+	if (cc == 1) {
+		PRINT_ERR("page out failed for page index %u.\n",
+			  xpage_index);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * Check if xpram is available.
+ */
+static int __init xpram_present(void)
+{
+	unsigned long mem_page;
+	int rc;
+
+	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (mem_page == 0)
+		return -ENOMEM;
+	/* probing page 0 tells us whether expanded storage exists */
+	rc = xpram_page_in(mem_page, 0);
+	free_page(mem_page);
+	if (rc != 0)
+		return -ENXIO;
+	return 0;
+}
+
+/*
+ * Return index of the last available xpram page.
+ *
+ * Binary search by bit probing: starting with the top bit of an
+ * unsigned int, each candidate bit is kept in the index iff a page
+ * at (page_index | add_bit) can actually be read in.  Returns 0 if
+ * no scratch page could be allocated.
+ */
+static unsigned long __init xpram_highest_page_index(void)
+{
+	unsigned int page_index, add_bit;
+	unsigned long mem_page;
+
+	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (!mem_page)
+		return 0;
+
+	page_index = 0;
+	add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
+	while (add_bit > 0) {
+		if (xpram_page_in(mem_page, page_index | add_bit) == 0)
+			page_index |= add_bit;
+		add_bit >>= 1;
+	}
+
+	free_page (mem_page);
+
+	return page_index;
+}
+
+/*
+ * Block device make request function.
+ *
+ * Serves a bio by copying page-sized chunks between main memory and
+ * expanded storage with xpram_page_in/xpram_page_out.  Only
+ * page-aligned, page-sized requests within the partition are
+ * accepted; anything else fails the bio.
+ */
+static int xpram_make_request(request_queue_t *q, struct bio *bio)
+{
+	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
+	struct bio_vec *bvec;
+	unsigned int index;
+	unsigned long page_addr;
+	unsigned long bytes;
+	int i;
+
+	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+		/* Request is not page-aligned. */
+		goto fail;
+	if ((bio->bi_size >> 12) > xdev->size)
+		/* Request larger than the device (in pages). */
+		goto fail;
+	if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+		/* Page index would overflow the 32-bit xpram index. */
+		goto fail;
+	index = (bio->bi_sector >> 3) + xdev->offset;
+	bio_for_each_segment(bvec, bio, i) {
+		page_addr = (unsigned long)
+			kmap(bvec->bv_page) + bvec->bv_offset;
+		bytes = bvec->bv_len;
+		if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
+			/* More paranoia. */
+			goto fail;
+		while (bytes > 0) {
+			if (bio_data_dir(bio) == READ) {
+				if (xpram_page_in(page_addr, index) != 0)
+					goto fail;
+			} else {
+				if (xpram_page_out(page_addr, index) != 0)
+					goto fail;
+			}
+			page_addr += 4096;
+			bytes -= 4096;
+			index++;
+		}
+	}
+	set_bit(BIO_UPTODATE, &bio->bi_flags);
+	bytes = bio->bi_size;
+	bio->bi_size = 0;
+	bio->bi_end_io(bio, bytes, 0);
+	return 0;
+fail:
+	bio_io_error(bio, bio->bi_size);
+	return 0;
+}
+
+/*
+ * Block device ioctl: only HDIO_GETGEO is supported.  The geometry
+ * is fabricated since xpram has no real disk layout.  Returns
+ * -EFAULT when the user buffer cannot be written (the original code
+ * ignored the put_user() results), -EINVAL for any other command.
+ */
+static int xpram_ioctl (struct inode *inode, struct file *filp,
+		 unsigned int cmd, unsigned long arg)
+{
+	struct hd_geometry __user *geo;
+	unsigned long size;
+	if (cmd != HDIO_GETGEO)
+		return -EINVAL;
+	/*
+	 * get geometry: we have to fake one...  trim the size to a
+	 * multiple of 64 (32k): tell we have 16 sectors, 4 heads,
+	 * whatever cylinders. Tell also that data starts at sector. 4.
+	 */
+	geo = (struct hd_geometry __user *) arg;
+	size = (xpram_pages * 8) & ~0x3f;
+	if (put_user(size >> 6, &geo->cylinders) ||
+	    put_user(4, &geo->heads) ||
+	    put_user(16, &geo->sectors) ||
+	    put_user(4, &geo->start))
+		return -EFAULT;
+	return 0;
+}
+
+/* block device operations; I/O itself goes via xpram_make_request */
+static struct block_device_operations xpram_devops =
+{
+	.owner	= THIS_MODULE,
+	.ioctl	= xpram_ioctl,
+};
+
+/*
+ * Setup xpram_sizes array.
+ *
+ * @pages: total number of available expanded-storage pages.
+ * Copies the user-supplied sizes (kB) into xpram_sizes, rounding
+ * each up to a multiple of 4kB; partitions given as size 0 share the
+ * space left over after all explicitly sized ones.  Returns 0 on
+ * success, -EINVAL for a bad device count or if the explicit sizes
+ * exceed the available storage.
+ */
+static int __init xpram_setup_sizes(unsigned long pages)
+{
+	unsigned long mem_needed;
+	unsigned long mem_auto;
+	int mem_auto_no;
+	int i;
+
+	/* Check number of devices. */
+	if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
+		PRINT_ERR("invalid number %d of devices\n",devs);
+		return -EINVAL;
+	}
+	xpram_devs = devs;
+
+	/*
+	 * Copy sizes array to xpram_sizes and align partition
+	 * sizes to page boundary.
+	 */
+	mem_needed = 0;
+	mem_auto_no = 0;
+	for (i = 0; i < xpram_devs; i++) {
+		/* round kB value up to a 4kB (one page) multiple */
+		xpram_sizes[i] = (sizes[i] + 3) & -4UL;
+		if (xpram_sizes[i])
+			mem_needed += xpram_sizes[i];
+		else
+			mem_auto_no++;
+	}
+
+	PRINT_INFO("  number of devices (partitions): %d \n", xpram_devs);
+	for (i = 0; i < xpram_devs; i++) {
+		if (xpram_sizes[i])
+			PRINT_INFO("  size of partition %d: %u kB\n",
+				   i, xpram_sizes[i]);
+		else
+			PRINT_INFO("  size of partition %d to be set "
+				   "automatically\n",i);
+	}
+	PRINT_DEBUG("  memory needed (for sized partitions): %lu kB\n",
+		    mem_needed);
+	PRINT_DEBUG("  partitions to be sized automatically: %d\n",
+		    mem_auto_no);
+
+	if (mem_needed > pages * 4) {
+		PRINT_ERR("Not enough expanded memory available\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * partitioning:
+	 * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
+	 * else:		 all partitions with zero xpram_sizes[i]
+	 *			 partition equally the remaining space
+	 */
+	if (mem_auto_no) {
+		mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
+		PRINT_INFO("  automatically determined "
+			   "partition size: %lu kB\n", mem_auto);
+		for (i = 0; i < xpram_devs; i++)
+			if (xpram_sizes[i] == 0)
+				xpram_sizes[i] = mem_auto;
+	}
+	return 0;
+}
+
+/* shared request queue for all xpram partitions */
+static struct request_queue *xpram_queue;
+
+/*
+ * Allocate one gendisk per partition, register the xpram major, set
+ * up the shared request queue and add the disks.  On failure every
+ * already-allocated disk is released again; returns 0 or a negative
+ * errno.
+ */
+static int __init xpram_setup_blkdev(void)
+{
+	unsigned long offset;
+	int i, rc = -ENOMEM;
+
+	for (i = 0; i < xpram_devs; i++) {
+		struct gendisk *disk = alloc_disk(1);
+		if (!disk)
+			goto out;
+		xpram_disks[i] = disk;
+	}
+
+	/*
+	 * Register xpram major.
+	 */
+	rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+	if (rc < 0)
+		goto out;
+
+	devfs_mk_dir("slram");
+
+	/*
+	 * Assign the other needed values: make request function, sizes and
+	 * hardsect size. All the minor devices feature the same value.
+	 */
+	xpram_queue = blk_alloc_queue(GFP_KERNEL);
+	if (!xpram_queue) {
+		rc = -ENOMEM;
+		goto out_unreg;
+	}
+	blk_queue_make_request(xpram_queue, xpram_make_request);
+	blk_queue_hardsect_size(xpram_queue, 4096);
+
+	/*
+	 * Setup device structures.
+	 */
+	offset = 0;
+	for (i = 0; i < xpram_devs; i++) {
+		struct gendisk *disk = xpram_disks[i];
+
+		xpram_devices[i].size = xpram_sizes[i] / 4;	/* kB -> pages */
+		xpram_devices[i].offset = offset;
+		offset += xpram_devices[i].size;
+		disk->major = XPRAM_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &xpram_devops;
+		disk->private_data = &xpram_devices[i];
+		disk->queue = xpram_queue;
+		sprintf(disk->disk_name, "slram%d", i);
+		sprintf(disk->devfs_name, "slram/%d", i);
+		set_capacity(disk, xpram_sizes[i] << 1);	/* kB -> 512B sectors */
+		add_disk(disk);
+	}
+
+	return 0;
+out_unreg:
+	devfs_remove("slram");
+	unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+out:
+	/* release the disks allocated before the failure */
+	while (i--)
+		put_disk(xpram_disks[i]);
+	return rc;
+}
+
+/*
+ * Finally, the init/exit functions.
+ */
+static void __exit xpram_exit(void)
+{
+	int i;
+	/* disks must be deleted before their references are dropped */
+	for (i = 0; i < xpram_devs; i++) {
+		del_gendisk(xpram_disks[i]);
+		put_disk(xpram_disks[i]);
+	}
+	unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+	devfs_remove("slram");
+	blk_cleanup_queue(xpram_queue);
+	sysdev_unregister(&xpram_sys_device);
+	sysdev_class_unregister(&xpram_sysclass);
+}
+
+/*
+ * Module initialization: probe for expanded storage, determine its
+ * size, compute the partition layout and register sysdev class,
+ * sysdev and the block devices.  Each failure path unwinds the
+ * registrations done so far.
+ */
+static int __init xpram_init(void)
+{
+	int rc;
+
+	/* Find out size of expanded memory. */
+	if (xpram_present() != 0) {
+		PRINT_WARN("No expanded memory available\n");
+		return -ENODEV;
+	}
+	xpram_pages = xpram_highest_page_index();
+	PRINT_INFO("  %u pages expanded memory found (%lu KB).\n",
+		   xpram_pages, (unsigned long) xpram_pages*4);
+	rc = xpram_setup_sizes(xpram_pages);
+	if (rc)
+		return rc;
+	rc = sysdev_class_register(&xpram_sysclass);
+	if (rc)
+		return rc;
+
+	rc = sysdev_register(&xpram_sys_device);
+	if (rc) {
+		sysdev_class_unregister(&xpram_sysclass);
+		return rc;
+	}
+	rc = xpram_setup_blkdev();
+	if (rc)
+		sysdev_unregister(&xpram_sys_device);
+	return rc;
+}
+
+module_init(xpram_init);
+module_exit(xpram_exit);
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
new file mode 100644
index 000000000000..14e8cce9f862
--- /dev/null
+++ b/drivers/s390/char/Makefile
@@ -0,0 +1,28 @@
+#
+# S/390 character devices
+#
+
+obj-y += ctrlchar.o keyboard.o defkeymap.o
+
+obj-$(CONFIG_TN3270) += raw3270.o
+obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
+obj-$(CONFIG_TN3270_TTY) += tty3270.o
+obj-$(CONFIG_TN3270_FS) += fs3270.o
+
+obj-$(CONFIG_TN3215) += con3215.o
+
+obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o
+obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
+obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
+obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
+obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
+
+obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
+obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
+
+tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o
+tape-$(CONFIG_PROC_FS) += tape_proc.o
+tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
+obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
+obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
+obj-$(CONFIG_MONREADER) += monreader.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
new file mode 100644
index 000000000000..022f17bff731
--- /dev/null
+++ b/drivers/s390/char/con3215.c
@@ -0,0 +1,1192 @@
+/*
+ * drivers/s390/char/con3215.c
+ * 3215 line mode terminal driver.
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *
+ * Updated:
+ * Aug-2000: Added tab support
+ * Dan Morrison, IBM Corporation (dmorriso@cse.buffalo.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/io.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+#include <asm/delay.h>
+#include <asm/cpcmd.h>
+#include <asm/setup.h>
+
+#include "ctrlchar.h"
+
+#define NR_3215 1
+#define NR_3215_REQ (4*NR_3215)
+#define RAW3215_BUFFER_SIZE 65536 /* output buffer size */
+#define RAW3215_INBUF_SIZE 256 /* input buffer size */
+#define RAW3215_MIN_SPACE 128 /* minimum free space for wakeup */
+#define RAW3215_MIN_WRITE 1024 /* min. length for immediate output */
+#define RAW3215_MAX_BYTES 3968 /* max. bytes to write with one ssch */
+#define RAW3215_MAX_NEWLINE 50 /* max. lines to write with one ssch */
+#define RAW3215_NR_CCWS 3
+#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
+
+#define RAW3215_FIXED	1	      /* 3215 console device is not to be freed */
+#define RAW3215_ACTIVE 2 /* set if the device is in use */
+#define RAW3215_WORKING 4 /* set if a request is being worked on */
+#define RAW3215_THROTTLED 8 /* set if reading is disabled */
+#define RAW3215_STOPPED 16 /* set if writing is disabled */
+#define RAW3215_CLOSING 32 /* set while in close process */
+#define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */
+#define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */
+
+#define TAB_STOP_SIZE 8 /* tab stop size */
+
+/*
+ * Request types for a 3215 device
+ */
+enum raw3215_type {
+ RAW3215_FREE, RAW3215_READ, RAW3215_WRITE
+};
+
+/*
+ * Request structure for a 3215 device
+ */
+struct raw3215_req {
+ enum raw3215_type type; /* type of the request */
+ int start, len; /* start index & len in output buffer */
+ int delayable; /* indication to wait for more data */
+ int residual; /* residual count for read request */
+ struct ccw1 ccws[RAW3215_NR_CCWS]; /* space for the channel program */
+ struct raw3215_info *info; /* pointer to main structure */
+ struct raw3215_req *next; /* pointer to next request */
+} __attribute__ ((aligned(8)));
+
+struct raw3215_info {
+ struct ccw_device *cdev; /* device for tty driver */
+ spinlock_t *lock; /* pointer to irq lock */
+ int flags; /* state flags */
+ char *buffer; /* pointer to output buffer */
+ char *inbuf; /* pointer to input buffer */
+ int head; /* first free byte in output buffer */
+ int count; /* number of bytes in output buffer */
+ int written; /* number of bytes in write requests */
+ struct tty_struct *tty; /* pointer to tty structure if present */
+ struct tasklet_struct tasklet;
+ struct raw3215_req *queued_read; /* pointer to queued read requests */
+ struct raw3215_req *queued_write;/* pointer to queued write requests */
+ wait_queue_head_t empty_wait; /* wait queue for flushing */
+ struct timer_list timer; /* timer for delayed output */
+ char *message; /* pending message from raw3215_irq */
+ int msg_dstat; /* dstat for pending message */
+ int msg_cstat; /* cstat for pending message */
+ int line_pos; /* position on the line (for tabs) */
+ char ubuffer[80]; /* copy_from_user buffer */
+};
+
+/* array of 3215 devices structures */
+static struct raw3215_info *raw3215[NR_3215];
+/* spinlock to protect the raw3215 array */
+static DEFINE_SPINLOCK(raw3215_device_lock);
+/* list of free request structures */
+static struct raw3215_req *raw3215_freelist;
+/* spinlock to protect free list */
+static spinlock_t raw3215_freelist_lock;
+
+static struct tty_driver *tty3215_driver;
+
+/*
+ * Get a request structure from the free list
+ */
+/*
+ * Get a request structure from the free list.
+ *
+ * NOTE(review): the free list is dereferenced without a NULL check, so
+ * this relies on NR_3215_REQ being large enough that the pool is never
+ * exhausted — confirm against the callers' queuing discipline.
+ */
+static inline struct raw3215_req *
+raw3215_alloc_req(void) {
+	struct raw3215_req *req;
+	unsigned long flags;
+
+	/* pop the head of the singly linked free list */
+	spin_lock_irqsave(&raw3215_freelist_lock, flags);
+	req = raw3215_freelist;
+	raw3215_freelist = req->next;
+	spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+	return req;
+}
+
+/*
+ * Put a request structure back to the free list
+ */
+/*
+ * Put a request structure back to the free list.  Marking the request
+ * RAW3215_FREE first makes a double free a harmless no-op.
+ */
+static inline void
+raw3215_free_req(struct raw3215_req *req) {
+	unsigned long flags;
+
+	if (req->type == RAW3215_FREE)
+		return;		/* don't free a free request */
+	req->type = RAW3215_FREE;
+	/* push onto the head of the free list */
+	spin_lock_irqsave(&raw3215_freelist_lock, flags);
+	req->next = raw3215_freelist;
+	raw3215_freelist = req;
+	spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+}
+
+/*
+ * Set up a read request that reads up to 160 bytes from the 3215 device.
+ * If there is a queued read request it is used, but that shouldn't happen
+ * because a 3215 terminal won't accept a new read before the old one is
+ * completed.
+ */
+static void
+raw3215_mk_read_req(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	struct ccw1 *ccw;
+
+	/* there can only be ONE read request at a time */
+	req = raw->queued_read;
+	if (req == NULL) {
+		/* no queued read request, use new req structure */
+		req = raw3215_alloc_req();
+		req->type = RAW3215_READ;
+		req->info = raw;
+		raw->queued_read = req;
+	}
+
+	/* single-CCW channel program: read inquiry into raw->inbuf */
+	ccw = req->ccws;
+	ccw->cmd_code = 0x0A; /* read inquiry */
+	ccw->flags = 0x20;    /* ignore incorrect length */
+	ccw->count = 160;
+	ccw->cda = (__u32) __pa(raw->inbuf);
+}
+
+/*
+ * Set up a write request with the information from the main structure.
+ * A ccw chain is created that writes as much as possible from the output
+ * buffer to the 3215 device. If a queued write exists it is replaced by
+ * the new, probably lengthened request.
+ */
+/*
+ * Build (or enlarge) the queued write request from the circular output
+ * buffer.  raw->buffer is a power-of-two ring (RAW3215_BUFFER_SIZE), so
+ * all index arithmetic is masked with (RAW3215_BUFFER_SIZE - 1).
+ * Must be called with raw->lock held.
+ */
+static void
+raw3215_mk_write_req(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	struct ccw1 *ccw;
+	int len, count, ix, lines;
+
+	/* nothing new in the buffer beyond what is already in requests */
+	if (raw->count <= raw->written)
+		return;
+	/* check if there is a queued write request */
+	req = raw->queued_write;
+	if (req == NULL) {
+		/* no queued write request, use new req structure */
+		req = raw3215_alloc_req();
+		req->type = RAW3215_WRITE;
+		req->info = raw;
+		raw->queued_write = req;
+	} else {
+		/* rebuild the queued request from scratch: retract its
+		 * byte count so the recalculation below covers it again */
+		raw->written -= req->len;
+	}
+
+	ccw = req->ccws;
+	/* first not-yet-requested byte in the ring */
+	req->start = (raw->head - raw->count + raw->written) &
+		     (RAW3215_BUFFER_SIZE - 1);
+	/*
+	 * now we have to count newlines. We can at max accept
+	 * RAW3215_MAX_NEWLINE newlines in a single ssch due to
+	 * a restriction in VM
+	 */
+	lines = 0;
+	ix = req->start;
+	while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) {
+		if (raw->buffer[ix] == 0x15)	/* 0x15 = EBCDIC newline */
+			lines++;
+		ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+	}
+	/* ring distance from req->start to ix, at least 1 byte */
+	len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
+	if (len > RAW3215_MAX_BYTES)
+		len = RAW3215_MAX_BYTES;
+	req->len = len;
+	raw->written += len;
+
+	/* set the indication if we should try to enlarge this request */
+	req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);
+
+	/* one write CCW per contiguous ring segment, command-chained */
+	ix = req->start;
+	while (len > 0) {
+		if (ccw > req->ccws)
+			ccw[-1].flags |= 0x40;	/* use command chaining */
+		ccw->cmd_code = 0x01; /* write, auto carrier return */
+		ccw->flags = 0x20;    /* ignore incorrect length ind.  */
+		ccw->cda =
+			(__u32) __pa(raw->buffer + ix);
+		count = len;
+		/* clamp to the end of the ring; the rest goes in the
+		 * next CCW starting at index 0 */
+		if (ix + count > RAW3215_BUFFER_SIZE)
+			count = RAW3215_BUFFER_SIZE - ix;
+		ccw->count = count;
+		len -= count;
+		ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1);
+		ccw++;
+	}
+	/*
+	 * Add a NOP to the channel program. 3215 devices are purely
+	 * emulated and it's much better to avoid the channel end
+	 * interrupt in this case.
+	 */
+	if (ccw > req->ccws)
+		ccw[-1].flags |= 0x40;	/* use command chaining */
+	ccw->cmd_code = 0x03; /* NOP */
+	ccw->flags = 0;
+	ccw->cda = 0;
+	ccw->count = 1;
+}
+
+/*
+ * Start a read or a write request
+ */
+/*
+ * Start a read or a write request.  Reads take priority over writes;
+ * only one request is in flight at a time (RAW3215_WORKING).
+ * Must be called with raw->lock held.
+ */
+static void
+raw3215_start_io(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	int res;
+
+	req = raw->queued_read;
+	if (req != NULL &&
+	    !(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) {
+		/* dequeue request */
+		raw->queued_read = NULL;
+		/* the request pointer doubles as the interrupt parameter
+		 * so raw3215_irq() can find it again */
+		res = ccw_device_start(raw->cdev, req->ccws,
+				       (unsigned long) req, 0, 0);
+		if (res != 0) {
+			/* do_IO failed, put request back to queue */
+			raw->queued_read = req;
+		} else {
+			raw->flags |= RAW3215_WORKING;
+		}
+	}
+	req = raw->queued_write;
+	if (req != NULL &&
+	    !(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) {
+		/* dequeue request */
+		raw->queued_write = NULL;
+		res = ccw_device_start(raw->cdev, req->ccws,
+				       (unsigned long) req, 0, 0);
+		if (res != 0) {
+			/* do_IO failed, put request back to queue */
+			raw->queued_write = req;
+		} else {
+			raw->flags |= RAW3215_WORKING;
+		}
+	}
+}
+
+/*
+ * Function to start a delayed output after RAW3215_TIMEOUT seconds
+ */
+/*
+ * Timer callback: flush output that was held back by raw3215_try_io()
+ * because it was below RAW3215_MIN_WRITE.  __data is the raw3215_info
+ * pointer stashed in timer.data.
+ */
+static void
+raw3215_timeout(unsigned long __data)
+{
+	struct raw3215_info *raw = (struct raw3215_info *) __data;
+	unsigned long flags;
+
+	spin_lock_irqsave(raw->lock, flags);
+	/* flag may have been cleared concurrently before we got the lock */
+	if (raw->flags & RAW3215_TIMER_RUNS) {
+		del_timer(&raw->timer);
+		raw->flags &= ~RAW3215_TIMER_RUNS;
+		raw3215_mk_write_req(raw);
+		raw3215_start_io(raw);
+	}
+	spin_unlock_irqrestore(raw->lock, flags);
+}
+
+/*
+ * Function to conditionally start an IO. A read is started immediately,
+ * a write is only started immediately if the flush flag is on or the
+ * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
+ * done immediately a timer is started with a delay of RAW3215_TIMEOUT.
+ */
+/*
+ * Function to conditionally start an IO. A read is started immediately,
+ * a write is only started immediately if the flush flag is on or the
+ * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
+ * done immediately a timer is started with a delay of RAW3215_TIMEOUT.
+ * Must be called with raw->lock held.
+ */
+static inline void
+raw3215_try_io(struct raw3215_info *raw)
+{
+	if (!(raw->flags & RAW3215_ACTIVE))
+		return;
+	if (raw->queued_read != NULL)
+		raw3215_start_io(raw);
+	else if (raw->queued_write != NULL) {
+		if ((raw->queued_write->delayable == 0) ||
+		    (raw->flags & RAW3215_FLUSHING)) {
+			/* execute write requests bigger than minimum size */
+			raw3215_start_io(raw);
+			/* a pending delay timer is now superfluous */
+			if (raw->flags & RAW3215_TIMER_RUNS) {
+				del_timer(&raw->timer);
+				raw->flags &= ~RAW3215_TIMER_RUNS;
+			}
+		} else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
+			/* delay small writes */
+			init_timer(&raw->timer);
+			raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+			raw->timer.data = (unsigned long) raw;
+			raw->timer.function = raw3215_timeout;
+			add_timer(&raw->timer);
+			raw->flags |= RAW3215_TIMER_RUNS;
+		}
+	}
+}
+
+/*
+ * The bottom half handler routine for 3215 devices. It tries to start
+ * the next IO and wakes up processes waiting on the tty.
+ */
+/*
+ * The bottom half handler routine for 3215 devices. It tries to start
+ * the next IO and wakes up processes waiting on the tty.  It also
+ * prints messages deferred by raw3215_irq(), which must not printk
+ * from interrupt context on the console device.
+ */
+static void
+raw3215_tasklet(void *data)
+{
+	struct raw3215_info *raw;
+	struct tty_struct *tty;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) data;
+	spin_lock_irqsave(raw->lock, flags);
+	raw3215_mk_write_req(raw);
+	raw3215_try_io(raw);
+	spin_unlock_irqrestore(raw->lock, flags);
+	/* Check for pending message from raw3215_irq */
+	/* NOTE(review): raw->message is read and cleared outside the lock;
+	 * presumably benign for a diagnostic, but confirm against
+	 * raw3215_irq() which writes it under different context. */
+	if (raw->message != NULL) {
+		printk(raw->message, raw->msg_dstat, raw->msg_cstat);
+		raw->message = NULL;
+	}
+	/* wake writers once at least RAW3215_MIN_SPACE is free again */
+	tty = raw->tty;
+	if (tty != NULL &&
+	    RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
+		tty_wakeup(tty);
+	}
+}
+
+/*
+ * Interrupt routine, called from common io layer
+ */
+/*
+ * Interrupt routine, called from common io layer.
+ *
+ * Dispatches on the device status (dstat) of the interrupt:
+ *   0x80	  attention	 - the user hit enter, queue a read
+ *   0x08, 0x0C	  channel end	 - record read residual, wait for dev end
+ *   0x04	  device end	 - complete the read/write request
+ * Anything else is treated as spurious and cleaned up defensively.
+ * printk() is deferred to the tasklet via raw->message since this runs
+ * in interrupt context.
+ */
+static void
+raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct raw3215_info *raw;
+	struct raw3215_req *req;
+	struct tty_struct *tty;
+	int cstat, dstat;
+	int count, slen;
+
+	raw = cdev->dev.driver_data;
+	req = (struct raw3215_req *) intparm;
+	cstat = irb->scsw.cstat;
+	dstat = irb->scsw.dstat;
+	if (cstat != 0) {
+		raw->message = KERN_WARNING
+			"Got nonzero channel status in raw3215_irq "
+			"(dev sts 0x%2x, sch sts 0x%2x)";
+		raw->msg_dstat = dstat;
+		raw->msg_cstat = cstat;
+		tasklet_schedule(&raw->tasklet);
+	}
+	if (dstat & 0x01) { /* we got a unit exception */
+		dstat &= ~0x01;	 /* we can ignore it */
+	}
+	switch (dstat) {
+	case 0x80:
+		if (cstat != 0)
+			break;
+		/* Attention interrupt, someone hit the enter key */
+		raw3215_mk_read_req(raw);
+		if (MACHINE_IS_P390)
+			memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
+		tasklet_schedule(&raw->tasklet);
+		break;
+	case 0x08:
+	case 0x0C:
+		/* Channel end interrupt. */
+		if ((raw = req->info) == NULL)
+			return;		     /* That shouldn't happen ... */
+		if (req->type == RAW3215_READ) {
+			/* store residual count, then wait for device end */
+			req->residual = irb->scsw.count;
+		}
+		if (dstat == 0x08)
+			break;
+		/* fallthrough: 0x0C is channel end + device end */
+	case 0x04:
+		/* Device end interrupt. */
+		if ((raw = req->info) == NULL)
+			return;		     /* That shouldn't happen ... */
+		if (req->type == RAW3215_READ && raw->tty != NULL) {
+			unsigned int cchar;
+
+			tty = raw->tty;
+			count = 160 - req->residual;
+			if (MACHINE_IS_P390) {
+				slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
+				if (count > slen)
+					count = slen;
+			} else
+			if (count >= TTY_FLIPBUF_SIZE - tty->flip.count)
+				count = TTY_FLIPBUF_SIZE - tty->flip.count - 1;
+			EBCASC(raw->inbuf, count);
+			cchar = ctrlchar_handle(raw->inbuf, count, tty);
+			switch (cchar & CTRLCHAR_MASK) {
+			case CTRLCHAR_SYSRQ:
+				break;
+
+			case CTRLCHAR_CTRL:
+				tty->flip.count++;
+				*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+				*tty->flip.char_buf_ptr++ = cchar;
+				tty_flip_buffer_push(raw->tty);
+				break;
+
+			case CTRLCHAR_NONE:
+				memcpy(tty->flip.char_buf_ptr,
+				       raw->inbuf, count);
+				/* "^n" or EBCDIC-garbled "\252n" at the end
+				 * of the line suppresses the automatic
+				 * newline */
+				if (count < 2 ||
+				    (strncmp(raw->inbuf+count-2, "^n", 2) &&
+				    strncmp(raw->inbuf+count-2, "\252n", 2)) ) {
+					/* don't add the auto \n */
+					tty->flip.char_buf_ptr[count] = '\n';
+					memset(tty->flip.flag_buf_ptr,
+					       TTY_NORMAL, count + 1);
+					count++;
+				} else
+					count-=2;
+				tty->flip.char_buf_ptr += count;
+				tty->flip.flag_buf_ptr += count;
+				tty->flip.count += count;
+				tty_flip_buffer_push(raw->tty);
+				break;
+			}
+		} else if (req->type == RAW3215_WRITE) {
+			raw->count -= req->len;
+			raw->written -= req->len;
+		}
+		raw->flags &= ~RAW3215_WORKING;
+		raw3215_free_req(req);
+		/* check for empty wait */
+		if (waitqueue_active(&raw->empty_wait) &&
+		    raw->queued_write == NULL &&
+		    raw->queued_read == NULL) {
+			wake_up_interruptible(&raw->empty_wait);
+		}
+		tasklet_schedule(&raw->tasklet);
+		break;
+	default:
+		/* Strange interrupt, I'll do my best to clean up */
+		if (req != NULL && req->type != RAW3215_FREE) {
+			if (req->type == RAW3215_WRITE) {
+				raw->count -= req->len;
+				raw->written -= req->len;
+			}
+			raw->flags &= ~RAW3215_WORKING;
+			raw3215_free_req(req);
+		}
+		/* fixed duplicated word in the original message text */
+		raw->message = KERN_WARNING
+			"Spurious interrupt in raw3215_irq "
+			"(dev sts 0x%2x, sch sts 0x%2x)";
+		raw->msg_dstat = dstat;
+		raw->msg_cstat = cstat;
+		tasklet_schedule(&raw->tasklet);
+	}
+	return;
+}
+
+/*
+ * Wait until length bytes are available in the output buffer.
+ * Has to be called with the s390irq lock held. Can be called
+ * disabled.
+ */
+static void
+raw3215_make_room(struct raw3215_info *raw, unsigned int length)
+{
+	while (RAW3215_BUFFER_SIZE - raw->count < length) {
+		/* there might be a request pending */
+		/* force immediate output even for small writes */
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_mk_write_req(raw);
+		raw3215_try_io(raw);
+		raw->flags &= ~RAW3215_FLUSHING;
+#ifdef CONFIG_TN3215_CONSOLE
+		wait_cons_dev();
+#endif
+		/* Enough room freed up ? */
+		if (RAW3215_BUFFER_SIZE - raw->count >= length)
+			break;
+		/* there might be another cpu waiting for the lock */
+		spin_unlock(raw->lock);
+		udelay(100);
+		spin_lock(raw->lock);
+	}
+}
+
+/*
+ * String write routine for 3215 devices
+ */
+/*
+ * String write routine for 3215 devices.  Copies the string into the
+ * circular output buffer, converting to EBCDIC, and kicks off IO if no
+ * request is currently in flight.  Blocks (via raw3215_make_room) until
+ * buffer space is available.
+ */
+static void
+raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
+{
+	unsigned long flags;
+	int c, count;
+
+	while (length > 0) {
+		spin_lock_irqsave(raw->lock, flags);
+		/* process at most one buffer's worth per iteration */
+		count = (length > RAW3215_BUFFER_SIZE) ?
+					     RAW3215_BUFFER_SIZE : length;
+		length -= count;
+
+		raw3215_make_room(raw, count);
+
+		/* copy string to output buffer and convert it to EBCDIC */
+		while (1) {
+			/* c = largest contiguous chunk: limited by the
+			 * remaining input, the free space, and the wrap
+			 * point of the ring buffer */
+			c = min_t(int, count,
+				  min(RAW3215_BUFFER_SIZE - raw->count,
+				      RAW3215_BUFFER_SIZE - raw->head));
+			if (c <= 0)
+				break;
+			memcpy(raw->buffer + raw->head, str, c);
+			ASCEBC(raw->buffer + raw->head, c);
+			raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1);
+			raw->count += c;
+			raw->line_pos += c;
+			str += c;
+			count -= c;
+		}
+		if (!(raw->flags & RAW3215_WORKING)) {
+			raw3215_mk_write_req(raw);
+			/* start or queue request */
+			raw3215_try_io(raw);
+		}
+		spin_unlock_irqrestore(raw->lock, flags);
+	}
+}
+
+/*
+ * Put character routine for 3215 devices
+ */
+/*
+ * Put character routine for 3215 devices.  Expands tabs to spaces up to
+ * the next TAB_STOP_SIZE column and tracks the line position for that
+ * expansion.
+ */
+static void
+raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
+{
+	unsigned long flags;
+	unsigned int length, i;
+
+	spin_lock_irqsave(raw->lock, flags);
+	if (ch == '\t') {
+		/* pad with spaces to the next tab stop */
+		length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
+		raw->line_pos += length;
+		ch = ' ';
+	} else if (ch == '\n') {
+		length = 1;
+		raw->line_pos = 0;
+	} else {
+		length = 1;
+		raw->line_pos++;
+	}
+	raw3215_make_room(raw, length);
+
+	/* store the (possibly repeated) character, converted to EBCDIC */
+	for (i = 0; i < length; i++) {
+		raw->buffer[raw->head] = (char) _ascebc[(int) ch];
+		raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
+		raw->count++;
+	}
+	if (!(raw->flags & RAW3215_WORKING)) {
+		raw3215_mk_write_req(raw);
+		/* start or queue request */
+		raw3215_try_io(raw);
+	}
+	spin_unlock_irqrestore(raw->lock, flags);
+}
+
+/*
+ * Flush routine, it simply sets the flush flag and tries to start
+ * pending IO.
+ */
+/*
+ * Flush routine, it simply sets the flush flag and tries to start
+ * pending IO.  The flag makes raw3215_try_io() start even small,
+ * normally-delayed writes immediately.
+ */
+static void
+raw3215_flush_buffer(struct raw3215_info *raw)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(raw->lock, flags);
+	if (raw->count > 0) {
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_try_io(raw);
+		raw->flags &= ~RAW3215_FLUSHING;
+	}
+	spin_unlock_irqrestore(raw->lock, flags);
+}
+
+/*
+ * Fire up a 3215 device.
+ */
+/*
+ * Fire up a 3215 device: mark it active and start any queued IO.
+ * Idempotent — returns 0 immediately if the device is already active.
+ */
+static int
+raw3215_startup(struct raw3215_info *raw)
+{
+	unsigned long flags;
+
+	if (raw->flags & RAW3215_ACTIVE)
+		return 0;
+	raw->line_pos = 0;
+	raw->flags |= RAW3215_ACTIVE;
+	spin_lock_irqsave(raw->lock, flags);
+	raw3215_try_io(raw);
+	spin_unlock_irqrestore(raw->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Shutdown a 3215 device.
+ */
+/*
+ * Shutdown a 3215 device: sleep until all outstanding requests have
+ * completed (raw3215_irq wakes empty_wait), then deactivate.  A console
+ * device (RAW3215_FIXED) is never shut down.
+ */
+static void
+raw3215_shutdown(struct raw3215_info *raw)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+
+	if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
+		return;
+	/* Wait for outstanding requests, then free irq */
+	spin_lock_irqsave(raw->lock, flags);
+	if ((raw->flags & RAW3215_WORKING) ||
+	    raw->queued_write != NULL ||
+	    raw->queued_read != NULL) {
+		raw->flags |= RAW3215_CLOSING;
+		add_wait_queue(&raw->empty_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		/* drop the lock while sleeping so the irq handler can run */
+		spin_unlock_irqrestore(raw->lock, flags);
+		schedule();
+		spin_lock_irqsave(raw->lock, flags);
+		remove_wait_queue(&raw->empty_wait, &wait);
+		set_current_state(TASK_RUNNING);
+		raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
+	}
+	spin_unlock_irqrestore(raw->lock, flags);
+}
+
+/*
+ * ccw driver probe callback: allocate a raw3215_info (with the input
+ * buffer appended to the same allocation), claim a free slot in the
+ * raw3215[] array and wire up the interrupt handler.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when
+ * all NR_3215 slots are taken.
+ *
+ * Bug fix: the original code assigned raw->cdev, raw->lock and
+ * raw->inbuf *before* memset(raw, 0, ...), which wiped all three
+ * pointers again.  The structure is now zeroed first.
+ */
+static int
+raw3215_probe (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	int line;
+
+	/* GFP_DMA: the channel subsystem needs 31-bit addressable memory */
+	raw = kmalloc(sizeof(struct raw3215_info) +
+		      RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
+	if (raw == NULL)
+		return -ENOMEM;
+
+	/* reserve a free line slot under the device lock */
+	spin_lock(&raw3215_device_lock);
+	for (line = 0; line < NR_3215; line++) {
+		if (!raw3215[line]) {
+			raw3215[line] = raw;
+			break;
+		}
+	}
+	spin_unlock(&raw3215_device_lock);
+	if (line == NR_3215) {
+		kfree(raw);
+		return -ENODEV;
+	}
+
+	/* zero the structure BEFORE filling in the pointer fields */
+	memset(raw, 0, sizeof(struct raw3215_info));
+	raw->cdev = cdev;
+	raw->lock = get_ccwdev_lock(cdev);
+	/* input buffer lives directly behind the info structure */
+	raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
+	raw->buffer = kmalloc(RAW3215_BUFFER_SIZE,
+			      GFP_KERNEL|GFP_DMA);
+	if (raw->buffer == NULL) {
+		/* release the slot reserved above */
+		spin_lock(&raw3215_device_lock);
+		raw3215[line] = NULL;
+		spin_unlock(&raw3215_device_lock);
+		kfree(raw);
+		return -ENOMEM;
+	}
+	tasklet_init(&raw->tasklet,
+		     (void (*)(unsigned long)) raw3215_tasklet,
+		     (unsigned long) raw);
+	init_waitqueue_head(&raw->empty_wait);
+
+	cdev->dev.driver_data = raw;
+	cdev->handler = raw3215_irq;
+
+	return 0;
+}
+
+/*
+ * ccw driver remove callback: take the device offline and free the
+ * per-device state allocated in raw3215_probe().
+ *
+ * Idiom fix: kfree(NULL) is a no-op, so the redundant NULL guard
+ * around kfree(raw->buffer) is dropped.
+ */
+static void
+raw3215_remove (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	ccw_device_set_offline(cdev);
+	raw = cdev->dev.driver_data;
+	if (raw) {
+		cdev->dev.driver_data = NULL;
+		kfree(raw->buffer);
+		kfree(raw);
+	}
+}
+
+/*
+ * ccw driver set_online callback: activate the device.
+ * Returns 0 on success, -ENODEV if probe never attached a raw3215_info.
+ */
+static int
+raw3215_set_online (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	raw = cdev->dev.driver_data;
+	if (!raw)
+		return -ENODEV;
+
+	return raw3215_startup(raw);
+}
+
+/*
+ * ccw driver set_offline callback: quiesce the device (waits for
+ * outstanding IO).  Returns 0, or -ENODEV if no raw3215_info is attached.
+ */
+static int
+raw3215_set_offline (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	raw = cdev->dev.driver_data;
+	if (!raw)
+		return -ENODEV;
+
+	raw3215_shutdown(raw);
+
+	return 0;
+}
+
+static struct ccw_device_id raw3215_id[] = {
+ { CCW_DEVICE(0x3215, 0) },
+ { /* end of list */ },
+};
+
+static struct ccw_driver raw3215_ccw_driver = {
+ .name = "3215",
+ .owner = THIS_MODULE,
+ .ids = raw3215_id,
+ .probe = &raw3215_probe,
+ .remove = &raw3215_remove,
+ .set_online = &raw3215_set_online,
+ .set_offline = &raw3215_set_offline,
+};
+
+#ifdef CONFIG_TN3215_CONSOLE
+/*
+ * Write a string to the 3215 console
+ */
+/*
+ * Write a string to the 3215 console.  Plain runs of characters go
+ * through raw3215_write(); tabs and newlines are routed one at a time
+ * through raw3215_putchar(), which handles tab expansion and line
+ * position tracking.
+ */
+static void
+con3215_write(struct console *co, const char *str, unsigned int count)
+{
+	struct raw3215_info *raw;
+	int i;
+
+	if (count <= 0)
+		return;
+	raw = raw3215[0];  /* console 3215 is the first one */
+	while (count > 0) {
+		/* find the next tab or newline */
+		for (i = 0; i < count; i++)
+			if (str[i] == '\t' || str[i] == '\n')
+				break;
+		raw3215_write(raw, str, i);
+		count -= i;
+		str += i;
+		if (count > 0) {
+			/* emit the special character itself */
+			raw3215_putchar(raw, *str);
+			count--;
+			str++;
+		}
+	}
+}
+
+static struct tty_driver *con3215_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return tty3215_driver;
+}
+
+/*
+ * panic() calls console_unblank before the system enters a
+ * disabled, endless loop.
+ */
+/*
+ * panic() calls console_unblank before the system enters a
+ * disabled, endless loop.  Requesting room for the whole buffer
+ * forces all pending output to the device.
+ */
+static void
+con3215_unblank(void)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = raw3215[0];  /* console 3215 is the first one */
+	spin_lock_irqsave(raw->lock, flags);
+	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+	spin_unlock_irqrestore(raw->lock, flags);
+}
+
+static int __init
+con3215_consetup(struct console *co, char *options)
+{
+ return 0;
+}
+
+/*
+ * The console structure for the 3215 console
+ */
+static struct console con3215 = {
+ .name = "ttyS",
+ .write = con3215_write,
+ .device = con3215_device,
+ .unblank = con3215_unblank,
+ .setup = con3215_consetup,
+ .flags = CON_PRINTBUFFER,
+};
+
+/*
+ * 3215 console initialization code called from console_init().
+ * NOTE: This is called before kmalloc is available.
+ */
+/*
+ * 3215 console initialization code called from console_init().
+ * NOTE: This is called before kmalloc is available, hence all memory
+ * (request pool, info structure, buffers) comes from the bootmem
+ * allocator.
+ */
+static int __init
+con3215_init(void)
+{
+	struct ccw_device *cdev;
+	struct raw3215_info *raw;
+	struct raw3215_req *req;
+	int i;
+
+	/* Check if 3215 is to be the console */
+	if (!CONSOLE_IS_3215)
+		return -ENODEV;
+
+	/* Set the console mode for VM */
+	if (MACHINE_IS_VM) {
+		cpcmd("TERM CONMODE 3215", NULL, 0);
+		cpcmd("TERM AUTOCR OFF", NULL, 0);
+	}
+
+	/* allocate 3215 request structures */
+	raw3215_freelist = NULL;
+	spin_lock_init(&raw3215_freelist_lock);
+	for (i = 0; i < NR_3215_REQ; i++) {
+		req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req));
+		req->next = raw3215_freelist;
+		raw3215_freelist = req;
+	}
+
+	cdev = ccw_device_probe_console();
+	if (!cdev)
+		return -ENODEV;
+
+	/* console device occupies line 0 */
+	raw3215[0] = raw = (struct raw3215_info *)
+		alloc_bootmem_low(sizeof(struct raw3215_info));
+	memset(raw, 0, sizeof(struct raw3215_info));
+	raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
+	raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
+	raw->cdev = cdev;
+	raw->lock = get_ccwdev_lock(cdev);
+	cdev->dev.driver_data = raw;
+	cdev->handler = raw3215_irq;
+
+	/* RAW3215_FIXED: the console device must never be shut down */
+	raw->flags |= RAW3215_FIXED;
+	tasklet_init(&raw->tasklet,
+		     (void (*)(unsigned long)) raw3215_tasklet,
+		     (unsigned long) raw);
+	init_waitqueue_head(&raw->empty_wait);
+
+	/* Request the console irq */
+	if (raw3215_startup(raw) != 0) {
+		/* give the bootmem back and report failure */
+		free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
+		free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
+		free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
+		raw3215[0] = NULL;
+		printk("Couldn't find a 3215 console device\n");
+		return -ENODEV;
+	}
+	register_console(&con3215);
+	return 0;
+}
+console_initcall(con3215_init);
+#endif
+
+/*
+ * tty3215_open
+ *
+ * This routine is called whenever a 3215 tty is opened.
+ */
+/*
+ * tty3215_open
+ *
+ * This routine is called whenever a 3215 tty is opened.  Binds the tty
+ * to the raw3215 line selected by tty->index and starts the device.
+ */
+static int
+tty3215_open(struct tty_struct *tty, struct file * filp)
+{
+	struct raw3215_info *raw;
+	int retval, line;
+
+	line = tty->index;
+	if ((line < 0) || (line >= NR_3215))
+		return -ENODEV;
+
+	raw = raw3215[line];
+	if (raw == NULL)
+		return -ENODEV;
+
+	/* cross-link tty and raw device */
+	tty->driver_data = raw;
+	raw->tty = tty;
+
+	tty->low_latency = 0;  /* don't use bottom half for pushing chars */
+	/*
+	 * Start up 3215 device
+	 */
+	retval = raw3215_startup(raw);
+	if (retval)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * tty3215_close()
+ *
+ * This routine is called when the 3215 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+/*
+ * tty3215_close()
+ *
+ * This routine is called when the 3215 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ * Only the final close (tty->count == 1) shuts the device down.
+ */
+static void
+tty3215_close(struct tty_struct *tty, struct file * filp)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw == NULL || tty->count > 1)
+		return;
+	tty->closing = 1;
+	/* Shutdown the terminal */
+	raw3215_shutdown(raw);
+	tty->closing = 0;
+	raw->tty = NULL;
+}
+
+/*
+ * Returns the amount of free space in the output buffer.
+ */
+/*
+ * Returns the amount of free space in the output buffer, reserving
+ * TAB_STOP_SIZE bytes so a trailing tab expansion always fits.
+ */
+static int
+tty3215_write_room(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+
+	/* Subtract TAB_STOP_SIZE to allow for a tab, 8 <<< 64K */
+	if ((RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE) >= 0)
+		return RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE;
+	else
+		return 0;
+}
+
+/*
+ * String write routine for 3215 ttys
+ */
+/*
+ * String write routine for 3215 ttys.  Delegates to raw3215_write()
+ * and always reports the full count as consumed (raw3215_write blocks
+ * until everything is buffered).
+ */
+static int
+tty3215_write(struct tty_struct * tty,
+	      const unsigned char *buf, int count)
+{
+	struct raw3215_info *raw;
+
+	if (!tty)
+		return 0;
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw3215_write(raw, buf, count);
+	return count;
+}
+
+/*
+ * Put character routine for 3215 ttys
+ */
+/*
+ * Put character routine for 3215 ttys — one character via
+ * raw3215_putchar(), which handles tab/newline bookkeeping.
+ */
+static void
+tty3215_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct raw3215_info *raw;
+
+	if (!tty)
+		return;
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw3215_putchar(raw, ch);
+}
+
+static void
+tty3215_flush_chars(struct tty_struct *tty)
+{
+}
+
+/*
+ * Returns the number of characters in the output buffer
+ */
+static int
+tty3215_chars_in_buffer(struct tty_struct *tty)
+{
+ struct raw3215_info *raw;
+
+ raw = (struct raw3215_info *) tty->driver_data;
+ return raw->count;
+}
+
+static void
+tty3215_flush_buffer(struct tty_struct *tty)
+{
+ struct raw3215_info *raw;
+
+ raw = (struct raw3215_info *) tty->driver_data;
+ raw3215_flush_buffer(raw);
+ tty_wakeup(tty);
+}
+
+/*
+ * Currently we don't have any io controls for 3215 ttys
+ */
+/*
+ * Currently we don't have any io controls for 3215 ttys: every command
+ * falls through to -ENOIOCTLCMD so the tty core handles it.
+ */
+static int
+tty3215_ioctl(struct tty_struct *tty, struct file * file,
+	      unsigned int cmd, unsigned long arg)
+{
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+
+	switch (cmd) {
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+
+/*
+ * Disable reading from a 3215 tty
+ */
+/*
+ * Disable reading from a 3215 tty: RAW3215_THROTTLED keeps
+ * raw3215_start_io() from issuing queued read requests.
+ */
+static void
+tty3215_throttle(struct tty_struct * tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw->flags |= RAW3215_THROTTLED;
+}
+
+/*
+ * Enable reading from a 3215 tty
+ */
+/*
+ * Enable reading from a 3215 tty: clear the throttle flag and restart
+ * any read request that was held back while throttled.
+ */
+static void
+tty3215_unthrottle(struct tty_struct * tty)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw->flags & RAW3215_THROTTLED) {
+		spin_lock_irqsave(raw->lock, flags);
+		raw->flags &= ~RAW3215_THROTTLED;
+		raw3215_try_io(raw);
+		spin_unlock_irqrestore(raw->lock, flags);
+	}
+}
+
+/*
+ * Disable writing to a 3215 tty
+ */
+/*
+ * Disable writing to a 3215 tty: RAW3215_STOPPED keeps
+ * raw3215_start_io() from issuing queued write requests.
+ */
+static void
+tty3215_stop(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw->flags |= RAW3215_STOPPED;
+}
+
+/*
+ * Enable writing to a 3215 tty
+ */
+/*
+ * Enable writing to a 3215 tty: clear the stop flag and restart any
+ * write request that was held back while stopped.
+ */
+static void
+tty3215_start(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw->flags & RAW3215_STOPPED) {
+		spin_lock_irqsave(raw->lock, flags);
+		raw->flags &= ~RAW3215_STOPPED;
+		raw3215_try_io(raw);
+		spin_unlock_irqrestore(raw->lock, flags);
+	}
+}
+
+static struct tty_operations tty3215_ops = {
+ .open = tty3215_open,
+ .close = tty3215_close,
+ .write = tty3215_write,
+ .put_char = tty3215_put_char,
+ .flush_chars = tty3215_flush_chars,
+ .write_room = tty3215_write_room,
+ .chars_in_buffer = tty3215_chars_in_buffer,
+ .flush_buffer = tty3215_flush_buffer,
+ .ioctl = tty3215_ioctl,
+ .throttle = tty3215_throttle,
+ .unthrottle = tty3215_unthrottle,
+ .stop = tty3215_stop,
+ .start = tty3215_start,
+};
+
+/*
+ * 3215 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this point.
+ */
+/*
+ * Allocates and registers the tty driver for up to NR_3215 lines and
+ * registers the ccw driver.  Each failure path undoes the registrations
+ * made so far.  Returns 0 on success or a negative errno.
+ */
+int __init
+tty3215_init(void)
+{
+	struct tty_driver *driver;
+	int ret;
+
+	/* only relevant when the 3215 is the chosen console device */
+	if (!CONSOLE_IS_3215)
+		return 0;
+
+	driver = alloc_tty_driver(NR_3215);
+	if (!driver)
+		return -ENOMEM;
+
+	ret = ccw_driver_register(&raw3215_ccw_driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	/*
+	 * Initialize the tty_driver structure
+	 * Entries in tty3215_driver that are NOT initialized:
+	 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
+	 */
+
+	driver->owner = THIS_MODULE;
+	driver->driver_name = "tty3215";
+	driver->name = "ttyS";
+	driver->major = TTY_MAJOR;
+	driver->minor_start = 64;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+	driver->init_termios.c_oflag = ONLCR | XTABS;
+	driver->init_termios.c_lflag = ISIG;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &tty3215_ops);
+	ret = tty_register_driver(driver);
+	if (ret) {
+		printk("Couldn't register tty3215 driver\n");
+		put_tty_driver(driver);
+		return ret;
+	}
+	/* publish only after successful registration */
+	tty3215_driver = driver;
+	return 0;
+}
+
+/*
+ * Module unload: unregister and release the tty driver, then
+ * unregister the ccw driver — reverse order of tty3215_init().
+ */
+static void __exit
+tty3215_exit(void)
+{
+	tty_unregister_driver(tty3215_driver);
+	put_tty_driver(tty3215_driver);
+	ccw_driver_unregister(&raw3215_ccw_driver);
+}
+
+module_init(tty3215_init);
+module_exit(tty3215_exit);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
new file mode 100644
index 000000000000..d52fb57a6b19
--- /dev/null
+++ b/drivers/s390/char/con3270.c
@@ -0,0 +1,638 @@
+/*
+ * drivers/s390/char/con3270.c
+ * IBM/3270 Driver - console view.
+ *
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ */
+
+#include <linux/config.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+
+#include "raw3270.h"
+#include "ctrlchar.h"
+
+#define CON3270_OUTPUT_BUFFER_SIZE 1024
+#define CON3270_STRING_PAGES 4
+
+static struct raw3270_fn con3270_fn;
+
+/*
+ * Main 3270 console view data structure.
+ */
+struct con3270 {
+ struct raw3270_view view;
+ spinlock_t lock;
+ struct list_head freemem; /* list of free memory for strings. */
+
+ /* Output stuff. */
+ struct list_head lines; /* list of lines. */
+ struct list_head update; /* list of lines to update. */
+ int line_nr; /* line number for next update. */
+ int nr_lines; /* # lines in list. */
+ int nr_up; /* # lines up in history. */
+ unsigned long update_flags; /* Update indication bits. */
+ struct string *cline; /* current output line. */
+ struct string *status; /* last line of display. */
+ struct raw3270_request *write; /* single write request. */
+ struct timer_list timer;
+
+ /* Input stuff. */
+ struct string *input; /* input string for read request. */
+ struct raw3270_request *read; /* single read request. */
+ struct raw3270_request *kreset; /* single keyboard reset request. */
+ struct tasklet_struct readlet; /* tasklet to issue read request. */
+};
+
+static struct con3270 *condev;
+
+/* con3270->update_flags. See con3270_update for details. */
+#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
+#define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */
+#define CON_UPDATE_STATUS 4 /* Update status line. */
+#define CON_UPDATE_ALL 7
+
+static void con3270_update(struct con3270 *);
+
+/*
+ * Setup timeout for a device. On timeout trigger an update.
+ * expires == 0 cancels a pending timer. Otherwise a pending timer is
+ * pushed back by mod_timer; if none is pending a new timer is armed
+ * with con3270_update as the handler and cp as its argument.
+ */
+void
+con3270_set_timer(struct con3270 *cp, int expires)
+{
+	if (expires == 0) {
+		if (timer_pending(&cp->timer))
+			del_timer(&cp->timer);
+		return;
+	}
+	if (timer_pending(&cp->timer) &&
+	    mod_timer(&cp->timer, jiffies + expires))
+		return;
+	cp->timer.function = (void (*)(unsigned long)) con3270_update;
+	cp->timer.data = (unsigned long) cp;
+	cp->timer.expires = jiffies + expires;
+	add_timer(&cp->timer);
+}
+
+/*
+ * The status line is the last line of the screen. It shows the string
+ * "console view" in the lower left corner and "Running"/"More..."/"Holding"
+ * in the lower right corner of the screen.
+ */
+static void
+con3270_update_status(struct con3270 *cp)
+{
+	char *str;
+
+	/* Offset 24 is the 7-byte state field laid out by con3270_create_status. */
+	str = (cp->nr_up != 0) ? "History" : "Running";
+	memcpy(cp->status->string + 24, str, 7);
+	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+	cp->update_flags |= CON_UPDATE_STATUS;
+}
+
+/*
+ * Build the static status line (last screen row) from a 3270 datastream
+ * blueprint, patch in the two TO_RA buffer addresses and convert the
+ * ascii text fields to ebcdic.
+ */
+static void
+con3270_create_status(struct con3270 *cp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
+		  'c','o','n','s','o','l','e',' ','v','i','e','w',
+		  TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
+
+	cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
+	/* Copy blueprint to status line */
+	memcpy(cp->status->string, blueprint, sizeof(blueprint));
+	/* Set TO_RA addresses. */
+	raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
+			       cp->view.cols * (cp->view.rows - 1));
+	raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
+			       cp->view.cols * cp->view.rows - 8);
+	/* Convert strings to ebcdic. */
+	codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
+	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+}
+
+/*
+ * Set output offsets to 3270 datastream fragment of a console string.
+ * Lines shorter than cols-5 carry a trailing TO_RA order (see
+ * con3270_cline_end); patch its target to the start of screen row nr+1.
+ * Longer lines fill the row completely and have no TO_RA to patch.
+ */
+static void
+con3270_update_string(struct con3270 *cp, struct string *s, int nr)
+{
+	if (s->len >= cp->view.cols - 5)
+		return;
+	raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
+			       cp->view.cols * (nr + 1));
+}
+
+/*
+ * Rebuild update list to print all lines.
+ * Accounts for the current scroll offset (nr_up) so that the update
+ * list holds exactly the rows - 1 lines visible on screen.
+ */
+static void
+con3270_rebuild_update(struct con3270 *cp)
+{
+	struct string *s, *n;
+	int nr;
+
+	/*
+	 * Throw away update list and create a new one,
+	 * containing all lines that will fit on the screen.
+	 */
+	list_for_each_entry_safe(s, n, &cp->update, update)
+		list_del_init(&s->update);
+	nr = cp->view.rows - 2 + cp->nr_up;
+	list_for_each_entry_reverse(s, &cp->lines, list) {
+		if (nr < cp->view.rows - 1)
+			list_add(&s->update, &cp->update);
+		if (--nr < 0)
+			break;
+	}
+	cp->line_nr = 0;
+	cp->update_flags |= CON_UPDATE_LIST;
+}
+
+/*
+ * Alloc string for size bytes. Free strings from history if necessary.
+ * Frees oldest lines until free_string reports enough contiguous space,
+ * then retries; a second failure is a logic error (BUG). If the history
+ * shrank below the current scroll position, clamp nr_up and refresh.
+ */
+static struct string *
+con3270_alloc_string(struct con3270 *cp, size_t size)
+{
+	struct string *s, *n;
+
+	s = alloc_string(&cp->freemem, size);
+	if (s)
+		return s;
+	list_for_each_entry_safe(s, n, &cp->lines, list) {
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		cp->nr_lines--;
+		if (free_string(&cp->freemem, s) >= size)
+			break;
+	}
+	s = alloc_string(&cp->freemem, size);
+	BUG_ON(!s);
+	if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
+		cp->nr_up = cp->nr_lines - cp->view.rows + 1;
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+	}
+	return s;
+}
+
+/*
+ * Write completion callback.
+ * Resets the finished request and hands ownership of the single write
+ * request back to the view via xchg (claimed in con3270_update).
+ */
+static void
+con3270_write_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_request_reset(rq);
+	xchg(&((struct con3270 *) rq->view)->write, rq);
+}
+
+/*
+ * Update console display.
+ * Claims the single write request via xchg; if it is still in flight,
+ * retry one jiffy later through the timer. Builds a 3270 write data
+ * stream (command + WCC, optional status line, optional line updates)
+ * and starts the I/O. Only the parts that actually fit into the
+ * request have their update_flags bits cleared; leftovers are retried
+ * via the timer.
+ */
+static void
+con3270_update(struct con3270 *cp)
+{
+	struct raw3270_request *wrq;
+	char wcc, prolog[6];
+	unsigned long flags;
+	unsigned long updated;
+	struct string *s, *n;
+	int rc;
+
+	wrq = xchg(&cp->write, 0);
+	if (!wrq) {
+		con3270_set_timer(cp, 1);
+		return;
+	}
+
+	spin_lock_irqsave(&cp->view.lock, flags);
+	updated = 0;
+	if (cp->update_flags & CON_UPDATE_ERASE) {
+		/* Use erase write alternate to initialize display. */
+		raw3270_request_set_cmd(wrq, TC_EWRITEA);
+		updated |= CON_UPDATE_ERASE;
+	} else
+		raw3270_request_set_cmd(wrq, TC_WRITE);
+
+	wcc = TW_NONE;
+	raw3270_request_add_data(wrq, &wcc, 1);
+
+	/*
+	 * Update status line.
+	 */
+	if (cp->update_flags & CON_UPDATE_STATUS)
+		if (raw3270_request_add_data(wrq, cp->status->string,
+					     cp->status->len) == 0)
+			updated |= CON_UPDATE_STATUS;
+
+	if (cp->update_flags & CON_UPDATE_LIST) {
+		/* Prolog: position to the current line, set turquoise color. */
+		prolog[0] = TO_SBA;
+		prolog[3] = TO_SA;
+		prolog[4] = TAT_COLOR;
+		prolog[5] = TAC_TURQ;
+		raw3270_buffer_address(cp->view.dev, prolog + 1,
+				       cp->view.cols * cp->line_nr);
+		raw3270_request_add_data(wrq, prolog, 6);
+		/* Write strings in the update list to the screen. */
+		list_for_each_entry_safe(s, n, &cp->update, update) {
+			if (s != cp->cline)
+				con3270_update_string(cp, s, cp->line_nr);
+			if (raw3270_request_add_data(wrq, s->string,
+						     s->len) != 0)
+				break;
+			list_del_init(&s->update);
+			if (s != cp->cline)
+				cp->line_nr++;
+		}
+		if (list_empty(&cp->update))
+			updated |= CON_UPDATE_LIST;
+	}
+	wrq->callback = con3270_write_callback;
+	rc = raw3270_start(&cp->view, wrq);
+	if (rc == 0) {
+		cp->update_flags &= ~updated;
+		if (cp->update_flags)
+			con3270_set_timer(cp, 1);
+	} else {
+		/* Start failed: give the request back for a later retry. */
+		raw3270_request_reset(wrq);
+		xchg(&cp->write, wrq);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+/*
+ * Read tasklet. Scheduled by con3270_read_callback after a read
+ * completed. Evaluates the AID byte of the input, adjusts the scroll
+ * position, restores the keyboard with a TW_KR write, hands the read
+ * request back via xchg and drops the view reference taken in the
+ * callback.
+ */
+static void
+con3270_read_tasklet(struct raw3270_request *rrq)
+{
+	static char kreset_data = TW_KR;
+	struct con3270 *cp;
+	unsigned long flags;
+	int nr_up, deactivate;
+
+	cp = (struct con3270 *) rrq->view;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	nr_up = cp->nr_up;
+	deactivate = 0;
+	/* Check aid byte. */
+	switch (cp->input->string[0]) {
+	case 0x7d:	/* enter: jump to bottom. */
+		nr_up = 0;
+		break;
+	case 0xf3:	/* PF3: deactivate the console view. */
+		deactivate = 1;
+		break;
+	case 0x6d:	/* clear: start from scratch. */
+		con3270_rebuild_update(cp);
+		cp->update_flags = CON_UPDATE_ALL;
+		con3270_set_timer(cp, 1);
+		break;
+	case 0xf7:	/* PF7: do a page up in the console log. */
+		nr_up += cp->view.rows - 2;
+		if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
+			nr_up = cp->nr_lines - cp->view.rows + 1;
+			if (nr_up < 0)
+				nr_up = 0;
+		}
+		break;
+	case 0xf8:	/* PF8: do a page down in the console log. */
+		nr_up -= cp->view.rows - 2;
+		if (nr_up < 0)
+			nr_up = 0;
+		break;
+	}
+	if (nr_up != cp->nr_up) {
+		/* Scroll position changed: redraw screen and status line. */
+		cp->nr_up = nr_up;
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+		con3270_set_timer(cp, 1);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+
+	/* Start keyboard reset command. */
+	raw3270_request_reset(cp->kreset);
+	raw3270_request_set_cmd(cp->kreset, TC_WRITE);
+	raw3270_request_add_data(cp->kreset, &kreset_data, 1);
+	raw3270_start(&cp->view, cp->kreset);
+
+	if (deactivate)
+		raw3270_deactivate_view(&cp->view);
+
+	raw3270_request_reset(rrq);
+	xchg(&cp->read, rrq);
+	raw3270_put_view(&cp->view);
+}
+
+/*
+ * Read request completion callback.
+ * Takes a view reference (released in con3270_read_tasklet) and defers
+ * input processing to tasklet context.
+ */
+static void
+con3270_read_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_get_view(rq->view);
+	/* Schedule tasklet to pass input to tty. */
+	tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
+}
+
+/*
+ * Issue a read request. Called only from interrupt function.
+ * Claims the single read request via xchg; if it is already in use a
+ * read is in progress and nothing is done. On start failure the
+ * request is reset (but note: not handed back to cp->read here).
+ */
+static void
+con3270_issue_read(struct con3270 *cp)
+{
+	struct raw3270_request *rrq;
+	int rc;
+
+	rrq = xchg(&cp->read, 0);
+	if (!rrq)
+		/* Read already scheduled. */
+		return;
+	rrq->callback = con3270_read_callback;
+	rrq->callback_data = cp;
+	raw3270_request_set_cmd(rrq, TC_READMOD);
+	raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
+	/* Issue the read modified request. */
+	rc = raw3270_start_irq(&cp->view, rrq);
+	if (rc)
+		raw3270_request_reset(rrq);
+}
+
+/*
+ * Switch to the console view.
+ * Resets scrolling, marks the whole screen dirty and schedules an
+ * immediate redraw. Always succeeds (returns 0).
+ */
+static int
+con3270_activate(struct raw3270_view *view)
+{
+	unsigned long flags;
+	struct con3270 *cp;
+
+	cp = (struct con3270 *) view;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	cp->nr_up = 0;
+	con3270_rebuild_update(cp);
+	con3270_update_status(cp);
+	cp->update_flags = CON_UPDATE_ALL;
+	con3270_set_timer(cp, 1);
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+	return 0;
+}
+
+/* Leave the console view: stop the pending update timer. */
+static void
+con3270_deactivate(struct raw3270_view *view)
+{
+	unsigned long flags;
+	struct con3270 *cp;
+
+	cp = (struct con3270 *) view;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	del_timer(&cp->timer);
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+/*
+ * Interrupt handler for the console view.
+ * ATTN (operator pressed a key) triggers a read; for a completed
+ * request the result code and residual count are recorded.
+ */
+static int
+con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Schedule tasklet to read aid. */
+	if (irb->scsw.dstat & DEV_STAT_ATTENTION)
+		con3270_issue_read(cp);
+
+	if (rq) {
+		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.count;
+	}
+	return RAW3270_IO_DONE;
+}
+
+/* Console view to a 3270 device. */
+static struct raw3270_fn con3270_fn = {
+	.activate = con3270_activate,
+	.deactivate = con3270_deactivate,
+	/* cast: con3270_irq takes struct con3270 * instead of the view. */
+	.intv = (void *) con3270_irq
+};
+
+/* Append the current line to the line list (once) and refresh the
+ * update list so it becomes visible. */
+static inline void
+con3270_cline_add(struct con3270 *cp)
+{
+	if (!list_empty(&cp->cline->list))
+		/* Already added. */
+		return;
+	list_add_tail(&cp->cline->list, &cp->lines);
+	cp->nr_lines++;
+	con3270_rebuild_update(cp);
+}
+
+/* Append one character (ascii -> ebcdic, control chars become blanks)
+ * to the current line and queue the line for output. */
+static inline void
+con3270_cline_insert(struct con3270 *cp, unsigned char c)
+{
+	cp->cline->string[cp->cline->len++] =
+		cp->view.ascebc[(c < ' ') ? ' ' : c];
+	if (list_empty(&cp->cline->update)) {
+		list_add_tail(&cp->cline->update, &cp->update);
+		cp->update_flags |= CON_UPDATE_LIST;
+	}
+}
+
+/*
+ * Terminate the current line: copy it into a right-sized string.
+ * Short lines get 4 extra bytes for a TO_RA order (target patched later
+ * by con3270_update_string); full-width lines are blank-padded to the
+ * screen width instead. The copy replaces cline in both lists and
+ * cline is reset for the next line.
+ */
+static inline void
+con3270_cline_end(struct con3270 *cp)
+{
+	struct string *s;
+	unsigned int size;
+
+	/* Copy cline. */
+	size = (cp->cline->len < cp->view.cols - 5) ?
+		cp->cline->len + 4 : cp->view.cols;
+	s = con3270_alloc_string(cp, size);
+	memcpy(s->string, cp->cline->string, cp->cline->len);
+	if (s->len < cp->view.cols - 5) {
+		s->string[s->len - 4] = TO_RA;
+		s->string[s->len - 1] = 0;
+	} else {
+		while (--size > cp->cline->len)
+			s->string[size] = cp->view.ascebc[' '];
+	}
+	/* Replace cline with allocated line s and reset cline. */
+	list_add(&s->list, &cp->cline->list);
+	list_del_init(&cp->cline->list);
+	if (!list_empty(&cp->cline->update)) {
+		list_add(&s->update, &cp->cline->update);
+		list_del_init(&cp->cline->update);
+	}
+	cp->cline->len = 0;
+}
+
+/*
+ * Write a string to the 3270 console
+ * console->write hook: characters are gathered into cline; a newline
+ * or a full screen row completes the line. Output is batched by a
+ * HZ/10 timer instead of being written immediately.
+ */
+static void
+con3270_write(struct console *co, const char *str, unsigned int count)
+{
+	struct con3270 *cp;
+	unsigned long flags;
+	unsigned char c;
+
+	cp = condev;
+	if (cp->view.dev)
+		raw3270_activate_view(&cp->view);
+	spin_lock_irqsave(&cp->view.lock, flags);
+	while (count-- > 0) {
+		c = *str++;
+		if (cp->cline->len == 0)
+			con3270_cline_add(cp);
+		if (c != '\n')
+			con3270_cline_insert(cp, c);
+		if (c == '\n' || cp->cline->len >= cp->view.cols)
+			con3270_cline_end(cp);
+	}
+	/* Setup timer to output current console buffer after 1/10 second */
+	if (cp->view.dev && !timer_pending(&cp->timer))
+		con3270_set_timer(cp, HZ/10);
+	spin_unlock_irqrestore(&cp->view.lock,flags);
+}
+
+extern struct tty_driver *tty3270_driver;
+
+/* console->device hook: map the console to the 3270 tty driver. */
+static struct tty_driver *
+con3270_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return tty3270_driver;
+}
+
+/*
+ * Wait for end of write request.
+ * Busy-waits until the write request is handed back by its completion
+ * callback; used only on the unblank/panic path.
+ */
+static void
+con3270_wait_write(struct con3270 *cp)
+{
+	while (!cp->write) {
+		raw3270_wait_cons_dev(cp->view.dev);
+		barrier();
+	}
+}
+
+/*
+ * panic() calls console_unblank before the system enters a
+ * disabled, endless loop.
+ * Synchronously pushes all pending output to the screen, dropping the
+ * view lock around each con3270_update call.
+ */
+static void
+con3270_unblank(void)
+{
+	struct con3270 *cp;
+	unsigned long flags;
+
+	cp = condev;
+	if (!cp->view.dev)
+		return;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	con3270_wait_write(cp);
+	cp->nr_up = 0;
+	con3270_rebuild_update(cp);
+	con3270_update_status(cp);
+	while (cp->update_flags != 0) {
+		spin_unlock_irqrestore(&cp->view.lock, flags);
+		con3270_update(cp);
+		spin_lock_irqsave(&cp->view.lock, flags);
+		con3270_wait_write(cp);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+/* console->setup hook; the 3270 console takes no options. */
+static int __init
+con3270_consetup(struct console *co, char *options)
+{
+	return 0;
+}
+
+/*
+ * The console structure for the 3270 console
+ */
+static struct console con3270 = {
+	.name	 = "tty3270",
+	.write	 = con3270_write,
+	.device	 = con3270_device,
+	.unblank = con3270_unblank,
+	.setup	 = con3270_consetup,
+	.flags	 = CON_PRINTBUFFER,
+};
+
+/*
+ * 3270 console initialization code called from console_init().
+ * NOTE: This is called before kmalloc is available.
+ * All memory therefore comes from the bootmem allocator; the single
+ * read/write/kreset requests and the string pool are set up here.
+ */
+static int __init
+con3270_init(void)
+{
+	struct ccw_device *cdev;
+	struct raw3270 *rp;
+	void *cbuf;
+	int i;
+
+	/* Check if 3270 is to be the console */
+	if (!CONSOLE_IS_3270)
+		return -ENODEV;
+
+	/* Set the console mode for VM */
+	if (MACHINE_IS_VM) {
+		cpcmd("TERM CONMODE 3270", 0, 0);
+		cpcmd("TERM AUTOCR OFF", 0, 0);
+	}
+
+	cdev = ccw_device_probe_console();
+	if (!cdev)
+		return -ENODEV;
+	rp = raw3270_setup_console(cdev);
+	if (IS_ERR(rp))
+		return PTR_ERR(rp);
+
+	condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270));
+	memset(condev, 0, sizeof(struct con3270));
+	condev->view.dev = rp;
+
+	condev->read = raw3270_request_alloc_bootmem(0);
+	condev->read->callback = con3270_read_callback;
+	condev->read->callback_data = condev;
+	condev->write =
+		raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE);
+	condev->kreset = raw3270_request_alloc_bootmem(1);
+
+	INIT_LIST_HEAD(&condev->lines);
+	INIT_LIST_HEAD(&condev->update);
+	init_timer(&condev->timer);
+	tasklet_init(&condev->readlet,
+		     (void (*)(unsigned long)) con3270_read_tasklet,
+		     (unsigned long) condev->read);
+
+	raw3270_add_view(&condev->view, &con3270_fn, 0);
+
+	/* String pool for output lines; CON3270_STRING_PAGES pages. */
+	INIT_LIST_HEAD(&condev->freemem);
+	for (i = 0; i < CON3270_STRING_PAGES; i++) {
+		cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
+		add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
+	}
+	condev->cline = alloc_string(&condev->freemem, condev->view.cols);
+	condev->cline->len = 0;
+	con3270_create_status(condev);
+	condev->input = alloc_string(&condev->freemem, 80);
+	register_console(&con3270);
+	return 0;
+}
+
+console_initcall(con3270_init);
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
new file mode 100644
index 000000000000..be463242cf0f
--- /dev/null
+++ b/drivers/s390/char/ctrlchar.c
@@ -0,0 +1,75 @@
+/*
+ * drivers/s390/char/ctrlchar.c
+ * Unified handling of special chars.
+ *
+ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <asm/errno.h>
+#include <linux/sysrq.h>
+#include <linux/ctype.h>
+
+#include "ctrlchar.h"
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static int ctrlchar_sysrq_key;
+
+static void
+ctrlchar_handle_sysrq(void *tty)
+{
+ handle_sysrq(ctrlchar_sysrq_key, NULL, (struct tty_struct *) tty);
+}
+
+static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, 0);
+#endif
+
+
+/**
+ * Check for special chars at start of input.
+ *
+ * @param buf  Console input buffer.
+ * @param len  Length of valid data in buffer.
+ * @param tty  The tty struct for this console.
+ * @return CTRLCHAR_NONE, if nothing matched,
+ *         CTRLCHAR_SYSRQ, if sysrq was encountered
+ *         otherwise char to be inserted logically or'ed
+ *         with CTRLCHAR_CTRL
+ */
+unsigned int
+ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
+{
+	if ((len < 2) || (len > 3))
+		return CTRLCHAR_NONE;
+
+	/* hat is 0xb1 in codepage 037 (US etc.) and thus */
+	/* converted to 0x5e in ascii ('^') */
+	/* NOTE(review): '\252' is 0xaa, not the 0xb1 the comment above
+	 * mentions -- verify which ebcdic value is actually intended. */
+	if ((buf[0] != '^') && (buf[0] != '\252'))
+		return CTRLCHAR_NONE;
+
+#ifdef CONFIG_MAGIC_SYSRQ
+	/* racy */
+	if (len == 3 && buf[1] == '-') {
+		/* "^-x": defer handle_sysrq(x) to process context. */
+		ctrlchar_sysrq_key = buf[2];
+		ctrlchar_work.data = tty;
+		schedule_work(&ctrlchar_work);
+		return CTRLCHAR_SYSRQ;
+	}
+#endif
+
+	if (len != 2)
+		return CTRLCHAR_NONE;
+
+	/* "^c", "^d", "^z": map to the tty's intr/eof/susp characters. */
+	switch (tolower(buf[1])) {
+	case 'c':
+		return INTR_CHAR(tty) | CTRLCHAR_CTRL;
+	case 'd':
+		return EOF_CHAR(tty) | CTRLCHAR_CTRL;
+	case 'z':
+		return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
+	}
+	return CTRLCHAR_NONE;
+}
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
new file mode 100644
index 000000000000..935ffa0ea7c6
--- /dev/null
+++ b/drivers/s390/char/ctrlchar.h
@@ -0,0 +1,20 @@
+/*
+ * drivers/s390/char/ctrlchar.c
+ * Unified handling of special chars.
+ *
+ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
+ *
+ */
+
+#include <linux/tty.h>
+
+extern unsigned int
+ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
+
+
+#define CTRLCHAR_NONE (1 << 8)
+#define CTRLCHAR_CTRL (2 << 8)
+#define CTRLCHAR_SYSRQ (3 << 8)
+
+#define CTRLCHAR_MASK (~0xffu)
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
new file mode 100644
index 000000000000..ca15adb140d1
--- /dev/null
+++ b/drivers/s390/char/defkeymap.c
@@ -0,0 +1,156 @@
+
+/* Do not edit this file! It was automatically generated by */
+/* loadkeys --mktable defkeymap.map > defkeymap.c */
+
+#include <linux/types.h>
+#include <linux/keyboard.h>
+#include <linux/kd.h>
+
+u_short plain_map[NR_KEYS] = {
+ 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5,
+ 0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c,
+ 0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef,
+ 0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac,
+ 0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5,
+ 0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f,
+ 0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf,
+ 0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022,
+};
+
+static u_short shift_map[NR_KEYS] = {
+ 0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
+ 0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1,
+ 0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070,
+ 0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4,
+ 0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078,
+ 0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae,
+ 0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc,
+ 0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7,
+ 0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
+ 0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5,
+ 0xf07d, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050,
+ 0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff,
+ 0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058,
+ 0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5,
+ 0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
+ 0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000,
+};
+
+static u_short ctrl_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200,
+};
+
+static u_short shift_ctrl_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112,
+ 0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b,
+ 0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+ushort *key_maps[MAX_NR_KEYMAPS] = {
+ plain_map, shift_map, 0, 0,
+ ctrl_map, shift_ctrl_map, 0
+};
+
+unsigned int keymap_count = 4;
+
+
+/*
+ * Philosophy: most people do not define more strings, but they who do
+ * often want quite a lot of string space. So, we statically allocate
+ * the default and allocate dynamically in chunks of 512 bytes.
+ */
+
+char func_buf[] = {
+ '\033', '[', '[', 'A', 0,
+ '\033', '[', '[', 'B', 0,
+ '\033', '[', '[', 'C', 0,
+ '\033', '[', '[', 'D', 0,
+ '\033', '[', '[', 'E', 0,
+ '\033', '[', '1', '7', '~', 0,
+ '\033', '[', '1', '8', '~', 0,
+ '\033', '[', '1', '9', '~', 0,
+ '\033', '[', '2', '0', '~', 0,
+ '\033', '[', '2', '1', '~', 0,
+ '\033', '[', '2', '3', '~', 0,
+ '\033', '[', '2', '4', '~', 0,
+ '\033', '[', '2', '5', '~', 0,
+ '\033', '[', '2', '6', '~', 0,
+ '\033', '[', '2', '8', '~', 0,
+ '\033', '[', '2', '9', '~', 0,
+ '\033', '[', '3', '1', '~', 0,
+ '\033', '[', '3', '2', '~', 0,
+ '\033', '[', '3', '3', '~', 0,
+ '\033', '[', '3', '4', '~', 0,
+};
+
+
+char *funcbufptr = func_buf;
+int funcbufsize = sizeof(func_buf);
+int funcbufleft = 0; /* space left */
+
+char *func_table[MAX_NR_FUNC] = {
+ func_buf + 0,
+ func_buf + 5,
+ func_buf + 10,
+ func_buf + 15,
+ func_buf + 20,
+ func_buf + 25,
+ func_buf + 31,
+ func_buf + 37,
+ func_buf + 43,
+ func_buf + 49,
+ func_buf + 55,
+ func_buf + 61,
+ func_buf + 67,
+ func_buf + 73,
+ func_buf + 79,
+ func_buf + 85,
+ func_buf + 91,
+ func_buf + 97,
+ func_buf + 103,
+ func_buf + 109,
+ 0,
+};
+
+struct kbdiacr accent_table[MAX_DIACR] = {
+ {'^', 'c', '\003'}, {'^', 'd', '\004'},
+ {'^', 'z', '\032'}, {'^', '\012', '\000'},
+};
+
+unsigned int accent_table_size = 4;
diff --git a/drivers/s390/char/defkeymap.map b/drivers/s390/char/defkeymap.map
new file mode 100644
index 000000000000..353b3f268824
--- /dev/null
+++ b/drivers/s390/char/defkeymap.map
@@ -0,0 +1,191 @@
+# Default keymap for 3270 (ebcdic codepage 037).
+keymaps 0-1,4-5
+
+keycode 0 = nul Oslash
+keycode 1 = nul a
+keycode 2 = nul b
+keycode 3 = nul c
+keycode 4 = nul d
+keycode 5 = nul e
+keycode 6 = nul f
+keycode 7 = nul g
+keycode 8 = nul h
+keycode 9 = nul i
+keycode 10 = nul guillemotleft
+keycode 11 = nul guillemotright
+keycode 12 = nul eth
+keycode 13 = nul yacute
+keycode 14 = nul thorn
+keycode 15 = nul plusminus
+keycode 16 = nul degree
+keycode 17 = nul j
+keycode 18 = nul k
+keycode 19 = nul l
+keycode 20 = nul m
+keycode 21 = nul n
+keycode 22 = nul o
+keycode 23 = nul p
+keycode 24 = nul q
+keycode 25 = nul r
+keycode 26 = nul nul
+keycode 27 = nul nul
+keycode 28 = nul ae
+keycode 29 = nul cedilla
+keycode 30 = nul AE
+keycode 31 = nul currency
+keycode 32 = nul mu
+keycode 33 = nul tilde
+keycode 34 = nul s
+keycode 35 = nul t
+keycode 36 = nul u
+keycode 37 = nul v
+keycode 38 = nul w
+keycode 39 = nul x
+keycode 40 = nul y
+keycode 41 = nul z
+keycode 42 = nul exclamdown
+keycode 43 = nul questiondown
+keycode 44 = nul ETH
+keycode 45 = nul Yacute
+keycode 46 = nul THORN
+keycode 47 = nul registered
+keycode 48 = nul dead_circumflex
+keycode 49 = nul sterling
+keycode 50 = nul yen
+keycode 51 = nul periodcentered
+keycode 52 = nul copyright
+keycode 53 = nul section
+keycode 54 = nul paragraph
+keycode 55 = nul onequarter
+keycode 56 = nul onehalf
+keycode 57 = nul threequarters
+keycode 58 = nul bracketleft
+keycode 59 = nul bracketright
+keycode 60 = nul nul
+keycode 61 = nul diaeresis
+keycode 62 = nul acute
+keycode 63 = nul multiply
+keycode 64 = space braceleft
+keycode 65 = nul A
+keycode 66 = acircumflex B
+keycode 67 = adiaeresis C
+keycode 68 = agrave D
+keycode 69 = aacute E
+keycode 70 = atilde F
+keycode 71 = aring G
+keycode 72 = ccedilla H
+keycode 73 = ntilde I
+keycode 74 = cent nul
+keycode 75 = period ocircumflex
+keycode 76 = less odiaeresis
+keycode 77 = parenleft ograve
+keycode 78 = plus oacute
+keycode 79 = bar otilde
+keycode 80 = ampersand braceright
+keycode 81 = eacute J
+keycode 82 = acircumflex K
+keycode 83 = ediaeresis L
+keycode 84 = egrave M
+keycode 85 = iacute N
+keycode 86 = icircumflex O
+keycode 87 = idiaeresis P
+keycode 88 = igrave Q
+keycode 89 = ssharp R
+keycode 90 = exclam onesuperior
+keycode 91 = dollar ucircumflex
+keycode 92 = asterisk udiaeresis
+keycode 93 = parenright ugrave
+keycode 94 = semicolon uacute
+keycode 95 = notsign ydiaeresis
+keycode 96 = minus backslash
+keycode 97 = slash division
+keycode 98 = Acircumflex S
+keycode 99 = Adiaeresis T
+keycode 100 = Agrave U
+keycode 101 = Aacute V
+keycode 102 = Atilde W
+keycode 103 = Aring X
+keycode 104 = Ccedilla Y
+keycode 105 = Ntilde Z
+keycode 106 = brokenbar twosuperior
+keycode 107 = comma Ocircumflex
+keycode 108 = percent Odiaeresis
+keycode 109 = underscore Ograve
+keycode 110 = greater Oacute
+keycode 111 = question Otilde
+keycode 112 = oslash zero
+keycode 113 = Eacute one
+keycode 114 = Ecircumflex two
+keycode 115 = Ediaeresis three
+keycode 116 = Egrave four
+keycode 117 = Iacute five
+keycode 118 = Icircumflex six
+keycode 119 = Idiaeresis seven
+keycode 120 = Igrave eight
+keycode 121 = grave nine
+keycode 122 = colon threesuperior
+keycode 123 = numbersign Ucircumflex
+keycode 124 = at Udiaeresis
+keycode 125 = apostrophe Ugrave
+keycode 126 = equal Uacute
+keycode 127 = quotedbl nul
+
+# AID keys
+control keycode 74 = F22
+control keycode 75 = F23
+control keycode 76 = F24
+control keycode 107 = Control_z # PA3
+control keycode 108 = Control_c # PA1
+control keycode 109 = KeyboardSignal # Clear
+control keycode 110 = Control_d # PA2
+control keycode 122 = F10
+control keycode 123 = F11 # F11
+control keycode 124 = Last_Console # F12
+control keycode 125 = Linefeed
+shift control keycode 65 = F13
+shift control keycode 66 = F14
+shift control keycode 67 = F15
+shift control keycode 68 = F16
+shift control keycode 69 = F17
+shift control keycode 70 = F18
+shift control keycode 71 = F19
+shift control keycode 72 = F20
+shift control keycode 73 = F21
+shift control keycode 113 = F1
+shift control keycode 114 = F2
+shift control keycode 115 = Incr_Console
+shift control keycode 116 = F4
+shift control keycode 117 = F5
+shift control keycode 118 = F6
+shift control keycode 119 = Scroll_Backward
+shift control keycode 120 = Scroll_Forward
+shift control keycode 121 = F9
+
+string F1 = "\033[[A"
+string F2 = "\033[[B"
+string F3 = "\033[[C"
+string F4 = "\033[[D"
+string F5 = "\033[[E"
+string F6 = "\033[17~"
+string F7 = "\033[18~"
+string F8 = "\033[19~"
+string F9 = "\033[20~"
+string F10 = "\033[21~"
+string F11 = "\033[23~"
+string F12 = "\033[24~"
+string F13 = "\033[25~"
+string F14 = "\033[26~"
+string F15 = "\033[28~"
+string F16 = "\033[29~"
+string F17 = "\033[31~"
+string F18 = "\033[32~"
+string F19 = "\033[33~"
+string F20 = "\033[34~"
+# string F21 ??
+# string F22 ??
+# string F23 ??
+# string F24 ??
+compose '^' 'c' to Control_c
+compose '^' 'd' to Control_d
+compose '^' 'z' to Control_z
+compose '^' '\012' to nul
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
new file mode 100644
index 000000000000..60afcdcf91c2
--- /dev/null
+++ b/drivers/s390/char/fs3270.c
@@ -0,0 +1,373 @@
+/*
+ * drivers/s390/char/fs3270.c
+ * IBM/3270 Driver - fullscreen driver.
+ *
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ */
+
+#include <linux/config.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+#include <asm/idals.h>
+
+#include "raw3270.h"
+#include "ctrlchar.h"
+
+struct raw3270_fn fs3270_fn;
+
+/* Per-device state of one fullscreen 3270 view. */
+struct fs3270 {
+	struct raw3270_view view;
+	pid_t fs_pid;			/* Pid of controlling program. */
+	int read_command;		/* ccw command to use for reads. */
+	int write_command;		/* ccw command to use for writes. */
+	int attention;			/* Got attention. */
+	struct raw3270_request *clear;	/* single clear request. */
+	wait_queue_head_t attn_wait;	/* Attention wait queue. */
+};
+
+/* Request callback: wake the waiter parked on the wait queue in 'data'. */
+static void
+fs3270_wake_up(struct raw3270_request *rq, void *data)
+{
+	wake_up((wait_queue_head_t *) data);
+}
+
+/*
+ * Start a raw3270 request on 'view' and sleep until it has completed.
+ * Returns the start error, or the final rc of the request.
+ */
+static int
+fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	wait_queue_head_t wq;
+	int rc;
+
+	init_waitqueue_head(&wq);
+	rq->callback = fs3270_wake_up;
+	rq->callback_data = &wq;
+	rc = raw3270_start(view, rq);
+	if (rc)
+		return rc;
+	/* Started successfully. Now wait for completion. */
+	wait_event(wq, raw3270_request_final(rq));
+	return rq->rc;
+}
+
+/* Callback for the one-shot clear request: recycle it for reuse. */
+static void
+fs3270_reset_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_request_reset(rq);
+}
+
+/*
+ * Switch to the fullscreen view: clear the screen with the
+ * pre-allocated erase-write-alternate request (TC_EWRITEA).
+ */
+static int
+fs3270_activate(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	raw3270_request_set_cmd(fp->clear, TC_EWRITEA);
+	fp->clear->callback = fs3270_reset_callback;
+	return raw3270_start(view, fp->clear);
+}
+
+/*
+ * Shutdown fullscreen view: send SIGHUP to the controlling program,
+ * since the fullscreen device is no longer operational.
+ */
+static void
+fs3270_deactivate(struct raw3270_view *view)
+{
+	// FIXME: is this a good idea? The user program using fullscreen 3270
+	// will die just because a console message appeared. On the other
+	// hand the fullscreen device is unoperational now.
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	if (fp->fs_pid != 0)
+		kill_proc(fp->fs_pid, SIGHUP, 1);
+	fp->fs_pid = 0;
+}
+
+/*
+ * Interrupt handler for the fullscreen view. Records ATTN interrupts
+ * (so a pending read can proceed) and finishes the current request.
+ */
+static int
+fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Set indication and wake waiters for attention. */
+	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+		fp->attention = 1;
+		wake_up(&fp->attn_wait);
+	}
+
+	if (rq) {
+		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.count;
+	}
+	return RAW3270_IO_DONE;
+}
+
+/*
+ * Process reads from fullscreen 3270. Waits for the device to raise
+ * ATTN, then performs a channel read into an IDAL buffer and copies
+ * the result to user space.
+ */
+static ssize_t
+fs3270_read(struct file *filp, char *data, size_t count, loff_t *off)
+{
+	struct fs3270 *fp;
+	struct raw3270_request *rq;
+	struct idal_buffer *ib;
+	int rc;
+
+	/* A single channel transfer is limited to 64K-1 bytes. */
+	if (count == 0 || count > 65535)
+		return -EINVAL;
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	ib = idal_buffer_alloc(count, 0);
+	if (!ib)
+		return -ENOMEM;
+	rq = raw3270_request_alloc(0);
+	if (!IS_ERR(rq)) {
+		/* NOTE(review): 6 and 2 appear to be 3270 read channel
+		 * opcodes -- confirm against raw3270.h before relying on
+		 * this comment. */
+		if (fp->read_command == 0 && fp->write_command != 0)
+			fp->read_command = 6;
+		raw3270_request_set_cmd(rq, fp->read_command ? : 2);
+		raw3270_request_set_idal(rq, ib);
+		/* Sleeps uninterruptibly until ATTN arrives. */
+		wait_event(fp->attn_wait, fp->attention);
+		rc = fs3270_do_io(&fp->view, rq);
+		if (rc == 0 && idal_buffer_to_user(ib, data, count))
+			rc = -EFAULT;
+		raw3270_request_free(rq);
+	} else
+		rc = PTR_ERR(rq);
+	idal_buffer_free(ib);
+	return rc;
+}
+
+/*
+ * Process writes to fullscreen 3270. Copies the user buffer into an
+ * IDAL buffer and starts a channel write with the configured (or
+ * default) write command.
+ */
+static ssize_t
+fs3270_write(struct file *filp, const char *data, size_t count, loff_t *off)
+{
+	struct fs3270 *fp;
+	struct raw3270_request *rq;
+	struct idal_buffer *ib;
+	int write_command;
+	int rc;
+
+	/* Reject empty and oversized transfers, as fs3270_read does;
+	 * a single channel transfer is limited to 64K-1 bytes. */
+	if (count == 0 || count > 65535)
+		return -EINVAL;
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	ib = idal_buffer_alloc(count, 0);
+	if (!ib)
+		return -ENOMEM;
+	rq = raw3270_request_alloc(0);
+	if (!IS_ERR(rq)) {
+		if (idal_buffer_from_user(ib, data, count) == 0) {
+			/* NOTE(review): 1/5/13 appear to be 3270 write
+			 * channel opcodes -- confirm against raw3270.h. */
+			write_command = fp->write_command ? : 1;
+			if (write_command == 5)
+				write_command = 13;
+			raw3270_request_set_cmd(rq, write_command);
+			raw3270_request_set_idal(rq, ib);
+			rc = fs3270_do_io(&fp->view, rq);
+		} else
+			rc = -EFAULT;
+		raw3270_request_free(rq);
+	} else
+		rc = PTR_ERR(rq);
+	idal_buffer_free(ib);
+	return rc;
+}
+
+/*
+ * process ioctl commands for the tube driver:
+ * TUBICMD/TUBOCMD set the read/write ccw commands, TUBGETI/TUBGETO
+ * read them back, TUBGETMOD reports the terminal geometry.
+ */
+static int
+fs3270_ioctl(struct inode *inode, struct file *filp,
+	     unsigned int cmd, unsigned long arg)
+{
+	struct fs3270 *fp;
+	struct raw3270_iocb iocb;
+	int rc;
+
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	rc = 0;
+	switch (cmd) {
+	case TUBICMD:
+		fp->read_command = arg;
+		break;
+	case TUBOCMD:
+		fp->write_command = arg;
+		break;
+	case TUBGETI:
+		rc = put_user(fp->read_command, (char *) arg);
+		break;
+	case TUBGETO:
+		rc = put_user(fp->write_command,(char *) arg);
+		break;
+	case TUBGETMOD:
+		/* NOTE(review): pf_cnt/re_cnt are hard-coded 24/20 --
+		 * presumably fixed limits of the interface; confirm. */
+		iocb.model = fp->view.model;
+		iocb.line_cnt = fp->view.rows;
+		iocb.col_cnt = fp->view.cols;
+		iocb.pf_cnt = 24;
+		iocb.re_cnt = 20;
+		iocb.map = 0;
+		if (copy_to_user((char *) arg, &iocb,
+				 sizeof(struct raw3270_iocb)))
+			rc = -EFAULT;
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Allocate fs3270 structure together with its embedded clear request.
+ * Returns the new structure or an ERR_PTR on allocation failure.
+ */
+static struct fs3270 *
+fs3270_alloc_view(void)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) kmalloc(sizeof(struct fs3270),GFP_KERNEL);
+	if (!fp)
+		return ERR_PTR(-ENOMEM);
+	memset(fp, 0, sizeof(struct fs3270));
+	fp->clear = raw3270_request_alloc(0);
+	/* raw3270_request_alloc() returns an ERR_PTR on failure; the
+	 * original test was inverted and freed fp on *success*. */
+	if (IS_ERR(fp->clear)) {
+		kfree(fp);
+		return ERR_PTR(-ENOMEM);
+	}
+	return fp;
+}
+
+/*
+ * Free fs3270 structure and its embedded clear request.
+ */
+static void
+fs3270_free_view(struct raw3270_view *view)
+{
+	raw3270_request_free(((struct fs3270 *) view)->clear);
+	kfree(view);
+}
+
+/*
+ * Unlink fs3270 data structure from filp.
+ * Nothing to do here; the view refcount is handled by the caller.
+ */
+static void
+fs3270_release(struct raw3270_view *view)
+{
+}
+
+/* View to a 3270 device. Can be console, tty or fullscreen. */
+struct raw3270_fn fs3270_fn = {
+	.activate = fs3270_activate,
+	.deactivate = fs3270_deactivate,
+	.intv = (void *) fs3270_irq,
+	.release = fs3270_release,
+	.free = fs3270_free_view
+};
+
+/*
+ * This routine is called whenever a 3270 fullscreen device is opened.
+ * Only one user at a time may own the fullscreen view of a device;
+ * a second open returns -EBUSY.
+ */
+static int
+fs3270_open(struct inode *inode, struct file *filp)
+{
+	struct fs3270 *fp;
+	int minor, rc;
+
+	if (imajor(filp->f_dentry->d_inode) != IBM_FS3270_MAJOR)
+		return -ENODEV;
+	minor = iminor(filp->f_dentry->d_inode);
+	/* Check if some other program is already using fullscreen mode. */
+	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
+	if (!IS_ERR(fp)) {
+		/* Drop the reference taken by raw3270_find_view. */
+		raw3270_put_view(&fp->view);
+		return -EBUSY;
+	}
+	/* Allocate fullscreen view structure. */
+	fp = fs3270_alloc_view();
+	if (IS_ERR(fp))
+		return PTR_ERR(fp);
+
+	init_waitqueue_head(&fp->attn_wait);
+	/* Remember the opener so fs3270_deactivate can signal it. */
+	fp->fs_pid = current->pid;
+	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+	if (rc) {
+		fs3270_free_view(&fp->view);
+		return rc;
+	}
+
+	rc = raw3270_activate_view(&fp->view);
+	if (rc) {
+		raw3270_del_view(&fp->view);
+		return rc;
+	}
+	filp->private_data = fp;
+	return 0;
+}
+
+/*
+ * This routine is called when the 3270 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static int
+fs3270_close(struct inode *inode, struct file *filp)
+{
+	struct fs3270 *fp;
+
+	fp = filp->private_data;
+	/* Use NULL for pointers, not the integer constant 0. */
+	filp->private_data = NULL;
+	if (fp)
+		raw3270_del_view(&fp->view);
+	return 0;
+}
+
+/* File operations of the /dev/3270/fs* character devices. */
+static struct file_operations fs3270_fops = {
+	.owner	 = THIS_MODULE,		/* owner */
+	.read	 = fs3270_read,		/* read */
+	.write	 = fs3270_write,	/* write */
+	.ioctl	 = fs3270_ioctl,	/* ioctl */
+	.open	 = fs3270_open,		/* open */
+	.release = fs3270_close,	/* release */
+};
+
+/*
+ * 3270 fullscreen driver initialization: register the character
+ * device major; minors are bound per raw3270 device elsewhere.
+ */
+static int __init
+fs3270_init(void)
+{
+	int rc;
+
+	rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
+	if (rc) {
+		printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
+		       IBM_FS3270_MAJOR, rc);
+		return rc;
+	}
+	return 0;
+}
+
+/* Module teardown: release the character device major. */
+static void __exit
+fs3270_exit(void)
+{
+	unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
+
+module_init(fs3270_init);
+module_exit(fs3270_exit);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
new file mode 100644
index 000000000000..fd43d99b45a3
--- /dev/null
+++ b/drivers/s390/char/keyboard.c
@@ -0,0 +1,519 @@
+/*
+ * drivers/s390/char/keyboard.c
+ * ebcdic keycode functions for s390 console drivers
+ *
+ * S390 version
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
+#include <asm/uaccess.h>
+
+#include "keyboard.h"
+
+/*
+ * Handler Tables.
+ * Only KT_LATIN (k_self), KT_FN (k_fn), KT_SPEC (k_spec) and
+ * KT_DEAD (k_dead) are handled; all other key types are ignored.
+ */
+#define K_HANDLERS\
+	k_self,		k_fn,		k_spec,		k_ignore,\
+	k_dead,		k_ignore,	k_ignore,	k_ignore,\
+	k_ignore,	k_ignore,	k_ignore,	k_ignore,\
+	k_ignore,	k_ignore,	k_ignore,	k_ignore
+
+typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
+static k_handler_fn K_HANDLERS;
+static k_handler_fn *k_handler[16] = { K_HANDLERS };
+
+/* maximum values each key_handler can handle */
+static const int kbd_max_vals[] = {
+	255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
+	NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
+
+/* ASCII representation of each dead (combining) diacritical. */
+static unsigned char ret_diacr[NR_DEAD] = {
+	'`', '\'', '^', '~', '"', ','
+};
+
+/*
+ * Allocate a kbd_data structure with private, writable copies of the
+ * global key_maps, func_table and accent_table. Returns NULL if any
+ * allocation fails (all partial allocations are released).
+ */
+struct kbd_data *
+kbd_alloc(void) {
+	struct kbd_data *kbd;
+	int i, len;
+
+	kbd = kmalloc(sizeof(struct kbd_data), GFP_KERNEL);
+	if (!kbd)
+		goto out;
+	memset(kbd, 0, sizeof(struct kbd_data));
+	kbd->key_maps = kmalloc(sizeof(key_maps), GFP_KERNEL);
+	/* Bug fix: test the freshly allocated copy, not the global
+	 * key_maps array (which can never be NULL). */
+	if (!kbd->key_maps)
+		goto out_kbd;
+	memset(kbd->key_maps, 0, sizeof(key_maps));
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		if (key_maps[i]) {
+			kbd->key_maps[i] =
+				kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL);
+			if (!kbd->key_maps[i])
+				goto out_maps;
+			memcpy(kbd->key_maps[i], key_maps[i],
+			       sizeof(u_short)*NR_KEYS);
+		}
+	}
+	kbd->func_table = kmalloc(sizeof(func_table), GFP_KERNEL);
+	if (!kbd->func_table)
+		goto out_maps;
+	memset(kbd->func_table, 0, sizeof(func_table));
+	for (i = 0; i < ARRAY_SIZE(func_table); i++) {
+		if (func_table[i]) {
+			len = strlen(func_table[i]) + 1;
+			kbd->func_table[i] = kmalloc(len, GFP_KERNEL);
+			if (!kbd->func_table[i])
+				goto out_func;
+			memcpy(kbd->func_table[i], func_table[i], len);
+		}
+	}
+	kbd->fn_handler =
+		kmalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
+	if (!kbd->fn_handler)
+		goto out_func;
+	memset(kbd->fn_handler, 0, sizeof(fn_handler_fn *) * NR_FN_HANDLER);
+	kbd->accent_table =
+		kmalloc(sizeof(struct kbdiacr)*MAX_DIACR, GFP_KERNEL);
+	if (!kbd->accent_table)
+		goto out_fn_handler;
+	memcpy(kbd->accent_table, accent_table,
+	       sizeof(struct kbdiacr)*MAX_DIACR);
+	kbd->accent_table_size = accent_table_size;
+	return kbd;
+
+out_fn_handler:
+	kfree(kbd->fn_handler);
+out_func:
+	for (i = 0; i < ARRAY_SIZE(func_table); i++)
+		if (kbd->func_table[i])
+			kfree(kbd->func_table[i]);
+	kfree(kbd->func_table);
+out_maps:
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
+		if (kbd->key_maps[i])
+			kfree(kbd->key_maps[i]);
+	kfree(kbd->key_maps);
+out_kbd:
+	kfree(kbd);
+out:
+	return NULL;
+}
+
+/*
+ * Free a kbd_data structure and all tables owned by it.
+ * Safe on a fully or partially populated structure: kfree(NULL) is a
+ * no-op, so the redundant per-entry NULL checks were dropped.
+ */
+void
+kbd_free(struct kbd_data *kbd)
+{
+	int i;
+
+	kfree(kbd->accent_table);
+	kfree(kbd->fn_handler);
+	for (i = 0; i < ARRAY_SIZE(func_table); i++)
+		kfree(kbd->func_table[i]);
+	kfree(kbd->func_table);
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
+		kfree(kbd->key_maps[i]);
+	kfree(kbd->key_maps);
+	kfree(kbd);
+}
+
+/*
+ * Generate ascii -> ebcdic translation table from kbd_data.
+ * Unmapped characters default to 0x40 (EBCDIC blank).
+ */
+void
+kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
+{
+	unsigned short *keymap, keysym;
+	int i, j, k;
+
+	memset(ascebc, 0x40, 256);
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		keymap = kbd->key_maps[i];
+		if (!keymap)
+			continue;
+		for (j = 0; j < NR_KEYS; j++) {
+			/* Bit 0 of the map number selects the high half
+			 * of the 256-entry ebcdic code space. */
+			k = ((i & 1) << 7) + j;
+			keysym = keymap[j];
+			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
+			    KTYP(keysym) == (KT_LETTER | 0xf0))
+				ascebc[KVAL(keysym)] = k;
+			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
+				ascebc[ret_diacr[KVAL(keysym)]] = k;
+		}
+	}
+}
+
+/*
+ * Generate ebcdic -> ascii translation table from kbd_data.
+ * The inverse of kbd_ascebc; unmapped codes default to ' '.
+ */
+void
+kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
+{
+	unsigned short *keymap, keysym;
+	int i, j, k;
+
+	memset(ebcasc, ' ', 256);
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		keymap = kbd->key_maps[i];
+		if (!keymap)
+			continue;
+		for (j = 0; j < NR_KEYS; j++) {
+			keysym = keymap[j];
+			/* Bit 0 of the map number selects the high half
+			 * of the 256-entry ebcdic code space. */
+			k = ((i & 1) << 7) + j;
+			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
+			    KTYP(keysym) == (KT_LETTER | 0xf0))
+				ebcasc[k] = KVAL(keysym);
+			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
+				ebcasc[k] = ret_diacr[KVAL(keysym)];
+		}
+	}
+}
+
+/*
+ * We have a combining character DIACR here, followed by the character CH.
+ * If the combination occurs in the table, return the corresponding value.
+ * Otherwise, if CH is a space or equals DIACR, return DIACR.
+ * Otherwise, conclude that DIACR was not combining after all,
+ * queue it and return CH.
+ */
+static unsigned char
+handle_diacr(struct kbd_data *kbd, unsigned char ch)
+{
+	int i, d;
+
+	/* Consume the pending diacritical. */
+	d = kbd->diacr;
+	kbd->diacr = 0;
+
+	for (i = 0; i < kbd->accent_table_size; i++) {
+		if (kbd->accent_table[i].diacr == d &&
+		    kbd->accent_table[i].base == ch)
+			return kbd->accent_table[i].result;
+	}
+
+	if (ch == ' ' || ch == d)
+		return d;
+
+	/* Not a combination: emit the diacritical by itself. */
+	kbd_put_queue(kbd->tty, d);
+	return ch;
+}
+
+/*
+ * Handle dead key: remember the diacritical for the next keystroke,
+ * or resolve a pending one against it.
+ */
+static void
+k_dead(struct kbd_data *kbd, unsigned char value)
+{
+	value = ret_diacr[value];
+	kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
+}
+
+/*
+ * Normal character handler: resolve a pending diacritical, then
+ * queue the character to the tty.
+ */
+static void
+k_self(struct kbd_data *kbd, unsigned char value)
+{
+	if (kbd->diacr)
+		value = handle_diacr(kbd, value);
+	kbd_put_queue(kbd->tty, value);
+}
+
+/*
+ * Special key handlers: no-op for unsupported key types.
+ */
+static void
+k_ignore(struct kbd_data *kbd, unsigned char value)
+{
+}
+
+/*
+ * Function key handler: queue the bound string, if any.
+ */
+static void
+k_fn(struct kbd_data *kbd, unsigned char value)
+{
+	if (kbd->func_table[value])
+		kbd_puts_queue(kbd->tty, kbd->func_table[value]);
+}
+
+/* Special-function handler: dispatch through the fn_handler table. */
+static void
+k_spec(struct kbd_data *kbd, unsigned char value)
+{
+	if (value >= NR_FN_HANDLER)
+		return;
+	if (kbd->fn_handler[value])
+		kbd->fn_handler[value](kbd);
+}
+
+/*
+ * Put utf8 character to tty flip buffer.
+ * UTF-8 is defined for words of up to 31 bits,
+ * but we need only 16 bits here (1- to 3-byte sequences).
+ */
+static void
+to_utf8(struct tty_struct *tty, ushort c)
+{
+	if (c < 0x80)
+		/*  0******* */
+		kbd_put_queue(tty, c);
+	else if (c < 0x800) {
+		/* 110***** 10****** */
+		kbd_put_queue(tty, 0xc0 | (c >> 6));
+		kbd_put_queue(tty, 0x80 | (c & 0x3f));
+	} else {
+		/* 1110**** 10****** 10****** */
+		kbd_put_queue(tty, 0xe0 | (c >> 12));
+		kbd_put_queue(tty, 0x80 | ((c >> 6) & 0x3f));
+		kbd_put_queue(tty, 0x80 | (c & 0x3f));
+	}
+}
+
+/*
+ * Process keycode: translate it through the keymaps, handle the
+ * SysRq escape sequence ("^" followed by a command character, ended
+ * by "-"), and dispatch to the per-type key handler.
+ */
+void
+kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
+{
+	unsigned short keysym;
+	unsigned char type, value;
+
+	if (!kbd || !kbd->tty)
+		return;
+
+	/* Keycode ranges select different keymaps; see the FIXME about
+	 * key_maps tricks in keyboard.h. */
+	if (keycode >= 384)
+		keysym = kbd->key_maps[5][keycode - 384];
+	else if (keycode >= 256)
+		keysym = kbd->key_maps[4][keycode - 256];
+	else if (keycode >= 128)
+		keysym = kbd->key_maps[1][keycode - 128];
+	else
+		keysym = kbd->key_maps[0][keycode];
+
+	type = KTYP(keysym);
+	if (type >= 0xf0) {
+		type -= 0xf0;
+		if (type == KT_LETTER)
+			type = KT_LATIN;
+		value = KVAL(keysym);
+#ifdef CONFIG_MAGIC_SYSRQ	       /* Handle the SysRq Hack */
+		if (kbd->sysrq) {
+			if (kbd->sysrq == K(KT_LATIN, '-')) {
+				kbd->sysrq = 0;
+				handle_sysrq(value, 0, kbd->tty);
+				return;
+			}
+			if (value == '-') {
+				kbd->sysrq = K(KT_LATIN, '-');
+				return;
+			}
+			/* Incomplete sysrq sequence. */
+			(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
+			kbd->sysrq = 0;
+		} else if ((type == KT_LATIN && value == '^') ||
+			   (type == KT_DEAD && ret_diacr[value] == '^')) {
+			kbd->sysrq = K(type, value);
+			return;
+		}
+#endif
+		(*k_handler[type])(kbd, value);
+	} else
+		to_utf8(kbd->tty, keysym);
+}
+
+/*
+ * Ioctl stuff.
+ * KDGKBENT/KDSKBENT: get/set a single keymap entry; setting
+ * K_NOSUCHMAP at index 0 deallocates a whole map.
+ */
+static int
+do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
+	      int cmd, int perm)
+{
+	struct kbentry tmp;
+	ushort *key_map, val, ov;
+
+	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+		return -EFAULT;
+#if NR_KEYS < 256
+	if (tmp.kb_index >= NR_KEYS)
+		return -EINVAL;
+#endif
+#if MAX_NR_KEYMAPS < 256
+	if (tmp.kb_table >= MAX_NR_KEYMAPS)
+		return -EINVAL;
+#endif
+
+	switch (cmd) {
+	case KDGKBENT:
+		key_map = kbd->key_maps[tmp.kb_table];
+		if (key_map) {
+		    val = U(key_map[tmp.kb_index]);
+		    if (KTYP(val) >= KBD_NR_TYPES)
+			val = K_HOLE;
+		} else
+		    val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
+		return put_user(val, &user_kbe->kb_value);
+	case KDSKBENT:
+		if (!perm)
+			return -EPERM;
+		if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
+			/* disallocate map */
+			key_map = kbd->key_maps[tmp.kb_table];
+			if (key_map) {
+			    kbd->key_maps[tmp.kb_table] = 0;
+			    kfree(key_map);
+			}
+			break;
+		}
+
+		if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
+			return -EINVAL;
+		if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
+			return -EINVAL;
+
+		/* Lazily allocate an all-K_HOLE map on first set. */
+		if (!(key_map = kbd->key_maps[tmp.kb_table])) {
+			int j;
+
+			key_map = (ushort *) kmalloc(sizeof(plain_map),
+						     GFP_KERNEL);
+			if (!key_map)
+				return -ENOMEM;
+			kbd->key_maps[tmp.kb_table] = key_map;
+			for (j = 0; j < NR_KEYS; j++)
+				key_map[j] = U(K_HOLE);
+		}
+		ov = U(key_map[tmp.kb_index]);
+		if (tmp.kb_value == ov)
+			break;	/* nothing to do */
+		/*
+		 * Attention Key: only CAP_SYS_ADMIN may (un)bind SAK.
+		 */
+		if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		key_map[tmp.kb_index] = U(tmp.kb_value);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * KDGKBSENT/KDSKBSENT: get/set the string bound to a function key.
+ */
+static int
+do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
+	       int cmd, int perm)
+{
+	unsigned char kb_func;
+	char *p;
+	int len;
+
+	/* Get u_kbs->kb_func. */
+	if (get_user(kb_func, &u_kbs->kb_func))
+		return -EFAULT;
+#if MAX_NR_FUNC < 256
+	if (kb_func >= MAX_NR_FUNC)
+		return -EINVAL;
+#endif
+
+	switch (cmd) {
+	case KDGKBSENT:
+		p = kbd->func_table[kb_func];
+		if (p) {
+			len = strlen(p);
+			if (len >= sizeof(u_kbs->kb_string))
+				len = sizeof(u_kbs->kb_string) - 1;
+			if (copy_to_user(u_kbs->kb_string, p, len))
+				return -EFAULT;
+		} else
+			len = 0;
+		if (put_user('\0', u_kbs->kb_string + len))
+			return -EFAULT;
+		break;
+	case KDSKBSENT:
+		if (!perm)
+			return -EPERM;
+		len = strnlen_user(u_kbs->kb_string,
+				   sizeof(u_kbs->kb_string) - 1);
+		/* strnlen_user returns 0 on an access fault. */
+		if (!len)
+			return -EFAULT;
+		/* Allocate len + 1 so the explicit terminator below stays
+		 * inside the buffer; the old code wrote p[len] into a
+		 * len-byte allocation (off-by-one heap overflow). */
+		p = kmalloc(len + 1, GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+		if (copy_from_user(p, u_kbs->kb_string, len)) {
+			kfree(p);
+			return -EFAULT;
+		}
+		p[len] = 0;
+		/* kfree(NULL) is a no-op, so no check is needed. */
+		kfree(kbd->func_table[kb_func]);
+		kbd->func_table[kb_func] = p;
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Keyboard ioctl dispatcher for the s390 console drivers. Handles the
+ * keymap, function-key-string and diacritical-table ioctls; everything
+ * else is passed back with -ENOIOCTLCMD.
+ */
+int
+kbd_ioctl(struct kbd_data *kbd, struct file *file,
+	  unsigned int cmd, unsigned long arg)
+{
+	struct kbdiacrs __user *a;
+	void __user *argp;
+	int ct, perm;
+
+	argp = (void __user *)arg;
+
+	/*
+	 * To have permissions to do most of the vt ioctls, we either have
+	 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
+	 */
+	perm = current->signal->tty == kbd->tty || capable(CAP_SYS_TTY_CONFIG);
+	switch (cmd) {
+	case KDGKBTYPE:
+		return put_user(KB_101, (char __user *)argp);
+	case KDGKBENT:
+	case KDSKBENT:
+		return do_kdsk_ioctl(kbd, argp, cmd, perm);
+	case KDGKBSENT:
+	case KDSKBSENT:
+		return do_kdgkb_ioctl(kbd, argp, cmd, perm);
+	case KDGKBDIACR:
+		a = argp;
+
+		if (put_user(kbd->accent_table_size, &a->kb_cnt))
+			return -EFAULT;
+		ct = kbd->accent_table_size;
+		if (copy_to_user(a->kbdiacr, kbd->accent_table,
+				 ct * sizeof(struct kbdiacr)))
+			return -EFAULT;
+		return 0;
+	case KDSKBDIACR:
+		a = argp;
+		if (!perm)
+			return -EPERM;
+		if (get_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		/* Reject negative counts too: a negative ct slipped past
+		 * the old "ct >= MAX_DIACR" test and was sign-extended
+		 * into a huge copy length below. */
+		if (ct < 0 || ct >= MAX_DIACR)
+			return -EINVAL;
+		if (copy_from_user(kbd->accent_table, a->kbdiacr,
+				   ct * sizeof(struct kbdiacr)))
+			return -EFAULT;
+		/* Commit the new size only after the copy succeeded. */
+		kbd->accent_table_size = ct;
+		return 0;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+/* Public interface used by the 3215/3270 console and tty drivers. */
+EXPORT_SYMBOL(kbd_ioctl);
+EXPORT_SYMBOL(kbd_ascebc);
+EXPORT_SYMBOL(kbd_free);
+EXPORT_SYMBOL(kbd_alloc);
+EXPORT_SYMBOL(kbd_keycode);
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
new file mode 100644
index 000000000000..3b4da5a9cf79
--- /dev/null
+++ b/drivers/s390/char/keyboard.h
@@ -0,0 +1,57 @@
+/*
+ * drivers/s390/char/keyboard.h
+ * ebcdic keycode functions for s390 console drivers
+ *
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/keyboard.h>
+
+/* Number of slots in the special-function handler table. */
+#define NR_FN_HANDLER	20
+
+struct kbd_data;
+
+typedef void (fn_handler_fn)(struct kbd_data *);
+
+/*
+ * FIXME: explain key_maps tricks.
+ */
+
+/* Per-console keyboard state: private copies of the keymap tables
+ * plus dead-key and sysrq sequence state. */
+struct kbd_data {
+	struct tty_struct *tty;		/* tty receiving the translated input */
+	unsigned short **key_maps;	/* private copy of the keymaps */
+	char **func_table;		/* private function key strings */
+	fn_handler_fn **fn_handler;	/* special-function dispatch table */
+	struct kbdiacr *accent_table;	/* private diacritical table */
+	unsigned int accent_table_size;
+	unsigned char diacr;		/* pending dead-key diacritical */
+	unsigned short sysrq;		/* partial sysrq escape sequence */
+};
+
+struct kbd_data *kbd_alloc(void);
+void kbd_free(struct kbd_data *);
+void kbd_ascebc(struct kbd_data *, unsigned char *);
+/* kbd_ebcasc is defined non-static in keyboard.c; declare it here so
+ * users of the header (and the definition itself) see a prototype. */
+void kbd_ebcasc(struct kbd_data *, unsigned char *);
+
+void kbd_keycode(struct kbd_data *, unsigned int);
+int kbd_ioctl(struct kbd_data *, struct file *, unsigned int, unsigned long);
+
+/*
+ * Helper Functions.
+ * Note: these were "extern inline", which under gnu89 semantics emits
+ * no out-of-line definition -- if the compiler ever declines to inline
+ * them the link fails. "static inline" is the correct form in a header.
+ */
+/* Queue one character on the tty flip buffer and schedule delivery. */
+static inline void
+kbd_put_queue(struct tty_struct *tty, int ch)
+{
+	tty_insert_flip_char(tty, ch, 0);
+	tty_schedule_flip(tty);
+}
+
+/* Queue a NUL-terminated string on the tty flip buffer. */
+static inline void
+kbd_puts_queue(struct tty_struct *tty, char *cp)
+{
+	while (*cp)
+		tty_insert_flip_char(tty, *cp++, 0);
+	tty_schedule_flip(tty);
+}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
new file mode 100644
index 000000000000..5fd3ad867386
--- /dev/null
+++ b/drivers/s390/char/monreader.c
@@ -0,0 +1,662 @@
+/*
+ * drivers/s390/char/monreader.c
+ *
+ * Character device driver for reading z/VM *MONITOR service records.
+ *
+ * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ *
+ * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
+#include <asm/extmem.h>
+#include <linux/poll.h>
+#include "../net/iucv.h"
+
+
+//#define MON_DEBUG			/* Debug messages on/off */
+
+#define MON_NAME "monreader"
+
+/* Logging helpers; P_DEBUG compiles away unless MON_DEBUG is set. */
+#define P_INFO(x...)	printk(KERN_INFO MON_NAME " info: " x)
+#define P_ERROR(x...)	printk(KERN_ERR MON_NAME " error: " x)
+#define P_WARNING(x...)	printk(KERN_WARNING MON_NAME " warning: " x)
+
+#ifdef MON_DEBUG
+#define P_DEBUG(x...)   printk(KERN_DEBUG MON_NAME " debug: " x)
+#else
+#define P_DEBUG(x...)   do {} while (0)
+#endif
+
+#define MON_COLLECT_SAMPLE 0x80
+#define MON_COLLECT_EVENT  0x40
+#define MON_SERVICE "*MONITOR"
+#define MON_IN_USE 0x01
+#define MON_MSGLIM 255
+
+/* Name of the DCSS segment backing the *MONITOR data. */
+static char mon_dcss_name[9] = "MONDCSS\0";
+
+/* One pending *MONITOR message and the reader's progress through it. */
+struct mon_msg {
+	u32 pos;
+	u32 mca_offset;
+	iucv_MessagePending local_eib;
+	char msglim_reached;
+	char replied_msglim;
+};
+
+/* Driver state: IUCV path plus a ring of MON_MSGLIM message slots. */
+struct mon_private {
+	u16 pathid;
+	iucv_handle_t iucv_handle;
+	struct mon_msg *msg_array[MON_MSGLIM];
+	unsigned int   write_index;
+	unsigned int   read_index;
+	atomic_t msglim_count;
+	atomic_t read_ready;
+	atomic_t iucv_connected;
+	atomic_t iucv_severed;
+};
+
+/* Single-open guard bit, see MON_IN_USE. */
+static unsigned long mon_in_use = 0;
+
+static unsigned long mon_dcss_start;
+static unsigned long mon_dcss_end;
+
+static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
+
+static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+/* The 0xff bytes are replaced at connect time with the EBCDIC DCSS name. */
+static u8 user_data_connect[16] = {
+	/* Version code, must be 0x01 for shared mode */
+	0x01,
+	/* what to collect */
+	MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
+	/* DCSS name in EBCDIC, 8 bytes padded with blanks */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+static u8 user_data_sever[16] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+
+/******************************************************************************
+ * helper functions *
+ *****************************************************************************/
+/*
+ * Create the 8 bytes EBCDIC DCSS segment name from
+ * an ASCII name, incl. padding with blanks.
+ */
+static inline void
+dcss_mkname(char *ascii_name, char *ebcdic_name)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		if (ascii_name[i] == '\0')
+			break;
+		ebcdic_name[i] = toupper(ascii_name[i]);
+	}	/* dropped a stray ';' that formed an empty statement */
+	for (; i < 8; i++)
+		ebcdic_name[i] = ' ';
+	ASCEBC(ebcdic_name, 8);
+}
+
+/*
+ * print appropriate error message for segment_load()/segment_type()
+ * return code
+ */
+static void
+mon_segment_warn(int rc, char* seg_name)
+{
+	switch (rc) {
+	case -ENOENT:
+		P_WARNING("cannot load/query segment %s, does not exist\n",
+			  seg_name);
+		break;
+	case -ENOSYS:
+		P_WARNING("cannot load/query segment %s, not running on VM\n",
+			  seg_name);
+		break;
+	case -EIO:
+		P_WARNING("cannot load/query segment %s, hardware error\n",
+			  seg_name);
+		break;
+	case -ENOTSUPP:
+		P_WARNING("cannot load/query segment %s, is a multi-part "
+			  "segment\n", seg_name);
+		break;
+	case -ENOSPC:
+		P_WARNING("cannot load/query segment %s, overlaps with "
+			  "storage\n", seg_name);
+		break;
+	case -EBUSY:
+		P_WARNING("cannot load/query segment %s, overlaps with "
+			  "already loaded dcss\n", seg_name);
+		break;
+	case -EPERM:
+		P_WARNING("cannot load/query segment %s, already loaded in "
+			  "incompatible mode\n", seg_name);
+		break;
+	case -ENOMEM:
+		P_WARNING("cannot load/query segment %s, out of memory\n",
+			  seg_name);
+		break;
+	case -ERANGE:
+		P_WARNING("cannot load/query segment %s, exceeds kernel "
+			  "mapping range\n", seg_name);
+		break;
+	default:
+		P_WARNING("cannot load/query segment %s, return value %i\n",
+			  seg_name, rc);
+		break;
+	}
+}
+
+/* Address of the first MCA byte; delivered in the IUCV parameter list. */
+static inline unsigned long
+mon_mca_start(struct mon_msg *monmsg)
+{
+	return monmsg->local_eib.ln1msg1.iprmmsg1_u32;
+}
+
+/* Address of the last MCA byte. */
+static inline unsigned long
+mon_mca_end(struct mon_msg *monmsg)
+{
+	return monmsg->local_eib.ln1msg2.ipbfln1f;
+}
+
+/* Byte 'index' of the MCA currently selected by mca_offset. */
+static inline u8
+mon_mca_type(struct mon_msg *monmsg, u8 index)
+{
+	return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
+}
+
+/* Total MCA size in bytes (a multiple of 12 when valid). */
+static inline u32
+mon_mca_size(struct mon_msg *monmsg)
+{
+	return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
+}
+
+/* Start address of the monitor records described by the current MCA. */
+static inline u32
+mon_rec_start(struct mon_msg *monmsg)
+{
+	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
+}
+
+/* End address of the monitor records described by the current MCA. */
+static inline u32
+mon_rec_end(struct mon_msg *monmsg)
+{
+	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
+}
+
+/*
+ * Sanity-check the current MCA: record and MCA ranges must be
+ * well-ordered and lie inside the loaded DCSS segment.
+ * Returns 0 if valid, -EINVAL if the MCA should be ignored.
+ */
+static inline int
+mon_check_mca(struct mon_msg *monmsg)
+{
+	if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
+	    (mon_rec_start(monmsg) < mon_dcss_start) ||
+	    (mon_rec_end(monmsg) > mon_dcss_end) ||
+	    (mon_mca_type(monmsg, 0) == 0) ||
+	    (mon_mca_size(monmsg) % 12 != 0) ||
+	    (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
+	    (mon_mca_end(monmsg) > mon_dcss_end) ||
+	    (mon_mca_start(monmsg) < mon_dcss_start) ||
+	    ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
+	{
+		P_DEBUG("READ, IGNORED INVALID MCA\n\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Acknowledge a fully consumed message to *MONITOR and advance the
+ * read ring, unless the message limit was reached, in which case the
+ * slot is only marked replied (see mon_next_message).
+ */
+static inline int
+mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv)
+{
+	u8 prmmsg[8];
+	int rc;
+
+	P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
+		"0x%08X\n\n",
+		monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid,
+		monmsg->local_eib.iptrgcls);
+	rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid,
+			       monmsg->local_eib.ipmsgid,
+			       monmsg->local_eib.iptrgcls,
+			       0, prmmsg);
+	atomic_dec(&monpriv->msglim_count);
+	if (likely(!monmsg->msglim_reached)) {
+		monmsg->pos = 0;
+		monmsg->mca_offset = 0;
+		monpriv->read_index = (monpriv->read_index + 1) %
+				      MON_MSGLIM;
+		atomic_dec(&monpriv->read_ready);
+	} else
+		monmsg->replied_msglim = 1;
+	if (rc) {
+		P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * Allocate the driver state and all MON_MSGLIM message slots.
+ * Returns NULL on failure with all partial allocations released.
+ */
+static inline struct mon_private *
+mon_alloc_mem(void)
+{
+	int i,j;
+	struct mon_private *monpriv;
+
+	monpriv = kmalloc(sizeof(struct mon_private), GFP_KERNEL);
+	if (!monpriv) {
+		P_ERROR("no memory for monpriv\n");
+		return NULL;
+	}
+	memset(monpriv, 0, sizeof(struct mon_private));
+	for (i = 0; i < MON_MSGLIM; i++) {
+		monpriv->msg_array[i] = kmalloc(sizeof(struct mon_msg),
+						    GFP_KERNEL);
+		if (!monpriv->msg_array[i]) {
+			P_ERROR("open, no memory for msg_array\n");
+			for (j = 0; j < i; j++)
+				kfree(monpriv->msg_array[j]);
+			/* Bug fix: also release monpriv itself, which the
+			 * original error path leaked. */
+			kfree(monpriv);
+			return NULL;
+		}
+		memset(monpriv->msg_array[i], 0, sizeof(struct mon_msg));
+	}
+	return monpriv;
+}
+
+/*
+ * Dump the current message/MCA state; compiles to nothing unless
+ * MON_DEBUG is defined.
+ */
+static inline void
+mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
+{
+#ifdef MON_DEBUG
+	u8 msg_type[2], mca_type;
+	unsigned long records_len;
+
+	records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
+
+	memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2);
+	EBCASC(msg_type, 2);
+	mca_type = mon_mca_type(monmsg, 0);
+	EBCASC(&mca_type, 1);
+
+	P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
+		monpriv->read_index, monpriv->write_index);
+	P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
+		monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid,
+		monmsg->local_eib.iptrgcls);
+	P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
+		msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
+		mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
+	P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
+		mon_mca_start(monmsg), mon_mca_end(monmsg));
+	P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
+		mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
+	if (mon_mca_size(monmsg) > 12)
+		P_DEBUG("READ, MORE THAN ONE MCA\n\n");
+#endif
+}
+
+/*
+ * Advance to the next 12-byte MCA within the message, if any,
+ * and restart the read position.
+ */
+static inline void
+mon_next_mca(struct mon_msg *monmsg)
+{
+	if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
+		return;
+	P_DEBUG("READ, NEXT MCA\n\n");
+	monmsg->mca_offset += 12;
+	monmsg->pos = 0;
+}
+
+/*
+ * Fetch the next readable message from the ring, or NULL if none is
+ * ready. A slot that was already replied at the message limit is
+ * recycled and reported as -EOVERFLOW to the reader.
+ */
+static inline struct mon_msg *
+mon_next_message(struct mon_private *monpriv)
+{
+	struct mon_msg *monmsg;
+
+	if (!atomic_read(&monpriv->read_ready))
+		return NULL;
+	monmsg = monpriv->msg_array[monpriv->read_index];
+	if (unlikely(monmsg->replied_msglim)) {
+		monmsg->replied_msglim = 0;
+		monmsg->msglim_reached = 0;
+		monmsg->pos = 0;
+		monmsg->mca_offset = 0;
+		P_WARNING("read, message limit reached\n");
+		monpriv->read_index = (monpriv->read_index + 1) %
+				      MON_MSGLIM;
+		atomic_dec(&monpriv->read_ready);
+		return ERR_PTR(-EOVERFLOW);
+	}
+	return monmsg;
+}
+
+
+/******************************************************************************
+ * IUCV handler *
+ *****************************************************************************/
+static void
+mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data)
+{
+ struct mon_private *monpriv = (struct mon_private *) pgm_data;
+
+ P_DEBUG("IUCV connection completed\n");
+ P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
+ "0x%02X, Sample = 0x%02X\n",
+ eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]);
+ atomic_set(&monpriv->iucv_connected, 1);
+ wake_up(&mon_conn_wait_queue);
+}
+
+static void
+mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data)
+{
+ struct mon_private *monpriv = (struct mon_private *) pgm_data;
+
+ P_ERROR("IUCV connection severed with rc = 0x%X\n",
+ (u8) eib->ipuser[0]);
+ atomic_set(&monpriv->iucv_severed, 1);
+ wake_up(&mon_conn_wait_queue);
+ wake_up_interruptible(&mon_read_wait_queue);
+}
+
/*
 * *MONITOR delivered a new message: store the pending-message
 * descriptor in the next write slot and wake readers.  The descriptor
 * is copied before read_ready is incremented, so a woken reader always
 * finds a filled slot.
 */
static void
mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data)
{
	struct mon_private *monpriv = (struct mon_private *) pgm_data;

	P_DEBUG("IUCV message pending\n");
	memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib,
	       sizeof(iucv_MessagePending));
	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
		/* Ring is now full; mark the slot so the reply says so. */
		P_WARNING("IUCV message pending, message limit (%i) reached\n",
			  MON_MSGLIM);
		monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
	}
	monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
	atomic_inc(&monpriv->read_ready);
	wake_up_interruptible(&mon_read_wait_queue);
}
+
/* Callbacks invoked by the IUCV layer for events on our *MONITOR path. */
static iucv_interrupt_ops_t mon_iucvops = {
	.ConnectionComplete = mon_iucv_ConnectionComplete,
	.ConnectionSevered  = mon_iucv_ConnectionSevered,
	.MessagePending     = mon_iucv_MessagePending,
};
+
+/******************************************************************************
+ * file operations *
+ *****************************************************************************/
+static int
+mon_open(struct inode *inode, struct file *filp)
+{
+ int rc, i;
+ struct mon_private *monpriv;
+
+ /*
+ * only one user allowed
+ */
+ if (test_and_set_bit(MON_IN_USE, &mon_in_use))
+ return -EBUSY;
+
+ monpriv = mon_alloc_mem();
+ if (!monpriv)
+ return -ENOMEM;
+
+ /*
+ * Register with IUCV and connect to *MONITOR service
+ */
+ monpriv->iucv_handle = iucv_register_program("my_monreader ",
+ MON_SERVICE,
+ NULL,
+ &mon_iucvops,
+ monpriv);
+ if (!monpriv->iucv_handle) {
+ P_ERROR("failed to register with iucv driver\n");
+ rc = -EIO;
+ goto out_error;
+ }
+ P_INFO("open, registered with IUCV\n");
+
+ rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect,
+ MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL,
+ monpriv->iucv_handle, NULL);
+ if (rc) {
+ P_ERROR("iucv connection to *MONITOR failed with "
+ "IPUSER SEVER code = %i\n", rc);
+ rc = -EIO;
+ goto out_unregister;
+ }
+ /*
+ * Wait for connection confirmation
+ */
+ wait_event(mon_conn_wait_queue,
+ atomic_read(&monpriv->iucv_connected) ||
+ atomic_read(&monpriv->iucv_severed));
+ if (atomic_read(&monpriv->iucv_severed)) {
+ atomic_set(&monpriv->iucv_severed, 0);
+ atomic_set(&monpriv->iucv_connected, 0);
+ rc = -EIO;
+ goto out_unregister;
+ }
+ P_INFO("open, established connection to *MONITOR service\n\n");
+ filp->private_data = monpriv;
+ return nonseekable_open(inode, filp);
+
+out_unregister:
+ iucv_unregister_program(monpriv->iucv_handle);
+out_error:
+ for (i = 0; i < MON_MSGLIM; i++)
+ kfree(monpriv->msg_array[i]);
+ kfree(monpriv);
+ clear_bit(MON_IN_USE, &mon_in_use);
+ return rc;
+}
+
/*
 * Release the device: sever the IUCV path, unregister with IUCV,
 * reset the per-open state and free everything.  IUCV errors are only
 * logged; release itself always reports success.
 */
static int
mon_close(struct inode *inode, struct file *filp)
{
	int rc, i;
	struct mon_private *monpriv = filp->private_data;

	/*
	 * Close IUCV connection and unregister
	 */
	rc = iucv_sever(monpriv->pathid, user_data_sever);
	if (rc)
		P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
	else
		P_INFO("close, terminated connection to *MONITOR service\n");

	rc = iucv_unregister_program(monpriv->iucv_handle);
	if (rc)
		P_ERROR("close, iucv_unregister failed with rc = %i\n", rc);
	else
		P_INFO("close, unregistered with IUCV\n");

	/* Reset ring state so a leak of this struct can't confuse anyone. */
	atomic_set(&monpriv->iucv_severed, 0);
	atomic_set(&monpriv->iucv_connected, 0);
	atomic_set(&monpriv->read_ready, 0);
	atomic_set(&monpriv->msglim_count, 0);
	monpriv->write_index = 0;
	monpriv->read_index = 0;

	for (i = 0; i < MON_MSGLIM; i++)
		kfree(monpriv->msg_array[i]);
	kfree(monpriv);
	clear_bit(MON_IN_USE, &mon_in_use);
	return 0;
}
+
/*
 * Read from the message at the current read index.  Data is copied
 * straight out of the DCSS segment (monmsg->pos is an address within
 * it): first the 12-byte monitor control area element, then the
 * monitor records it describes.  When a message is fully consumed it
 * is acknowledged via mon_send_reply().
 */
static ssize_t
mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
{
	struct mon_private *monpriv = filp->private_data;
	struct mon_msg *monmsg;
	int ret;
	u32 mce_start;

	monmsg = mon_next_message(monpriv);
	if (IS_ERR(monmsg))
		return PTR_ERR(monmsg);

	if (!monmsg) {
		/* Nothing ready: block, unless opened with O_NONBLOCK. */
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		ret = wait_event_interruptible(mon_read_wait_queue,
					atomic_read(&monpriv->read_ready) ||
					atomic_read(&monpriv->iucv_severed));
		if (ret)
			return ret;
		if (unlikely(atomic_read(&monpriv->iucv_severed)))
			return -EIO;
		monmsg = monpriv->msg_array[monpriv->read_index];
	}

	if (!monmsg->pos) {
		/* First read of this MCA: position at its start. */
		monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
		mon_read_debug(monmsg, monpriv);
	}
	if (mon_check_mca(monmsg))
		goto reply;

	/* read monitor control element (12 bytes) first */
	mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
	if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
		count = min(count, (size_t) mce_start + 12 - monmsg->pos);
		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
				   count);
		if (ret)
			return -EFAULT;
		monmsg->pos += count;
		/* MCE fully read: continue with the record area. */
		if (monmsg->pos == mce_start + 12)
			monmsg->pos = mon_rec_start(monmsg);
		goto out_copy;
	}

	/* read records */
	if (monmsg->pos <= mon_rec_end(monmsg)) {
		count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
				    + 1);
		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
				   count);
		if (ret)
			return -EFAULT;
		monmsg->pos += count;
		/* Records done: advance to the next MCA element, if any. */
		if (monmsg->pos > mon_rec_end(monmsg))
			mon_next_mca(monmsg);
		goto out_copy;
	}
reply:
	ret = mon_send_reply(monmsg, monpriv);
	return ret;

out_copy:
	*ppos += count;
	return count;
}
+
+static unsigned int
+mon_poll(struct file *filp, struct poll_table_struct *p)
+{
+ struct mon_private *monpriv = filp->private_data;
+
+ poll_wait(filp, &mon_read_wait_queue, p);
+ if (unlikely(atomic_read(&monpriv->iucv_severed)))
+ return POLLERR;
+ if (atomic_read(&monpriv->read_ready))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
/* File operations of the monreader device: read-only, pollable. */
static struct file_operations mon_fops = {
	.owner   = THIS_MODULE,
	.open    = &mon_open,
	.release = &mon_close,
	.read    = &mon_read,
	.poll    = &mon_poll,
};
+
/* Misc device "monreader" with a dynamically assigned minor number. */
static struct miscdevice mon_dev = {
	.name       = "monreader",
	.devfs_name = "monreader",
	.fops       = &mon_fops,
	.minor      = MISC_DYNAMIC_MINOR,
};
+
+/******************************************************************************
+ * module init/exit *
+ *****************************************************************************/
/*
 * Module init: verify we run under z/VM, load the monitor DCSS (which
 * must be of segment type SC) and register the misc device.  On
 * misc_register failure the segment is unloaded again.
 */
static int __init
mon_init(void)
{
	int rc;

	if (!MACHINE_IS_VM) {
		P_ERROR("not running under z/VM, driver not loaded\n");
		return -ENODEV;
	}

	rc = segment_type(mon_dcss_name);
	if (rc < 0) {
		mon_segment_warn(rc, mon_dcss_name);
		return rc;
	}
	if (rc != SEG_TYPE_SC) {
		P_ERROR("segment %s has unsupported type, should be SC\n",
			mon_dcss_name);
		return -EINVAL;
	}

	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
			  &mon_dcss_start, &mon_dcss_end);
	if (rc < 0) {
		mon_segment_warn(rc, mon_dcss_name);
		return -EINVAL;
	}
	/* Embed the segment name into the IUCV connect user data. */
	dcss_mkname(mon_dcss_name, &user_data_connect[8]);

	rc = misc_register(&mon_dev);
	if (rc < 0 ) {
		P_ERROR("misc_register failed, rc = %i\n", rc);
		goto out;
	}
	P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
	       mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
	       mon_dcss_end - mon_dcss_start + 1);
	return 0;

out:
	segment_unload(mon_dcss_name);
	return rc;
}
+
+static void __exit
+mon_exit(void)
+{
+ segment_unload(mon_dcss_name);
+ WARN_ON(misc_deregister(&mon_dev) != 0);
+ return;
+}
+
+
module_init(mon_init);
module_exit(mon_exit);

/* "mondcss" parameter: 8-character DCSS name plus terminating NUL. */
module_param_string(mondcss, mon_dcss_name, 9, 0444);
MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
		"service, max. 8 chars. Default is MONDCSS");

MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
MODULE_DESCRIPTION("Character device driver for reading z/VM "
		   "monitor service records.");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
new file mode 100644
index 000000000000..8e16a9716686
--- /dev/null
+++ b/drivers/s390/char/raw3270.c
@@ -0,0 +1,1335 @@
+/*
+ * drivers/s390/char/raw3270.c
+ * IBM/3270 Driver - core functions.
+ *
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ */
+
+#include <linux/config.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+
+#include "raw3270.h"
+
/* The main 3270 data structure: one instance per 3270 ccw device. */
struct raw3270 {
	struct list_head list;		/* Entry in raw3270_devices. */
	struct ccw_device *cdev;	/* Underlying channel device. */
	int minor;			/* Smallest free minor at setup time. */

	short model, rows, cols;	/* Terminal model and geometry. */
	unsigned long flags;		/* RAW3270_FLAGS_* bit numbers. */

	struct list_head req_queue;	/* Request queue. */
	struct list_head view_list;	/* List of available views. */
	struct raw3270_view *view;	/* Active view. */

	struct timer_list timer;	/* Device timer. */

	unsigned char *ascebc;		/* ascii -> ebcdic table */
};
+
/* raw3270->flags: bit numbers used with test_bit/set_bit.
 * NOTE(review): READY is 4 and CONSOLE is 8 even though these are bit
 * numbers, not masks — bits 3 and 5-7 are unused; confirm intent. */
#define RAW3270_FLAGS_14BITADDR 0	/* 14-bit buffer addresses */
#define RAW3270_FLAGS_BUSY 1	/* Device busy, leave it alone */
#define RAW3270_FLAGS_ATTN 2	/* Device sent an ATTN interrupt */
#define RAW3270_FLAGS_READY 4	/* Device is useable by views */
#define RAW3270_FLAGS_CONSOLE 8	/* Device is the console. */

/* Semaphore to protect global data of raw3270 (devices, views, etc). */
static DECLARE_MUTEX(raw3270_sem);

/* List of 3270 devices. */
static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices);

/*
 * Flag to indicate if the driver has been registered. Some operations
 * like waiting for the end of i/o need to be done differently as long
 * as the kernel is still starting up (console support).
 */
static int raw3270_registered;

/* Module parameters */
static int tubxcorrect = 0;	/* if set, remap '[', ']', '^' in ascebc */
module_param(tubxcorrect, bool, 0);

/*
 * Wait queue for device init/delete, view delete.
 */
DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);

/*
 * Encode array for 12 bit 3270 addresses: maps each 6-bit value to
 * the EBCDIC graphic character representing it in a 12-bit address.
 */
unsigned char raw3270_ebcgraf[64] = {
	0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
	0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
	0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
	0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
	0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
	0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
};
+
+void
+raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
+{
+ if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
+ cp[0] = (addr >> 8) & 0x3f;
+ cp[1] = addr & 0xff;
+ } else {
+ cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
+ cp[1] = raw3270_ebcgraf[addr & 0x3f];
+ }
+}
+
/*
 * Allocate a new 3270 ccw request plus an output buffer of the given
 * size (size 0 means no buffer).  GFP_DMA is used — presumably the
 * channel subsystem needs low addresses; confirm against arch docs.
 * Returns ERR_PTR(-ENOMEM) on failure; free with raw3270_request_free.
 */
struct raw3270_request *
raw3270_request_alloc(size_t size)
{
	struct raw3270_request *rq;

	/* Allocate request structure */
	rq = kmalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	memset(rq, 0, sizeof(struct raw3270_request));

	/* alloc output buffer. */
	if (size > 0) {
		rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
		if (!rq->buffer) {
			kfree(rq);
			return ERR_PTR(-ENOMEM);
		}
	}
	rq->size = size;
	INIT_LIST_HEAD(&rq->list);

	/*
	 * Setup ccw.
	 */
	rq->ccw.cda = __pa(rq->buffer);
	rq->ccw.flags = CCW_FLAG_SLI;

	return rq;
}
+
#ifdef CONFIG_TN3270_CONSOLE
/*
 * Allocate a new 3270 ccw request from bootmem. Only works very
 * early in the boot process. Only con3270.c should be using this.
 *
 * Fix: allocate and free sizeof(struct raw3270_request), not
 * sizeof(struct raw3270) — the old size was the wrong object's and
 * only worked while the device struct happened to be large enough.
 */
struct raw3270_request *
raw3270_request_alloc_bootmem(size_t size)
{
	struct raw3270_request *rq;

	rq = alloc_bootmem_low(sizeof(struct raw3270_request));
	if (!rq)
		return ERR_PTR(-ENOMEM);
	memset(rq, 0, sizeof(struct raw3270_request));

	/* alloc output buffer. */
	if (size > 0) {
		rq->buffer = alloc_bootmem_low(size);
		if (!rq->buffer) {
			free_bootmem((unsigned long) rq,
				     sizeof(struct raw3270_request));
			return ERR_PTR(-ENOMEM);
		}
	}
	rq->size = size;
	INIT_LIST_HEAD(&rq->list);

	/*
	 * Setup ccw.
	 */
	rq->ccw.cda = __pa(rq->buffer);
	rq->ccw.flags = CCW_FLAG_SLI;

	return rq;
}
#endif
+
+/*
+ * Free 3270 ccw request
+ */
+void
+raw3270_request_free (struct raw3270_request *rq)
+{
+ if (rq->buffer)
+ kfree(rq->buffer);
+ kfree(rq);
+}
+
/*
 * Reset request to initial state: clear the ccw, point it back at the
 * request's own buffer and zero the result fields.  The request must
 * not be queued (enforced by the BUG_ON).
 */
void
raw3270_request_reset(struct raw3270_request *rq)
{
	BUG_ON(!list_empty(&rq->list));
	rq->ccw.cmd_code = 0;
	rq->ccw.count = 0;
	rq->ccw.cda = __pa(rq->buffer);
	rq->ccw.flags = CCW_FLAG_SLI;
	rq->rescnt = 0;
	rq->rc = 0;
}
+
/*
 * Set command code (a TC_* channel command) to ccw of a request.
 */
void
raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
{
	rq->ccw.cmd_code = cmd;
}
+
/*
 * Add data fragment to output buffer.
 * Returns 0 on success, -E2BIG if the fragment does not fit into the
 * remaining buffer space (ccw.count tracks the bytes used so far).
 */
int
raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
{
	if (size + rq->ccw.count > rq->size)
		return -E2BIG;
	memcpy(rq->buffer + rq->ccw.count, data, size);
	rq->ccw.count += size;
	return 0;
}
+
/*
 * Set address/length pair to ccw of a request, bypassing the
 * request's own buffer.  The caller's buffer is used directly.
 */
void
raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
	rq->ccw.cda = __pa(data);
	rq->ccw.count = size;
}
+
/*
 * Set idal buffer to ccw of a request (indirect data addressing:
 * the CCW_FLAG_IDA flag tells the channel cda points at an IDA list).
 */
void
raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
	rq->ccw.cda = __pa(ib->data);
	rq->ccw.count = ib->size;
	rq->ccw.flags |= CCW_FLAG_IDA;
}
+
/*
 * Stop running ccw.  Tries halt twice, then clear, up to five attempts
 * total; stops as soon as one succeeds.  Returns 0 immediately if the
 * request already completed, otherwise the rc of the last attempt.
 * Caller must hold the ccw device lock.
 */
static int
raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
{
	int retries;
	int rc;

	if (raw3270_request_final(rq))
		return 0;
	/* Check if interrupt has already been processed */
	for (retries = 0; retries < 5; retries++) {
		if (retries < 2)
			rc = ccw_device_halt(rp->cdev, (long) rq);
		else
			rc = ccw_device_clear(rp->cdev, (long) rq);
		if (rc == 0)
			break;		/* termination successful */
	}
	return rc;
}
+
+static int
+raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ rc = raw3270_halt_io_nolock(rp, rq);
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+ return rc;
+}
+
/*
 * Add the request to the request queue, try to start it if the
 * 3270 device is idle. Return without waiting for end of i/o.
 * Called with the ccw device lock held (see raw3270_start).  Takes a
 * view reference which is dropped again when the request completes
 * in raw3270_irq.  Note the request is queued even when started —
 * raw3270_irq removes it on completion.
 */
static int
__raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
		struct raw3270_request *rq)
{
	rq->view = view;
	raw3270_get_view(view);
	if (list_empty(&rp->req_queue) &&
	    !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
		/* No other requests are on the queue. Start this one. */
		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
					  (unsigned long) rq, 0, 0);
		if (rq->rc) {
			/* Start failed: give the reference back. */
			raw3270_put_view(view);
			return rq->rc;
		}
	}
	list_add_tail(&rq->list, &rp->req_queue);
	return 0;
}
+
/*
 * Start a request on behalf of a view.  Fails with -EACCES if the
 * view is not the active one, -ENODEV if the device is not ready.
 */
int
raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
{
	unsigned long flags;
	struct raw3270 *rp;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
	rp = view->dev;
	if (!rp || rp->view != view)
		rc = -EACCES;
	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
		rc = -ENODEV;
	else
		rc = __raw3270_start(rp, view, rq);
	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
	return rc;
}
+
+int
+raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
+{
+ struct raw3270 *rp;
+
+ rp = view->dev;
+ rq->view = view;
+ raw3270_get_view(view);
+ list_add_tail(&rq->list, &rp->req_queue);
+ return 0;
+}
+
/*
 * 3270 interrupt routine, called from the ccw_device layer.
 * Classifies the interrupt (halted, device busy, unsolicited device
 * end, or view-specific), completes or restarts the current request,
 * then tries to start the next queued request.
 */
static void
raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct raw3270 *rp;
	struct raw3270_view *view;
	struct raw3270_request *rq;
	int rc;

	rp = (struct raw3270 *) cdev->dev.driver_data;
	if (!rp)
		return;
	rq = (struct raw3270_request *) intparm;
	view = rq ? rq->view : rp->view;

	if (IS_ERR(irb))
		rc = RAW3270_IO_RETRY;
	else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
		rq->rc = -EIO;
		rc = RAW3270_IO_DONE;
	} else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
				       DEV_STAT_UNIT_EXCEP)) {
		/* Handle CE-DE-UE and subsequent UDE */
		set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
		rc = RAW3270_IO_BUSY;
	} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
		/* Wait for UDE if busy flag is set. */
		if (irb->scsw.dstat & DEV_STAT_DEV_END) {
			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
			/* Got it, now retry. */
			rc = RAW3270_IO_RETRY;
		} else
			rc = RAW3270_IO_BUSY;
	} else if (view)
		rc = view->fn->intv(view, rq, irb);
	else
		rc = RAW3270_IO_DONE;

	switch (rc) {
	case RAW3270_IO_DONE:
		break;
	case RAW3270_IO_BUSY:
		/*
		 * Intervention required by the operator. We have to wait
		 * for unsolicited device end.
		 */
		return;
	case RAW3270_IO_RETRY:
		if (!rq)
			break;
		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
					  (unsigned long) rq, 0, 0);
		if (rq->rc == 0)
			return; /* Successfully restarted. */
		break;
	case RAW3270_IO_STOP:
		if (!rq)
			break;
		raw3270_halt_io_nolock(rp, rq);
		rq->rc = -EIO;
		break;
	default:
		BUG();
	}
	if (rq) {
		BUG_ON(list_empty(&rq->list));
		/* The request completed, remove from queue and do callback. */
		list_del_init(&rq->list);
		if (rq->callback)
			rq->callback(rq, rq->callback_data);
		/* Do put_device for get_device in raw3270_start. */
		raw3270_put_view(view);
	}
	/*
	 * Try to start each request on request queue until one is
	 * started successful.
	 */
	while (!list_empty(&rp->req_queue)) {
		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
					  (unsigned long) rq, 0, 0);
		if (rq->rc == 0)
			break;
		/* Start failed. Remove request and do callback. */
		list_del_init(&rq->list);
		if (rq->callback)
			rq->callback(rq, rq->callback_data);
		/* Do put_device for get_device in raw3270_start. */
		raw3270_put_view(view);
	}
}
+
+/*
+ * Size sensing.
+ */
+
struct raw3270_ua {	/* Query Reply structure for Usable Area */
	struct {	/* Usable Area Query Reply Base */
		short l;	/* Length of this structured field */
		char sfid;	/* 0x81 if Query Reply */
		char qcode;	/* 0x81 if Usable Area */
		char flags0;
		char flags1;
		short w;	/* Width of usable area */
		short h;	/* Height of usable area */
		char units;	/* 0x00:in; 0x01:mm */
		int xr;
		int yr;
		char aw;
		char ah;
		short buffsz;	/* Character buffer size, bytes */
		char xmin;
		char ymin;
		char xmax;
		char ymax;
	} __attribute__ ((packed)) uab;
	struct {	/* Alternate Usable Area Self-Defining Parameter */
		char l;		/* Length of this Self-Defining Parm */
		char sdpid;	/* 0x02 if Alternate Usable Area */
		char res;
		char auaid;	/* 0x01 is Id for the A U A */
		short wauai;	/* Width of AUAi */
		short hauai;	/* Height of AUAi */
		char auaunits;	/* 0x00:in, 0x01:mm */
		int auaxr;
		int auayr;
		char awauai;
		char ahauai;
	} __attribute__ ((packed)) aua;
} __attribute__ ((packed));
+
/* Shared buffer/request/diag area for device sensing and reset;
 * all use is serialized by raw3270_init_sem. */
static unsigned char raw3270_init_data[256];
static struct raw3270_request raw3270_init_request;
static struct diag210 raw3270_init_diag210;
static DECLARE_MUTEX(raw3270_init_sem);
+
/*
 * Interrupt handler for the init view: intervention-required unit
 * checks mark the device busy, command rejects map to -EOPNOTSUPP,
 * other unit checks to -EIO; on normal completion the residual count
 * is copied.  Attention interrupts are flagged for the sensing code.
 */
static int
raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
		 struct irb *irb)
{
	/*
	 * Unit-Check Processing:
	 * Expect Command Reject or Intervention Required.
	 */
	if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
		/* Request finished abnormally. */
		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
			return RAW3270_IO_BUSY;
		}
	}
	if (rq) {
		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
			if (irb->ecw[0] & SNS0_CMD_REJECT)
				rq->rc = -EOPNOTSUPP;
			else
				rq->rc = -EIO;
		} else
			/* Request finished normally. Copy residual count. */
			rq->rescnt = irb->scsw.count;
	}
	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
		set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
		wake_up(&raw3270_wait_queue);
	}
	return RAW3270_IO_DONE;
}
+
/* Minimal function table/view used only while sensing or resetting. */
static struct raw3270_fn raw3270_init_fn = {
	.intv = raw3270_init_irq
};

static struct raw3270_view raw3270_init_view = {
	.fn = &raw3270_init_fn
};
+
+/*
+ * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
+ * Wait for end of request. The request must have been started
+ * with raw3270_start, rc = 0. The device lock may NOT have been
+ * released between calling raw3270_start and raw3270_wait.
+ */
+static void
+raw3270_wake_init(struct raw3270_request *rq, void *data)
+{
+ wake_up((wait_queue_head_t *) data);
+}
+
/*
 * Special wait function that can cope with console initialization:
 * before the driver is registered (early boot console) it spins on
 * wait_cons_dev() under the device lock; afterwards it sleeps
 * interruptibly and halts the I/O if a signal arrives.
 */
static int
raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
		   struct raw3270_request *rq)
{
	unsigned long flags;
	wait_queue_head_t wq;
	int rc;

#ifdef CONFIG_TN3270_CONSOLE
	if (raw3270_registered == 0) {
		spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
		rq->callback = 0;
		rc = __raw3270_start(rp, view, rq);
		if (rc == 0)
			/* Poll for completion; no interrupts delivered yet. */
			while (!raw3270_request_final(rq)) {
				wait_cons_dev();
				barrier();
			}
		spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
		return rq->rc;
	}
#endif
	init_waitqueue_head(&wq);
	rq->callback = raw3270_wake_init;
	rq->callback_data = &wq;
	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
	rc = __raw3270_start(rp, view, rq);
	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
	if (rc)
		return rc;
	/* Now wait for the completion. */
	rc = wait_event_interruptible(wq, raw3270_request_final(rq));
	if (rc == -ERESTARTSYS) {	/* Interrupted by a signal. */
		raw3270_halt_io(view->dev, rq);
		/* No wait for the halt to complete. */
		wait_event(wq, raw3270_request_final(rq));
		return -ERESTARTSYS;
	}
	return rq->rc;
}
+
+static int
+__raw3270_size_device_vm(struct raw3270 *rp)
+{
+ int rc, model;
+
+ raw3270_init_diag210.vrdcdvno =
+ _ccw_device_get_device_number(rp->cdev);
+ raw3270_init_diag210.vrdclen = sizeof(struct diag210);
+ rc = diag210(&raw3270_init_diag210);
+ if (rc)
+ return rc;
+ model = raw3270_init_diag210.vrdccrmd;
+ switch (model) {
+ case 2:
+ rp->model = model;
+ rp->rows = 24;
+ rp->cols = 80;
+ break;
+ case 3:
+ rp->model = model;
+ rp->rows = 32;
+ rp->cols = 80;
+ break;
+ case 4:
+ rp->model = model;
+ rp->rows = 43;
+ rp->cols = 80;
+ break;
+ case 5:
+ rp->model = model;
+ rp->rows = 27;
+ rp->cols = 132;
+ break;
+ default:
+ printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ return rc;
+}
+
/*
 * Query the device's usable area to determine rows/columns and
 * addressing mode.  Falls back to diag 0x210 under z/VM when the
 * device rejects 'read partition'.
 */
static int
__raw3270_size_device(struct raw3270 *rp)
{
	static const unsigned char wbuf[] =
		{ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
	struct raw3270_ua *uap;
	unsigned short count;
	int rc;

	/*
	 * To determine the size of the 3270 device we need to do:
	 * 1) send a 'read partition' data stream to the device
	 * 2) wait for the attn interrupt that precedes the query reply
	 * 3) do a read modified to get the query reply
	 * To make things worse we have to cope with intervention
	 * required (3270 device switched to 'stand-by') and command
	 * rejects (old devices that can't do 'read partition').
	 */
	memset(&raw3270_init_request, 0, sizeof(raw3270_init_request));
	memset(raw3270_init_data, 0, sizeof(raw3270_init_data));
	/* Store 'read partition' data stream to raw3270_init_data */
	memcpy(raw3270_init_data, wbuf, sizeof(wbuf));
	INIT_LIST_HEAD(&raw3270_init_request.list);
	raw3270_init_request.ccw.cmd_code = TC_WRITESF;
	raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
	raw3270_init_request.ccw.count = sizeof(wbuf);
	raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);

	rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
	if (rc) {
		/* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
		if (rc == -EOPNOTSUPP && MACHINE_IS_VM)
			return __raw3270_size_device_vm(rp);
		return rc;
	}

	/* Wait for attention interrupt. */
#ifdef CONFIG_TN3270_CONSOLE
	if (raw3270_registered == 0) {
		unsigned long flags;

		/* Early console: poll for the ATTN under the device lock. */
		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
		while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
			wait_cons_dev();
		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
	} else
#endif
		rc = wait_event_interruptible(raw3270_wait_queue,
			test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
	if (rc)
		return rc;

	/*
	 * The device accepted the 'read partition' command. Now
	 * set up a read ccw and issue it.
	 */
	raw3270_init_request.ccw.cmd_code = TC_READMOD;
	raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
	raw3270_init_request.ccw.count = sizeof(raw3270_init_data);
	raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
	rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
	if (rc)
		return rc;
	/* Got a Query Reply */
	count = sizeof(raw3270_init_data) - raw3270_init_request.rescnt;
	uap = (struct raw3270_ua *) (raw3270_init_data + 1);
	/* Paranoia check. */
	if (raw3270_init_data[0] != 0x88 || uap->uab.qcode != 0x81)
		return -EOPNOTSUPP;
	/* Copy rows/columns of default Usable Area */
	rp->rows = uap->uab.h;
	rp->cols = uap->uab.w;
	/* Check for 14 bit addressing */
	if ((uap->uab.flags0 & 0x0d) == 0x01)
		set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
	/* Check for Alternate Usable Area */
	if (uap->uab.l == sizeof(struct raw3270_ua) &&
	    uap->aua.sdpid == 0x02) {
		rp->rows = uap->aua.hauai;
		rp->cols = uap->aua.wauai;
	}
	return 0;
}
+
+static int
+raw3270_size_device(struct raw3270 *rp)
+{
+ int rc;
+
+ down(&raw3270_init_sem);
+ rp->view = &raw3270_init_view;
+ raw3270_init_view.dev = rp;
+ rc = __raw3270_size_device(rp);
+ raw3270_init_view.dev = 0;
+ rp->view = 0;
+ up(&raw3270_init_sem);
+ if (rc == 0) { /* Found something. */
+ /* Try to find a model. */
+ rp->model = 0;
+ if (rp->rows == 24 && rp->cols == 80)
+ rp->model = 2;
+ if (rp->rows == 32 && rp->cols == 80)
+ rp->model = 3;
+ if (rp->rows == 43 && rp->cols == 80)
+ rp->model = 4;
+ if (rp->rows == 27 && rp->cols == 132)
+ rp->model = 5;
+ }
+ return rc;
+}
+
/*
 * Reset the device by writing a single keyboard-restore order with an
 * erase/write-alternate command, serialized by raw3270_init_sem.
 */
static int
raw3270_reset_device(struct raw3270 *rp)
{
	int rc;

	down(&raw3270_init_sem);
	memset(&raw3270_init_request, 0, sizeof(raw3270_init_request));
	memset(raw3270_init_data, 0, sizeof(raw3270_init_data));
	/* Store reset data stream to raw3270_init_data/raw3270_init_request */
	raw3270_init_data[0] = TW_KR;
	INIT_LIST_HEAD(&raw3270_init_request.list);
	raw3270_init_request.ccw.cmd_code = TC_EWRITEA;
	raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
	raw3270_init_request.ccw.count = 1;
	raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
	rp->view = &raw3270_init_view;
	raw3270_init_view.dev = rp;
	rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
	raw3270_init_view.dev = 0;
	rp->view = 0;
	up(&raw3270_init_sem);
	return rc;
}
+
/*
 * Setup new 3270 device: initialize the structure and translation
 * table, insert it into the (sorted) device list at the smallest
 * unused minor number, and hook up the interrupt handler.
 * Returns -EUSERS when all RAW3270_MAXDEVS minors are taken.
 */
static int
raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
{
	struct list_head *l;
	struct raw3270 *tmp;
	int minor;

	memset(rp, 0, sizeof(struct raw3270));
	/* Copy ebcdic -> ascii translation table. */
	memcpy(ascebc, _ascebc, 256);
	if (tubxcorrect) {
		/* correct brackets and circumflex */
		ascebc['['] = 0xad;
		ascebc[']'] = 0xbd;
		ascebc['^'] = 0xb0;
	}
	rp->ascebc = ascebc;

	/* Set defaults. */
	rp->rows = 24;
	rp->cols = 80;

	INIT_LIST_HEAD(&rp->req_queue);
	INIT_LIST_HEAD(&rp->view_list);

	/*
	 * Add device to list and find the smallest unused minor
	 * number for it.
	 */
	down(&raw3270_sem);
	/* Keep the list sorted. */
	minor = 0;
	rp->minor = -1;
	list_for_each(l, &raw3270_devices) {
		tmp = list_entry(l, struct raw3270, list);
		/* First gap in the sorted minors: insert here. */
		if (tmp->minor > minor) {
			rp->minor = minor;
			__list_add(&rp->list, l->prev, l);
			break;
		}
		minor++;
	}
	/* No gap found: append, if a minor is still available. */
	if (rp->minor == -1 && minor < RAW3270_MAXDEVS) {
		rp->minor = minor;
		list_add_tail(&rp->list, &raw3270_devices);
	}
	up(&raw3270_sem);
	/* No free minor number? Then give up. */
	if (rp->minor == -1)
		return -EUSERS;
	rp->cdev = cdev;
	cdev->dev.driver_data = rp;
	cdev->handler = raw3270_irq;
	return 0;
}
+
+#ifdef CONFIG_TN3270_CONSOLE
/*
 * Setup 3270 device configured as console: allocate from bootmem
 * (this runs before the slab allocator is up), then reset, size and
 * reset the device again before marking it ready.
 * NOTE(review): on error the bootmem allocations are not released —
 * presumably acceptable this early in boot; confirm.
 */
struct raw3270 *
raw3270_setup_console(struct ccw_device *cdev)
{
	struct raw3270 *rp;
	char *ascebc;
	int rc;

	rp = (struct raw3270 *) alloc_bootmem(sizeof(struct raw3270));
	ascebc = (char *) alloc_bootmem(256);
	rc = raw3270_setup_device(cdev, rp, ascebc);
	if (rc)
		return ERR_PTR(rc);
	set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
	rc = raw3270_reset_device(rp);
	if (rc)
		return ERR_PTR(rc);
	rc = raw3270_size_device(rp);
	if (rc)
		return ERR_PTR(rc);
	rc = raw3270_reset_device(rp);
	if (rc)
		return ERR_PTR(rc);
	set_bit(RAW3270_FLAGS_READY, &rp->flags);
	return rp;
}
+
+void
+raw3270_wait_cons_dev(struct raw3270 *rp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ wait_cons_dev();
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+}
+
+#endif
+
+/*
+ * Create a 3270 device structure.
+ */
+static struct raw3270 *
+raw3270_create_device(struct ccw_device *cdev)
+{
+ struct raw3270 *rp;
+ char *ascebc;
+ int rc;
+
+ rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL);
+ if (!rp)
+ return ERR_PTR(-ENOMEM);
+ ascebc = kmalloc(256, GFP_KERNEL);
+ if (!ascebc) {
+ kfree(rp);
+ return ERR_PTR(-ENOMEM);
+ }
+ rc = raw3270_setup_device(cdev, rp, ascebc);
+ if (rc) {
+ kfree(rp->ascebc);
+ kfree(rp);
+ rp = ERR_PTR(rc);
+ }
+ /* Get reference to ccw_device structure. */
+ get_device(&cdev->dev);
+ return rp;
+}
+
/*
 * Activate a view: deactivate the current one (if any) and make this
 * view active.  If activation fails the old view is restored; if that
 * fails too, any other view on the device is tried.
 * Returns 0 on success, -ENODEV if the device is gone or not ready.
 */
int
raw3270_activate_view(struct raw3270_view *view)
{
	struct raw3270 *rp;
	struct raw3270_view *oldview, *nv;
	unsigned long flags;
	int rc;

	rp = view->dev;
	if (!rp)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
	if (rp->view == view)
		/* Already active: nothing to do. */
		rc = 0;
	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
		rc = -ENODEV;
	else {
		oldview = 0;
		if (rp->view) {
			oldview = rp->view;
			oldview->fn->deactivate(oldview);
		}
		rp->view = view;
		rc = view->fn->activate(view);
		if (rc) {
			/* Didn't work. Try to reactivate the old view. */
			rp->view = oldview;
			if (!oldview || oldview->fn->activate(oldview) != 0) {
				/* Didn't work as well. Try any other view. */
				list_for_each_entry(nv, &rp->view_list, list)
					if (nv != view && nv != oldview) {
						rp->view = nv;
						if (nv->fn->activate(nv) == 0)
							break;
						rp->view = 0;
					}
			}
		}
	}
	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
	return rc;
}
+
/*
 * Deactivate current view: move it to the end of the view list and
 * try to activate another view from the list.
 */
void
raw3270_deactivate_view(struct raw3270_view *view)
{
	unsigned long flags;
	struct raw3270 *rp;

	rp = view->dev;
	if (!rp)
		return;
	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
	if (rp->view == view) {
		view->fn->deactivate(view);
		rp->view = 0;
		/* Move deactivated view to end of list. */
		list_del_init(&view->list);
		list_add_tail(&view->list, &rp->view_list);
		/* Try to activate another view. */
		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
			list_for_each_entry(view, &rp->view_list, list)
				if (view->fn->activate(view) == 0) {
					rp->view = view;
					break;
				}
		}
	}
	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
+
/*
 * Add view to device with minor "minor": copy the device geometry into
 * the view and append it to the device's view list.  Returns 0 on
 * success, -ENODEV if no ready device has that minor number.
 * NOTE(review): ref_count starts at 2 — presumably one reference for
 * the caller and one for the list linkage; confirm against
 * raw3270_put_view/raw3270_del_view.
 */
int
raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
{
	unsigned long flags;
	struct raw3270 *rp;
	int rc;

	down(&raw3270_sem);
	rc = -ENODEV;
	list_for_each_entry(rp, &raw3270_devices, list) {
		if (rp->minor != minor)
			continue;
		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
			atomic_set(&view->ref_count, 2);
			view->dev = rp;
			view->fn = fn;
			view->model = rp->model;
			view->rows = rp->rows;
			view->cols = rp->cols;
			view->ascebc = rp->ascebc;
			spin_lock_init(&view->lock);
			list_add_tail(&view->list, &rp->view_list);
			rc = 0;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
		break;
	}
	up(&raw3270_sem);
	return rc;
}
+
+/*
+ * Find specific view of device with minor "minor".
+ */
+struct raw3270_view *
+raw3270_find_view(struct raw3270_fn *fn, int minor)
+{
+ struct raw3270 *rp;
+ struct raw3270_view *view, *tmp;
+ unsigned long flags;
+
+ down(&raw3270_sem);
+ /* -ENODEV: no device with that minor; -ENOENT: device found
+  * but no view with the requested function table. */
+ view = ERR_PTR(-ENODEV);
+ list_for_each_entry(rp, &raw3270_devices, list) {
+ if (rp->minor != minor)
+ continue;
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
+ view = ERR_PTR(-ENOENT);
+ list_for_each_entry(tmp, &rp->view_list, list) {
+ if (tmp->fn == fn) {
+ /* Caller must drop this reference with
+  * raw3270_put_view. */
+ raw3270_get_view(tmp);
+ view = tmp;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+ break;
+ }
+ up(&raw3270_sem);
+ return view;
+}
+
+/*
+ * Remove view from device and free view structure via call to view->fn->free.
+ */
+void
+raw3270_del_view(struct raw3270_view *view)
+{
+ unsigned long flags;
+ struct raw3270 *rp;
+ struct raw3270_view *nv;
+
+ rp = view->dev;
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ if (rp->view == view) {
+ view->fn->deactivate(view);
+ rp->view = 0;
+ }
+ list_del_init(&view->list);
+ if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
+ /* Try to activate another view. */
+ list_for_each_entry(nv, &rp->view_list, list) {
+ /* Bug fix: activate the candidate view "nv", not the
+  * view being deleted (which is already off the list
+  * and about to be freed). */
+ if (nv->fn->activate(nv) == 0) {
+ rp->view = nv;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+ /* Wait for reference counter to drop to zero. */
+ atomic_dec(&view->ref_count);
+ wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
+ /* Free the view structure via the owner's callback, if provided. */
+ if (view->fn->free)
+ view->fn->free(view);
+}
+
+/*
+ * Remove a 3270 device structure.
+ */
+static void
+raw3270_delete_device(struct raw3270 *rp)
+{
+ struct ccw_device *cdev;
+
+ /* Remove from device chain. */
+ down(&raw3270_sem);
+ list_del_init(&rp->list);
+ up(&raw3270_sem);
+
+ /* Disconnect from ccw_device. */
+ cdev = rp->cdev;
+ rp->cdev = 0;
+ cdev->dev.driver_data = 0;
+ cdev->handler = 0;
+
+ /* Put ccw_device structure (drops the reference taken at create). */
+ put_device(&cdev->dev);
+
+ /* Now free raw3270 structure. */
+ kfree(rp->ascebc);
+ kfree(rp);
+}
+
+static int
+raw3270_probe (struct ccw_device *cdev)
+{
+ /* Nothing to do at probe time; setup happens in raw3270_set_online. */
+ return 0;
+}
+
+/*
+ * Additional attributes for a 3270 device
+ */
+/* Sysfs attribute: 3270 model number of this device (read-only). */
+static ssize_t
+raw3270_model_show(struct device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%i\n",
+ ((struct raw3270 *) dev->driver_data)->model);
+}
+static DEVICE_ATTR(model, 0444, raw3270_model_show, 0);
+
+/* Sysfs attribute: number of rows of the 3270 screen (read-only). */
+static ssize_t
+raw3270_rows_show(struct device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%i\n",
+ ((struct raw3270 *) dev->driver_data)->rows);
+}
+static DEVICE_ATTR(rows, 0444, raw3270_rows_show, 0);
+
+/* Sysfs attribute: number of columns of the 3270 screen (read-only). */
+static ssize_t
+raw3270_columns_show(struct device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%i\n",
+ ((struct raw3270 *) dev->driver_data)->cols);
+}
+static DEVICE_ATTR(columns, 0444, raw3270_columns_show, 0);
+
+/* Attribute group exposing model/rows/columns under the ccw device. */
+static struct attribute * raw3270_attrs[] = {
+ &dev_attr_model.attr,
+ &dev_attr_rows.attr,
+ &dev_attr_columns.attr,
+ NULL,
+};
+
+static struct attribute_group raw3270_attr_group = {
+ .attrs = raw3270_attrs,
+};
+
+static void
+raw3270_create_attributes(struct raw3270 *rp)
+{
+ //FIXME: check return code
+ /* NOTE(review): sysfs_create_group can fail (e.g. -ENOMEM); the
+  * attributes would then silently be missing from sysfs. */
+ sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
+}
+
+/*
+ * Notifier for device addition/removal
+ */
+/* One registered callback: notifier(minor, 1) on device addition,
+ * notifier(minor, 0) on removal. Chained on raw3270_notifier below. */
+struct raw3270_notifier {
+ struct list_head list;
+ void (*notifier)(int, int);
+};
+
+static struct list_head raw3270_notifier = LIST_HEAD_INIT(raw3270_notifier);
+
+int raw3270_register_notifier(void (*notifier)(int, int))
+{
+ struct raw3270_notifier *np;
+ struct raw3270 *rp;
+
+ np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
+ if (!np)
+ return -ENOMEM;
+ np->notifier = notifier;
+ down(&raw3270_sem);
+ list_add_tail(&np->list, &raw3270_notifier);
+ /* Replay an "added" event for every already-known device. The
+  * device reference taken here is not dropped in this function -
+  * presumably released elsewhere; TODO confirm ownership. */
+ list_for_each_entry(rp, &raw3270_devices, list) {
+ get_device(&rp->cdev->dev);
+ notifier(rp->minor, 1);
+ }
+ up(&raw3270_sem);
+ return 0;
+}
+
+void raw3270_unregister_notifier(void (*notifier)(int, int))
+{
+ struct raw3270_notifier *np;
+
+ down(&raw3270_sem);
+ /* Find the entry with the matching callback, unlink and free it.
+  * Breaking right after list_del avoids iterating a freed node. */
+ list_for_each_entry(np, &raw3270_notifier, list)
+ if (np->notifier == notifier) {
+ list_del(&np->list);
+ kfree(np);
+ break;
+ }
+ up(&raw3270_sem);
+}
+
+/*
+ * Set 3270 device online.
+ */
+static int
+raw3270_set_online (struct ccw_device *cdev)
+{
+ struct raw3270 *rp;
+ struct raw3270_notifier *np;
+ int rc;
+
+ rp = raw3270_create_device(cdev);
+ if (IS_ERR(rp))
+ return PTR_ERR(rp);
+ /* Bug fix: on any failure below the freshly created device
+  * structure was leaked; tear it down again on the error path. */
+ rc = raw3270_reset_device(rp);
+ if (rc)
+ goto failure;
+ rc = raw3270_size_device(rp);
+ if (rc)
+ goto failure;
+ rc = raw3270_reset_device(rp);
+ if (rc)
+ goto failure;
+ raw3270_create_attributes(rp);
+ set_bit(RAW3270_FLAGS_READY, &rp->flags);
+ /* Tell all registered listeners about the new device. */
+ down(&raw3270_sem);
+ list_for_each_entry(np, &raw3270_notifier, list)
+ np->notifier(rp->minor, 1);
+ up(&raw3270_sem);
+ return 0;
+
+failure:
+ raw3270_delete_device(rp);
+ return rc;
+}
+
+/*
+ * Remove 3270 device structure.
+ */
+static void
+raw3270_remove (struct ccw_device *cdev)
+{
+ unsigned long flags;
+ struct raw3270 *rp;
+ struct raw3270_view *v;
+ struct raw3270_notifier *np;
+
+ rp = cdev->dev.driver_data;
+ /* Clear READY first so no new views can be added or activated. */
+ clear_bit(RAW3270_FLAGS_READY, &rp->flags);
+
+ sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
+
+ /* Deactivate current view and remove all views. */
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ if (rp->view) {
+ rp->view->fn->deactivate(rp->view);
+ rp->view = 0;
+ }
+ while (!list_empty(&rp->view_list)) {
+ v = list_entry(rp->view_list.next, struct raw3270_view, list);
+ if (v->fn->release)
+ v->fn->release(v);
+ /* raw3270_del_view sleeps (wait_event), so the device lock
+  * must be dropped around the call and re-taken afterwards. */
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ raw3270_del_view(v);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ /* Tell listeners the device is gone. */
+ down(&raw3270_sem);
+ list_for_each_entry(np, &raw3270_notifier, list)
+ np->notifier(rp->minor, 0);
+ up(&raw3270_sem);
+
+ /* Reset 3270 device. */
+ raw3270_reset_device(rp);
+ /* And finally remove it. */
+ raw3270_delete_device(rp);
+}
+
+/*
+ * Set 3270 device offline.
+ */
+static int
+raw3270_set_offline (struct ccw_device *cdev)
+{
+ struct raw3270 *rp = cdev->dev.driver_data;
+
+ /* The console device can never be taken offline. */
+ if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
+ raw3270_remove(cdev);
+ return 0;
+ }
+ return -EBUSY;
+}
+
+/* CCW device types handled by this driver: 3270-family display
+ * stations and the 3174 control unit. */
+static struct ccw_device_id raw3270_id[] = {
+ { CCW_DEVICE(0x3270, 0) },
+ { CCW_DEVICE(0x3271, 0) },
+ { CCW_DEVICE(0x3272, 0) },
+ { CCW_DEVICE(0x3273, 0) },
+ { CCW_DEVICE(0x3274, 0) },
+ { CCW_DEVICE(0x3275, 0) },
+ { CCW_DEVICE(0x3276, 0) },
+ { CCW_DEVICE(0x3277, 0) },
+ { CCW_DEVICE(0x3278, 0) },
+ { CCW_DEVICE(0x3279, 0) },
+ { CCW_DEVICE(0x3174, 0) },
+ { /* end of list */ },
+};
+
+static struct ccw_driver raw3270_ccw_driver = {
+ .name = "3270",
+ .owner = THIS_MODULE,
+ .ids = raw3270_id,
+ .probe = &raw3270_probe,
+ .remove = &raw3270_remove,
+ .set_online = &raw3270_set_online,
+ .set_offline = &raw3270_set_offline,
+};
+
+static int
+raw3270_init(void)
+{
+ struct raw3270 *rp;
+ int rc;
+
+ /* Guard against double registration (e.g. console setup ran first).
+  * NOTE(review): the flag check/set is not serialized - presumably
+  * init paths cannot race here; confirm. */
+ if (raw3270_registered)
+ return 0;
+ raw3270_registered = 1;
+ rc = ccw_driver_register(&raw3270_ccw_driver);
+ if (rc == 0) {
+ /* Create attributes for early (= console) device. */
+ down(&raw3270_sem);
+ list_for_each_entry(rp, &raw3270_devices, list) {
+ get_device(&rp->cdev->dev);
+ raw3270_create_attributes(rp);
+ }
+ up(&raw3270_sem);
+ }
+ return rc;
+}
+
+static void
+raw3270_exit(void)
+{
+ /* Module unload: driver removal triggers raw3270_remove per device. */
+ ccw_driver_unregister(&raw3270_ccw_driver);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(raw3270_init);
+module_exit(raw3270_exit);
+
+EXPORT_SYMBOL(raw3270_request_alloc);
+EXPORT_SYMBOL(raw3270_request_free);
+EXPORT_SYMBOL(raw3270_request_reset);
+EXPORT_SYMBOL(raw3270_request_set_cmd);
+EXPORT_SYMBOL(raw3270_request_add_data);
+EXPORT_SYMBOL(raw3270_request_set_data);
+EXPORT_SYMBOL(raw3270_request_set_idal);
+EXPORT_SYMBOL(raw3270_buffer_address);
+EXPORT_SYMBOL(raw3270_add_view);
+EXPORT_SYMBOL(raw3270_del_view);
+EXPORT_SYMBOL(raw3270_find_view);
+EXPORT_SYMBOL(raw3270_activate_view);
+EXPORT_SYMBOL(raw3270_deactivate_view);
+EXPORT_SYMBOL(raw3270_start);
+EXPORT_SYMBOL(raw3270_start_irq);
+EXPORT_SYMBOL(raw3270_register_notifier);
+EXPORT_SYMBOL(raw3270_unregister_notifier);
+EXPORT_SYMBOL(raw3270_wait_queue);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
new file mode 100644
index 000000000000..ed5d4eb9f623
--- /dev/null
+++ b/drivers/s390/char/raw3270.h
@@ -0,0 +1,274 @@
+/*
+ * drivers/s390/char/raw3270.h
+ * IBM/3270 Driver
+ *
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ */
+
+#include <asm/idals.h>
+#include <asm/ioctl.h>
+
+/* ioctls for fullscreen 3270 */
+#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */
+#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */
+#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */
+#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. */
+#define TUBSETMOD _IO('3',12) /* FIXME: what does it do ?*/
+#define TUBGETMOD _IO('3',13) /* FIXME: what does it do ?*/
+
+/* Local Channel Commands */
+#define TC_WRITE 0x01 /* Write */
+#define TC_EWRITE 0x05 /* Erase write */
+#define TC_READMOD 0x06 /* Read modified */
+#define TC_EWRITEA 0x0d /* Erase write alternate */
+#define TC_WRITESF 0x11 /* Write structured field */
+
+/* Buffer Control Orders */
+#define TO_SF 0x1d /* Start field */
+#define TO_SBA 0x11 /* Set buffer address */
+#define TO_IC 0x13 /* Insert cursor */
+#define TO_PT 0x05 /* Program tab */
+#define TO_RA 0x3c /* Repeat to address */
+#define TO_SFE 0x29 /* Start field extended */
+#define TO_EUA 0x12 /* Erase unprotected to address */
+#define TO_MF 0x2c /* Modify field */
+#define TO_SA 0x28 /* Set attribute */
+
+/* Field Attribute Bytes */
+#define TF_INPUT 0x40 /* Visible input */
+#define TF_INPUTN 0x4c /* Invisible input */
+#define TF_INMDT 0xc1 /* Visible, Set-MDT */
+#define TF_LOG 0x60
+
+/* Character Attribute Bytes */
+#define TAT_RESET 0x00
+#define TAT_FIELD 0xc0
+#define TAT_EXTHI 0x41
+#define TAT_COLOR 0x42
+#define TAT_CHARS 0x43
+#define TAT_TRANS 0x46
+
+/* Extended-Highlighting Bytes */
+#define TAX_RESET 0x00
+#define TAX_BLINK 0xf1
+#define TAX_REVER 0xf2
+#define TAX_UNDER 0xf4
+
+/* Reset value */
+#define TAR_RESET 0x00
+
+/* Color values */
+#define TAC_RESET 0x00
+#define TAC_BLUE 0xf1
+#define TAC_RED 0xf2
+#define TAC_PINK 0xf3
+#define TAC_GREEN 0xf4
+#define TAC_TURQ 0xf5
+#define TAC_YELLOW 0xf6
+#define TAC_WHITE 0xf7
+#define TAC_DEFAULT 0x00
+
+/* Write Control Characters */
+#define TW_NONE 0x40 /* No particular action */
+#define TW_KR 0xc2 /* Keyboard restore */
+#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
+
+#define RAW3270_MAXDEVS 256
+
+/* For TUBGETMOD and TUBSETMOD. Should include. */
+struct raw3270_iocb {
+ short model;
+ short line_cnt;
+ short col_cnt;
+ short pf_cnt;
+ short re_cnt;
+ short map;
+};
+
+struct raw3270;
+struct raw3270_view;
+
+/* 3270 CCW request */
+struct raw3270_request {
+ struct list_head list; /* list head for request queueing. */
+ struct raw3270_view *view; /* view of this request */
+ struct ccw1 ccw; /* single ccw. */
+ void *buffer; /* output buffer. */
+ size_t size; /* size of output buffer. */
+ int rescnt; /* residual count from devstat. */
+ int rc; /* return code for this request. */
+
+ /* Callback for delivering final status. */
+ void (*callback)(struct raw3270_request *, void *);
+ void *callback_data;
+};
+
+struct raw3270_request *raw3270_request_alloc(size_t size);
+struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
+void raw3270_request_free(struct raw3270_request *);
+void raw3270_request_reset(struct raw3270_request *);
+void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
+int raw3270_request_add_data(struct raw3270_request *, void *, size_t);
+void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
+void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
+
+/* A request is final once it is no longer linked on any request queue. */
+static inline int
+raw3270_request_final(struct raw3270_request *rq)
+{
+ return list_empty(&rq->list);
+}
+
+void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
+
+/* Return value of *intv (see raw3270_fn below) can be one of the following: */
+#define RAW3270_IO_DONE 0 /* request finished */
+#define RAW3270_IO_BUSY 1 /* request still active */
+#define RAW3270_IO_RETRY 2 /* retry current request */
+#define RAW3270_IO_STOP 3 /* kill current request */
+
+/*
+ * Functions of a 3270 view.
+ */
+struct raw3270_fn {
+ int (*activate)(struct raw3270_view *);
+ void (*deactivate)(struct raw3270_view *);
+ int (*intv)(struct raw3270_view *,
+ struct raw3270_request *, struct irb *);
+ void (*release)(struct raw3270_view *);
+ void (*free)(struct raw3270_view *);
+};
+
+/*
+ * View structure chaining. The raw3270_view structure is meant to
+ * be embedded at the start of the real view data structure, e.g.:
+ * struct example {
+ * struct raw3270_view view;
+ * ...
+ * };
+ */
+struct raw3270_view {
+ struct list_head list;
+ spinlock_t lock;
+ atomic_t ref_count;
+ struct raw3270 *dev;
+ struct raw3270_fn *fn;
+ unsigned int model;
+ unsigned int rows, cols; /* # of rows & colums of the view */
+ unsigned char *ascebc; /* ascii -> ebcdic table */
+};
+
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_activate_view(struct raw3270_view *);
+void raw3270_del_view(struct raw3270_view *);
+void raw3270_deactivate_view(struct raw3270_view *);
+struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
+int raw3270_start(struct raw3270_view *, struct raw3270_request *);
+int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
+
+/* Reference count inliner for view structures. */
+static inline void
+raw3270_get_view(struct raw3270_view *view)
+{
+ /* Take an additional reference on the view. */
+ atomic_inc(&view->ref_count);
+}
+
+extern wait_queue_head_t raw3270_wait_queue;
+
+static inline void
+raw3270_put_view(struct raw3270_view *view)
+{
+ /* Drop a reference; the final put wakes waiters (raw3270_del_view
+  * blocks until the count reaches zero). */
+ if (atomic_dec_return(&view->ref_count) == 0)
+ wake_up(&raw3270_wait_queue);
+}
+
+struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
+void raw3270_wait_cons_dev(struct raw3270 *);
+
+/* Notifier for device addition/removal */
+int raw3270_register_notifier(void (*notifier)(int, int));
+void raw3270_unregister_notifier(void (*notifier)(int, int));
+
+/*
+ * Little memory allocator for string objects.
+ */
+struct string
+{
+ struct list_head list;
+ struct list_head update;
+ unsigned long size;
+ unsigned long len;
+ char string[0];
+} __attribute__ ((aligned(8)));
+
+/* Allocate a string object of "len" bytes from the free list, first-fit.
+ * Returns NULL if no free chunk is large enough. */
+static inline struct string *
+alloc_string(struct list_head *free_list, unsigned long len)
+{
+ struct string *cs, *tmp;
+ unsigned long size;
+
+ /* Round the requested length up to a multiple of 8. */
+ size = (len + 7L) & -8L;
+ list_for_each_entry(cs, free_list, list) {
+ if (cs->size < size)
+ continue;
+ if (cs->size > size + sizeof(struct string)) {
+ /* Chunk is big enough to split: carve the new
+  * object from the end of the free chunk. */
+ char *endaddr = (char *) (cs + 1) + cs->size;
+ tmp = (struct string *) (endaddr - size) - 1;
+ tmp->size = size;
+ cs->size -= size + sizeof(struct string);
+ cs = tmp;
+ } else
+ /* Use the whole chunk as-is. */
+ list_del(&cs->list);
+ cs->len = len;
+ INIT_LIST_HEAD(&cs->list);
+ INIT_LIST_HEAD(&cs->update);
+ return cs;
+ }
+ /* Idiom fix: return NULL (pointer), not integer 0. */
+ return NULL;
+}
+
+/* Return a string object to the free list, coalescing with adjacent free
+ * neighbours. Returns the size of the (possibly merged) free chunk. */
+static inline unsigned long
+free_string(struct list_head *free_list, struct string *cs)
+{
+ struct string *tmp;
+ struct list_head *p, *left;
+
+ /* Find out the left neighbour in free memory list (list is kept
+  * sorted by address). */
+ left = free_list;
+ list_for_each(p, free_list) {
+ if (list_entry(p, struct string, list) > cs)
+ break;
+ left = p;
+ }
+ /* Try to merge with right neighbour = next element from left. */
+ if (left->next != free_list) {
+ tmp = list_entry(left->next, struct string, list);
+ if ((char *) (cs + 1) + cs->size == (char *) tmp) {
+ list_del(&tmp->list);
+ cs->size += tmp->size + sizeof(struct string);
+ }
+ }
+ /* Try to merge with left neighbour. */
+ if (left != free_list) {
+ tmp = list_entry(left, struct string, list);
+ if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
+ /* Absorbed into the left chunk; nothing to insert. */
+ tmp->size += cs->size + sizeof(struct string);
+ return tmp->size;
+ }
+ }
+ /* No left merge: insert cs between left and its successor. */
+ __list_add(&cs->list, left, left->next);
+ return cs->size;
+}
+
+/* Seed the free list with a raw memory chunk of "size" bytes. */
+static inline void
+add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
+{
+ /* Treat the chunk as one big free string object and let
+  * free_string insert (and possibly coalesce) it. */
+ struct string *hdr = (struct string *) mem;
+
+ hdr->size = size - sizeof(struct string);
+ free_string(free_list, hdr);
+}
+
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
new file mode 100644
index 000000000000..ceb0e474fde4
--- /dev/null
+++ b/drivers/s390/char/sclp.c
@@ -0,0 +1,915 @@
+/*
+ * drivers/s390/char/sclp.c
+ * core function to access sclp interface
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/reboot.h>
+#include <linux/jiffies.h>
+#include <asm/types.h>
+#include <asm/s390_ext.h>
+
+#include "sclp.h"
+
+#define SCLP_HEADER "sclp: "
+
+/* Structure for register_early_external_interrupt. */
+static ext_int_info_t ext_int_info_hwc;
+
+/* Lock to protect internal data consistency. */
+static DEFINE_SPINLOCK(sclp_lock);
+
+/* Mask of events that we can receive from the sclp interface. */
+static sccb_mask_t sclp_receive_mask;
+
+/* Mask of events that we can send to the sclp interface. */
+static sccb_mask_t sclp_send_mask;
+
+/* List of registered event listeners and senders. */
+static struct list_head sclp_reg_list;
+
+/* List of queued requests. */
+static struct list_head sclp_req_queue;
+
+/* Data for read and and init requests. */
+static struct sclp_req sclp_read_req;
+static struct sclp_req sclp_init_req;
+static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+/* Timer for request retries. */
+static struct timer_list sclp_request_timer;
+
+/* Internal state: is the driver initialized? */
+static volatile enum sclp_init_state_t {
+ sclp_init_state_uninitialized,
+ sclp_init_state_initializing,
+ sclp_init_state_initialized
+} sclp_init_state = sclp_init_state_uninitialized;
+
+/* Internal state: is a request active at the sclp? */
+static volatile enum sclp_running_state_t {
+ sclp_running_state_idle,
+ sclp_running_state_running
+} sclp_running_state = sclp_running_state_idle;
+
+/* Internal state: is a read request pending? */
+static volatile enum sclp_reading_state_t {
+ sclp_reading_state_idle,
+ sclp_reading_state_reading
+} sclp_reading_state = sclp_reading_state_idle;
+
+/* Internal state: is the driver currently serving requests? */
+static volatile enum sclp_activation_state_t {
+ sclp_activation_state_active,
+ sclp_activation_state_deactivating,
+ sclp_activation_state_inactive,
+ sclp_activation_state_activating
+} sclp_activation_state = sclp_activation_state_active;
+
+/* Internal state: is an init mask request pending? */
+static volatile enum sclp_mask_state_t {
+ sclp_mask_state_idle,
+ sclp_mask_state_initializing
+} sclp_mask_state = sclp_mask_state_idle;
+
+/* Maximum retry counts */
+#define SCLP_INIT_RETRY 3
+#define SCLP_MASK_RETRY 3
+#define SCLP_REQUEST_RETRY 3
+
+/* Timeout intervals in seconds.*/
+#define SCLP_BUSY_INTERVAL 2
+#define SCLP_RETRY_INTERVAL 5
+
+static void sclp_process_queue(void);
+static int sclp_init_mask(int calculate);
+static int sclp_init(void);
+
+/* Perform service call. Return 0 on success, non-zero otherwise. */
+/* Perform service call. Return 0 on success, non-zero otherwise. */
+static int
+service_call(sclp_cmdw_t command, void *sccb)
+{
+ int cc;
+
+ /* Issue the SERVC instruction and extract the condition code
+  * from the PSW (ipm/srl). */
+ __asm__ __volatile__(
+ " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
+ " ipm %0\n"
+ " srl %0,28"
+ : "=&d" (cc)
+ : "d" (command), "a" (__pa(sccb))
+ : "cc", "memory" );
+ /* cc 3: not operational; cc 2: SCLP busy; cc 0/1: accepted. */
+ if (cc == 3)
+ return -EIO;
+ if (cc == 2)
+ return -EBUSY;
+ return 0;
+}
+
+/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+ * force restart of running request. */
+static void
+sclp_request_timeout(unsigned long data)
+{
+ unsigned long flags;
+
+ if (data) {
+ /* Force-reset the running state so the queue can restart
+  * the (apparently stalled) request. */
+ spin_lock_irqsave(&sclp_lock, flags);
+ sclp_running_state = sclp_running_state_idle;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ }
+ sclp_process_queue();
+}
+
+/* Set up request retry timer. Called while sclp_lock is locked. */
+/* Set up request retry timer. Called while sclp_lock is locked. */
+static inline void
+__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
+ unsigned long data)
+{
+ /* Cancel any pending instance before re-arming with new params. */
+ del_timer(&sclp_request_timer);
+ sclp_request_timer.function = function;
+ sclp_request_timer.data = data;
+ sclp_request_timer.expires = jiffies + time;
+ add_timer(&sclp_request_timer);
+}
+
+/* Try to start a request. Return zero if the request was successfully
+ * started or if it will be started at a later time. Return non-zero otherwise.
+ * Called while sclp_lock is locked. */
+static int
+__sclp_start_request(struct sclp_req *req)
+{
+ int rc;
+
+ /* Only one request may be active at the sclp at any time. */
+ if (sclp_running_state != sclp_running_state_idle)
+ return 0;
+ del_timer(&sclp_request_timer);
+ /* Give up after SCLP_REQUEST_RETRY start attempts. */
+ if (req->start_count <= SCLP_REQUEST_RETRY) {
+ rc = service_call(req->command, req->sccb);
+ req->start_count++;
+ } else
+ rc = -EIO;
+ if (rc == 0) {
+ /* Successfully started request; arm a watchdog that will
+  * force a restart if no interrupt arrives in time. */
+ req->status = SCLP_REQ_RUNNING;
+ sclp_running_state = sclp_running_state_running;
+ __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
+ sclp_request_timeout, 1);
+ return 0;
+ } else if (rc == -EBUSY) {
+ /* Try again later */
+ __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+ sclp_request_timeout, 0);
+ return 0;
+ }
+ /* Request failed */
+ req->status = SCLP_REQ_FAILED;
+ return rc;
+}
+
+/* Try to start queued requests. */
+/* Try to start queued requests. */
+static void
+sclp_process_queue(void)
+{
+ struct sclp_req *req;
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ if (sclp_running_state != sclp_running_state_idle) {
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return;
+ }
+ del_timer(&sclp_request_timer);
+ /* Start head-of-queue requests until one sticks (rc == 0 also
+  * covers the deferred-retry case). */
+ while (!list_empty(&sclp_req_queue)) {
+ req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+ rc = __sclp_start_request(req);
+ if (rc == 0)
+ break;
+ /* Request failed. */
+ list_del(&req->list);
+ if (req->callback) {
+ /* Drop the lock around the callback - it may sleep
+  * or re-enter sclp code. */
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ req->callback(req, req->callback_data);
+ spin_lock_irqsave(&sclp_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Queue a new request. Return zero on success, non-zero otherwise. */
+/* Queue a new request. Return zero on success, non-zero otherwise. */
+int
+sclp_add_request(struct sclp_req *req)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ /* Refuse requests until the driver is initialized and active;
+  * the init request itself is exempt. */
+ if ((sclp_init_state != sclp_init_state_initialized ||
+ sclp_activation_state != sclp_activation_state_active) &&
+ req != &sclp_init_req) {
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return -EIO;
+ }
+ req->status = SCLP_REQ_QUEUED;
+ req->start_count = 0;
+ list_add_tail(&req->list, &sclp_req_queue);
+ rc = 0;
+ /* Start if request is first in list */
+ if (req->list.prev == &sclp_req_queue) {
+ rc = __sclp_start_request(req);
+ if (rc)
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return rc;
+}
+
+EXPORT_SYMBOL(sclp_add_request);
+
+/* Dispatch events found in request buffer to registered listeners. Return 0
+ * if all events were dispatched, non-zero otherwise. */
+static int
+sclp_dispatch_evbufs(struct sccb_header *sccb)
+{
+ unsigned long flags;
+ struct evbuf_header *evbuf;
+ struct list_head *l;
+ struct sclp_register *reg;
+ int offset;
+ int rc;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ rc = 0;
+ /* Walk all event buffers packed into the sccb.
+  * NOTE(review): a corrupt evbuf with length 0 would loop forever
+  * here - presumably the hardware guarantees non-zero lengths. */
+ for (offset = sizeof(struct sccb_header); offset < sccb->length;
+ offset += evbuf->length) {
+ /* Search for event handler */
+ evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
+ reg = NULL;
+ list_for_each(l, &sclp_reg_list) {
+ reg = list_entry(l, struct sclp_register, list);
+ if (reg->receive_mask & (1 << (32 - evbuf->type)))
+ break;
+ else
+ reg = NULL;
+ }
+ if (reg && reg->receiver_fn) {
+ /* Drop the lock while calling into the listener. */
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ reg->receiver_fn(evbuf);
+ spin_lock_irqsave(&sclp_lock, flags);
+ } else if (reg == NULL)
+ /* No listener for this event type. */
+ rc = -ENOSYS;
+ }
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return rc;
+}
+
+/* Read event data request callback. */
+/* Read event data request callback. */
+static void
+sclp_read_cb(struct sclp_req *req, void *data)
+{
+ unsigned long flags;
+ struct sccb_header *sccb;
+
+ sccb = (struct sccb_header *) req->sccb;
+ /* 0x20 / 0x220 are the successful-read response codes. */
+ if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
+ sccb->response_code == 0x220))
+ sclp_dispatch_evbufs(sccb);
+ spin_lock_irqsave(&sclp_lock, flags);
+ sclp_reading_state = sclp_reading_state_idle;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Prepare read event data request. Called while sclp_lock is locked. */
+/* Prepare read event data request. Called while sclp_lock is locked. */
+static inline void
+__sclp_make_read_req(void)
+{
+ struct sccb_header *sccb;
+
+ sccb = (struct sccb_header *) sclp_read_sccb;
+ clear_page(sccb);
+ memset(&sclp_read_req, 0, sizeof(struct sclp_req));
+ sclp_read_req.command = SCLP_CMDW_READDATA;
+ sclp_read_req.status = SCLP_REQ_QUEUED;
+ sclp_read_req.start_count = 0;
+ sclp_read_req.callback = sclp_read_cb;
+ sclp_read_req.sccb = sccb;
+ sccb->length = PAGE_SIZE;
+ sccb->function_code = 0;
+ /* NOTE(review): 0x80 presumably requests variable-length response
+  * handling - confirm against the SCLP interface documentation. */
+ sccb->control_mask[2] = 0x80;
+}
+
+/* Search request list for request with matching sccb. Return request if found,
+ * NULL otherwise. Called while sclp_lock is locked. */
+/* Search request list for request with matching sccb. Return request if
+ * found, NULL otherwise. Called while sclp_lock is locked. */
+static inline struct sclp_req *
+__sclp_find_req(u32 sccb)
+{
+ struct sclp_req *req;
+
+ list_for_each_entry(req, &sclp_req_queue, list)
+ if (sccb == (u32) (addr_t) req->sccb)
+ return req;
+ return NULL;
+}
+
+/* Handler for external interruption. Perform request post-processing.
+ * Prepare read event data request if necessary. Start processing of next
+ * request on queue. */
+static void
+sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
+{
+ struct sclp_req *req;
+ u32 finished_sccb;
+ u32 evbuf_pending;
+
+ spin_lock(&sclp_lock);
+ /* ext_params carries the finished sccb address (high bits) and
+  * the pending-event flags (low two bits). */
+ finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
+ evbuf_pending = S390_lowcore.ext_params & 0x3;
+ if (finished_sccb) {
+ req = __sclp_find_req(finished_sccb);
+ if (req) {
+ /* Request post-processing */
+ list_del(&req->list);
+ req->status = SCLP_REQ_DONE;
+ if (req->callback) {
+ /* Drop the lock around the callback. */
+ spin_unlock(&sclp_lock);
+ req->callback(req, req->callback_data);
+ spin_lock(&sclp_lock);
+ }
+ }
+ sclp_running_state = sclp_running_state_idle;
+ }
+ /* Queue a read request if events are pending and we may read. */
+ if (evbuf_pending && sclp_receive_mask != 0 &&
+ sclp_reading_state == sclp_reading_state_idle &&
+ sclp_activation_state == sclp_activation_state_active ) {
+ sclp_reading_state = sclp_reading_state_reading;
+ __sclp_make_read_req();
+ /* Add request to head of queue */
+ list_add(&sclp_read_req.list, &sclp_req_queue);
+ }
+ spin_unlock(&sclp_lock);
+ sclp_process_queue();
+}
+
+/* Return current Time-Of-Day clock. */
+/* Return current Time-Of-Day clock via the STCK instruction. */
+static inline u64
+sclp_get_clock(void)
+{
+ u64 result;
+
+ asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
+ return result;
+}
+
+/* Convert an interval in jiffies to TOD (Time-Of-Day clock) ticks. */
+static inline u64
+sclp_tod_from_jiffies(unsigned long interval)
+{
+ /* Parameter renamed from "jiffies" to avoid shadowing the global
+  * jiffies counter used elsewhere in this file. */
+ return (u64) (interval / HZ) << 32;
+}
+
+/* Wait until a currently running request finished. Note: while this function
+ * is running, no timers are served on the calling CPU. */
+void
+sclp_sync_wait(void)
+{
+ unsigned long psw_mask;
+ unsigned long cr0, cr0_sync;
+ u64 timeout;
+
+ /* We'll be disabling timer interrupts, so we need a custom timeout
+  * mechanism */
+ timeout = 0;
+ if (timer_pending(&sclp_request_timer)) {
+ /* Get timeout TOD value */
+ timeout = sclp_get_clock() +
+ sclp_tod_from_jiffies(sclp_request_timer.expires -
+ jiffies);
+ }
+ /* Prevent bottom half from executing once we force interrupts open */
+ local_bh_disable();
+ /* Enable service-signal interruption, disable timer interrupts */
+ __ctl_store(cr0, 0, 0);
+ cr0_sync = cr0;
+ /* NOTE(review): bit masks presumably select the service-signal
+  * subclass and clear the clock comparator/CPU timer subclasses -
+  * confirm against the control register 0 layout. */
+ cr0_sync |= 0x00000200;
+ cr0_sync &= 0xFFFFF3AC;
+ __ctl_load(cr0_sync, 0, 0);
+ /* Open I/O/external interrupts while saving the old PSW mask. */
+ asm volatile ("STOSM 0(%1),0x01"
+ : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
+ /* Loop until driver state indicates finished request */
+ while (sclp_running_state != sclp_running_state_idle) {
+ /* Check for expired request timer */
+ if (timer_pending(&sclp_request_timer) &&
+ sclp_get_clock() > timeout &&
+ del_timer(&sclp_request_timer))
+ sclp_request_timer.function(sclp_request_timer.data);
+ barrier();
+ cpu_relax();
+ }
+ /* Restore interrupt settings */
+ asm volatile ("SSM 0(%0)"
+ : : "a" (&psw_mask) : "memory");
+ __ctl_load(cr0, 0, 0);
+ __local_bh_enable();
+}
+
+EXPORT_SYMBOL(sclp_sync_wait);
+
+/* Dispatch changes in send and receive mask to registered listeners. */
+/* Dispatch changes in send and receive mask to registered listeners. */
+static inline void
+sclp_dispatch_state_change(void)
+{
+ struct list_head *l;
+ struct sclp_register *reg;
+ unsigned long flags;
+ sccb_mask_t receive_mask;
+ sccb_mask_t send_mask;
+
+ /* Repeat until no listener's effective masks changed; each pass
+  * finds at most one changed listener and notifies it with the
+  * lock dropped (the callback may sleep or re-register). */
+ do {
+ spin_lock_irqsave(&sclp_lock, flags);
+ reg = NULL;
+ list_for_each(l, &sclp_reg_list) {
+ reg = list_entry(l, struct sclp_register, list);
+ receive_mask = reg->receive_mask & sclp_receive_mask;
+ send_mask = reg->send_mask & sclp_send_mask;
+ if (reg->sclp_receive_mask != receive_mask ||
+ reg->sclp_send_mask != send_mask) {
+ reg->sclp_receive_mask = receive_mask;
+ reg->sclp_send_mask = send_mask;
+ break;
+ } else
+ reg = NULL;
+ }
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ if (reg && reg->state_change_fn)
+ reg->state_change_fn(reg);
+ } while (reg);
+}
+
+struct sclp_statechangebuf {
+ struct evbuf_header header;
+ u8 validity_sclp_active_facility_mask : 1;
+ u8 validity_sclp_receive_mask : 1;
+ u8 validity_sclp_send_mask : 1;
+ u8 validity_read_data_function_mask : 1;
+ u16 _zeros : 12;
+ u16 mask_length;
+ u64 sclp_active_facility_mask;
+ sccb_mask_t sclp_receive_mask;
+ sccb_mask_t sclp_send_mask;
+ u32 read_data_function_mask;
+} __attribute__((packed));
+
+
+/* State change event callback. Inform listeners of changes. */
+/* State change event callback. Inform listeners of changes. */
+static void
+sclp_state_change_cb(struct evbuf_header *evbuf)
+{
+ unsigned long flags;
+ struct sclp_statechangebuf *scbuf;
+
+ scbuf = (struct sclp_statechangebuf *) evbuf;
+ /* Ignore events with an unexpected mask size. */
+ if (scbuf->mask_length != sizeof(sccb_mask_t))
+ return;
+ spin_lock_irqsave(&sclp_lock, flags);
+ /* Only take over masks whose validity bit is set. */
+ if (scbuf->validity_sclp_receive_mask)
+ sclp_receive_mask = scbuf->sclp_receive_mask;
+ if (scbuf->validity_sclp_send_mask)
+ sclp_send_mask = scbuf->sclp_send_mask;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ sclp_dispatch_state_change();
+}
+
+static struct sclp_register sclp_state_change_event = {
+ .receive_mask = EvTyp_StateChange_Mask,
+ .receiver_fn = sclp_state_change_cb
+};
+
+/* Calculate receive and send mask of currently registered listeners.
+ * Called while sclp_lock is locked. */
+/* Calculate receive and send mask of currently registered listeners.
+ * Called while sclp_lock is locked. */
+static inline void
+__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
+{
+ struct sclp_register *reg;
+
+ *receive_mask = 0;
+ *send_mask = 0;
+ /* OR together the masks of every registered listener. */
+ list_for_each_entry(reg, &sclp_reg_list, list) {
+ *receive_mask |= reg->receive_mask;
+ *send_mask |= reg->send_mask;
+ }
+}
+
+/* Register event listener. Return 0 on success, non-zero otherwise. */
+/* Register event listener. Return 0 on success, non-zero otherwise. */
+int
+sclp_register(struct sclp_register *reg)
+{
+ unsigned long flags;
+ sccb_mask_t receive_mask;
+ sccb_mask_t send_mask;
+ int rc;
+
+ /* Lazy driver initialization on first registration. */
+ rc = sclp_init();
+ if (rc)
+ return rc;
+ spin_lock_irqsave(&sclp_lock, flags);
+ /* Check event mask for collisions */
+ __sclp_get_mask(&receive_mask, &send_mask);
+ if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return -EBUSY;
+ }
+ /* Trigger initial state change callback */
+ reg->sclp_receive_mask = 0;
+ reg->sclp_send_mask = 0;
+ list_add(&reg->list, &sclp_reg_list);
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ /* Recompute and program the combined masks at the sclp. */
+ rc = sclp_init_mask(1);
+ if (rc) {
+ /* Roll back the registration on failure. */
+ spin_lock_irqsave(&sclp_lock, flags);
+ list_del(&reg->list);
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ }
+ return rc;
+}
+
+EXPORT_SYMBOL(sclp_register);
+
+/* Unregister event listener. Removes the listener and re-propagates
+ * the remaining listeners' masks to the SCLP. The result of the mask
+ * update is ignored - unregistration always succeeds. */
+void
+sclp_unregister(struct sclp_register *reg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	list_del(&reg->list);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_init_mask(1);
+}
+
+EXPORT_SYMBOL(sclp_unregister);
+
+/* Remove event buffers which are marked processed (flag bit 0x80).
+ * Processed buffers are squeezed out by copying the remainder of the
+ * SCCB over them and shrinking sccb->length. Return the number of
+ * remaining (unprocessed) event buffers. */
+int
+sclp_remove_processed(struct sccb_header *sccb)
+{
+	struct evbuf_header *evbuf;
+	int unprocessed;
+	u16 remaining;
+
+	evbuf = (struct evbuf_header *) (sccb + 1);
+	unprocessed = 0;
+	remaining = sccb->length - sizeof(struct sccb_header);
+	while (remaining > 0) {
+		/* Guard against malformed buffers: a zero or oversized
+		 * event-buffer length would otherwise loop forever or
+		 * wrap the u16 counter and run past the SCCB. */
+		if (evbuf->length == 0 || evbuf->length > remaining)
+			break;
+		remaining -= evbuf->length;
+		if (evbuf->flags & 0x80) {
+			/* Processed: drop it and close the gap. */
+			sccb->length -= evbuf->length;
+			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
+			       remaining);
+		} else {
+			unprocessed++;
+			evbuf = (struct evbuf_header *)
+				((addr_t) evbuf + evbuf->length);
+		}
+	}
+	return unprocessed;
+}
+
+EXPORT_SYMBOL(sclp_remove_processed);
+
+/* SCCB layout of the write-event-mask (init mask) command: the first
+ * mask pair is filled in by us, the sclp_* pair is returned by the
+ * SCLP with the masks it actually grants. */
+struct init_sccb {
+	struct sccb_header header;
+	u16 _reserved;
+	u16 mask_length;	/* size of each mask field in bytes */
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	sccb_mask_t sclp_send_mask;
+	sccb_mask_t sclp_receive_mask;
+} __attribute__((packed));
+
+/* Prepare init mask request. Called while sclp_lock is locked.
+ * Fills sclp_init_sccb/sclp_init_req with a write-event-mask command
+ * carrying the given receive and send masks.
+ * Note: parameters are typed sccb_mask_t (not plain u32) for
+ * consistency with the mask fields they are stored into. */
+static inline void
+__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
+{
+	struct init_sccb *sccb;
+
+	sccb = (struct init_sccb *) sclp_init_sccb;
+	clear_page(sccb);
+	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
+	sclp_init_req.command = SCLP_CMDW_WRITEMASK;
+	sclp_init_req.status = SCLP_REQ_FILLED;
+	sclp_init_req.start_count = 0;
+	sclp_init_req.callback = NULL;
+	sclp_init_req.callback_data = NULL;
+	sclp_init_req.sccb = sccb;
+	sccb->header.length = sizeof(struct init_sccb);
+	sccb->mask_length = sizeof(sccb_mask_t);
+	sccb->receive_mask = receive_mask;
+	sccb->send_mask = send_mask;
+	/* Response fields, filled in by the SCLP. */
+	sccb->sclp_receive_mask = 0;
+	sccb->sclp_send_mask = 0;
+}
+
+/* Start init mask request. If calculate is non-zero, calculate the mask as
+ * requested by registered listeners. Use zero mask otherwise. Return 0 on
+ * success, non-zero otherwise. Retries up to SCLP_MASK_RETRY times and
+ * busy-waits (sclp_sync_wait) for completion. */
+static int
+sclp_init_mask(int calculate)
+{
+	unsigned long flags;
+	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	int retry;
+	int rc;
+	unsigned long wait;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check if interface is in appropriate state */
+	if (sclp_mask_state != sclp_mask_state_idle) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EBUSY;
+	}
+	if (sclp_activation_state == sclp_activation_state_inactive) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_mask_state = sclp_mask_state_initializing;
+	/* Determine mask */
+	if (calculate)
+		__sclp_get_mask(&receive_mask, &send_mask);
+	else {
+		receive_mask = 0;
+		send_mask = 0;
+	}
+	rc = -EIO;
+	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
+		/* Prepare request */
+		__sclp_make_init_req(receive_mask, send_mask);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (sclp_add_request(&sclp_init_req)) {
+			/* Try again later */
+			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
+			while (time_before(jiffies, wait))
+				sclp_sync_wait();
+			spin_lock_irqsave(&sclp_lock, flags);
+			continue;
+		}
+		/* Busy-wait until the request reaches a final state. */
+		while (sclp_init_req.status != SCLP_REQ_DONE &&
+		       sclp_init_req.status != SCLP_REQ_FAILED)
+			sclp_sync_wait();
+		spin_lock_irqsave(&sclp_lock, flags);
+		if (sclp_init_req.status == SCLP_REQ_DONE &&
+		    sccb->header.response_code == 0x20) {
+			/* Successful request (response code 0x20) */
+			if (calculate) {
+				/* Adopt the masks the SCLP granted. */
+				sclp_receive_mask = sccb->sclp_receive_mask;
+				sclp_send_mask = sccb->sclp_send_mask;
+			} else {
+				sclp_receive_mask = 0;
+				sclp_send_mask = 0;
+			}
+			/* Notify listeners without holding sclp_lock. */
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			sclp_dispatch_state_change();
+			spin_lock_irqsave(&sclp_lock, flags);
+			rc = 0;
+			break;
+		}
+	}
+	sclp_mask_state = sclp_mask_state_idle;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Deactivate SCLP interface. On success, new requests will be rejected,
+ * events will no longer be dispatched. Return 0 on success, non-zero
+ * otherwise. */
+int
+sclp_deactivate(void)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Deactivate can only be called when active */
+	if (sclp_activation_state != sclp_activation_state_active) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_activation_state = sclp_activation_state_deactivating;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Clear the event masks with the lock dropped. */
+	rc = sclp_init_mask(0);
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Commit the new state, or fall back to active on failure. */
+	sclp_activation_state = (rc == 0) ?
+		sclp_activation_state_inactive :
+		sclp_activation_state_active;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_deactivate);
+
+/* Reactivate SCLP interface after sclp_deactivate. On success, new
+ * requests will be accepted, events will be dispatched again. Return 0 on
+ * success, non-zero otherwise. */
+int
+sclp_reactivate(void)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Reactivate can only be called when inactive */
+	if (sclp_activation_state != sclp_activation_state_inactive) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_activation_state = sclp_activation_state_activating;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Re-establish the listeners' event masks, lock dropped. */
+	rc = sclp_init_mask(1);
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Commit the new state, or fall back to inactive on failure. */
+	sclp_activation_state = (rc == 0) ?
+		sclp_activation_state_active :
+		sclp_activation_state_inactive;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_reactivate);
+
+/* Handler for external interruption used during initialization. Modify
+ * request state to done. Runs in interrupt context. */
+static void
+sclp_check_handler(struct pt_regs *regs, __u16 code)
+{
+	u32 finished_sccb;
+
+	/* ext_params carries the address of the finished SCCB; the low
+	 * three bits are masked off. */
+	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
+	/* Is this the interrupt we are waiting for? */
+	if (finished_sccb == 0)
+		return;
+	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
+		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
+		       "for buffer at 0x%x\n", finished_sccb);
+		return;
+	}
+	spin_lock(&sclp_lock);
+	/* Only complete the request if it is still marked running
+	 * (i.e. the timeout has not fired first). */
+	if (sclp_running_state == sclp_running_state_running) {
+		sclp_init_req.status = SCLP_REQ_DONE;
+		sclp_running_state = sclp_running_state_idle;
+	}
+	spin_unlock(&sclp_lock);
+}
+
+/* Initial init mask request timed out. Modify request state to failed.
+ * Timer callback - counterpart to sclp_check_handler(); whichever
+ * fires first finalizes the request. */
+static void
+sclp_check_timeout(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (sclp_running_state == sclp_running_state_running) {
+		sclp_init_req.status = SCLP_REQ_FAILED;
+		sclp_running_state = sclp_running_state_idle;
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Perform a check of the SCLP interface. Return zero if the interface is
+ * available and there are no pending requests from a previous instance.
+ * Return non-zero otherwise.
+ * Temporarily installs its own handler for external interrupt 0x2401
+ * and issues a zero-mask write-event-mask command directly via
+ * service_call(), retrying up to SCLP_INIT_RETRY times. */
+static int
+sclp_check_interface(void)
+{
+	struct init_sccb *sccb;
+	unsigned long flags;
+	int retry;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Prepare init mask command */
+	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
+					       &ext_int_info_hwc);
+	if (rc) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return rc;
+	}
+	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
+		__sclp_make_init_req(0, 0);
+		sccb = (struct init_sccb *) sclp_init_req.sccb;
+		rc = service_call(sclp_init_req.command, sccb);
+		/* -EIO: no SCLP present - give up immediately. */
+		if (rc == -EIO)
+			break;
+		sclp_init_req.status = SCLP_REQ_RUNNING;
+		sclp_running_state = sclp_running_state_running;
+		/* Arm timeout; sclp_check_timeout fails the request if
+		 * no completion interrupt arrives in time. */
+		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
+					 sclp_check_timeout, 0);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		/* Enable service-signal interruption - needs to happen
+		 * with IRQs enabled. */
+		ctl_set_bit(0, 9);
+		/* Wait for signal from interrupt or timeout */
+		sclp_sync_wait();
+		/* Disable service-signal interruption - needs to happen
+		 * with IRQs enabled. */
+		ctl_clear_bit(0,9);
+		spin_lock_irqsave(&sclp_lock, flags);
+		del_timer(&sclp_request_timer);
+		if (sclp_init_req.status == SCLP_REQ_DONE &&
+		    sccb->header.response_code == 0x20) {
+			rc = 0;
+			break;
+		} else
+			rc = -EBUSY;
+	}
+	unregister_early_external_interrupt(0x2401, sclp_check_handler,
+					    &ext_int_info_hwc);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
+ * events from interfering with rebooted system. The return value of
+ * sclp_deactivate() is intentionally ignored - this is best effort. */
+static int
+sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	sclp_deactivate();
+	return NOTIFY_DONE;
+}
+
+/* Notifier block hooking sclp_reboot_event into the reboot chain. */
+static struct notifier_block sclp_reboot_notifier = {
+	.notifier_call = sclp_reboot_event
+};
+
+/* Initialize SCLP driver. Return zero if driver is operational, non-zero
+ * otherwise. Idempotent: returns 0 immediately if initialization was
+ * already done (called lazily from sclp_register()). */
+static int
+sclp_init(void)
+{
+	unsigned long flags;
+	int rc;
+
+	if (!MACHINE_HAS_SCLP)
+		return -ENODEV;
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check for previous or running initialization */
+	if (sclp_init_state != sclp_init_state_uninitialized) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return 0;
+	}
+	sclp_init_state = sclp_init_state_initializing;
+	/* Set up variables */
+	INIT_LIST_HEAD(&sclp_req_queue);
+	INIT_LIST_HEAD(&sclp_reg_list);
+	/* The core's own state-change listener is always registered. */
+	list_add(&sclp_state_change_event.list, &sclp_reg_list);
+	init_timer(&sclp_request_timer);
+	/* Check interface */
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_check_interface();
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (rc) {
+		sclp_init_state = sclp_init_state_uninitialized;
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return rc;
+	}
+	/* Register reboot handler */
+	rc = register_reboot_notifier(&sclp_reboot_notifier);
+	if (rc) {
+		sclp_init_state = sclp_init_state_uninitialized;
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return rc;
+	}
+	/* Register interrupt handler */
+	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
+					       &ext_int_info_hwc);
+	if (rc) {
+		/* Undo the reboot notifier registration. */
+		unregister_reboot_notifier(&sclp_reboot_notifier);
+		sclp_init_state = sclp_init_state_uninitialized;
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return rc;
+	}
+	sclp_init_state = sclp_init_state_initialized;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Enable service-signal external interruption - needs to happen with
+	 * IRQs enabled. */
+	ctl_set_bit(0, 9);
+	/* Result intentionally ignored: the driver is operational even
+	 * if the initial mask request fails. */
+	sclp_init_mask(1);
+	return 0;
+}
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
new file mode 100644
index 000000000000..2c71d6ee7b5b
--- /dev/null
+++ b/drivers/s390/char/sclp.h
@@ -0,0 +1,159 @@
+/*
+ * drivers/s390/char/sclp.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_H__
+#define __SCLP_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <asm/ebcdic.h>
+
+/* maximum number of pages concerning our own memory management */
+#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
+#define MAX_CONSOLE_PAGES 4
+
+#define EvTyp_OpCmd 0x01
+#define EvTyp_Msg 0x02
+#define EvTyp_StateChange 0x08
+#define EvTyp_PMsgCmd 0x09
+#define EvTyp_CntlProgOpCmd 0x20
+#define EvTyp_CntlProgIdent 0x0B
+#define EvTyp_SigQuiesce 0x1D
+#define EvTyp_VT220Msg 0x1A
+
+#define EvTyp_OpCmd_Mask 0x80000000
+#define EvTyp_Msg_Mask 0x40000000
+#define EvTyp_StateChange_Mask 0x01000000
+#define EvTyp_PMsgCmd_Mask 0x00800000
+#define EvTyp_CtlProgOpCmd_Mask 0x00000001
+#define EvTyp_CtlProgIdent_Mask 0x00200000
+#define EvTyp_SigQuiesce_Mask 0x00000008
+#define EvTyp_VT220Msg_Mask 0x00000040
+
+#define GnrlMsgFlgs_DOM 0x8000
+#define GnrlMsgFlgs_SndAlrm 0x4000
+#define GnrlMsgFlgs_HoldMsg 0x2000
+
+#define LnTpFlgs_CntlText 0x8000
+#define LnTpFlgs_LabelText 0x4000
+#define LnTpFlgs_DataText 0x2000
+#define LnTpFlgs_EndText 0x1000
+#define LnTpFlgs_PromptText 0x0800
+
+typedef unsigned int sclp_cmdw_t;
+
+#define SCLP_CMDW_READDATA 0x00770005
+#define SCLP_CMDW_WRITEDATA 0x00760005
+#define SCLP_CMDW_WRITEMASK 0x00780005
+
+#define GDS_ID_MDSMU 0x1310
+#define GDS_ID_MDSRouteInfo 0x1311
+#define GDS_ID_AgUnWrkCorr 0x1549
+#define GDS_ID_SNACondReport 0x1532
+#define GDS_ID_CPMSU 0x1212
+#define GDS_ID_RoutTargInstr 0x154D
+#define GDS_ID_OpReq 0x8070
+#define GDS_ID_TextCmd 0x1320
+
+#define GDS_KEY_SelfDefTextMsg 0x31
+
+typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
+
+/* Common header of every SCCB (service-call control block). */
+struct sccb_header {
+	u16	length;		/* total SCCB length in bytes */
+	u8	function_code;
+	u8	control_mask[3];
+	u16	response_code;	/* filled in by the SCLP on completion */
+} __attribute__((packed));
+
+/* Self-describing (length, id/key) headers used inside general data
+ * stream (GDS) event buffers. */
+struct gds_subvector {
+	u8	length;
+	u8	key;		/* e.g. GDS_KEY_SelfDefTextMsg */
+} __attribute__((packed));
+
+struct gds_vector {
+	u16	length;
+	u16	gds_id;		/* one of the GDS_ID_* values */
+} __attribute__((packed));
+
+/* Common header of an event buffer within an SCCB. */
+struct evbuf_header {
+	u16	length;		/* length of the event buffer incl. header */
+	u8	type;		/* EvTyp_* event type */
+	u8	flags;		/* bit 0x80: buffer has been processed */
+	u16	_reserved;
+} __attribute__((packed));
+
+struct sclp_req {
+ struct list_head list; /* list_head for request queueing. */
+ sclp_cmdw_t command; /* sclp command to execute */
+ void *sccb; /* pointer to the sccb to execute */
+ char status; /* status of this request */
+ int start_count; /* number of SVCs done for this req */
+ /* Callback that is called after reaching final status. */
+ void (*callback)(struct sclp_req *, void *data);
+ void *callback_data;
+};
+
+#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */
+#define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */
+#define SCLP_REQ_RUNNING 0x02 /* request is currently running */
+#define SCLP_REQ_DONE 0x03 /* request is completed successfully */
+#define SCLP_REQ_FAILED 0x05 /* request is finally failed */
+
+/* function pointers that a high level driver has to use for registration */
+/* of some routines it wants to be called from the low level driver */
+struct sclp_register {
+ struct list_head list;
+ /* event masks this user is registered for */
+ sccb_mask_t receive_mask;
+ sccb_mask_t send_mask;
+ /* actually present events */
+ sccb_mask_t sclp_receive_mask;
+ sccb_mask_t sclp_send_mask;
+ /* called if event type availability changes */
+ void (*state_change_fn)(struct sclp_register *);
+ /* called for events in cp_receive_mask/sclp_receive_mask */
+ void (*receiver_fn)(struct evbuf_header *);
+};
+
+/* externals from sclp.c */
+int sclp_add_request(struct sclp_req *req);
+void sclp_sync_wait(void);
+int sclp_register(struct sclp_register *reg);
+void sclp_unregister(struct sclp_register *reg);
+int sclp_remove_processed(struct sccb_header *sccb);
+int sclp_deactivate(void);
+int sclp_reactivate(void);
+
+/* useful inlines */
+
+/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
+/* translate single character from ASCII to EBCDIC */
+static inline unsigned char
+sclp_ascebc(unsigned char ch)
+{
+	/* VM uses the 037 table, everything else the 500 table. */
+	if (MACHINE_IS_VM)
+		return _ascebc[ch];
+	return _ascebc_500[ch];
+}
+
+/* translate string from EBCDIC to ASCII, in place.
+ * Rewritten as if/else: the original used a conditional expression
+ * purely for its side effects, which is unidiomatic C. */
+static inline void
+sclp_ebcasc_str(unsigned char *str, int nr)
+{
+	if (MACHINE_IS_VM)
+		EBCASC(str, nr);
+	else
+		EBCASC_500(str, nr);
+}
+
+/* translate string from ASCII to EBCDIC, in place.
+ * Rewritten as if/else: the original used a conditional expression
+ * purely for its side effects, which is unidiomatic C. */
+static inline void
+sclp_ascebc_str(unsigned char *str, int nr)
+{
+	if (MACHINE_IS_VM)
+		ASCEBC(str, nr);
+	else
+		ASCEBC_500(str, nr);
+}
+
+#endif /* __SCLP_H__ */
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
new file mode 100644
index 000000000000..10ef22f13541
--- /dev/null
+++ b/drivers/s390/char/sclp_con.c
@@ -0,0 +1,252 @@
+/*
+ * drivers/s390/char/sclp_con.c
+ * SCLP line mode console driver
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kmod.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_tty.h"
+
+#define SCLP_CON_PRINT_HEADER "sclp console driver: "
+
+#define sclp_console_major 4 /* TTYAUX_MAJOR */
+#define sclp_console_minor 64
+#define sclp_console_name "ttyS"
+
+/* Lock to guard over changes to global variables */
+static spinlock_t sclp_con_lock;
+/* List of free pages that can be used for console output buffering */
+static struct list_head sclp_con_pages;
+/* List of full struct sclp_buffer structures ready for output */
+static struct list_head sclp_con_outqueue;
+/* Counter how many buffers are emitted (max 1) and how many */
+/* are on the output queue. */
+static int sclp_con_buffer_count;
+/* Pointer to current console buffer */
+static struct sclp_buffer *sclp_conbuf;
+/* Timer for delayed output of console messages */
+static struct timer_list sclp_con_timer;
+
+/* Output format for console messages */
+static unsigned short sclp_con_columns;
+static unsigned short sclp_con_width_htab;
+
+/* Completion callback for emitted console buffers. Returns the page of
+ * the finished buffer to the free-page pool and starts emission of the
+ * next queued buffer, if any. Loops so that an immediate failure of
+ * sclp_emit_buffer() for a successor is handled the same way. */
+static void
+sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		page = sclp_unmake_buffer(buffer);
+		spin_lock_irqsave(&sclp_con_lock, flags);
+		/* Remove buffer from outqueue */
+		list_del(&buffer->list);
+		sclp_con_buffer_count--;
+		list_add_tail((struct list_head *) page, &sclp_con_pages);
+		/* Check if there is a pending buffer on the out queue. */
+		buffer = NULL;
+		if (!list_empty(&sclp_con_outqueue))
+			buffer = list_entry(sclp_con_outqueue.next,
+					    struct sclp_buffer, list);
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+	} while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
+}
+
+/* Queue the current console buffer for output. Emission is only
+ * started if no other buffer is in flight (count == 0); otherwise the
+ * completion callback of the in-flight buffer picks it up from the
+ * out queue. */
+static inline void
+sclp_conbuf_emit(void)
+{
+	struct sclp_buffer* buffer;
+	unsigned long flags;
+	int count;
+	int rc;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	/* Detach the current buffer; new output starts a fresh one. */
+	buffer = sclp_conbuf;
+	sclp_conbuf = NULL;
+	if (buffer == NULL) {
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		return;
+	}
+	list_add_tail(&buffer->list, &sclp_con_outqueue);
+	count = sclp_con_buffer_count++;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	if (count)
+		return;
+	rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
+	/* On immediate failure run the callback by hand to recycle
+	 * the buffer and try any queued successors. */
+	if (rc)
+		sclp_conbuf_callback(buffer, rc);
+}
+
+/*
+ * Timer callback: flush the temporary write buffer without further
+ * waiting on a final new line. The timer argument is unused.
+ */
+static void
+sclp_console_timeout(unsigned long data)
+{
+	sclp_conbuf_emit();
+}
+
+/*
+ * Writes the given message to S390 system console.
+ * If no output page is free, busy-waits (sclp_sync_wait) until one is
+ * recycled. A partially filled buffer is flushed by a 1/10 s timer so
+ * short messages are not delayed indefinitely.
+ */
+static void
+sclp_console_write(struct console *console, const char *message,
+		   unsigned int count)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+
+	if (count == 0)
+		return;
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	/*
+	 * process escape characters, write message into buffer,
+	 * send buffer to SCLP
+	 */
+	do {
+		/* make sure we have a console output buffer */
+		if (sclp_conbuf == NULL) {
+			/* Wait for a free page, dropping the lock so the
+			 * completion callback can recycle one. */
+			while (list_empty(&sclp_con_pages)) {
+				spin_unlock_irqrestore(&sclp_con_lock, flags);
+				sclp_sync_wait();
+				spin_lock_irqsave(&sclp_con_lock, flags);
+			}
+			page = sclp_con_pages.next;
+			list_del((struct list_head *) page);
+			sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
+						       sclp_con_width_htab);
+		}
+		/* try to write the string to the current output buffer */
+		written = sclp_write(sclp_conbuf, (const unsigned char *)
+				     message, count);
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_conbuf_emit();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+		message += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after 1/10 second */
+	if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
+	    !timer_pending(&sclp_con_timer)) {
+		init_timer(&sclp_con_timer);
+		sclp_con_timer.function = sclp_console_timeout;
+		sclp_con_timer.data = 0UL;
+		sclp_con_timer.expires = jiffies + HZ/10;
+		add_timer(&sclp_con_timer);
+	}
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/* console->device callback: associate the console with the SCLP tty
+ * driver so /dev/console maps to it. */
+static struct tty_driver *
+sclp_console_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return sclp_tty_driver;
+}
+
+/*
+ * This routine is called from panic when the kernel
+ * is going to give up. We have to make sure that all buffers
+ * will be flushed to the SCLP. Busy-waits until the output queue
+ * has drained completely.
+ */
+static void
+sclp_console_unblank(void)
+{
+	unsigned long flags;
+
+	sclp_conbuf_emit();
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	/* The pending flush timer is pointless now - emit directly. */
+	if (timer_pending(&sclp_con_timer))
+		del_timer(&sclp_con_timer);
+	while (sclp_con_buffer_count > 0) {
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * used to register the SCLP console to the kernel and to
+ * give printk necessary information
+ */
+static struct console sclp_console =
+{
+ .name = sclp_console_name,
+ .write = sclp_console_write,
+ .device = sclp_console_device,
+ .unblank = sclp_console_unblank,
+ .flags = CON_PRINTBUFFER,
+ .index = 0 /* ttyS0 */
+};
+
+/*
+ * called by console_init() in drivers/char/tty_io.c at boot-time.
+ * Allocates the output page pool from bootmem and registers the
+ * console with printk.
+ */
+static int __init
+sclp_console_init(void)
+{
+	void *page;
+	int i;
+	int rc;
+
+	if (!CONSOLE_IS_SCLP)
+		return 0;
+	rc = sclp_rw_init();
+	if (rc)
+		return rc;
+	/* Allocate pages for output buffering */
+	INIT_LIST_HEAD(&sclp_con_pages);
+	for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
+		page = alloc_bootmem_low_pages(PAGE_SIZE);
+		/* NOTE(review): pages allocated so far are not freed on
+		 * this early-boot failure path - verify acceptable. */
+		if (page == NULL)
+			return -ENOMEM;
+		list_add_tail((struct list_head *) page, &sclp_con_pages);
+	}
+	INIT_LIST_HEAD(&sclp_con_outqueue);
+	spin_lock_init(&sclp_con_lock);
+	sclp_con_buffer_count = 0;
+	sclp_conbuf = NULL;
+	init_timer(&sclp_con_timer);
+
+	/* Set output format */
+	if (MACHINE_IS_VM)
+		/*
+		 * save 4 characters for the CPU number
+		 * written at start of each line by VM/CP
+		 */
+		sclp_con_columns = 76;
+	else
+		sclp_con_columns = 80;
+	sclp_con_width_htab = 8;
+
+	/* enable printk-access to this driver */
+	register_console(&sclp_console);
+	return 0;
+}
+
+console_initcall(sclp_console_init);
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
new file mode 100644
index 000000000000..5a6cef2dfa13
--- /dev/null
+++ b/drivers/s390/char/sclp_cpi.c
@@ -0,0 +1,254 @@
+/*
+ * Author: Martin Peschke <mpeschke@de.ibm.com>
+ * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation
+ *
+ * SCLP Control-Program Identification.
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include <asm/semaphore.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+
+#define CPI_LENGTH_SYSTEM_TYPE 8
+#define CPI_LENGTH_SYSTEM_NAME 8
+#define CPI_LENGTH_SYSPLEX_NAME 8
+
+/* Control-Program Identification event buffer: fixed-width,
+ * blank-padded EBCDIC fields identifying this OS instance. */
+struct cpi_evbuf {
+	struct evbuf_header header;
+	u8	id_format;
+	u8	reserved0;
+	u8	system_type[CPI_LENGTH_SYSTEM_TYPE];
+	u64	reserved1;
+	u8	system_name[CPI_LENGTH_SYSTEM_NAME];
+	u64	reserved2;
+	u64	system_level;	/* set to LINUX_VERSION_CODE */
+	u64	reserved3;
+	u8	sysplex_name[CPI_LENGTH_SYSPLEX_NAME];
+	u8	reserved4[16];
+} __attribute__((packed));
+
+/* Complete SCCB carrying a single CPI event buffer. */
+struct cpi_sccb {
+	struct sccb_header header;
+	struct cpi_evbuf cpi_evbuf;
+} __attribute__((packed));
+
+/* Event type structure for write message and write priority message */
+static struct sclp_register sclp_cpi_event =
+{
+ .send_mask = EvTyp_CtlProgIdent_Mask
+};
+
+MODULE_AUTHOR(
+ "Martin Peschke, IBM Deutschland Entwicklung GmbH "
+ "<mpeschke@de.ibm.com>");
+
+MODULE_DESCRIPTION(
+ "identify this operating system instance to the S/390 "
+ "or zSeries hardware");
+
+static char *system_name = NULL;
+module_param(system_name, charp, 0);
+MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
+
+static char *sysplex_name = NULL;
+#ifdef ALLOW_SYSPLEX_NAME
+module_param(sysplex_name, charp, 0);
+MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
+#endif
+
+/* use default value for this field (as well as for system level) */
+static char *system_type = "LINUX";
+
+/* Validate module parameters. Return 0 if all identification strings
+ * are present and fit their fixed-width SCCB fields, -EINVAL otherwise.
+ * Fix: the system_type length was checked against
+ * CPI_LENGTH_SYSTEM_NAME instead of CPI_LENGTH_SYSTEM_TYPE (same value
+ * today, but inconsistent with the message printed below). */
+static int
+cpi_check_parms(void)
+{
+	/* reject if no system type specified */
+	if (!system_type) {
+		printk("cpi: bug: no system type specified\n");
+		return -EINVAL;
+	}
+
+	/* reject if system type larger than 8 characters */
+	if (strlen(system_type) > CPI_LENGTH_SYSTEM_TYPE) {
+		printk("cpi: bug: system type has length of %li characters - "
+		       "only %i characters supported\n",
+		       strlen(system_type), CPI_LENGTH_SYSTEM_TYPE);
+		return -EINVAL;
+	}
+
+	/* reject if no system name specified */
+	if (!system_name) {
+		printk("cpi: no system name specified\n");
+		return -EINVAL;
+	}
+
+	/* reject if system name larger than 8 characters */
+	if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) {
+		printk("cpi: system name has length of %li characters - "
+		       "only %i characters supported\n",
+		       strlen(system_name), CPI_LENGTH_SYSTEM_NAME);
+		return -EINVAL;
+	}
+
+	/* reject if specified sysplex name larger than 8 characters */
+	if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) {
+		printk("cpi: sysplex name has length of %li characters"
+		       " - only %i characters supported\n",
+		       strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Request completion callback: wake up the thread in cpi_module_init()
+ * waiting on the semaphore passed in via callback_data. */
+static void
+cpi_callback(struct sclp_req *req, void *data)
+{
+	struct semaphore *sem;
+
+	sem = (struct semaphore *) data;
+	up(sem);
+}
+
+/* Allocate and populate an SCLP write-data request carrying the
+ * control-program identification event. Returns the request or an
+ * ERR_PTR on allocation failure. Ownership passes to the caller, who
+ * releases it with cpi_free_req(). */
+static struct sclp_req *
+cpi_prepare_req(void)
+{
+	struct sclp_req *req;
+	struct cpi_sccb *sccb;
+	struct cpi_evbuf *evb;
+
+	/* No cast needed on kmalloc; size tied to the variable. */
+	req = kmalloc(sizeof *req, GFP_KERNEL);
+	if (req == NULL)
+		return ERR_PTR(-ENOMEM);
+	/* SCCB needs GFP_DMA memory. */
+	sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (sccb == NULL) {
+		kfree(req);
+		return ERR_PTR(-ENOMEM);
+	}
+	memset(sccb, 0, sizeof(struct cpi_sccb));
+
+	/* setup SCCB for Control-Program Identification */
+	sccb->header.length = sizeof(struct cpi_sccb);
+	sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
+	/* Use the named event-type constant instead of the magic 0x0B. */
+	sccb->cpi_evbuf.header.type = EvTyp_CntlProgIdent;
+	evb = &sccb->cpi_evbuf;
+
+	/* set system type: blank-padded, uppercase EBCDIC */
+	memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE);
+	memcpy(evb->system_type, system_type, strlen(system_type));
+	sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
+	EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
+
+	/* set system name */
+	memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME);
+	memcpy(evb->system_name, system_name, strlen(system_name));
+	sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
+	EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
+
+	/* set system level */
+	evb->system_level = LINUX_VERSION_CODE;
+
+	/* set sysplex name */
+	if (sysplex_name) {
+		memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME);
+		memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name));
+		sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
+		EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
+	}
+
+	/* prepare request data structure presented to SCLP driver */
+	req->command = SCLP_CMDW_WRITEDATA;
+	req->sccb = sccb;
+	req->status = SCLP_REQ_FILLED;
+	req->callback = cpi_callback;
+	return req;
+}
+
+/* Release a request built by cpi_prepare_req(): the SCCB page and the
+ * request structure itself. */
+static void
+cpi_free_req(struct sclp_req *req)
+{
+	free_page((unsigned long) req->sccb);
+	kfree(req);
+}
+
+/* Module init: send the CPI event once, synchronously, then release
+ * all resources. Returns 0 on success, -ECOMM if the SCLP rejected
+ * the event, or a negative error from the setup steps. */
+static int __init
+cpi_module_init(void)
+{
+	struct semaphore sem;
+	struct sclp_req *req;
+	int rc;
+
+	rc = cpi_check_parms();
+	if (rc)
+		return rc;
+
+	rc = sclp_register(&sclp_cpi_event);
+	if (rc) {
+		/* could not register sclp event. Die. */
+		printk(KERN_WARNING "cpi: could not register to hardware "
+		       "console.\n");
+		return -EINVAL;
+	}
+	/* Registration filled in the masks the SCLP actually grants;
+	 * bail out if CPI is not among them. */
+	if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) {
+		printk(KERN_WARNING "cpi: no control program identification "
+		       "support\n");
+		sclp_unregister(&sclp_cpi_event);
+		return -ENOTSUPP;
+	}
+
+	req = cpi_prepare_req();
+	if (IS_ERR(req)) {
+		printk(KERN_WARNING "cpi: couldn't allocate request\n");
+		sclp_unregister(&sclp_cpi_event);
+		return PTR_ERR(req);
+	}
+
+	/* Prepare semaphore */
+	sema_init(&sem, 0);
+	req->callback_data = &sem;
+	/* Add request to sclp queue */
+	rc = sclp_add_request(req);
+	if (rc) {
+		printk(KERN_WARNING "cpi: could not start request\n");
+		cpi_free_req(req);
+		sclp_unregister(&sclp_cpi_event);
+		return rc;
+	}
+	/* make "insmod" sleep until callback arrives */
+	down(&sem);
+
+	/* 0x0020 is the SCLP success response code. */
+	rc = ((struct cpi_sccb *) req->sccb)->header.response_code;
+	if (rc != 0x0020) {
+		printk(KERN_WARNING "cpi: failed with response code 0x%x\n",
+		       rc);
+		rc = -ECOMM;
+	} else
+		rc = 0;
+
+	cpi_free_req(req);
+	sclp_unregister(&sclp_cpi_event);
+
+	return rc;
+}
+
+
+/* Module exit: nothing to undo - all resources are released in
+ * cpi_module_init() before it returns. */
+static void __exit cpi_module_exit(void)
+{
+}
+
+
+/* declare driver module init/cleanup functions */
+module_init(cpi_module_init);
+module_exit(cpi_module_exit);
+
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
new file mode 100644
index 000000000000..83f75774df60
--- /dev/null
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -0,0 +1,99 @@
+/*
+ * drivers/s390/char/sclp_quiesce.c
+ * signal quiesce handler
+ *
+ * (C) Copyright IBM Corp. 1999,2004
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+#include <asm/ptrace.h>
+#include <asm/sigp.h>
+
+#include "sclp.h"
+
+
+#ifdef CONFIG_SMP
+/* Signal completion of shutdown process. All CPUs except the first to enter
+ * this function: go to stopped state. First CPU: wait until all other
+ * CPUs are in stopped or check stop state. Afterwards, load special PSW
+ * to indicate completion. */
+static void
+do_load_quiesce_psw(void * __unused)
+{
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+	psw_t quiesce_psw;
+	int cpu;
+
+	/* Elect exactly one CPU via compare-and-swap on cpuid; all
+	 * losers stop themselves here. */
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+		signal_processor(smp_processor_id(), sigp_stop);
+	/* Wait for all other cpus to enter stopped state */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		while(!smp_cpu_not_running(cpu))
+			cpu_relax();
+	}
+	/* Quiesce the last cpu with the special psw */
+	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
+	quiesce_psw.addr = 0xfff;
+	__load_psw(quiesce_psw);
+}
+
+/* Shutdown handler. Perform shutdown function on all CPUs
+ * (SMP variant: runs do_load_quiesce_psw on every online CPU). */
+static void
+do_machine_quiesce(void)
+{
+	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
+}
+#else
+/* Shutdown handler. Signal completion of shutdown by loading special PSW
+ * (uniprocessor variant - no CPU coordination required). */
+static void
+do_machine_quiesce(void)
+{
+	psw_t quiesce_psw;
+
+	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
+	quiesce_psw.addr = 0xfff;
+	__load_psw(quiesce_psw);
+}
+#endif
+
+extern void ctrl_alt_del(void);
+
+/* Handler for quiesce event. Start shutdown procedure: redirect all
+ * machine shutdown hooks to the quiesce function, then trigger a
+ * shutdown as if ctrl-alt-del had been pressed. */
+static void
+sclp_quiesce_handler(struct evbuf_header *evbuf)
+{
+	_machine_restart = (void *) do_machine_quiesce;
+	_machine_halt = do_machine_quiesce;
+	_machine_power_off = do_machine_quiesce;
+	ctrl_alt_del();
+}
+
+/* Listener registration for the signal-quiesce event type. */
+static struct sclp_register sclp_quiesce_event = {
+	.receive_mask = EvTyp_SigQuiesce_Mask,
+	.receiver_fn = sclp_quiesce_handler
+};
+
+/* Initialize quiesce driver. Registers the quiesce event listener;
+ * returns the registration result (0 on success). */
+static int __init
+sclp_quiesce_init(void)
+{
+	int rc;
+
+	rc = sclp_register(&sclp_quiesce_event);
+	if (rc)
+		printk(KERN_WARNING "sclp: could not register quiesce handler "
+		       "(rc=%d)\n", rc);
+	return rc;
+}
+
+module_init(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
new file mode 100644
index 000000000000..ac10dfb20a62
--- /dev/null
+++ b/drivers/s390/char/sclp_rw.c
@@ -0,0 +1,471 @@
+/*
+ * drivers/s390/char/sclp_rw.c
+ * driver: reading from and writing to system console on S/390 via SCLP
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kmod.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <asm/uaccess.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+
+#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
+
+/*
+ * The room for the SCCB (only for writing) is not equal to a page size
+ * (as it is specified as the maximum size in the SCLP documentation)
+ * because of the additional struct sclp_buffer bookkeeping structure.
+ */
+#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
+
+/* Event type structure for write message and write priority message;
+ * its sclp_send_mask is consulted by sclp_emit_buffer(). */
+static struct sclp_register sclp_rw_event = {
+ .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask
+};
+
+/*
+ * Setup a sclp write buffer. Gets a page as input (4K) and returns
+ * a pointer to a struct sclp_buffer structure that is located at the
+ * end of the input page. This reduces the buffer space by a few
+ * bytes but simplifies things.
+ */
+struct sclp_buffer *
+sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
+{
+ struct sclp_buffer *buffer;
+ struct write_sccb *sccb;
+
+ sccb = (struct write_sccb *) page;
+ /*
+ * We keep the struct sclp_buffer structure at the end
+ * of the sccb page.
+ */
+ buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
+ buffer->sccb = sccb;
+ buffer->retry_count = 0;
+ buffer->mto_number = 0;
+ buffer->mto_char_sum = 0;
+ buffer->current_line = NULL;
+ buffer->current_length = 0;
+ buffer->columns = columns;
+ buffer->htab = htab;
+
+ /* initialize sccb header and the fixed event/MDB prologue */
+ memset(sccb, 0, sizeof(struct write_sccb));
+ sccb->header.length = sizeof(struct write_sccb);
+ sccb->msg_buf.header.length = sizeof(struct msg_buf);
+ sccb->msg_buf.header.type = EvTyp_Msg;
+ sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
+ sccb->msg_buf.mdb.header.type = 1;
+ sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
+ sccb->msg_buf.mdb.header.revision_code = 1;
+ sccb->msg_buf.mdb.go.length = sizeof(struct go);
+ sccb->msg_buf.mdb.go.type = 1;
+
+ return buffer;
+}
+
+/*
+ * Return a pointer to the original page that has been used to create
+ * the buffer.
+ */
+void *
+sclp_unmake_buffer(struct sclp_buffer *buffer)
+{
+ /* the sccb pointer is the start of the page the buffer lives in */
+ return buffer->sccb;
+}
+
+/*
+ * Initialize a new Message Text Object (MTO) at the end of the provided buffer
+ * with enough room for max_len characters. Return 0 on success.
+ */
+static int
+sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
+{
+ struct write_sccb *sccb;
+ struct mto *mto;
+ int mto_size;
+
+ /* max size of new Message Text Object including message text */
+ mto_size = sizeof(struct mto) + max_len;
+
+ /* check if current buffer sccb can contain the mto */
+ sccb = buffer->sccb;
+ if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
+ return -ENOMEM;
+
+ /* find address of new message text object */
+ mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
+
+ /*
+ * fill the new Message-Text Object,
+ * starting behind the former last byte of the SCCB
+ */
+ memset(mto, 0, sizeof(struct mto));
+ /* provisional length; sclp_finalize_mto() sets the real size */
+ mto->length = sizeof(struct mto);
+ mto->type = 4; /* message text object */
+ mto->line_type_flags = LnTpFlgs_EndText; /* end text */
+
+ /* set pointer to first byte after struct mto. */
+ buffer->current_line = (char *) (mto + 1);
+ buffer->current_length = 0;
+
+ return 0;
+}
+
+/*
+ * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
+ * MTO, enclosing MDB, event buffer and SCCB.
+ */
+static void
+sclp_finalize_mto(struct sclp_buffer *buffer)
+{
+ struct write_sccb *sccb;
+ struct mto *mto;
+ int str_len, mto_size;
+
+ /* close the open line: remember its length, drop write position */
+ str_len = buffer->current_length;
+ buffer->current_line = NULL;
+ buffer->current_length = 0;
+
+ /* real size of new Message Text Object including message text */
+ mto_size = sizeof(struct mto) + str_len;
+
+ /* find address of new message text object */
+ sccb = buffer->sccb;
+ mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
+
+ /* set size of message text object */
+ mto->length = mto_size;
+
+ /*
+ * update values of sizes
+ * (SCCB, Event(Message) Buffer, Message Data Block)
+ */
+ sccb->header.length += mto_size;
+ sccb->msg_buf.header.length += mto_size;
+ sccb->msg_buf.mdb.header.length += mto_size;
+
+ /*
+ * count number of buffered messages (= number of Message Text
+ * Objects) and number of buffered characters
+ * for the SCCB currently used for buffering and at all
+ */
+ buffer->mto_number++;
+ buffer->mto_char_sum += str_len;
+}
+
+/*
+ * processing of a message including escape characters,
+ * returns number of characters written to the output sccb
+ * ("processed" means that it is not guaranteed that the characters have
+ * already been sent to the SCLP, but that this will be done at least the
+ * next time the SCLP is not busy)
+ */
+int
+sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
+{
+ int spaces, i_msg;
+ int rc;
+
+ /*
+ * parse msg for escape sequences (\t,\v ...) and put formatted
+ * msg into an mto (created by sclp_initialize_mto).
+ *
+ * We have to do this work ourselves because there is no support for
+ * these characters on the native machine and only partial support
+ * under VM (Why does VM interpret \n but the native machine doesn't ?)
+ *
+ * Depending on i/o-control setting the message is always written
+ * immediately or we wait for a final new line maybe coming with the
+ * next message. Besides we avoid a buffer overrun by writing its
+ * content.
+ *
+ * RESTRICTIONS:
+ *
+ * \r and \b work within one line because we are not able to modify
+ * previous output that has already been accepted by the SCLP.
+ *
+ * \t combined with following \r is not correctly represented because
+ * \t is expanded to some spaces but \r does not know about a
+ * previous \t and decreases the current position by one column.
+ * This is in order to have a slim and quick implementation.
+ */
+ for (i_msg = 0; i_msg < count; i_msg++) {
+ switch (msg[i_msg]) {
+ case '\n': /* new line, line feed (ASCII) */
+ /* check if new mto needs to be created */
+ if (buffer->current_line == NULL) {
+ rc = sclp_initialize_mto(buffer, 0);
+ if (rc)
+ return i_msg;
+ }
+ sclp_finalize_mto(buffer);
+ break;
+ case '\a': /* bell, one for several times */
+ /* set SCLP sound alarm bit in General Object */
+ buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
+ GnrlMsgFlgs_SndAlrm;
+ break;
+ case '\t': /* horizontal tabulator */
+ /* check if new mto needs to be created */
+ if (buffer->current_line == NULL) {
+ rc = sclp_initialize_mto(buffer,
+ buffer->columns);
+ if (rc)
+ return i_msg;
+ }
+ /* "go to (next htab-boundary + 1, same line)" */
+ do {
+ if (buffer->current_length >= buffer->columns)
+ break;
+ /* ok, add a blank (0x40 is the EBCDIC space) */
+ *buffer->current_line++ = 0x40;
+ buffer->current_length++;
+ } while (buffer->current_length % buffer->htab);
+ break;
+ case '\f': /* form feed */
+ case '\v': /* vertical tabulator */
+ /* "go to (actual column, actual line + 1)" */
+ /* = new line, leading spaces */
+ if (buffer->current_line != NULL) {
+ spaces = buffer->current_length;
+ sclp_finalize_mto(buffer);
+ rc = sclp_initialize_mto(buffer,
+ buffer->columns);
+ if (rc)
+ return i_msg;
+ memset(buffer->current_line, 0x40, spaces);
+ buffer->current_line += spaces;
+ buffer->current_length = spaces;
+ } else {
+ /* on an empty line this is the same as \n */
+ rc = sclp_initialize_mto(buffer,
+ buffer->columns);
+ if (rc)
+ return i_msg;
+ sclp_finalize_mto(buffer);
+ }
+ break;
+ case '\b': /* backspace */
+ /* "go to (actual column - 1, actual line)" */
+ /* decrement counter indicating position, */
+ /* do not remove last character */
+ if (buffer->current_line != NULL &&
+ buffer->current_length > 0) {
+ buffer->current_length--;
+ buffer->current_line--;
+ }
+ break;
+ case 0x00: /* end of string */
+ /* transfer current line to SCCB */
+ if (buffer->current_line != NULL)
+ sclp_finalize_mto(buffer);
+ /* skip the rest of the message including the 0 byte */
+ i_msg = count - 1;
+ break;
+ default: /* no escape character */
+ /* do not output unprintable characters */
+ if (!isprint(msg[i_msg]))
+ break;
+ /* check if new mto needs to be created */
+ if (buffer->current_line == NULL) {
+ rc = sclp_initialize_mto(buffer,
+ buffer->columns);
+ if (rc)
+ return i_msg;
+ }
+ /* convert ASCII to EBCDIC and store in the open line */
+ *buffer->current_line++ = sclp_ascebc(msg[i_msg]);
+ buffer->current_length++;
+ break;
+ }
+ /* check if current mto is full */
+ if (buffer->current_line != NULL &&
+ buffer->current_length >= buffer->columns)
+ sclp_finalize_mto(buffer);
+ }
+
+ /* return number of processed characters */
+ return i_msg;
+}
+
+/*
+ * Return the number of free bytes in the sccb
+ */
+int
+sclp_buffer_space(struct sclp_buffer *buffer)
+{
+ int count;
+
+ count = MAX_SCCB_ROOM - buffer->sccb->header.length;
+ /* an open line still needs its mto header and text space */
+ if (buffer->current_line != NULL)
+ count -= sizeof(struct mto) + buffer->current_length;
+ return count;
+}
+
+/*
+ * Return number of characters in buffer
+ */
+int
+sclp_chars_in_buffer(struct sclp_buffer *buffer)
+{
+ int count;
+
+ /* finalized characters plus those on the still-open line */
+ count = buffer->mto_char_sum;
+ if (buffer->current_line != NULL)
+ count += buffer->current_length;
+ return count;
+}
+
+/*
+ * sets or provides some values that influence the drivers behaviour
+ */
+void
+sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
+{
+ buffer->columns = columns;
+ /* close the current line if it no longer fits the new width */
+ if (buffer->current_line != NULL &&
+ buffer->current_length > buffer->columns)
+ sclp_finalize_mto(buffer);
+}
+
+void
+sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
+{
+ /* tab stop width used for '\t' expansion in sclp_write() */
+ buffer->htab = htab;
+}
+
+/*
+ * called by sclp_console_init and/or sclp_tty_init
+ */
+int
+sclp_rw_init(void)
+{
+ static int init_done = 0;
+ int rc;
+
+ /* may be called by both console and tty init; register once only */
+ if (init_done)
+ return 0;
+
+ rc = sclp_register(&sclp_rw_event);
+ if (rc == 0)
+ init_done = 1;
+ return rc;
+}
+
+#define SCLP_BUFFER_MAX_RETRY 1
+
+/*
+ * second half of Write Event Data-function that has to be done after
+ * interruption indicating completion of Service Call.
+ */
+static void
+sclp_writedata_callback(struct sclp_req *request, void *data)
+{
+ int rc;
+ struct sclp_buffer *buffer;
+ struct write_sccb *sccb;
+
+ buffer = (struct sclp_buffer *) data;
+ sccb = buffer->sccb;
+
+ /* transport-level failure: report -EIO without retrying */
+ if (request->status == SCLP_REQ_FAILED) {
+ if (buffer->callback != NULL)
+ buffer->callback(buffer, -EIO);
+ return;
+ }
+ /* check SCLP response code and choose suitable action */
+ switch (sccb->header.response_code) {
+ case 0x0020 :
+ /* Normal completion, buffer processed, message(s) sent */
+ rc = 0;
+ break;
+
+ case 0x0340: /* Contained SCLP equipment check */
+ if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
+ rc = -EIO;
+ break;
+ }
+ /* remove processed buffers and requeue rest */
+ if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
+ /* not all buffers were processed */
+ sccb->header.response_code = 0x0000;
+ buffer->request.status = SCLP_REQ_FILLED;
+ rc = sclp_add_request(request);
+ if (rc == 0)
+ return;
+ } else
+ rc = 0;
+ break;
+
+ case 0x0040: /* SCLP equipment check */
+ case 0x05f0: /* Target resource in improper state */
+ if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
+ rc = -EIO;
+ break;
+ }
+ /* retry the whole request unchanged */
+ sccb->header.response_code = 0x0000;
+ buffer->request.status = SCLP_REQ_FILLED;
+ rc = sclp_add_request(request);
+ if (rc == 0)
+ return;
+ break;
+ default:
+ /* response 0x71f0 is mapped to out-of-memory; any other
+ * unknown code is treated as an invalid request */
+ if (sccb->header.response_code == 0x71f0)
+ rc = -ENOMEM;
+ else
+ rc = -EINVAL;
+ break;
+ }
+ if (buffer->callback != NULL)
+ buffer->callback(buffer, rc);
+}
+
+/*
+ * Setup the request structure in the struct sclp_buffer to do SCLP Write
+ * Event Data and pass the request to the core SCLP loop. Return zero on
+ * success, non-zero otherwise.
+ */
+int
+sclp_emit_buffer(struct sclp_buffer *buffer,
+ void (*callback)(struct sclp_buffer *, int))
+{
+ struct write_sccb *sccb;
+
+ /* add current line if there is one */
+ if (buffer->current_line != NULL)
+ sclp_finalize_mto(buffer);
+
+ /* Are there messages in the output buffer ? */
+ if (buffer->mto_number == 0)
+ return -EIO;
+
+ sccb = buffer->sccb;
+ if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask)
+ /* Use normal write message */
+ sccb->msg_buf.header.type = EvTyp_Msg;
+ else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask)
+ /* Use write priority message */
+ sccb->msg_buf.header.type = EvTyp_PMsgCmd;
+ else
+ return -ENOSYS;
+ buffer->request.command = SCLP_CMDW_WRITEDATA;
+ buffer->request.status = SCLP_REQ_FILLED;
+ buffer->request.callback = sclp_writedata_callback;
+ buffer->request.callback_data = buffer;
+ buffer->request.sccb = sccb;
+ /* owner's callback, invoked from sclp_writedata_callback() */
+ buffer->callback = callback;
+ return sclp_add_request(&buffer->request);
+}
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
new file mode 100644
index 000000000000..6aa7a6948bc9
--- /dev/null
+++ b/drivers/s390/char/sclp_rw.h
@@ -0,0 +1,96 @@
+/*
+ * drivers/s390/char/sclp_rw.h
+ * interface to the SCLP-read/write driver
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_RW_H__
+#define __SCLP_RW_H__
+
+#include <linux/list.h>
+
+/* Message Text Object: header preceding one line of output text. */
+struct mto {
+ u16 length;
+ u16 type;
+ u16 line_type_flags;
+ u8 alarm_control;
+ u8 _reserved[3];
+} __attribute__((packed));
+
+/* General Object: time/date stamps and message flags of an MDB. */
+struct go {
+ u16 length;
+ u16 type;
+ u32 domid;
+ u8 hhmmss_time[8];
+ u8 th_time[3];
+ u8 reserved_0;
+ u8 dddyyyy_date[7];
+ u8 _reserved_1;
+ u16 general_msg_flags;
+ u8 _reserved_2[10];
+ u8 originating_system_name[8];
+ u8 job_guest_name[8];
+} __attribute__((packed));
+
+/* Header of a Message Data Block (tag is "MDB " in EBCDIC). */
+struct mdb_header {
+ u16 length;
+ u16 type;
+ u32 tag;
+ u32 revision_code;
+} __attribute__((packed));
+
+/* Message Data Block: header, general object, then appended MTOs. */
+struct mdb {
+ struct mdb_header header;
+ struct go go;
+} __attribute__((packed));
+
+/* Event buffer wrapping an MDB for Write Event Data. */
+struct msg_buf {
+ struct evbuf_header header;
+ struct mdb mdb;
+} __attribute__((packed));
+
+/* SCCB layout used for writing messages. */
+struct write_sccb {
+ struct sccb_header header;
+ struct msg_buf msg_buf;
+} __attribute__((packed));
+
+/* The number of empty mto buffers that can be contained in a single sccb. */
+#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
+ sizeof(struct write_sccb)) / sizeof(struct mto))
+
+/*
+ * data structure for information about list of SCCBs (only for writing),
+ * will be located at the end of a SCCBs page
+ */
+struct sclp_buffer {
+ struct list_head list; /* list_head for sccb_info chain */
+ struct sclp_req request;
+ struct write_sccb *sccb; /* start of the page this buffer lives in */
+ char *current_line; /* write position in open mto, NULL if none */
+ int current_length; /* # chars on the currently open line */
+ int retry_count;
+ /* output format settings */
+ unsigned short columns;
+ unsigned short htab;
+ /* statistics about this buffer */
+ unsigned int mto_char_sum; /* # chars in sccb */
+ unsigned int mto_number; /* # mtos in sccb */
+ /* Callback that is called after reaching final status. */
+ void (*callback)(struct sclp_buffer *, int);
+};
+
+int sclp_rw_init(void);
+struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
+void *sclp_unmake_buffer(struct sclp_buffer *);
+int sclp_buffer_space(struct sclp_buffer *);
+int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
+int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
+void sclp_set_columns(struct sclp_buffer *, unsigned short);
+void sclp_set_htab(struct sclp_buffer *, unsigned short);
+int sclp_chars_in_buffer(struct sclp_buffer *);
+
+#endif /* __SCLP_RW_H__ */
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
new file mode 100644
index 000000000000..a20d7c89341d
--- /dev/null
+++ b/drivers/s390/char/sclp_tty.c
@@ -0,0 +1,813 @@
+/*
+ * drivers/s390/char/sclp_tty.c
+ * SCLP line mode terminal driver.
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/uaccess.h>
+
+#include "ctrlchar.h"
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_tty.h"
+
+#define SCLP_TTY_PRINT_HEADER "sclp tty driver: "
+
+/*
+ * size of a buffer that collects single characters coming in
+ * via sclp_tty_put_char()
+ */
+#define SCLP_TTY_BUF_SIZE 512
+
+/*
+ * There is exactly one SCLP terminal, so we can keep things simple
+ * and allocate all variables statically.
+ */
+
+/* Lock to guard over changes to global variables. */
+static spinlock_t sclp_tty_lock;
+/* List of free pages that can be used for console output buffering. */
+static struct list_head sclp_tty_pages;
+/* List of full struct sclp_buffer structures ready for output. */
+static struct list_head sclp_tty_outqueue;
+/* Counter how many buffers are emitted. */
+static int sclp_tty_buffer_count;
+/* Pointer to current console buffer. */
+static struct sclp_buffer *sclp_ttybuf;
+/* Timer for delayed output of console messages. */
+static struct timer_list sclp_tty_timer;
+/* Waitqueue to wait for buffers to get empty. */
+static wait_queue_head_t sclp_tty_waitq;
+
+static struct tty_struct *sclp_tty;
+static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
+static unsigned short int sclp_tty_chars_count;
+
+struct tty_driver *sclp_tty_driver;
+
+extern struct termios tty_std_termios;
+
+/* Current runtime settings and their compile-time defaults. */
+static struct sclp_ioctls sclp_ioctls;
+static struct sclp_ioctls sclp_ioctls_init =
+{
+ 8, /* 1 hor. tab. = 8 spaces */
+ 0, /* no echo of input by this driver */
+ 80, /* 80 characters/line */
+ 1, /* write after 1/10 s without final new line */
+ MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */
+ MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */
+ 0, /* do not convert to lower case */
+ 0x6c /* to separate upper and lower case */
+ /* ('%' in EBCDIC) */
+};
+
+/* This routine is called whenever we try to open a SCLP terminal. */
+static int
+sclp_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ /* remember the tty for input forwarding; there is only one */
+ sclp_tty = tty;
+ tty->driver_data = NULL;
+ tty->low_latency = 0;
+ return 0;
+}
+
+/* This routine is called when the SCLP terminal is closed. */
+static void
+sclp_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ /* only drop the input target when the last reference goes away */
+ if (tty->count > 1)
+ return;
+ sclp_tty = NULL;
+}
+
+/* execute commands to control the i/o behaviour of the SCLP tty at runtime */
+static int
+sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned long flags;
+ unsigned int obuf;
+ int check;
+ int rc;
+
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ rc = 0;
+ /* 'check' is set when the change must be propagated to the
+ * current output buffer (columns / htab) */
+ check = 0;
+ switch (cmd) {
+ case TIOCSCLPSHTAB:
+ /* set width of horizontal tab */
+ if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg))
+ rc = -EFAULT;
+ else
+ check = 1;
+ break;
+ case TIOCSCLPGHTAB:
+ /* get width of horizontal tab */
+ if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPSECHO:
+ /* enable/disable echo of input */
+ if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPGECHO:
+ /* Is echo of input enabled ? */
+ if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPSCOLS:
+ /* set number of columns for output */
+ if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg))
+ rc = -EFAULT;
+ else
+ check = 1;
+ break;
+ case TIOCSCLPGCOLS:
+ /* get number of columns for output */
+ if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPSNL:
+ /* enable/disable writing without final new line character */
+ if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPGNL:
+ /* Is writing without final new line character enabled ? */
+ if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPSOBUF:
+ /*
+ * set the maximum buffers size for output, will be rounded
+ * up to next 4kB boundary and stored as number of SCCBs
+ * (4kB Buffers) limitation: 256 x 4kB
+ */
+ if (get_user(obuf, (unsigned int __user *) arg) == 0) {
+ if (obuf & 0xFFF)
+ sclp_ioctls.max_sccb = (obuf >> 12) + 1;
+ else
+ sclp_ioctls.max_sccb = (obuf >> 12);
+ } else
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPGOBUF:
+ /* get the maximum buffers size for output */
+ obuf = sclp_ioctls.max_sccb << 12;
+ if (put_user(obuf, (unsigned int __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPGKBUF:
+ /* get the number of buffers got from kernel at startup */
+ if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPSCASE:
+ /* enable/disable conversion from upper to lower case */
+ if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPGCASE:
+ /* Is conversion from upper to lower case of input enabled? */
+ if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPSDELIM:
+ /*
+ * set special character used for separating upper and
+ * lower case, 0x00 disables this feature
+ */
+ if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPGDELIM:
+ /*
+ * get special character used for separating upper and
+ * lower case, 0x00 disables this feature
+ */
+ if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg))
+ rc = -EFAULT;
+ break;
+ case TIOCSCLPSINIT:
+ /* set initial (default) sclp ioctls */
+ sclp_ioctls = sclp_ioctls_init;
+ check = 1;
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ if (check) {
+ /* apply the new format settings to the open buffer, if any */
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ if (sclp_ttybuf != NULL) {
+ sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab);
+ sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns);
+ }
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+ }
+ return rc;
+}
+
+/*
+ * This routine returns the numbers of characters the tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if the output flow
+ * control is acted. This is not an exact number because not every
+ * character needs the same space in the sccb. The worst case is
+ * a string of newlines. Every newlines creates a new mto which
+ * needs 8 bytes.
+ */
+static int
+sclp_tty_write_room (struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct list_head *l;
+ int count;
+
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ count = 0;
+ if (sclp_ttybuf != NULL)
+ count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto);
+ /* every free page can hold NR_EMPTY_MTO_PER_SCCB empty mtos */
+ list_for_each(l, &sclp_tty_pages)
+ count += NR_EMPTY_MTO_PER_SCCB;
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+ return count;
+}
+
+static void
+sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
+{
+ unsigned long flags;
+ void *page;
+
+ do {
+ /* recycle the completed buffer's page ... */
+ page = sclp_unmake_buffer(buffer);
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ /* Remove buffer from outqueue */
+ list_del(&buffer->list);
+ sclp_tty_buffer_count--;
+ list_add_tail((struct list_head *) page, &sclp_tty_pages);
+ /* Check if there is a pending buffer on the out queue. */
+ buffer = NULL;
+ if (!list_empty(&sclp_tty_outqueue))
+ buffer = list_entry(sclp_tty_outqueue.next,
+ struct sclp_buffer, list);
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+ /* ... and emit the next queued buffer; loop again if that
+ * fails immediately, so its page is recycled too */
+ } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
+ wake_up(&sclp_tty_waitq);
+ /* check if the tty needs a wake up call */
+ if (sclp_tty != NULL) {
+ tty_wakeup(sclp_tty);
+ }
+}
+
+static inline void
+__sclp_ttybuf_emit(struct sclp_buffer *buffer)
+{
+ unsigned long flags;
+ int count;
+ int rc;
+
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ list_add_tail(&buffer->list, &sclp_tty_outqueue);
+ count = sclp_tty_buffer_count++;
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+ /* only kick off a request when the queue was empty; otherwise
+ * sclp_ttybuf_callback() emits queued buffers in order */
+ if (count)
+ return;
+ rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
+ if (rc)
+ sclp_ttybuf_callback(buffer, rc);
+}
+
+/*
+ * When this routine is called from the timer then we flush the
+ * temporary write buffer.
+ */
+static void
+sclp_tty_timeout(unsigned long data)
+{
+ unsigned long flags;
+ struct sclp_buffer *buf;
+
+ /* detach the current buffer under the lock, emit it outside */
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ buf = sclp_ttybuf;
+ sclp_ttybuf = NULL;
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+
+ if (buf != NULL) {
+ __sclp_ttybuf_emit(buf);
+ }
+}
+
+/*
+ * Write a string to the sclp tty.
+ */
+static void
+sclp_tty_write_string(const unsigned char *str, int count)
+{
+ unsigned long flags;
+ void *page;
+ int written;
+ struct sclp_buffer *buf;
+
+ if (count <= 0)
+ return;
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ do {
+ /* Create a sclp output buffer if none exists yet */
+ if (sclp_ttybuf == NULL) {
+ /* no free page: busy-wait in interrupt context,
+ * sleep otherwise */
+ while (list_empty(&sclp_tty_pages)) {
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+ if (in_interrupt())
+ sclp_sync_wait();
+ else
+ wait_event(sclp_tty_waitq,
+ !list_empty(&sclp_tty_pages));
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ }
+ page = sclp_tty_pages.next;
+ list_del((struct list_head *) page);
+ sclp_ttybuf = sclp_make_buffer(page,
+ sclp_ioctls.columns,
+ sclp_ioctls.htab);
+ }
+ /* try to write the string to the current output buffer */
+ written = sclp_write(sclp_ttybuf, str, count);
+ if (written == count)
+ break;
+ /*
+ * Not all characters could be written to the current
+ * output buffer. Emit the buffer, create a new buffer
+ * and then output the rest of the string.
+ */
+ buf = sclp_ttybuf;
+ sclp_ttybuf = NULL;
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+ __sclp_ttybuf_emit(buf);
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ str += written;
+ count -= written;
+ } while (count > 0);
+ /* Setup timer to output current console buffer after 1/10 second */
+ if (sclp_ioctls.final_nl) {
+ if (sclp_ttybuf != NULL &&
+ sclp_chars_in_buffer(sclp_ttybuf) != 0 &&
+ !timer_pending(&sclp_tty_timer)) {
+ init_timer(&sclp_tty_timer);
+ sclp_tty_timer.function = sclp_tty_timeout;
+ sclp_tty_timer.data = 0UL;
+ sclp_tty_timer.expires = jiffies + HZ/10;
+ add_timer(&sclp_tty_timer);
+ }
+ } else {
+ /* no waiting for a final newline: emit immediately */
+ if (sclp_ttybuf != NULL &&
+ sclp_chars_in_buffer(sclp_ttybuf) != 0) {
+ buf = sclp_ttybuf;
+ sclp_ttybuf = NULL;
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+ __sclp_ttybuf_emit(buf);
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+}
+
+/*
+ * This routine is called by the kernel to write a series of characters to the
+ * tty device. The characters may come from user space or kernel space. This
+ * routine will return the number of characters actually accepted for writing.
+ */
+static int
+sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ /* flush characters staged by sclp_tty_put_char() first to keep
+ * output ordering */
+ if (sclp_tty_chars_count > 0) {
+ sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
+ sclp_tty_chars_count = 0;
+ }
+ sclp_tty_write_string(buf, count);
+ return count;
+}
+
+/*
+ * This routine is called by the kernel to write a single character to the tty
+ * device. If the kernel uses this routine, it must call the flush_chars()
+ * routine (if defined) when it is done stuffing characters into the driver.
+ *
+ * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
+ * If the given character is a '\n' the contents of the SCLP write buffer
+ * - including previous characters from sclp_tty_put_char() and strings from
+ * sclp_write() without final '\n' - will be written.
+ */
+static void
+sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ sclp_tty_chars[sclp_tty_chars_count++] = ch;
+ /* flush on newline or when the staging buffer is full */
+ if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
+ sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
+ sclp_tty_chars_count = 0;
+ }
+}
+
+/*
+ * This routine is called by the kernel after it has written a series of
+ * characters to the tty device using put_char().
+ */
+static void
+sclp_tty_flush_chars(struct tty_struct *tty)
+{
+ /* push characters accumulated via put_char() to the SCLP */
+ if (sclp_tty_chars_count > 0) {
+ sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
+ sclp_tty_chars_count = 0;
+ }
+}
+
+/*
+ * This routine returns the number of characters in the write buffer of the
+ * SCLP driver. The provided number includes all characters that are stored
+ * in the SCCB (will be written next time the SCLP is not busy) as well as
+ * characters in the write buffer (will not be written as long as there is a
+ * final line feed missing).
+ */
+static int
+sclp_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct list_head *l;
+ struct sclp_buffer *t;
+ int count;
+
+ spin_lock_irqsave(&sclp_tty_lock, flags);
+ count = 0;
+ if (sclp_ttybuf != NULL)
+ count = sclp_chars_in_buffer(sclp_ttybuf);
+ /* also count buffers already queued for output */
+ list_for_each(l, &sclp_tty_outqueue) {
+ t = list_entry(l, struct sclp_buffer, list);
+ count += sclp_chars_in_buffer(t);
+ }
+ spin_unlock_irqrestore(&sclp_tty_lock, flags);
+ return count;
+}
+
+/*
+ * removes all content from buffers of low level driver
+ */
+static void
+sclp_tty_flush_buffer(struct tty_struct *tty)
+{
+ /* NOTE(review): despite the header comment, this writes the staged
+ * characters out to the SCLP rather than discarding them. */
+ if (sclp_tty_chars_count > 0) {
+ sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
+ sclp_tty_chars_count = 0;
+ }
+}
+
+/*
+ * push input to tty
+ */
+static void
+sclp_tty_input(unsigned char* buf, unsigned int count)
+{
+ unsigned int cchar;
+
+ /*
+ * If this tty driver is currently closed
+ * then throw the received input away.
+ */
+ if (sclp_tty == NULL)
+ return;
+ cchar = ctrlchar_handle(buf, count, sclp_tty);
+ switch (cchar & CTRLCHAR_MASK) {
+ case CTRLCHAR_SYSRQ:
+ /* NOTE(review): presumably handled inside ctrlchar_handle();
+ * the input is consumed here — confirm against ctrlchar.c */
+ break;
+ case CTRLCHAR_CTRL:
+ /* push the single control character to the line discipline */
+ sclp_tty->flip.count++;
+ *sclp_tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+ *sclp_tty->flip.char_buf_ptr++ = cchar;
+ tty_flip_buffer_push(sclp_tty);
+ break;
+ case CTRLCHAR_NONE:
+ /* send (normal) input to line discipline */
+ memcpy(sclp_tty->flip.char_buf_ptr, buf, count);
+ /* a trailing "^n" (or "\0252n", i.e. 0xAA 'n') suppresses
+ * the implicit newline */
+ if (count < 2 ||
+ (strncmp ((const char *) buf + count - 2, "^n", 2) &&
+ strncmp ((const char *) buf + count - 2, "\0252n", 2))) {
+ sclp_tty->flip.char_buf_ptr[count] = '\n';
+ count++;
+ } else
+ count -= 2;
+ memset(sclp_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
+ sclp_tty->flip.char_buf_ptr += count;
+ sclp_tty->flip.flag_buf_ptr += count;
+ sclp_tty->flip.count += count;
+ tty_flip_buffer_push(sclp_tty);
+ break;
+ }
+}
+
+/*
+ * get an EBCDIC string in upper/lower case,
+ * find out characters in lower/upper case separated by a special character,
+ * modify the original string,
+ * returns length of resulting string
+ */
+static int
+sclp_switch_cases(unsigned char *buf, int count,
+ unsigned char delim, int tolower)
+{
+ unsigned char *ip, *op;
+ int toggle;
+
+ /* initially changing case is off */
+ toggle = 0;
+ ip = op = buf;
+ while (count-- > 0) {
+ /* compare with special character */
+ if (*ip == delim) {
+ /* followed by another special character? */
+ if (count && ip[1] == delim) {
+ /*
+ * ... then put a single copy of the special
+ * character to the output string
+ */
+ *op++ = *ip++;
+ count--;
+ } else
+ /*
+ * ... special character followed by a normal
+ * character toggles the case change behaviour
+ */
+ toggle = ~toggle;
+ /* skip special character */
+ ip++;
+ } else
+ /* not the special character */
+ if (toggle)
+ /* but case switching is on */
+ if (tolower)
+ /* switch to uppercase */
+ *op++ = _ebc_toupper[(int) *ip++];
+ else
+ /* switch to lowercase */
+ *op++ = _ebc_tolower[(int) *ip++];
+ else
+ /* no case switching, copy the character */
+ *op++ = *ip++;
+ }
+ /* return length of reformatted string. */
+ return op - buf;
+}
+
+/*
+ * Process one chunk of operator input located at [start, end) inside
+ * the SCCB: optionally lowercase the EBCDIC input, apply the
+ * delimiter-based case switching, convert EBCDIC to ASCII, optionally
+ * echo it back to the console, and finally hand the converted text to
+ * the tty input layer.  The input is modified in place.
+ */
+static void
+sclp_get_input(unsigned char *start, unsigned char *end)
+{
+	int count;
+
+	count = end - start;
+	/*
+	 * if set in ioctl convert EBCDIC to lower case
+	 * (modify original input in SCCB)
+	 */
+	if (sclp_ioctls.tolower)
+		EBC_TOLOWER(start, count);
+
+	/*
+	 * if set in ioctl find out characters in lower or upper case
+	 * (depends on current case) separated by a special character,
+	 * works on EBCDIC
+	 */
+	if (sclp_ioctls.delim)
+		count = sclp_switch_cases(start, count,
+					  sclp_ioctls.delim,
+					  sclp_ioctls.tolower);
+
+	/* convert EBCDIC to ASCII (modify original input in SCCB) */
+	sclp_ebcasc_str(start, count);
+
+	/* if set in ioctl write operators input to console */
+	if (sclp_ioctls.echo)
+		sclp_tty_write(sclp_tty, start, count);
+
+	/* transfer input to high level driver */
+	sclp_tty_input(start, count);
+}
+
+/*
+ * Return the first GDS vector in [start, end) whose gds_id matches
+ * 'id', or NULL if none is found.  Vectors are chained via their
+ * self-describing length field.
+ */
+static inline struct gds_vector *
+find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id)
+{
+	struct gds_vector *vec;
+
+	for (vec = start; vec < end; vec = (void *) vec + vec->length)
+		if (vec->gds_id == id)
+			return vec;
+	return NULL;
+}
+
+/*
+ * Return the first GDS subvector in [start, end) whose key matches
+ * 'key', or NULL if none is found.  Subvectors are chained via their
+ * self-describing length field.
+ */
+static inline struct gds_subvector *
+find_gds_subvector(struct gds_subvector *start,
+		   struct gds_subvector *end, u8 key)
+{
+	struct gds_subvector *subvec;
+
+	for (subvec = start; subvec < end;
+	     subvec = (void *) subvec + subvec->length)
+		if (subvec->key == key)
+			return subvec;
+	return NULL;
+}
+
+/*
+ * Walk all subvectors with key 0x30 inside a self-defining text
+ * message and pass each payload to sclp_get_input().  The payload
+ * starts directly behind the subvector header and extends to the end
+ * of the subvector.  (key 0x30 presumably identifies plain text data
+ * — TODO confirm against the SCLP event data documentation.)
+ */
+static inline void
+sclp_eval_selfdeftextmsg(struct gds_subvector *start,
+			 struct gds_subvector *end)
+{
+	struct gds_subvector *subvec;
+
+	subvec = start;
+	while (subvec < end) {
+		subvec = find_gds_subvector(subvec, end, 0x30);
+		if (!subvec)
+			break;
+		sclp_get_input((unsigned char *)(subvec + 1),
+			       (unsigned char *) subvec + subvec->length);
+		subvec = (void *) subvec + subvec->length;
+	}
+}
+
+/*
+ * Walk all GDS_KEY_SelfDefTextMsg subvectors inside a text command
+ * and evaluate each of them; the contained subvectors start directly
+ * behind the subvector header.
+ */
+static inline void
+sclp_eval_textcmd(struct gds_subvector *start,
+		  struct gds_subvector *end)
+{
+	struct gds_subvector *subvec;
+
+	subvec = start;
+	while (subvec < end) {
+		subvec = find_gds_subvector(subvec, end,
+					    GDS_KEY_SelfDefTextMsg);
+		if (!subvec)
+			break;
+		sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
+					 (void *)subvec + subvec->length);
+		subvec = (void *) subvec + subvec->length;
+	}
+}
+
+/*
+ * Walk all GDS_ID_TextCmd vectors inside a CPMSU vector and evaluate
+ * each of them; the contained subvectors start directly behind the
+ * vector header.
+ */
+static inline void
+sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
+{
+	struct gds_vector *vec;
+
+	vec = start;
+	while (vec < end) {
+		vec = find_gds_vector(vec, end, GDS_ID_TextCmd);
+		if (!vec)
+			break;
+		sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
+				  (void *) vec + vec->length);
+		vec = (void *) vec + vec->length;
+	}
+}
+
+
+/*
+ * Look for a CPMSU vector inside an MDSMU vector and, if present,
+ * evaluate it.  Only the first matching vector is processed.
+ */
+static inline void
+sclp_eval_mdsmu(struct gds_vector *start, void *end)
+{
+	struct gds_vector *vec;
+
+	vec = find_gds_vector(start, end, GDS_ID_CPMSU);
+	if (vec)
+		sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length);
+}
+
+/*
+ * SCLP event callback for operator input.  The event buffer contains a
+ * nested GDS structure (MDSMU -> CPMSU -> TextCmd -> SelfDefTextMsg);
+ * only the first MDSMU vector is evaluated.
+ */
+static void
+sclp_tty_receiver(struct evbuf_header *evbuf)
+{
+	struct gds_vector *start, *end, *vec;
+
+	start = (struct gds_vector *)(evbuf + 1);
+	end = (void *) evbuf + evbuf->length;
+	vec = find_gds_vector(start, end, GDS_ID_MDSMU);
+	if (vec)
+		sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length);
+}
+
+static void
+sclp_tty_state_change(struct sclp_register *reg)
+{
+}
+
+static struct sclp_register sclp_input_event =
+{
+ .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask,
+ .state_change_fn = sclp_tty_state_change,
+ .receiver_fn = sclp_tty_receiver
+};
+
+static struct tty_operations sclp_ops = {
+ .open = sclp_tty_open,
+ .close = sclp_tty_close,
+ .write = sclp_tty_write,
+ .put_char = sclp_tty_put_char,
+ .flush_chars = sclp_tty_flush_chars,
+ .write_room = sclp_tty_write_room,
+ .chars_in_buffer = sclp_tty_chars_in_buffer,
+ .flush_buffer = sclp_tty_flush_buffer,
+ .ioctl = sclp_tty_ioctl,
+};
+
+/*
+ * Initialize the SCLP line-mode tty: allocate pages for output
+ * buffering, register for SCLP operator input events and register the
+ * tty driver.  Returns 0 on success (also when the console is not an
+ * SCLP console, in which case nothing is set up) or a negative error
+ * code.  On failure all resources acquired here are released again
+ * (the original code leaked the already-allocated buffer pages and,
+ * on tty_register_driver failure, the SCLP event registration).
+ */
+int __init
+sclp_tty_init(void)
+{
+	struct tty_driver *driver;
+	void *page;
+	int i;
+	int rc;
+
+	if (!CONSOLE_IS_SCLP)
+		return 0;
+	driver = alloc_tty_driver(1);
+	if (!driver)
+		return -ENOMEM;
+
+	rc = sclp_rw_init();
+	if (rc) {
+		printk(KERN_ERR SCLP_TTY_PRINT_HEADER
+		       "could not register tty - "
+		       "sclp_rw_init returned %d\n", rc);
+		put_tty_driver(driver);
+		return rc;
+	}
+	/* Allocate pages for output buffering */
+	INIT_LIST_HEAD(&sclp_tty_pages);
+	for (i = 0; i < MAX_KMEM_PAGES; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (page == NULL) {
+			rc = -ENOMEM;
+			goto out_pages;
+		}
+		list_add_tail((struct list_head *) page, &sclp_tty_pages);
+	}
+	INIT_LIST_HEAD(&sclp_tty_outqueue);
+	spin_lock_init(&sclp_tty_lock);
+	init_waitqueue_head(&sclp_tty_waitq);
+	init_timer(&sclp_tty_timer);
+	sclp_ttybuf = NULL;
+	sclp_tty_buffer_count = 0;
+	if (MACHINE_IS_VM) {
+		/*
+		 * save 4 characters for the CPU number
+		 * written at start of each line by VM/CP
+		 */
+		sclp_ioctls_init.columns = 76;
+		/* convert input lines to lowercase */
+		sclp_ioctls_init.tolower = 1;
+	}
+	sclp_ioctls = sclp_ioctls_init;
+	sclp_tty_chars_count = 0;
+	sclp_tty = NULL;
+
+	rc = sclp_register(&sclp_input_event);
+	if (rc)
+		goto out_pages;
+
+	driver->owner = THIS_MODULE;
+	driver->driver_name = "sclp_line";
+	driver->name = "sclp_line";
+	driver->major = TTY_MAJOR;
+	driver->minor_start = 64;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+	driver->init_termios.c_oflag = ONLCR | XTABS;
+	driver->init_termios.c_lflag = ISIG | ECHO;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &sclp_ops);
+	rc = tty_register_driver(driver);
+	if (rc) {
+		printk(KERN_ERR SCLP_TTY_PRINT_HEADER
+		       "could not register tty - "
+		       "tty_register_driver returned %d\n", rc);
+		sclp_unregister(&sclp_input_event);
+		goto out_pages;
+	}
+	sclp_tty_driver = driver;
+	return 0;
+
+out_pages:
+	/* Release all buffer pages allocated above before bailing out. */
+	while (!list_empty(&sclp_tty_pages)) {
+		page = sclp_tty_pages.next;
+		list_del((struct list_head *) page);
+		free_page((unsigned long) page);
+	}
+	put_tty_driver(driver);
+	return rc;
+}
+module_init(sclp_tty_init);
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
new file mode 100644
index 000000000000..0ce2c1fc5340
--- /dev/null
+++ b/drivers/s390/char/sclp_tty.h
@@ -0,0 +1,71 @@
+/*
+ * drivers/s390/char/sclp_tty.h
+ * interface to the SCLP-read/write driver
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_TTY_H__
+#define __SCLP_TTY_H__
+
+#include <linux/ioctl.h>
+#include <linux/termios.h>
+#include <linux/tty_driver.h>
+
+/* This is the type of data structures storing sclp ioctl setting. */
+struct sclp_ioctls {
+	unsigned short htab;		/* width of horizontal tabulator */
+	unsigned char echo;		/* echo input back to the console? */
+	unsigned short columns;		/* number of columns for output */
+	unsigned char final_nl;		/* append final new line on write? */
+	unsigned short max_sccb;	/* max output buffer size (4kB units) */
+	unsigned short kmem_sccb;	/* can't be modified at run time */
+	unsigned char tolower;		/* lowercase EBCDIC input? */
+	unsigned char delim;		/* case-switch delimiter, 0 = off */
+};
+
+/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
+#define SCLP_IOCTL_LETTER 'B'
+
+/* set width of horizontal tabulator */
+#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
+/* enable/disable echo of input (independent from line discipline) */
+#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
+/* set number of columns for output */
+#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
+/* enable/disable writing without final new line character */
+#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char)
+/* set the maximum buffers size for output, rounded up to next 4kB boundary */
+#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
+/* set initial (default) sclp ioctls */
+#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6)
+/* enable/disable conversion from upper to lower case of input */
+#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
+/* set special character used for separating upper and lower case, */
+/* 0x00 disables this feature */
+#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char)
+
+/* get width of horizontal tabulator */
+#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
+/* Is echo of input enabled ? (independent from line discipline) */
+#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
+/* get number of columns for output */
+#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
+/* Is writing without final new line character enabled ? */
+#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char)
+/* get the maximum buffers size for output */
+#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
+/* Is conversion from upper to lower case of input enabled ? */
+#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
+/* get special character used for separating upper and lower case, */
+/* 0x00 disables this feature */
+#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
+/* get the number of buffers/pages got from kernel at startup */
+#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short)
+
+extern struct tty_driver *sclp_tty_driver;
+
+#endif /* __SCLP_TTY_H__ */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
new file mode 100644
index 000000000000..06bd85824d7b
--- /dev/null
+++ b/drivers/s390/char/sclp_vt220.c
@@ -0,0 +1,785 @@
+/*
+ * drivers/s390/char/sclp_vt220.c
+ * SCLP VT220 terminal driver.
+ *
+ * S390 version
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/kdev_t.h>
+#include <linux/bootmem.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include "sclp.h"
+
+#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
+#define SCLP_VT220_MAJOR TTY_MAJOR
+#define SCLP_VT220_MINOR 65
+#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
+#define SCLP_VT220_DEVICE_NAME "ttysclp"
+#define SCLP_VT220_CONSOLE_NAME "ttyS"
+#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
+#define SCLP_VT220_BUF_SIZE 80
+
+/* Representation of a single write request */
+struct sclp_vt220_request {
+ struct list_head list;
+ struct sclp_req sclp_req;
+ int retry_count;
+};
+
+/* VT220 SCCB */
+struct sclp_vt220_sccb {
+ struct sccb_header header;
+ struct evbuf_header evbuf;
+};
+
+#define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
+ sizeof(struct sclp_vt220_request) - \
+ sizeof(struct sclp_vt220_sccb))
+
+/* Structures and data needed to register tty driver */
+static struct tty_driver *sclp_vt220_driver;
+
+/* The tty_struct that the kernel associated with us */
+static struct tty_struct *sclp_vt220_tty;
+
+/* Lock to protect internal data from concurrent access */
+static spinlock_t sclp_vt220_lock;
+
+/* List of empty pages to be used as write request buffers */
+static struct list_head sclp_vt220_empty;
+
+/* List of pending requests */
+static struct list_head sclp_vt220_outqueue;
+
+/* Number of requests in outqueue */
+static int sclp_vt220_outqueue_count;
+
+/* Wait queue used to delay write requests while we've run out of buffers */
+static wait_queue_head_t sclp_vt220_waitq;
+
+/* Timer used for delaying write requests to merge subsequent messages into
+ * a single buffer */
+static struct timer_list sclp_vt220_timer;
+
+/* Pointer to current request buffer which has been partially filled but not
+ * yet sent */
+static struct sclp_vt220_request *sclp_vt220_current_request;
+
+/* Number of characters in current request buffer */
+static int sclp_vt220_buffered_chars;
+
+/* Flag indicating whether this driver has already been initialized */
+static int sclp_vt220_initialized = 0;
+
+/* Flag indicating that sclp_vt220_current_request should really
+ * have been already queued but wasn't because the SCLP was processing
+ * another buffer */
+static int sclp_vt220_flush_later;
+
+static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
+static int __sclp_vt220_emit(struct sclp_vt220_request *request);
+static void sclp_vt220_emit_current(void);
+
+/* Registration structure for our interest in SCLP event buffers */
+static struct sclp_register sclp_vt220_register = {
+ .send_mask = EvTyp_VT220Msg_Mask,
+ .receive_mask = EvTyp_VT220Msg_Mask,
+ .state_change_fn = NULL,
+ .receiver_fn = sclp_vt220_receiver_fn
+};
+
+
+/*
+ * Put the page of a completed write request back onto the empty list
+ * and emit the next pending request, if any.  If emitting fails, that
+ * request's page is recycled too and the next one is tried, until the
+ * queue is drained or an emit succeeds.  Afterwards a deferred flush
+ * is honoured, writers waiting for a free page are woken up and the
+ * tty is given a wakeup call.
+ */
+static void
+sclp_vt220_process_queue(struct sclp_vt220_request *request)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		/* Put buffer back to list of empty buffers */
+		page = request->sclp_req.sccb;
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+		/* Move request from outqueue to empty queue */
+		list_del(&request->list);
+		sclp_vt220_outqueue_count--;
+		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+		/* Check if there is a pending buffer on the out queue. */
+		request = NULL;
+		if (!list_empty(&sclp_vt220_outqueue))
+			request = list_entry(sclp_vt220_outqueue.next,
+					     struct sclp_vt220_request, list);
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	} while (request && __sclp_vt220_emit(request));
+	/* A flush was requested while the SCLP was busy - do it now. */
+	if (request == NULL && sclp_vt220_flush_later)
+		sclp_vt220_emit_current();
+	wake_up(&sclp_vt220_waitq);
+	/* Check if the tty needs a wake up call */
+	if (sclp_vt220_tty != NULL) {
+		tty_wakeup(sclp_vt220_tty);
+	}
+}
+
+#define SCLP_BUFFER_MAX_RETRY 1
+
+/*
+ * Callback through which the result of a write request is reported by the
+ * SCLP.  On hard failure the request is retired immediately; otherwise
+ * selected error responses are retried up to SCLP_BUFFER_MAX_RETRY
+ * times before the request is retired via sclp_vt220_process_queue().
+ */
+static void
+sclp_vt220_callback(struct sclp_req *request, void *data)
+{
+	struct sclp_vt220_request *vt220_request;
+	struct sclp_vt220_sccb *sccb;
+
+	vt220_request = (struct sclp_vt220_request *) data;
+	if (request->status == SCLP_REQ_FAILED) {
+		sclp_vt220_process_queue(vt220_request);
+		return;
+	}
+	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
+
+	/* Check SCLP response code and choose suitable action */
+	switch (sccb->header.response_code) {
+	case 0x0020 :
+		/* Normal completion */
+		break;
+
+	case 0x05f0: /* Target resource in improper state */
+		break;
+
+	case 0x0340: /* Contained SCLP equipment check */
+		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
+			break;
+		/* Remove processed buffers and requeue rest */
+		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
+			/* Not all buffers were processed */
+			sccb->header.response_code = 0x0000;
+			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
+			/* On successful requeue the callback fires again. */
+			if (sclp_add_request(request) == 0)
+				return;
+		}
+		break;
+
+	case 0x0040: /* SCLP equipment check */
+		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
+			break;
+		sccb->header.response_code = 0x0000;
+		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
+		/* Retry the whole request; callback fires again on requeue. */
+		if (sclp_add_request(request) == 0)
+			return;
+		break;
+
+	default:
+		break;
+	}
+	sclp_vt220_process_queue(vt220_request);
+}
+
+/*
+ * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
+ * otherwise.  Fails with -EIO when the SCLP does not currently accept
+ * VT220 messages (send mask not set).
+ */
+static int
+__sclp_vt220_emit(struct sclp_vt220_request *request)
+{
+	if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) {
+		request->sclp_req.status = SCLP_REQ_FAILED;
+		return -EIO;
+	}
+	request->sclp_req.command = SCLP_CMDW_WRITEDATA;
+	request->sclp_req.status = SCLP_REQ_FILLED;
+	request->sclp_req.callback = sclp_vt220_callback;
+	request->sclp_req.callback_data = (void *) request;
+
+	return sclp_add_request(&request->sclp_req);
+}
+
+/*
+ * Append the request to the output queue and, if it is the only queued
+ * request, start emitting it.  Requests queued behind an in-flight one
+ * are emitted later by sclp_vt220_process_queue() from the completion
+ * callback.  A failed immediate emit is retired through the same path.
+ */
+static void
+sclp_vt220_emit(struct sclp_vt220_request *request)
+{
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	list_add_tail(&request->list, &sclp_vt220_outqueue);
+	count = sclp_vt220_outqueue_count++;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	/* Emit only the first buffer immediately - callback takes care of
+	 * the rest */
+	if (count == 0 && __sclp_vt220_emit(request))
+		sclp_vt220_process_queue(request);
+}
+
+/*
+ * Queue and emit the current request buffer, but only if it actually
+ * contains data (an SCCB whose length equals the bare header size is
+ * empty).  Cancels a pending flush timer and clears the deferred-flush
+ * flag.  Safe to call when there is no current request.
+ */
+static void
+sclp_vt220_emit_current(void)
+{
+	unsigned long flags;
+	struct sclp_vt220_request *request;
+	struct sclp_vt220_sccb *sccb;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	request = NULL;
+	if (sclp_vt220_current_request != NULL) {
+		sccb = (struct sclp_vt220_sccb *)
+				sclp_vt220_current_request->sclp_req.sccb;
+		/* Only emit buffers with content */
+		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
+			request = sclp_vt220_current_request;
+			sclp_vt220_current_request = NULL;
+			if (timer_pending(&sclp_vt220_timer))
+				del_timer(&sclp_vt220_timer);
+		}
+		sclp_vt220_flush_later = 0;
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	/* Emit outside the lock - sclp_vt220_emit takes it itself. */
+	if (request != NULL)
+		sclp_vt220_emit(request);
+}
+
+#define SCLP_NORMAL_WRITE 0x00
+
+/*
+ * Helper function to initialize a page with the sclp request structure.
+ * Layout: the SCCB (header + event buffer) is placed at the start of
+ * the page, the request bookkeeping structure at its very end; the
+ * space in between is the message payload area.  Returns a pointer to
+ * the embedded request structure.
+ */
+static struct sclp_vt220_request *
+sclp_vt220_initialize_page(void *page)
+{
+	struct sclp_vt220_request *request;
+	struct sclp_vt220_sccb *sccb;
+
+	/* Place request structure at end of page */
+	request = ((struct sclp_vt220_request *)
+			((addr_t) page + PAGE_SIZE)) - 1;
+	request->retry_count = 0;
+	request->sclp_req.sccb = page;
+	/* SCCB goes at start of page */
+	sccb = (struct sclp_vt220_sccb *) page;
+	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
+	sccb->header.length = sizeof(struct sclp_vt220_sccb);
+	sccb->header.function_code = SCLP_NORMAL_WRITE;
+	sccb->header.response_code = 0x0000;
+	sccb->evbuf.type = EvTyp_VT220Msg;
+	sccb->evbuf.length = sizeof(struct evbuf_header);
+
+	return request;
+}
+
+/* Number of payload bytes that still fit into the request's page
+ * (page size minus the trailing request structure minus the SCCB
+ * bytes already used). */
+static inline unsigned int
+sclp_vt220_space_left(struct sclp_vt220_request *request)
+{
+	struct sclp_vt220_sccb *sccb;
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
+	       sccb->header.length;
+}
+
+/* Number of message bytes already stored in the request's event
+ * buffer (event buffer length minus its header). */
+static inline unsigned int
+sclp_vt220_chars_stored(struct sclp_vt220_request *request)
+{
+	struct sclp_vt220_sccb *sccb;
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	return sccb->evbuf.length - sizeof(struct evbuf_header);
+}
+
+/*
+ * Add msg to buffer associated with request. Return the number of
+ * INPUT characters consumed (which may be less than 'count' when the
+ * buffer fills up; with convertlf the expansion 0x0a -> 0x0a 0x0d may
+ * stop the copy early).  Updates SCCB and event buffer lengths.
+ */
+static int
+sclp_vt220_add_msg(struct sclp_vt220_request *request,
+		   const unsigned char *msg, int count, int convertlf)
+{
+	struct sclp_vt220_sccb *sccb;
+	void *buffer;
+	unsigned char c;
+	int from;
+	int to;
+
+	/* Clamp to the space still available in this page */
+	if (count > sclp_vt220_space_left(request))
+		count = sclp_vt220_space_left(request);
+	if (count <= 0)
+		return 0;
+
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	buffer = (void *) ((addr_t) sccb + sccb->header.length);
+
+	if (convertlf) {
+		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
+		for (from=0, to=0;
+		     (from < count) && (to < sclp_vt220_space_left(request));
+		     from++) {
+			/* Retrieve character */
+			c = msg[from];
+			/* Perform conversion */
+			if (c == 0x0a) {
+				/* Need room for both LF and CR */
+				if (to + 1 < sclp_vt220_space_left(request)) {
+					((unsigned char *) buffer)[to++] = c;
+					((unsigned char *) buffer)[to++] = 0x0d;
+				} else
+					break;
+
+			} else
+				((unsigned char *) buffer)[to++] = c;
+		}
+		sccb->header.length += to;
+		sccb->evbuf.length += to;
+		return from;
+	} else {
+		memcpy(buffer, (const void *) msg, count);
+		sccb->header.length += count;
+		sccb->evbuf.length += count;
+		return count;
+	}
+}
+
+/*
+ * Emit buffer after having waited long enough for more data to arrive.
+ */
+static void
+sclp_vt220_timeout(unsigned long data)
+{
+ sclp_vt220_emit_current();
+}
+
+#define BUFFER_MAX_DELAY HZ/2
+
+/*
+ * Internal implementation of the write function. Write COUNT bytes of data
+ * from memory at BUF
+ * to the SCLP interface. In case that the data does not fit into the current
+ * write buffer, emit the current one and allocate a new one. If there are no
+ * more empty buffers available, wait until one gets emptied. If DO_SCHEDULE
+ * is non-zero, the buffer will be scheduled for emitting after a timeout -
+ * otherwise the user has to explicitly call the flush function.
+ * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
+ * buffer should be converted to 0x0a 0x0d. After completion, return the number
+ * of bytes written.
+ *
+ * NOTE(review): the spinlock is dropped while waiting for an empty
+ * buffer and while emitting a full one, so concurrent writers may
+ * interleave their output at buffer boundaries.
+ */
+static int
+__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
+		   int convertlf)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+	int overall_written;
+
+	if (count <= 0)
+		return 0;
+	overall_written = 0;
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	do {
+		/* Create a sclp output buffer if none exists yet */
+		if (sclp_vt220_current_request == NULL) {
+			while (list_empty(&sclp_vt220_empty)) {
+				spin_unlock_irqrestore(&sclp_vt220_lock,
+						       flags);
+				/* In interrupt context we cannot sleep -
+				 * busy-wait for SCLP completion instead. */
+				if (in_interrupt())
+					sclp_sync_wait();
+				else
+					wait_event(sclp_vt220_waitq,
+						!list_empty(&sclp_vt220_empty));
+				spin_lock_irqsave(&sclp_vt220_lock, flags);
+			}
+			page = (void *) sclp_vt220_empty.next;
+			list_del((struct list_head *) page);
+			sclp_vt220_current_request =
+				sclp_vt220_initialize_page(page);
+		}
+		/* Try to write the string to the current request buffer */
+		written = sclp_vt220_add_msg(sclp_vt220_current_request,
+					     buf, count, convertlf);
+		overall_written += written;
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_vt220_emit_current();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+		buf += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after some time */
+	if (sclp_vt220_current_request != NULL &&
+	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
+		sclp_vt220_timer.function = sclp_vt220_timeout;
+		sclp_vt220_timer.data = 0UL;
+		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
+		add_timer(&sclp_vt220_timer);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return overall_written;
+}
+
+/*
+ * This routine is called by the kernel to write a series of
+ * characters to the tty device. The characters may come from
+ * user space or kernel space. This routine will return the
+ * number of characters actually accepted for writing.
+ */
+static int
+sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ return __sclp_vt220_write(buf, count, 1, 0);
+}
+
+#define SCLP_VT220_SESSION_ENDED 0x01
+#define SCLP_VT220_SESSION_STARTED 0x80
+#define SCLP_VT220_SESSION_DATA 0x00
+
+/*
+ * Called by the SCLP to report incoming event buffers.  The first
+ * payload byte is a session indicator; only SESSION_DATA carries
+ * terminal input, which is pushed into the tty flip buffer.  Input is
+ * silently dropped while the device is not open and truncated when it
+ * would overrun the flip buffer.
+ */
+static void
+sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
+{
+	char *buffer;
+	unsigned int count;
+
+	/* Ignore input if device is not open */
+	if (sclp_vt220_tty == NULL)
+		return;
+
+	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
+	count = evbuf->length - sizeof(struct evbuf_header);
+
+	switch (*buffer) {
+	case SCLP_VT220_SESSION_ENDED:
+	case SCLP_VT220_SESSION_STARTED:
+		break;
+	case SCLP_VT220_SESSION_DATA:
+		/* Send input to line discipline */
+		buffer++;
+		count--;
+		/* Prevent buffer overrun by discarding input. Note that
+		 * because buffer_push works asynchronously, we cannot wait
+		 * for the buffer to be emptied. */
+		if (count + sclp_vt220_tty->flip.count > TTY_FLIPBUF_SIZE)
+			count = TTY_FLIPBUF_SIZE - sclp_vt220_tty->flip.count;
+		memcpy(sclp_vt220_tty->flip.char_buf_ptr, buffer, count);
+		memset(sclp_vt220_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
+		sclp_vt220_tty->flip.char_buf_ptr += count;
+		sclp_vt220_tty->flip.flag_buf_ptr += count;
+		sclp_vt220_tty->flip.count += count;
+		tty_flip_buffer_push(sclp_vt220_tty);
+		break;
+	}
+}
+
+/*
+ * This routine is called when a particular tty device is opened.  On
+ * the first open a work buffer of SCLP_VT220_BUF_SIZE bytes is
+ * allocated and the global sclp_vt220_tty reference is set, which
+ * enables input delivery in sclp_vt220_receiver_fn().
+ */
+static int
+sclp_vt220_open(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count == 1) {
+		sclp_vt220_tty = tty;
+		tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL);
+		if (tty->driver_data == NULL) {
+			/* Bug fix: clear the global reference on failure.
+			 * close() is not called for a failed open, so the
+			 * stale pointer would otherwise let the receiver
+			 * callback feed input to a never-opened tty. */
+			sclp_vt220_tty = NULL;
+			return -ENOMEM;
+		}
+		tty->low_latency = 0;
+	}
+	return 0;
+}
+
+/*
+ * This routine is called when a particular tty device is closed.  On
+ * the last close the global sclp_vt220_tty reference is cleared
+ * (disabling input delivery) and the work buffer is freed.
+ */
+static void
+sclp_vt220_close(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count == 1) {
+		sclp_vt220_tty = NULL;
+		kfree(tty->driver_data);
+		tty->driver_data = NULL;
+	}
+}
+
+/*
+ * This routine is called by the kernel to write a single
+ * character to the tty device. If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver.
+ *
+ * NOTE: include/linux/tty_driver.h specifies that a character should be
+ * ignored if there is no room in the queue. This driver implements a different
+ * semantic in that it will block when there is no more room left.
+ */
+static void
+sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ __sclp_vt220_write(&ch, 1, 0, 0);
+}
+
+/*
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using put_char().  If no
+ * request is currently in flight the buffered data is emitted right
+ * away; otherwise the flush is deferred and performed by
+ * sclp_vt220_process_queue() once the queue drains.
+ */
+static void
+sclp_vt220_flush_chars(struct tty_struct *tty)
+{
+	if (sclp_vt220_outqueue_count == 0)
+		sclp_vt220_emit_current();
+	else
+		sclp_vt220_flush_later = 1;
+}
+
+/*
+ * This routine returns the numbers of characters the tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if the output flow
+ * control is acted.
+ */
+static int
+sclp_vt220_write_room(struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct list_head *l;
+ int count;
+
+ spin_lock_irqsave(&sclp_vt220_lock, flags);
+ count = 0;
+ if (sclp_vt220_current_request != NULL)
+ count = sclp_vt220_space_left(sclp_vt220_current_request);
+ list_for_each(l, &sclp_vt220_empty)
+ count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+ return count;
+}
+
+/*
+ * Return number of buffered chars.
+ */
+static int
+sclp_vt220_chars_in_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct list_head *l;
+ struct sclp_vt220_request *r;
+ int count;
+
+ spin_lock_irqsave(&sclp_vt220_lock, flags);
+ count = 0;
+ if (sclp_vt220_current_request != NULL)
+ count = sclp_vt220_chars_stored(sclp_vt220_current_request);
+ list_for_each(l, &sclp_vt220_outqueue) {
+ r = list_entry(l, struct sclp_vt220_request, list);
+ count += sclp_vt220_chars_stored(r);
+ }
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+ return count;
+}
+
+/*
+ * Blocking flush: emit the current buffer, cancel the flush timer and
+ * busy-wait (sclp_sync_wait) until the output queue is empty.  Used
+ * from the console unblank path and therefore safe in interrupt
+ * context.
+ */
+static void
+__sclp_vt220_flush_buffer(void)
+{
+	unsigned long flags;
+
+	sclp_vt220_emit_current();
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	if (timer_pending(&sclp_vt220_timer))
+		del_timer(&sclp_vt220_timer);
+	while (sclp_vt220_outqueue_count > 0) {
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
+/*
+ * tty flush_buffer callback.  Note: despite the original comment this
+ * does NOT wait until all buffers are drained - it merely queues the
+ * current buffer for output.  The blocking variant is
+ * __sclp_vt220_flush_buffer() above.
+ */
+static void
+sclp_vt220_flush_buffer(struct tty_struct *tty)
+{
+	sclp_vt220_emit_current();
+}
+
+/*
+ * Initialize all relevant components and register driver with system.
+ * 'early' selects bootmem allocation (console initialization, before
+ * the page allocator is up) vs. get_zeroed_page.  Idempotent: a second
+ * call returns 0 without re-initializing, so console and tty init can
+ * share the same state.
+ *
+ * NOTE(review): on -ENOMEM the pages already placed on
+ * sclp_vt220_empty are not released.  Freeing them here would be
+ * unsafe anyway once the early console shares this state; confirm
+ * before changing.
+ */
+static int
+__sclp_vt220_init(int early)
+{
+	void *page;
+	int i;
+
+	if (sclp_vt220_initialized)
+		return 0;
+	sclp_vt220_initialized = 1;
+	spin_lock_init(&sclp_vt220_lock);
+	INIT_LIST_HEAD(&sclp_vt220_empty);
+	INIT_LIST_HEAD(&sclp_vt220_outqueue);
+	init_waitqueue_head(&sclp_vt220_waitq);
+	init_timer(&sclp_vt220_timer);
+	sclp_vt220_current_request = NULL;
+	sclp_vt220_buffered_chars = 0;
+	sclp_vt220_outqueue_count = 0;
+	sclp_vt220_tty = NULL;
+	sclp_vt220_flush_later = 0;
+
+	/* Allocate pages for output buffering */
+	for (i = 0; i < (early ? MAX_CONSOLE_PAGES : MAX_KMEM_PAGES); i++) {
+		if (early)
+			page = alloc_bootmem_low_pages(PAGE_SIZE);
+		else
+			page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!page)
+			return -ENOMEM;
+		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+	}
+	return 0;
+}
+
+static struct tty_operations sclp_vt220_ops = {
+ .open = sclp_vt220_open,
+ .close = sclp_vt220_close,
+ .write = sclp_vt220_write,
+ .put_char = sclp_vt220_put_char,
+ .flush_chars = sclp_vt220_flush_chars,
+ .write_room = sclp_vt220_write_room,
+ .chars_in_buffer = sclp_vt220_chars_in_buffer,
+ .flush_buffer = sclp_vt220_flush_buffer
+};
+
+/*
+ * Register driver with SCLP and Linux and initialize internal tty
+ * structures.  Returns 0 on success or a negative error code.
+ *
+ * NOTE(review): on tty_register_driver failure the SCLP registration
+ * and the buffer pages from __sclp_vt220_init() are not unwound.  The
+ * pages may legitimately be shared with the already-registered early
+ * console, so a naive cleanup would be wrong - verify before fixing.
+ */
+int __init
+sclp_vt220_tty_init(void)
+{
+	struct tty_driver *driver;
+	int rc;
+
+	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
+	 * symmetry between VM and LPAR systems regarding ttyS1. */
+	driver = alloc_tty_driver(1);
+	if (!driver)
+		return -ENOMEM;
+	rc = __sclp_vt220_init(0);
+	if (rc) {
+		put_tty_driver(driver);
+		return rc;
+	}
+	rc = sclp_register(&sclp_vt220_register);
+	if (rc) {
+		printk(KERN_ERR SCLP_VT220_PRINT_HEADER
+		       "could not register tty - "
+		       "sclp_register returned %d\n", rc);
+		put_tty_driver(driver);
+		return rc;
+	}
+
+	driver->owner = THIS_MODULE;
+	driver->driver_name = SCLP_VT220_DRIVER_NAME;
+	driver->name = SCLP_VT220_DEVICE_NAME;
+	driver->major = SCLP_VT220_MAJOR;
+	driver->minor_start = SCLP_VT220_MINOR;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &sclp_vt220_ops);
+
+	rc = tty_register_driver(driver);
+	if (rc) {
+		printk(KERN_ERR SCLP_VT220_PRINT_HEADER
+		       "could not register tty - "
+		       "tty_register_driver returned %d\n", rc);
+		put_tty_driver(driver);
+		return rc;
+	}
+	sclp_vt220_driver = driver;
+	return 0;
+}
+
+module_init(sclp_vt220_tty_init);
+
+#ifdef CONFIG_SCLP_VT220_CONSOLE
+
+static void
+sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
+{
+ __sclp_vt220_write((const unsigned char *) buf, count, 1, 1);
+}
+
+static struct tty_driver *
+sclp_vt220_con_device(struct console *c, int *index)
+{
+ *index = 0;
+ return sclp_vt220_driver;
+}
+
+/*
+ * This routine is called from panic when the kernel is going to give up.
+ * We have to make sure that all buffers will be flushed to the SCLP.
+ * Note that this function may be called from within an interrupt context.
+ */
+static void
+sclp_vt220_con_unblank(void)
+{
+ __sclp_vt220_flush_buffer();
+}
+
+/* Structure needed to register with printk */
+static struct console sclp_vt220_console =
+{
+ .name = SCLP_VT220_CONSOLE_NAME,
+ .write = sclp_vt220_con_write,
+ .device = sclp_vt220_con_device,
+ .unblank = sclp_vt220_con_unblank,
+ .flags = CON_PRINTBUFFER,
+ .index = SCLP_VT220_CONSOLE_INDEX
+};
+
+static int __init
+sclp_vt220_con_init(void)
+{
+ int rc;
+
+ if (!CONSOLE_IS_SCLP)
+ return 0;
+ rc = __sclp_vt220_init(1);
+ if (rc)
+ return rc;
+ /* Attach linux console */
+ register_console(&sclp_vt220_console);
+ return 0;
+}
+
+console_initcall(sclp_vt220_con_init);
+#endif /* CONFIG_SCLP_VT220_CONSOLE */
+
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
new file mode 100644
index 000000000000..d04e6c2c3cc1
--- /dev/null
+++ b/drivers/s390/char/tape.h
@@ -0,0 +1,384 @@
+/*
+ * drivers/s390/char/tape.h
+ * tape device driver for 3480/3490E/3590 tapes.
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _TAPE_H
+#define _TAPE_H
+
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtio.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct gendisk;
+
+/*
+ * Define DBF_LIKE_HELL for lots of messages in the debug feature.
+ * DBF_LH() then logs verbose events to the driver's s390 debug area;
+ * otherwise it compiles to nothing.
+ */
+#define DBF_LIKE_HELL
+#ifdef DBF_LIKE_HELL
+#define DBF_LH(level, str, ...) \
+do { \
+	debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
+} while (0)
+#else
+#define DBF_LH(level, str, ...) do {} while(0)
+#endif
+
+/*
+ * macros s390 debug feature (dbf)
+ * DBF_EVENT logs normal events, DBF_EXCEPTION logs exceptional ones;
+ * both write into the per-driver TAPE_DBF_AREA debug area.
+ */
+#define DBF_EVENT(d_level, d_str...) \
+do { \
+	debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
+} while (0)
+
+#define DBF_EXCEPTION(d_level, d_str...) \
+do { \
+	debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
+} while (0)
+
+/* Driver version and magic string. */
+#define TAPE_VERSION_MAJOR 2
+#define TAPE_VERSION_MINOR 0
+#define TAPE_MAGIC "tape"
+
+#define TAPE_MINORS_PER_DEV 2	/* two minors per device */
+#define TAPEBLOCK_HSEC_SIZE	2048
+#define TAPEBLOCK_HSEC_S2B	2
+#define TAPEBLOCK_RETRIES	5
+
+/* State of the medium (cartridge) in the drive. */
+enum tape_medium_state {
+	MS_UNKNOWN,
+	MS_LOADED,
+	MS_UNLOADED,
+	MS_SIZE
+};
+
+/* State of the tape device itself. *_SIZE entries count the enum. */
+enum tape_state {
+	TS_UNUSED=0,
+	TS_IN_USE,
+	TS_BLKUSE,
+	TS_INIT,
+	TS_NOT_OPER,
+	TS_SIZE
+};
+
+/* Operation codes for tape CCW requests (see tape_op_verbose[]). */
+enum tape_op {
+	TO_BLOCK,	/* Block read */
+	TO_BSB,		/* Backward space block */
+	TO_BSF,		/* Backward space filemark */
+	TO_DSE,		/* Data security erase */
+	TO_FSB,		/* Forward space block */
+	TO_FSF,		/* Forward space filemark */
+	TO_LBL,		/* Locate block label */
+	TO_NOP,		/* No operation */
+	TO_RBA,		/* Read backward */
+	TO_RBI,		/* Read block information */
+	TO_RFO,		/* Read forward */
+	TO_REW,		/* Rewind tape */
+	TO_RUN,		/* Rewind and unload tape */
+	TO_WRI,		/* Write block */
+	TO_WTM,		/* Write tape mark */
+	TO_MSEN,	/* Medium sense */
+	TO_LOAD,	/* Load tape */
+	TO_READ_CONFIG,	/* Read configuration data */
+	TO_READ_ATTMSG,	/* Read attention message */
+	TO_DIS,		/* Tape display */
+	TO_ASSIGN,	/* Assign tape to channel path */
+	TO_UNASSIGN,	/* Unassign tape from channel path */
+	TO_SIZE		/* #entries in tape_op_t */
+};
+
+/* Forward declaration */
+struct tape_device;
+
+/* tape_request->status can be: */
+enum tape_request_status {
+	TAPE_REQUEST_INIT,	/* request is ready to be processed */
+	TAPE_REQUEST_QUEUED,	/* request is queued to be processed */
+	TAPE_REQUEST_IN_IO,	/* request is currently in IO */
+	TAPE_REQUEST_DONE,	/* request is completed. */
+};
+
+/*
+ * Tape CCW request: one channel program plus bookkeeping, queued on
+ * tape_device->req_queue. Allocated/freed via tape_alloc_request()/
+ * tape_free_request().
+ */
+struct tape_request {
+	struct list_head list;		/* list head for request queueing. */
+	struct tape_device *device;	/* tape device of this request */
+	struct ccw1 *cpaddr;		/* address of the channel program. */
+	void *cpdata;			/* pointer to ccw data. */
+	enum tape_request_status status;/* status of this request */
+	int options;			/* options for execution. */
+	int retries;			/* retry counter for error recovery. */
+	int rescnt;			/* residual count from devstat. */
+
+	/* Callback for delivering final status. */
+	void (*callback)(struct tape_request *, void *);
+	void *callback_data;
+
+	enum tape_op op;		/* operation this request performs */
+	int rc;				/* final return code of the request */
+};
+
+/* Function type for magnetic tape commands */
+typedef int (*tape_mtop_fn)(struct tape_device *, int);
+
+/* Size of the array containing the mtops for a discipline */
+#define TAPE_NR_MTOPS (MTMKPART+1)
+
+/*
+ * Tape Discipline: per-device-type (e.g. 3480/3490) operations table.
+ * A discipline implements device setup/teardown, interrupt handling,
+ * block I/O channel-program construction and the mt operations.
+ */
+struct tape_discipline {
+	struct module *owner;
+	int  (*setup_device)(struct tape_device *);
+	void (*cleanup_device)(struct tape_device *);
+	int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
+	struct tape_request *(*read_block)(struct tape_device *, size_t);
+	struct tape_request *(*write_block)(struct tape_device *, size_t);
+	void (*process_eov)(struct tape_device*);
+#ifdef CONFIG_S390_TAPE_BLOCK
+	/* Block device stuff. */
+	struct tape_request *(*bread)(struct tape_device *, struct request *);
+	void (*check_locate)(struct tape_device *, struct tape_request *);
+	void (*free_bread)(struct tape_request *);
+#endif
+	/* ioctl function for additional ioctls. */
+	int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
+	/* Array of tape commands with TAPE_NR_MTOPS entries */
+	tape_mtop_fn *mtop_array;
+};
+
+/*
+ * The discipline irq function either returns an error code (<0) which
+ * means that the request has failed with an error or one of the following:
+ */
+#define TAPE_IO_SUCCESS 0	/* request successful */
+#define TAPE_IO_PENDING 1	/* request still running */
+#define TAPE_IO_RETRY	2	/* retry to current request */
+#define TAPE_IO_STOP	3	/* stop the running request */
+
+/* Char Frontend Data */
+struct tape_char_data {
+	struct idal_buffer *idal_buf;	/* idal buffer for user char data */
+	int block_size;			/*   of size block_size. */
+};
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+/* Block Frontend Data: state for presenting the tape as a block device. */
+struct tape_blk_data
+{
+	/* Block device request queue. */
+	request_queue_t *	request_queue;
+	spinlock_t		request_queue_lock;
+
+	/* Task to move entries from block request to CCS request queue. */
+	struct work_struct	requeue_task;
+	atomic_t		requeue_scheduled;
+
+	/* Current position on the tape. */
+	long			block_position;
+	int			medium_changed;
+	struct gendisk *	disk;
+};
+#endif
+
+/*
+ * Tape Info: one instance per tape drive; the central per-device
+ * structure tying together the ccw device, its discipline, the request
+ * queue and the char/block frontends. Lifetime is managed through
+ * ref_count (tape_get_device_reference()/tape_put_device()).
+ */
+struct tape_device {
+	/* entry in tape_device_list */
+	struct list_head		node;
+
+	int				cdev_id;
+	struct ccw_device *		cdev;
+	struct tape_class_device *	nt;
+	struct tape_class_device *	rt;
+
+	/* Device discipline information. */
+	struct tape_discipline *	discipline;
+	void *				discdata;
+
+	/* Generic status flags */
+	long				tape_generic_status;
+
+	/* Device state information. */
+	wait_queue_head_t		state_change_wq;
+	enum tape_state			tape_state;
+	enum tape_medium_state		medium_state;
+	unsigned char *			modeset_byte;
+
+	/* Reference count. */
+	atomic_t			ref_count;
+
+	/* Request queue. */
+	struct list_head		req_queue;
+
+	/* Each tape device has (currently) two minor numbers. */
+	int				first_minor;
+
+	/* Number of tapemarks required for correct termination. */
+	int				required_tapemarks;
+
+	/* Block ID of the BOF */
+	unsigned int			bof;
+
+	/* Character device frontend data */
+	struct tape_char_data		char_data;
+#ifdef CONFIG_S390_TAPE_BLOCK
+	/* Block dev frontend data */
+	struct tape_blk_data		blk_data;
+#endif
+};
+
+/* Externals from tape_core.c */
+extern struct tape_request *tape_alloc_request(int cplength, int datasize);
+extern void tape_free_request(struct tape_request *);
+extern int tape_do_io(struct tape_device *, struct tape_request *);
+extern int tape_do_io_async(struct tape_device *, struct tape_request *);
+extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
+void tape_hotplug_event(struct tape_device *, int major, int action);
+
+/*
+ * Run a request synchronously and free it afterwards. The request is
+ * consumed unconditionally — it is freed whether the I/O succeeded or
+ * failed, so the caller must not touch it after this call.
+ * Returns the result of tape_do_io().
+ */
+static inline int
+tape_do_io_free(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	rc = tape_do_io(device, request);
+	tape_free_request(request);
+	return rc;
+}
+
+extern int tape_oper_handler(int irq, int status);
+extern void tape_noper_handler(int irq, int status);
+extern int tape_open(struct tape_device *);
+extern int tape_release(struct tape_device *);
+extern int tape_mtop(struct tape_device *, int, int);
+extern void tape_state_set(struct tape_device *, enum tape_state);
+
+extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
+extern int tape_generic_offline(struct tape_device *device);
+
+/* Externals from tape_devmap.c */
+extern int tape_generic_probe(struct ccw_device *);
+extern void tape_generic_remove(struct ccw_device *);
+
+extern struct tape_device *tape_get_device(int devindex);
+extern struct tape_device *tape_get_device_reference(struct tape_device *);
+extern struct tape_device *tape_put_device(struct tape_device *);
+
+/* Externals from tape_char.c */
+extern int tapechar_init(void);
+extern void tapechar_exit(void);
+extern int tapechar_setup_device(struct tape_device *);
+extern void tapechar_cleanup_device(struct tape_device *);
+
+/* Externals from tape_block.c */
+#ifdef CONFIG_S390_TAPE_BLOCK
+extern int tapeblock_init (void);
+extern void tapeblock_exit(void);
+extern int tapeblock_setup_device(struct tape_device *);
+extern void tapeblock_cleanup_device(struct tape_device *);
+#else
+static inline int tapeblock_init (void) {return 0;}
+static inline void tapeblock_exit (void) {;}
+static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
+static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
+#endif
+
+/* tape initialisation functions */
+#ifdef CONFIG_PROC_FS
+extern void tape_proc_init (void);
+extern void tape_proc_cleanup (void);
+#else
+static inline void tape_proc_init (void) {;}
+static inline void tape_proc_cleanup (void) {;}
+#endif
+
+/* a function for dumping device sense info */
+extern void tape_dump_sense(struct tape_device *, struct tape_request *,
+ struct irb *);
+extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
+ struct irb *);
+
+/* functions for handling the status of a device */
+extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
+
+/* The debug area */
+extern debug_info_t *TAPE_DBF_AREA;
+
+/* functions for building ccws */
+/*
+ * Fill one CCW with command chaining set (CCW_FLAG_CC: the channel
+ * continues with the next CCW) and a direct data address.
+ * Returns a pointer to the next free CCW slot.
+ */
+static inline struct ccw1 *
+tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = CCW_FLAG_CC;
+	ccw->count = memsize;
+	ccw->cda = (__u32)(addr_t) cda;
+	return ccw + 1;
+}
+
+/*
+ * Fill one CCW with no chaining flags — this terminates the channel
+ * program. Returns a pointer to the slot after this CCW.
+ */
+static inline struct ccw1 *
+tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	ccw->count = memsize;
+	ccw->cda = (__u32)(addr_t) cda;
+	return ccw + 1;
+}
+
+/*
+ * Fill one terminating CCW that transfers no data (count = 0). The cda
+ * is pointed at the CCW's own cmd_code byte — presumably just a valid
+ * non-zero dummy address, since no data is moved (count is 0).
+ * Returns a pointer to the slot after this CCW.
+ */
+static inline struct ccw1 *
+tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	ccw->count = 0;
+	ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+	return ccw + 1;
+}
+
+/*
+ * Fill `count` consecutive CCW slots with the same chained command.
+ * No data is transferred (count field 0); each cda points at the CCW's
+ * own cmd_code byte as a dummy address. Returns the slot following the
+ * last CCW written (the input pointer unchanged if count <= 0).
+ */
+static inline struct ccw1 *
+tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
+{
+	for (; count > 0; count--, ccw++) {
+		ccw->cmd_code = cmd_code;
+		ccw->flags = CCW_FLAG_CC;
+		ccw->count = 0;
+		ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+	}
+	return ccw;
+}
+
+/*
+ * Fill one command-chained CCW whose data is described by an IDAL
+ * (indirect data address list) buffer, and return a pointer to the
+ * next free CCW slot.
+ *
+ * Fix: this previously ended with "return ccw++;" — a dead
+ * post-increment on the by-value parameter, so the *current* slot was
+ * returned, unlike the sibling tape_ccw_cc()/tape_ccw_end() helpers
+ * which return ccw + 1. Callers in this driver appear to index their
+ * channel programs explicitly rather than chain on this return value;
+ * verify any new caller before relying on it.
+ */
+static inline struct ccw1 *
+tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = CCW_FLAG_CC;
+	idal_buffer_set_cda(idal, ccw);
+	return ccw + 1;
+}
+
+/*
+ * Fill one terminating (unchained) CCW whose data is described by an
+ * IDAL buffer, and return a pointer to the next free CCW slot.
+ *
+ * Fix: this previously ended with "return ccw++;" — a dead
+ * post-increment on the by-value parameter that returned the *current*
+ * slot, inconsistent with tape_ccw_end() which returns ccw + 1.
+ * Callers in this driver appear to index their channel programs
+ * explicitly rather than chain on this return value; verify any new
+ * caller before relying on it.
+ */
+static inline struct ccw1 *
+tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	idal_buffer_set_cda(idal, ccw);
+	return ccw + 1;
+}
+
+/* Global vars */
+extern const char *tape_state_verbose[];
+extern const char *tape_op_verbose[];
+
+#endif /* for ifdef tape.h */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
new file mode 100644
index 000000000000..480ec87976fb
--- /dev/null
+++ b/drivers/s390/char/tape_34xx.c
@@ -0,0 +1,1385 @@
+/*
+ * drivers/s390/char/tape_34xx.c
+ * tape device discipline for 3480/3490 tapes.
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/workqueue.h>
+
+#define TAPE_DBF_AREA tape_34xx_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+#define PRINTK_HEADER "TAPE_34XX: "
+
+/*
+ * Pointer to debug area. Defined per compilation unit: tape.h's DBF_*
+ * macros log into whatever TAPE_DBF_AREA expands to (tape_34xx_dbf here).
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+/* Device type, matched against cdev->id.driver_info in the ERP code. */
+enum tape_34xx_type {
+	tape_3480,
+	tape_3490,
+};
+
+/* Values of the 2-bit `format` field in struct tape_34xx_block_id. */
+#define TAPE34XX_FMT_3480	0
+#define TAPE34XX_FMT_3480_2_XF	1
+#define TAPE34XX_FMT_3480_XF	2
+
+/* Hardware block-id layout (32 bits total, bitfield packed). */
+struct tape_34xx_block_id {
+	unsigned int	wrap		: 1;
+	unsigned int	segment		: 7;
+	unsigned int	format		: 2;
+	unsigned int	block		: 22;
+};
+
+/*
+ * A list of block ID's is used to faster seek blocks.
+ */
+struct tape_34xx_sbid {
+	struct list_head	list;
+	struct tape_34xx_block_id	bid;
+};
+
+static void tape_34xx_delete_sbid_from(struct tape_device *, int);
+
+static void tape_34xx_delete_sbid_from(struct tape_device *, int);
+
+/*
+ * Medium sense for 34xx tapes. There is no 'real' medium sense call.
+ * So we just do a normal sense: issue a 32-byte SENSE CCW and derive
+ * the medium state (loaded/unloaded) and write-protect status from the
+ * sense bytes. Returns the rc of tape_do_io_interruptible(); the sense
+ * data is only evaluated when the request itself completed with rc 0.
+ */
+static int
+tape_34xx_medium_sense(struct tape_device *device)
+{
+	struct tape_request *request;
+	unsigned char *sense;
+	int rc;
+
+	/* One CCW, 32 bytes of cpdata to receive the sense. */
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return PTR_ERR(request);
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+
+	rc = tape_do_io_interruptible(device, request);
+	if (request->rc == 0) {
+		sense = request->cpdata;
+
+		/*
+		 * This isn't quite correct. But since INTERVENTION_REQUIRED
+		 * means that the drive is 'neither ready nor on-line' it is
+		 * only slightly inaccurate to say there is no tape loaded if
+		 * the drive isn't online...
+		 */
+		if (sense[0] & SENSE_INTERVENTION_REQUIRED)
+			tape_med_state_set(device, MS_UNLOADED);
+		else
+			tape_med_state_set(device, MS_LOADED);
+
+		/* Mirror the hardware write-protect bit into the
+		 * generic status flags (GMT_WR_PROT for mt ioctls). */
+		if (sense[1] & SENSE_WRITE_PROTECT)
+			device->tape_generic_status |= GMT_WR_PROT(~0);
+		else
+			device->tape_generic_status &= ~GMT_WR_PROT(~0);
+	} else {
+		DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
+			request->rc);
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * These functions are currently used only to schedule a medium_sense for
+ * later execution. This is because we get an interrupt whenever a medium
+ * is inserted but cannot call tape_do_io* from an interrupt context.
+ * Maybe that's useful for other actions we want to start from the
+ * interrupt handler.
+ */
+static void
+tape_34xx_work_handler(void *data)
+{
+	/*
+	 * Anonymous payload struct — must stay layout-identical to the
+	 * one allocated in tape_34xx_schedule_work() below.
+	 */
+	struct {
+		struct tape_device	*device;
+		enum tape_op		 op;
+		struct work_struct	 work;
+	} *p = data;
+
+	switch(p->op) {
+	case TO_MSEN:
+		tape_34xx_medium_sense(p->device);
+		break;
+	default:
+		DBF_EVENT(3, "T34XX: internal error: unknown work\n");
+	}
+
+	/* Drop the reference taken by tape_34xx_schedule_work(). */
+	p->device = tape_put_device(p->device);
+	kfree(p);
+}
+
+/*
+ * Queue an operation (currently only TO_MSEN) for process-context
+ * execution by tape_34xx_work_handler(). GFP_ATOMIC because this may
+ * be called from the interrupt handler. Takes a device reference that
+ * the work handler releases. Returns 0 or -ENOMEM.
+ */
+static int
+tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
+{
+	/* Layout must match the struct in tape_34xx_work_handler(). */
+	struct {
+		struct tape_device	*device;
+		enum tape_op		 op;
+		struct work_struct	 work;
+	} *p;
+
+	if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
+		return -ENOMEM;
+
+	memset(p, 0, sizeof(*p));
+	INIT_WORK(&p->work, tape_34xx_work_handler, p);
+
+	p->device = tape_get_device_reference(device);
+	p->op = op;
+
+	schedule_work(&p->work);
+	return 0;
+}
+
+/*
+ * Done Handler is called when dev stat = DEVICE-END (successful operation)
+ * Operations that write to or reposition/unload the medium invalidate
+ * the cached seek block-ids, so drop them for those ops.
+ */
+static inline int
+tape_34xx_done(struct tape_request *request)
+{
+	enum tape_op op = request->op;
+
+	DBF_EVENT(6, "%s done\n", tape_op_verbose[op]);
+
+	if (op == TO_DSE || op == TO_RUN || op == TO_WRI ||
+	    op == TO_WTM || op == TO_ASSIGN || op == TO_UNASSIGN)
+		tape_34xx_delete_sbid_from(request->device, 0);
+
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * Terminal ERP outcome: log the failure and propagate rc (an -errno,
+ * or 0 for "failed but report zero bytes", e.g. read past end of data)
+ * as the final result of the request.
+ */
+static inline int
+tape_34xx_erp_failed(struct tape_request *request, int rc)
+{
+	DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
+		tape_op_verbose[request->op], rc);
+	return rc;
+}
+
+/*
+ * ERP outcome: the condition is recoverable and the operation can be
+ * treated as completed — log it and run the normal done handling.
+ */
+static inline int
+tape_34xx_erp_succeeded(struct tape_request *request)
+{
+	DBF_EVENT(3, "Error Recovery successful for %s\n",
+		tape_op_verbose[request->op]);
+	return tape_34xx_done(request);
+}
+
+/*
+ * ERP outcome: ask the core layer to reissue the current request
+ * (TAPE_IO_RETRY, see tape.h).
+ */
+static inline int
+tape_34xx_erp_retry(struct tape_request *request)
+{
+	DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]);
+	return TAPE_IO_RETRY;
+}
+
+/*
+ * This function is called, when no request is outstanding and we get an
+ * interrupt. A device status of 0x85 is taken to mean "medium became
+ * ready" (NOTE(review): 0x85 = attention + device end + unit exception
+ * by the usual s390 dstat bit values — confirm against the 3480/3490
+ * hardware reference); in that case cached block-ids are dropped and a
+ * medium sense is scheduled. Anything else is logged as unexpected.
+ */
+static int
+tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
+{
+	if (irb->scsw.dstat == 0x85 /* READY */) {
+		/* A medium was inserted in the drive. */
+		DBF_EVENT(6, "xuud med\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		/* Sense must run in process context, hence the work item. */
+		tape_34xx_schedule_work(device, TO_MSEN);
+	} else {
+		DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
+		PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
+		tape_dump_sense(device, NULL, irb);
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * Read Opposite Error Recovery Function:
+ * Used, when Read Forward does not work. Rewrites a failed TO_RFO
+ * request into a read-backward channel program and retries it; if the
+ * request was already a read-backward (TO_RBA), both directions have
+ * failed and the request is terminated with -EIO.
+ */
+static int
+tape_34xx_erp_read_opposite(struct tape_device *device,
+			    struct tape_request *request)
+{
+	if (request->op == TO_RFO) {
+		/*
+		 * We did read forward, but the data could not be read
+		 * *correctly*. We transform the request to a read backward
+		 * and try again.
+		 */
+		tape_std_read_backward(device, request);
+		return tape_34xx_erp_retry(request);
+	}
+	if (request->op != TO_RBA)
+		/* Only read ops should ever land here. */
+		PRINT_ERR("read_opposite called with state:%s\n",
+			  tape_op_verbose[request->op]);
+	/*
+	 * We tried to read forward and backward, but hat no
+	 * success -> failed.
+	 */
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * "Should never happen" ERP path: an unexpected sense/erpa combination
+ * (identified by `no` — either a negative internal id or the erpa code)
+ * was seen. Complain loudly, dump the sense data and fail the request
+ * with -EIO. ASSIGN requests stay quiet since unexpected conditions are
+ * apparently tolerated there.
+ *
+ * Fix: removed the dead "if (request)" NULL-guard around the operation
+ * printout — request->op is already dereferenced unconditionally both
+ * at function entry and in tape_34xx_erp_failed(), so request can never
+ * be NULL here and the guard was merely misleading.
+ */
+static int
+tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
+		  struct irb *irb, int no)
+{
+	if (request->op != TO_ASSIGN) {
+		PRINT_WARN("An unexpected condition #%d was caught in "
+			   "tape error recovery.\n", no);
+		PRINT_WARN("Please report this incident.\n");
+		PRINT_WARN("Operation of tape:%s\n",
+			   tape_op_verbose[request->op]);
+		tape_dump_sense(device, request, irb);
+	}
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * Handle data overrun between cu and drive. The channel speed might
+ * be too slow. Only erpa code 0x40 identifies a genuine overrun; the
+ * overrun sense bit without it is treated as a driver bug.
+ */
+static int
+tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
+		      struct irb *irb)
+{
+	if (irb->ecw[3] != 0x40)
+		/* Overrun bit set but erpa does not confirm it. */
+		return tape_34xx_erp_bug(device, request, irb, -1);
+
+	PRINT_WARN("Data overrun error between control-unit "
+		   "and drive. Use a faster channel connection, "
+		   "if possible! \n");
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * Handle record sequence error. Erpa code 0x41 confirms that the
+ * control unit detected an incorrect block-id sequence on the tape;
+ * the sequence-error sense bit without that code is a driver bug.
+ */
+static int
+tape_34xx_erp_sequence(struct tape_device *device,
+		       struct tape_request *request, struct irb *irb)
+{
+	if (irb->ecw[3] != 0x41)
+		/*
+		 * Record sequence error bit is set, but erpa does not
+		 * show record sequence error.
+		 */
+		return tape_34xx_erp_bug(device, request, irb, -2);
+
+	/* cu detected incorrect block-id sequence on tape. */
+	PRINT_WARN("Illegal block-id sequence found!\n");
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * This function analyses the tape's sense-data in case of a unit-check.
+ * If possible, it tries to recover from the error. Else the user is
+ * informed about the problem.
+ */
+static int
+tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
+ struct irb *irb)
+{
+ int inhibit_cu_recovery;
+ __u8* sense;
+
+ inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
+ sense = irb->ecw;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+ if (request->op == TO_BLOCK) {
+ /*
+ * Recovery for block device requests. Set the block_position
+ * to something invalid and retry.
+ */
+ device->blk_data.block_position = -1;
+ if (request->retries-- <= 0)
+ return tape_34xx_erp_failed(request, -EIO);
+ else
+ return tape_34xx_erp_retry(request);
+ }
+#endif
+
+ if (
+ sense[0] & SENSE_COMMAND_REJECT &&
+ sense[1] & SENSE_WRITE_PROTECT
+ ) {
+ if (
+ request->op == TO_DSE ||
+ request->op == TO_WRI ||
+ request->op == TO_WTM
+ ) {
+ /* medium is write protected */
+ return tape_34xx_erp_failed(request, -EACCES);
+ } else {
+ return tape_34xx_erp_bug(device, request, irb, -3);
+ }
+ }
+
+ /*
+ * Special cases for various tape-states when reaching
+ * end of recorded area
+ *
+ * FIXME: Maybe a special case of the special case:
+ * sense[0] == SENSE_EQUIPMENT_CHECK &&
+ * sense[1] == SENSE_DRIVE_ONLINE &&
+ * sense[3] == 0x47 (Volume Fenced)
+ *
+ * This was caused by continued FSF or FSR after an
+ * 'End Of Data'.
+ */
+ if ((
+ sense[0] == SENSE_DATA_CHECK ||
+ sense[0] == SENSE_EQUIPMENT_CHECK ||
+ sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
+ ) && (
+ sense[1] == SENSE_DRIVE_ONLINE ||
+ sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
+ )) {
+ switch (request->op) {
+ /*
+ * sense[0] == SENSE_DATA_CHECK &&
+ * sense[1] == SENSE_DRIVE_ONLINE
+ * sense[3] == 0x36 (End Of Data)
+ *
+ * Further seeks might return a 'Volume Fenced'.
+ */
+ case TO_FSF:
+ case TO_FSB:
+ /* Trying to seek beyond end of recorded area */
+ return tape_34xx_erp_failed(request, -ENOSPC);
+ case TO_BSB:
+ return tape_34xx_erp_retry(request);
+
+ /*
+ * sense[0] == SENSE_DATA_CHECK &&
+ * sense[1] == SENSE_DRIVE_ONLINE &&
+ * sense[3] == 0x36 (End Of Data)
+ */
+ case TO_LBL:
+ /* Block could not be located. */
+ tape_34xx_delete_sbid_from(device, 0);
+ return tape_34xx_erp_failed(request, -EIO);
+
+ case TO_RFO:
+ /* Read beyond end of recorded area -> 0 bytes read */
+ return tape_34xx_erp_failed(request, 0);
+
+ /*
+ * sense[0] == SENSE_EQUIPMENT_CHECK &&
+ * sense[1] == SENSE_DRIVE_ONLINE &&
+ * sense[3] == 0x38 (Physical End Of Volume)
+ */
+ case TO_WRI:
+ /* Writing at physical end of volume */
+ return tape_34xx_erp_failed(request, -ENOSPC);
+ default:
+ PRINT_ERR("Invalid op in %s:%i\n",
+ __FUNCTION__, __LINE__);
+ return tape_34xx_erp_failed(request, 0);
+ }
+ }
+
+ /* Sensing special bits */
+ if (sense[0] & SENSE_BUS_OUT_CHECK)
+ return tape_34xx_erp_retry(request);
+
+ if (sense[0] & SENSE_DATA_CHECK) {
+ /*
+ * hardware failure, damaged tape or improper
+ * operating conditions
+ */
+ switch (sense[3]) {
+ case 0x23:
+ /* a read data check occurred */
+ if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
+ inhibit_cu_recovery)
+ // data check is not permanent, may be
+ // recovered. We always use async-mode with
+ // cu-recovery, so this should *never* happen.
+ return tape_34xx_erp_bug(device, request,
+ irb, -4);
+
+ /* data check is permanent, CU recovery has failed */
+ PRINT_WARN("Permanent read error\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x25:
+ // a write data check occurred
+ if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
+ inhibit_cu_recovery)
+ // data check is not permanent, may be
+ // recovered. We always use async-mode with
+ // cu-recovery, so this should *never* happen.
+ return tape_34xx_erp_bug(device, request,
+ irb, -5);
+
+ // data check is permanent, cu-recovery has failed
+ PRINT_WARN("Permanent write error\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x26:
+ /* Data Check (read opposite) occurred. */
+ return tape_34xx_erp_read_opposite(device, request);
+ case 0x28:
+ /* ID-Mark at tape start couldn't be written */
+ PRINT_WARN("ID-Mark could not be written.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x31:
+ /* Tape void. Tried to read beyond end of device. */
+ PRINT_WARN("Read beyond end of recorded area.\n");
+ return tape_34xx_erp_failed(request, -ENOSPC);
+ case 0x41:
+ /* Record sequence error. */
+ PRINT_WARN("Invalid block-id sequence found.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ default:
+ /* all data checks for 3480 should result in one of
+ * the above erpa-codes. For 3490, other data-check
+ * conditions do exist. */
+ if (device->cdev->id.driver_info == tape_3480)
+ return tape_34xx_erp_bug(device, request,
+ irb, -6);
+ }
+ }
+
+ if (sense[0] & SENSE_OVERRUN)
+ return tape_34xx_erp_overrun(device, request, irb);
+
+ if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
+ return tape_34xx_erp_sequence(device, request, irb);
+
+ /* Sensing erpa codes */
+ switch (sense[3]) {
+ case 0x00:
+ /* Unit check with erpa code 0. Report and ignore. */
+ PRINT_WARN("Non-error sense was found. "
+ "Unit-check will be ignored.\n");
+ return TAPE_IO_SUCCESS;
+ case 0x21:
+ /*
+ * Data streaming not operational. CU will switch to
+ * interlock mode. Reissue the command.
+ */
+ PRINT_WARN("Data streaming not operational. "
+ "Switching to interlock-mode.\n");
+ return tape_34xx_erp_retry(request);
+ case 0x22:
+ /*
+ * Path equipment check. Might be drive adapter error, buffer
+ * error on the lower interface, internal path not usable,
+ * or error during cartridge load.
+ */
+ PRINT_WARN("A path equipment check occurred. One of the "
+ "following conditions occurred:\n");
+ PRINT_WARN("drive adapter error, buffer error on the lower "
+ "interface, internal path not usable, error "
+ "during cartridge load.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x24:
+ /*
+ * Load display check. Load display was command was issued,
+ * but the drive is displaying a drive check message. Can
+ * be threated as "device end".
+ */
+ return tape_34xx_erp_succeeded(request);
+ case 0x27:
+ /*
+ * Command reject. May indicate illegal channel program or
+ * buffer over/underrun. Since all channel programs are
+ * issued by this driver and ought be correct, we assume a
+ * over/underrun situation and retry the channel program.
+ */
+ return tape_34xx_erp_retry(request);
+ case 0x29:
+ /*
+ * Function incompatible. Either the tape is idrc compressed
+ * but the hardware isn't capable to do idrc, or a perform
+ * subsystem func is issued and the CU is not on-line.
+ */
+ PRINT_WARN ("Function incompatible. Try to switch off idrc\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x2a:
+ /*
+ * Unsolicited environmental data. An internal counter
+ * overflows, we can ignore this and reissue the cmd.
+ */
+ return tape_34xx_erp_retry(request);
+ case 0x2b:
+ /*
+ * Environmental data present. Indicates either unload
+ * completed ok or read buffered log command completed ok.
+ */
+ if (request->op == TO_RUN) {
+ /* Rewind unload completed ok. */
+ tape_med_state_set(device, MS_UNLOADED);
+ return tape_34xx_erp_succeeded(request);
+ }
+ /* tape_34xx doesn't use read buffered log commands. */
+ return tape_34xx_erp_bug(device, request, irb, sense[3]);
+ case 0x2c:
+ /*
+ * Permanent equipment check. CU has tried recovery, but
+ * did not succeed.
+ */
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x2d:
+ /* Data security erase failure. */
+ if (request->op == TO_DSE)
+ return tape_34xx_erp_failed(request, -EIO);
+ /* Data security erase failure, but no such command issued. */
+ return tape_34xx_erp_bug(device, request, irb, sense[3]);
+ case 0x2e:
+ /*
+ * Not capable. This indicates either that the drive fails
+ * reading the format id mark or that that format specified
+ * is not supported by the drive.
+ */
+ PRINT_WARN("Drive not capable processing the tape format!\n");
+ return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+ case 0x30:
+ /* The medium is write protected. */
+ PRINT_WARN("Medium is write protected!\n");
+ return tape_34xx_erp_failed(request, -EACCES);
+ case 0x32:
+ // Tension loss. We cannot recover this, it's an I/O error.
+ PRINT_WARN("The drive lost tape tension.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x33:
+ /*
+ * Load Failure. The cartridge was not inserted correctly or
+ * the tape is not threaded correctly.
+ */
+ PRINT_WARN("Cartridge load failure. Reload the cartridge "
+ "and try again.\n");
+ tape_34xx_delete_sbid_from(device, 0);
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x34:
+ /*
+ * Unload failure. The drive cannot maintain tape tension
+ * and control tape movement during an unload operation.
+ */
+ PRINT_WARN("Failure during cartridge unload. "
+ "Please try manually.\n");
+ if (request->op == TO_RUN)
+ return tape_34xx_erp_failed(request, -EIO);
+ return tape_34xx_erp_bug(device, request, irb, sense[3]);
+ case 0x35:
+ /*
+ * Drive equipment check. One of the following:
+ * - cu cannot recover from a drive detected error
+ * - a check code message is shown on drive display
+ * - the cartridge loader does not respond correctly
+ * - a failure occurs during an index, load, or unload cycle
+ */
+ PRINT_WARN("Equipment check! Please check the drive and "
+ "the cartridge loader.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x36:
+ if (device->cdev->id.driver_info == tape_3490)
+ /* End of data. */
+ return tape_34xx_erp_failed(request, -EIO);
+ /* This erpa is reserved for 3480 */
+ return tape_34xx_erp_bug(device, request, irb, sense[3]);
+ case 0x37:
+ /*
+ * Tape length error. The tape is shorter than reported in
+ * the beginning-of-tape data.
+ */
+ PRINT_WARN("Tape length error.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x38:
+ /*
+ * Physical end of tape. A read/write operation reached
+ * the physical end of tape.
+ */
+ if (request->op==TO_WRI ||
+ request->op==TO_DSE ||
+ request->op==TO_WTM)
+ return tape_34xx_erp_failed(request, -ENOSPC);
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x39:
+ /* Backward at Beginning of tape. */
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x3a:
+ /* Drive switched to not ready. */
+ PRINT_WARN("Drive not ready. Turn the ready/not ready switch "
+ "to ready position and try again.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x3b:
+ /* Manual rewind or unload. This causes an I/O error. */
+ PRINT_WARN("Medium was rewound or unloaded manually.\n");
+ tape_34xx_delete_sbid_from(device, 0);
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x42:
+ /*
+ * Degraded mode. A condition that can cause degraded
+ * performance is detected.
+ */
+ PRINT_WARN("Subsystem is running in degraded mode.\n");
+ return tape_34xx_erp_retry(request);
+ case 0x43:
+ /* Drive not ready. */
+ tape_34xx_delete_sbid_from(device, 0);
+ tape_med_state_set(device, MS_UNLOADED);
+ /* Some commands commands are successful even in this case */
+ if (sense[1] & SENSE_DRIVE_ONLINE) {
+ switch(request->op) {
+ case TO_ASSIGN:
+ case TO_UNASSIGN:
+ case TO_DIS:
+ case TO_NOP:
+ return tape_34xx_done(request);
+ break;
+ default:
+ break;
+ }
+ }
+ PRINT_WARN("The drive is not ready.\n");
+ return tape_34xx_erp_failed(request, -ENOMEDIUM);
+ case 0x44:
+ /* Locate Block unsuccessful. */
+ if (request->op != TO_BLOCK && request->op != TO_LBL)
+ /* No locate block was issued. */
+ return tape_34xx_erp_bug(device, request,
+ irb, sense[3]);
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x45:
+ /* The drive is assigned to a different channel path. */
+ PRINT_WARN("The drive is assigned elsewhere.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x46:
+ /*
+ * Drive not on-line. Drive may be switched offline,
+ * the power supply may be switched off or
+ * the drive address may not be set correctly.
+ */
+ PRINT_WARN("The drive is not on-line.");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x47:
+ /* Volume fenced. CU reports volume integrity is lost. */
+ PRINT_WARN("Volume fenced. The volume integrity is lost.\n");
+ tape_34xx_delete_sbid_from(device, 0);
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x48:
+ /* Log sense data and retry request. */
+ return tape_34xx_erp_retry(request);
+ case 0x49:
+ /* Bus out check. A parity check error on the bus was found. */
+ PRINT_WARN("Bus out check. A data transfer over the bus "
+ "has been corrupted.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x4a:
+ /* Control unit erp failed. */
+ PRINT_WARN("The control unit I/O error recovery failed.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x4b:
+ /*
+ * CU and drive incompatible. The drive requests micro-program
+ * patches, which are not available on the CU.
+ */
+ PRINT_WARN("The drive needs microprogram patches from the "
+ "control unit, which are not available.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x4c:
+ /*
+ * Recovered Check-One failure. Cu develops a hardware error,
+ * but is able to recover.
+ */
+ return tape_34xx_erp_retry(request);
+ case 0x4d:
+ if (device->cdev->id.driver_info == tape_3490)
+ /*
+ * Resetting event received. Since the driver does
+ * not support resetting event recovery (which has to
+ * be handled by the I/O Layer), retry our command.
+ */
+ return tape_34xx_erp_retry(request);
+ /* This erpa is reserved for 3480. */
+ return tape_34xx_erp_bug(device, request, irb, sense[3]);
+ case 0x4e:
+ if (device->cdev->id.driver_info == tape_3490) {
+ /*
+ * Maximum block size exceeded. This indicates, that
+ * the block to be written is larger than allowed for
+ * buffered mode.
+ */
+ PRINT_WARN("Maximum block size for buffered "
+ "mode exceeded.\n");
+ return tape_34xx_erp_failed(request, -ENOBUFS);
+ }
+ /* This erpa is reserved for 3480. */
+ return tape_34xx_erp_bug(device, request, irb, sense[3]);
+ case 0x50:
+ /*
+ * Read buffered log (Overflow). CU is running in extended
+ * buffered log mode, and a counter overflows. This should
+ * never happen, since we're never running in extended
+ * buffered log mode.
+ */
+ return tape_34xx_erp_retry(request);
+ case 0x51:
+ /*
+ * Read buffered log (EOV). EOF processing occurs while the
+ * CU is in extended buffered log mode. This should never
+ * happen, since we're never running in extended buffered
+ * log mode.
+ */
+ return tape_34xx_erp_retry(request);
+ case 0x52:
+ /* End of Volume complete. Rewind unload completed ok. */
+ if (request->op == TO_RUN) {
+ tape_med_state_set(device, MS_UNLOADED);
+ tape_34xx_delete_sbid_from(device, 0);
+ return tape_34xx_erp_succeeded(request);
+ }
+ return tape_34xx_erp_bug(device, request, irb, sense[3]);
+ case 0x53:
+ /* Global command intercept. */
+ return tape_34xx_erp_retry(request);
+ case 0x54:
+ /* Channel interface recovery (temporary). */
+ return tape_34xx_erp_retry(request);
+ case 0x55:
+ /* Channel interface recovery (permanent). */
+ PRINT_WARN("A permanent channel interface error occurred.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x56:
+ /* Channel protocol error. */
+ PRINT_WARN("A channel protocol error occurred.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x57:
+ if (device->cdev->id.driver_info == tape_3480) {
+ /* Attention intercept. */
+ PRINT_WARN("An attention intercept occurred, "
+ "which will be recovered.\n");
+ return tape_34xx_erp_retry(request);
+ } else {
+ /* Global status intercept. */
+ PRINT_WARN("An global status intercept was received, "
+ "which will be recovered.\n");
+ return tape_34xx_erp_retry(request);
+ }
+ case 0x5a:
+ /*
+ * Tape length incompatible. The tape inserted is too long,
+ * which could cause damage to the tape or the drive.
+ */
+ PRINT_WARN("Tape Length Incompatible\n");
+ PRINT_WARN("Tape length exceeds IBM enhanced capacity "
+ "cartdridge length or a medium\n");
+ PRINT_WARN("with EC-CST identification mark has been mounted "
+ "in a device that writes\n");
+ PRINT_WARN("3480 or 3480 XF format.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x5b:
+ /* Format 3480 XF incompatible */
+ if (sense[1] & SENSE_BEGINNING_OF_TAPE)
+ /* The tape will get overwritten. */
+ return tape_34xx_erp_retry(request);
+ PRINT_WARN("Format 3480 XF Incompatible\n");
+ PRINT_WARN("Medium has been created in 3480 format. "
+ "To change the format writes\n");
+ PRINT_WARN("must be issued at BOT.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x5c:
+ /* Format 3480-2 XF incompatible */
+ PRINT_WARN("Format 3480-2 XF Incompatible\n");
+ PRINT_WARN("Device can only read 3480 or 3480 XF format.\n");
+ return tape_34xx_erp_failed(request, -EIO);
+ case 0x5d:
+ /* Tape length violation. */
+ PRINT_WARN("Tape Length Violation\n");
+ PRINT_WARN("The mounted tape exceeds IBM Enhanced Capacity "
+ "Cartdridge System Tape length.\n");
+ PRINT_WARN("This may cause damage to the drive or tape when "
+ "processing to the EOV\n");
+ return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+ case 0x5e:
+ /* Compaction algorithm incompatible. */
+ PRINT_WARN("Compaction Algorithm Incompatible\n");
+ PRINT_WARN("The volume is recorded using an incompatible "
+ "compaction algorithm,\n");
+ PRINT_WARN("which is not supported by the device.\n");
+ return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+
+ /* The following erpas should have been covered earlier. */
+ case 0x23: /* Read data check. */
+ case 0x25: /* Write data check. */
+ case 0x26: /* Data check (read opposite). */
+ case 0x28: /* Write id mark check. */
+ case 0x31: /* Tape void. */
+ case 0x40: /* Overrun error. */
+ case 0x41: /* Record sequence error. */
+ /* All other erpas are reserved for future use. */
+ default:
+ return tape_34xx_erp_bug(device, request, irb, sense[3]);
+ }
+}
+
+/*
+ * 3480/3490 interrupt handler.
+ *
+ * Dispatches a device interrupt for a (possibly finished) channel
+ * program to the matching completion or error-recovery routine and
+ * returns the resulting TAPE_IO_* action code to the tape core.
+ */
+static int
+tape_34xx_irq(struct tape_device *device, struct tape_request *request,
+ struct irb *irb)
+{
+ /* No request pending on this device: treat as unsolicited. */
+ if (request == NULL)
+ return tape_34xx_unsolicited_irq(device, irb);
+
+ /*
+ * Unit exception together with device end on a write request
+ * signals the physical end of the volume: fail with -ENOSPC.
+ */
+ if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) &&
+ (irb->scsw.dstat & DEV_STAT_DEV_END) &&
+ (request->op == TO_WRI)) {
+ /* Write at end of volume */
+ PRINT_INFO("End of volume\n"); /* XXX */
+ return tape_34xx_erp_failed(request, -ENOSPC);
+ }
+
+ /* Unit check: run sense-data based error recovery. */
+ if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+ return tape_34xx_unit_check(device, request, irb);
+
+ if (irb->scsw.dstat & DEV_STAT_DEV_END) {
+ /*
+ * A unit exception occurs on skipping over a tapemark block.
+ */
+ if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) {
+ if (request->op == TO_BSB || request->op == TO_FSB)
+ request->rescnt++;
+ else
+ DBF_EVENT(5, "Unit Exception!\n");
+ }
+ return tape_34xx_done(request);
+ }
+
+ /* Anything else is unexpected: dump sense data and stop I/O. */
+ DBF_EVENT(6, "xunknownirq\n");
+ PRINT_ERR("Unexpected interrupt.\n");
+ PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]);
+ tape_dump_sense(device, request, irb);
+ return TAPE_IO_STOP;
+}
+
+/*
+ * Discipline specific ioctl handler. Only TAPE390_DISPLAY is handled
+ * here; every other command is rejected with -EINVAL so that the
+ * generic tape ioctl code can report it as unsupported.
+ */
+static int
+tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
+{
+	struct display_struct disp;
+
+	if (cmd != TAPE390_DISPLAY)
+		return -EINVAL;
+
+	/* Fetch the display message descriptor from user space. */
+	if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
+		return -EFAULT;
+
+	return tape_std_display(device, &disp);
+}
+
+/*
+ * Allocate a new search block ID entry for <bid> and insert it after
+ * list position <l>. Uses GFP_ATOMIC because callers may run in
+ * interrupt context. On allocation failure the entry is silently
+ * dropped: the sbid list is only a seek accelerator, so losing an
+ * entry costs performance, not correctness.
+ */
+static inline void
+tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
+{
+ struct tape_34xx_sbid * new_sbid;
+
+ new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC);
+ if (!new_sbid)
+ return;
+
+ new_sbid->bid = bid;
+ list_add(&new_sbid->list, l);
+}
+
+/*
+ * Build up the search block ID list. The block ID consists of a logical
+ * block number and a hardware specific part. The hardware specific part
+ * helps the tape drive to speed up searching for a specific block.
+ *
+ * The list is kept sorted by logical block number and holds at most one
+ * entry per wrap/segment combination.
+ */
+static void
+tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
+{
+ struct list_head * sbid_list;
+ struct tape_34xx_sbid * sbid;
+ struct list_head * l;
+
+ /*
+ * immediately return if there is no list at all or the block to add
+ * is located in segment 1 of wrap 0 because this position is used
+ * if no hardware position data is supplied.
+ */
+ sbid_list = (struct list_head *) device->discdata;
+ if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
+ return;
+
+ /*
+ * Search the position where to insert the new entry. Hardware
+ * acceleration uses only the segment and wrap number. So we
+ * need only one entry for a specific wrap/segment combination.
+ * If there is a block with a lower number but the same hard-
+ * ware position data we just update the block number in the
+ * existing entry.
+ */
+ list_for_each(l, sbid_list) {
+ sbid = list_entry(l, struct tape_34xx_sbid, list);
+
+ if (
+ (sbid->bid.segment == bid.segment) &&
+ (sbid->bid.wrap == bid.wrap)
+ ) {
+ /* Same hardware position: keep the lower block number. */
+ if (bid.block < sbid->bid.block)
+ sbid->bid = bid;
+ else return;
+ break;
+ }
+
+ /* Sort in according to logical block number. */
+ if (bid.block < sbid->bid.block) {
+ /* Insert before the current entry. */
+ tape_34xx_append_new_sbid(bid, l->prev);
+ break;
+ }
+ }
+ /* List empty or new block bigger than last entry. */
+ if (l == sbid_list)
+ tape_34xx_append_new_sbid(bid, l->prev);
+
+ DBF_LH(4, "Current list is:\n");
+ list_for_each(l, sbid_list) {
+ sbid = list_entry(l, struct tape_34xx_sbid, list);
+ DBF_LH(4, "%d:%03d@%05d\n",
+ sbid->bid.wrap,
+ sbid->bid.segment,
+ sbid->bid.block
+ );
+ }
+}
+
+/*
+ * Delete all entries from the search block ID list that belong to tape
+ * blocks equal or higher than the given number. Called whenever the
+ * cached hardware position data becomes stale (rewind/unload, volume
+ * fenced, or device cleanup).
+ */
+static void
+tape_34xx_delete_sbid_from(struct tape_device *device, int from)
+{
+	struct tape_34xx_sbid *entry;
+	struct list_head *head;
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	head = (struct list_head *) device->discdata;
+	if (head == NULL)
+		return;
+
+	/* Safe iteration: entries are unlinked and freed while walking. */
+	list_for_each_safe(pos, tmp, head) {
+		entry = list_entry(pos, struct tape_34xx_sbid, list);
+		if (entry->bid.block < from)
+			continue;
+		DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
+			entry->bid.wrap,
+			entry->bid.segment,
+			entry->bid.block
+		);
+		list_del(pos);
+		kfree(entry);
+	}
+}
+
+/*
+ * Merge hardware position data into a block id.
+ *
+ * Finds the list entry with the highest logical block number that is
+ * still below bid->block and copies its wrap/segment data into <bid>.
+ * If no usable entry exists, the default position (wrap 0, segment 1)
+ * is used; this matches the "no hardware data" convention checked in
+ * tape_34xx_add_sbid.
+ */
+static void
+tape_34xx_merge_sbid(
+ struct tape_device * device,
+ struct tape_34xx_block_id * bid
+) {
+ struct tape_34xx_sbid * sbid;
+ struct tape_34xx_sbid * sbid_to_use;
+ struct list_head * sbid_list;
+ struct list_head * l;
+
+ sbid_list = (struct list_head *) device->discdata;
+ /* Default position used when no hardware data is available. */
+ bid->wrap = 0;
+ bid->segment = 1;
+
+ if (!sbid_list || list_empty(sbid_list))
+ return;
+
+ /* List is sorted by block number; take the last entry below bid. */
+ sbid_to_use = NULL;
+ list_for_each(l, sbid_list) {
+ sbid = list_entry(l, struct tape_34xx_sbid, list);
+
+ if (sbid->bid.block >= bid->block)
+ break;
+ sbid_to_use = sbid;
+ }
+ if (sbid_to_use) {
+ bid->wrap = sbid_to_use->bid.wrap;
+ bid->segment = sbid_to_use->bid.segment;
+ DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
+ sbid_to_use->bid.wrap,
+ sbid_to_use->bid.segment,
+ sbid_to_use->bid.block,
+ bid->block
+ );
+ }
+}
+
+/*
+ * Discipline setup: assign the drive, sense the mounted medium and
+ * allocate the (empty) search block ID list stored in discdata.
+ *
+ * Note: a failing discdata allocation is tolerated — all sbid users
+ * check for a NULL list — so only the assign/sense result is returned.
+ */
+static int
+tape_34xx_setup_device(struct tape_device * device)
+{
+ int rc;
+ struct list_head * discdata;
+
+ DBF_EVENT(6, "34xx device setup\n");
+ if ((rc = tape_std_assign(device)) == 0) {
+ /* Medium sense failure is only logged, rc is passed on. */
+ if ((rc = tape_34xx_medium_sense(device)) != 0) {
+ DBF_LH(3, "34xx medium sense returned %d\n", rc);
+ }
+ }
+ discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (discdata) {
+ INIT_LIST_HEAD(discdata);
+ device->discdata = discdata;
+ }
+
+ return rc;
+}
+
+/*
+ * Discipline cleanup: unassign the drive and release the search block
+ * ID list that was allocated in tape_34xx_setup_device.
+ */
+static void
+tape_34xx_cleanup_device(struct tape_device *device)
+{
+	tape_std_unassign(device);
+
+	if (!device->discdata)
+		return;
+
+	/* Free all list entries, then the list head itself. */
+	tape_34xx_delete_sbid_from(device, 0);
+	kfree(device->discdata);
+	device->discdata = NULL;
+}
+
+
+/*
+ * MTTELL: Tell block. Return the number of block relative to current file.
+ *
+ * Reads the channel and device block IDs from the drive (packed into
+ * one 64-bit value), feeds the channel block ID into the sbid seek
+ * accelerator and returns its logical block number.
+ */
+static int
+tape_34xx_mttell(struct tape_device *device, int mt_count)
+{
+ struct {
+ struct tape_34xx_block_id cbid;
+ struct tape_34xx_block_id dbid;
+ } __attribute__ ((packed)) block_id;
+ int rc;
+
+ rc = tape_std_read_block_id(device, (__u64 *) &block_id);
+ if (rc)
+ return rc;
+
+ tape_34xx_add_sbid(device, block_id.cbid);
+ return block_id.cbid.block;
+}
+
+/*
+ * MTSEEK: seek to the specified block.
+ *
+ * Builds a three-CCW channel program (mode set, locate block, nop).
+ * The locate parameter is enriched with cached hardware position data
+ * via tape_34xx_merge_sbid to speed up the search on the drive.
+ */
+static int
+tape_34xx_mtseek(struct tape_device *device, int mt_count)
+{
+ struct tape_request *request;
+ struct tape_34xx_block_id * bid;
+
+ /* Block numbers are limited to 22 bits in the block ID. */
+ if (mt_count > 0x3fffff) {
+ DBF_EXCEPTION(6, "xsee parm\n");
+ return -EINVAL;
+ }
+ request = tape_alloc_request(3, 4);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
+
+ /* setup ccws */
+ request->op = TO_LBL;
+ bid = (struct tape_34xx_block_id *) request->cpdata;
+ /* Bit 0x08 of the modeset byte selects the 3480 XF format. */
+ bid->format = (*device->modeset_byte & 0x08) ?
+ TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
+ bid->block = mt_count;
+ tape_34xx_merge_sbid(device, bid);
+
+ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+ tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+ tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+ /* execute it */
+ return tape_do_io_free(device, request);
+}
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+/*
+ * Tape block read for 34xx.
+ *
+ * Translates a block layer read request into a tape channel program:
+ * mode set + two nop slots (filled in later by check_locate) followed
+ * by one READ_FORWARD CCW per hardsector of the request.
+ */
+static struct tape_request *
+tape_34xx_bread(struct tape_device *device, struct request *req)
+{
+ struct tape_request *request;
+ struct ccw1 *ccw;
+ int count = 0, i;
+ unsigned off;
+ char *dst;
+ struct bio_vec *bv;
+ struct bio *bio;
+ struct tape_34xx_block_id * start_block;
+
+ DBF_EVENT(6, "xBREDid:");
+
+ /* Count the number of blocks for the request. */
+ rq_for_each_bio(bio, req) {
+ bio_for_each_segment(bv, bio, i) {
+ count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
+ }
+ }
+
+ /* Allocate the ccw request. */
+ request = tape_alloc_request(3+count+1, 8);
+ if (IS_ERR(request))
+ return request;
+
+ /* Setup ccws. */
+ request->op = TO_BLOCK;
+ start_block = (struct tape_34xx_block_id *) request->cpdata;
+ /* Convert the 512-byte start sector into a tape hardsector number. */
+ start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B;
+ DBF_EVENT(6, "start_block = %i\n", start_block->block);
+
+ ccw = request->cpaddr;
+ ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
+
+ /*
+ * We always setup a nop after the mode set ccw. This slot is
+ * used in tape_std_check_locate to insert a locate ccw if the
+ * current tape position doesn't match the start block to be read.
+ * The second nop will be filled with a read block id which is in
+ * turn used by tape_34xx_free_bread to populate the segment bid
+ * table.
+ */
+ ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
+ ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
+
+ /* One READ_FORWARD CCW per hardsector of every bio segment. */
+ rq_for_each_bio(bio, req) {
+ bio_for_each_segment(bv, bio, i) {
+ dst = kmap(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len;
+ off += TAPEBLOCK_HSEC_SIZE) {
+ ccw->flags = CCW_FLAG_CC;
+ ccw->cmd_code = READ_FORWARD;
+ ccw->count = TAPEBLOCK_HSEC_SIZE;
+ set_normalized_cda(ccw, (void*) __pa(dst));
+ ccw++;
+ dst += TAPEBLOCK_HSEC_SIZE;
+ }
+ }
+ }
+
+ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+ DBF_EVENT(6, "xBREDccwg\n");
+ return request;
+}
+
+/*
+ * Free a channel program built by tape_34xx_bread.
+ *
+ * If check_locate replaced the second nop with a READ_BLOCK_ID, the
+ * returned block ID is first harvested into the sbid list. Then the
+ * normalized CDAs of all READ_FORWARD CCWs are released.
+ */
+static void
+tape_34xx_free_bread (struct tape_request *request)
+{
+ struct ccw1* ccw;
+
+ ccw = request->cpaddr;
+ if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
+ struct {
+ struct tape_34xx_block_id cbid;
+ struct tape_34xx_block_id dbid;
+ } __attribute__ ((packed)) *rbi_data;
+
+ rbi_data = request->cpdata;
+
+ if (request->device)
+ tape_34xx_add_sbid(request->device, rbi_data->cbid);
+ }
+
+ /* Last ccw is a nop and doesn't need clear_normalized_cda */
+ for (; ccw->flags & CCW_FLAG_CC; ccw++)
+ if (ccw->cmd_code == READ_FORWARD)
+ clear_normalized_cda(ccw);
+ tape_free_request(request);
+}
+
+/*
+ * check_locate is called just before the tape request is passed to
+ * the common io layer for execution. It has to check the current
+ * tape position and insert a locate ccw if it doesn't match the
+ * start block for the request.
+ *
+ * The two nop slots prepared by tape_34xx_bread are replaced by a
+ * LOCATE and a READ_BLOCK_ID; the read-back ID is later consumed by
+ * tape_34xx_free_bread.
+ */
+static void
+tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
+{
+ struct tape_34xx_block_id * start_block;
+
+ start_block = (struct tape_34xx_block_id *) request->cpdata;
+ /* Already positioned at the right block: keep the nops. */
+ if (start_block->block == device->blk_data.block_position)
+ return;
+
+ DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof);
+ start_block->wrap = 0;
+ start_block->segment = 1;
+ /* Bit 0x08 of the modeset byte selects the 3480 XF format. */
+ start_block->format = (*device->modeset_byte & 0x08) ?
+ TAPE34XX_FMT_3480_XF :
+ TAPE34XX_FMT_3480;
+ /* Make the block number absolute (add the begin-of-file offset). */
+ start_block->block = start_block->block + device->bof;
+ tape_34xx_merge_sbid(device, start_block);
+ tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+ tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
+}
+#endif
+
+/*
+ * List of 3480/3490 magnetic tape commands.
+ * NULL entries mark MTOP operations this discipline does not support.
+ */
+static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
+ [MTRESET] = tape_std_mtreset,
+ [MTFSF] = tape_std_mtfsf,
+ [MTBSF] = tape_std_mtbsf,
+ [MTFSR] = tape_std_mtfsr,
+ [MTBSR] = tape_std_mtbsr,
+ [MTWEOF] = tape_std_mtweof,
+ [MTREW] = tape_std_mtrew,
+ [MTOFFL] = tape_std_mtoffl,
+ [MTNOP] = tape_std_mtnop,
+ [MTRETEN] = tape_std_mtreten,
+ [MTBSFM] = tape_std_mtbsfm,
+ [MTFSFM] = tape_std_mtfsfm,
+ [MTEOM] = tape_std_mteom,
+ [MTERASE] = tape_std_mterase,
+ [MTRAS1] = NULL,
+ [MTRAS2] = NULL,
+ [MTRAS3] = NULL,
+ [MTSETBLK] = tape_std_mtsetblk,
+ [MTSETDENSITY] = NULL,
+ [MTSEEK] = tape_34xx_mtseek,
+ [MTTELL] = tape_34xx_mttell,
+ [MTSETDRVBUFFER] = NULL,
+ [MTFSS] = NULL,
+ [MTBSS] = NULL,
+ [MTWSM] = NULL,
+ [MTLOCK] = NULL,
+ [MTUNLOCK] = NULL,
+ [MTLOAD] = tape_std_mtload,
+ [MTUNLOAD] = tape_std_mtunload,
+ [MTCOMPRESSION] = tape_std_mtcompression,
+ [MTSETPART] = NULL,
+ [MTMKPART] = NULL
+};
+
+/*
+ * Tape discipline structure for 3480 and 3490.
+ * Registered with the tape core in tape_34xx_online; the block device
+ * callbacks are only present when CONFIG_S390_TAPE_BLOCK is enabled.
+ */
+static struct tape_discipline tape_discipline_34xx = {
+ .owner = THIS_MODULE,
+ .setup_device = tape_34xx_setup_device,
+ .cleanup_device = tape_34xx_cleanup_device,
+ .process_eov = tape_std_process_eov,
+ .irq = tape_34xx_irq,
+ .read_block = tape_std_read_block,
+ .write_block = tape_std_write_block,
+#ifdef CONFIG_S390_TAPE_BLOCK
+ .bread = tape_34xx_bread,
+ .free_bread = tape_34xx_free_bread,
+ .check_locate = tape_34xx_check_locate,
+#endif
+ .ioctl_fn = tape_34xx_ioctl,
+ .mtop_array = tape_34xx_mtop
+};
+
+/*
+ * Devices handled by this driver. Use C99 designated initializers
+ * (".driver_info =") instead of the obsolete GCC-specific "field:"
+ * syntax, matching the style used elsewhere in this file.
+ */
+static struct ccw_device_id tape_34xx_ids[] = {
+	{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480 },
+	{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490 },
+	{ /* end of list */ }
+};
+
+/* ccw set_online callback: bind the 34xx discipline to the device. */
+static int
+tape_34xx_online(struct ccw_device *cdev)
+{
+	struct tape_device *device = cdev->dev.driver_data;
+
+	return tape_generic_online(device, &tape_discipline_34xx);
+}
+
+/* ccw set_offline callback: detach the device from the tape core. */
+static int
+tape_34xx_offline(struct ccw_device *cdev)
+{
+	struct tape_device *device = cdev->dev.driver_data;
+
+	return tape_generic_offline(device);
+}
+
+/* ccw bus driver glue for 3480/3490 tape devices. */
+static struct ccw_driver tape_34xx_driver = {
+ .name = "tape_34xx",
+ .owner = THIS_MODULE,
+ .ids = tape_34xx_ids,
+ .probe = tape_generic_probe,
+ .remove = tape_generic_remove,
+ .set_online = tape_34xx_online,
+ .set_offline = tape_34xx_offline,
+};
+
+/*
+ * Module initialization: set up the s390 debug feature area and
+ * register the ccw driver for 3480/3490 tapes.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int
+tape_34xx_init (void)
+{
+	int rc;
+
+	TAPE_DBF_AREA = debug_register ( "tape_34xx", 1, 2, 4*sizeof(long));
+	/*
+	 * debug_register() returns NULL on failure; bail out instead of
+	 * handing a NULL area to debug_register_view().
+	 */
+	if (!TAPE_DBF_AREA)
+		return -ENOMEM;
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+
+	DBF_EVENT(3, "34xx init: $Revision: 1.21 $\n");
+	/* Register driver for 3480/3490 tapes. */
+	rc = ccw_driver_register(&tape_34xx_driver);
+	if (rc)
+		DBF_EVENT(3, "34xx init failed\n");
+	else
+		DBF_EVENT(3, "34xx registered\n");
+	return rc;
+}
+
+/* Module exit: unregister the ccw driver and the debug area. */
+static void
+tape_34xx_exit(void)
+{
+ ccw_driver_unregister(&tape_34xx_driver);
+
+ debug_unregister(TAPE_DBF_AREA);
+}
+
+/* Module metadata and entry points. */
+MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
+MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
+MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape "
+ "device driver ($Revision: 1.21 $)");
+MODULE_LICENSE("GPL");
+
+module_init(tape_34xx_init);
+module_exit(tape_34xx_exit);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
new file mode 100644
index 000000000000..1efc9f21229e
--- /dev/null
+++ b/drivers/s390/char/tape_block.c
@@ -0,0 +1,492 @@
+/*
+ * drivers/s390/char/tape_block.c
+ * block device frontend for tape device driver
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Stefan Bader <shbader@de.ibm.com>
+ */
+
+#include <linux/fs.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/buffer_head.h>
+
+#include <asm/debug.h>
+
+#define TAPE_DBF_AREA tape_core_dbf
+
+#include "tape.h"
+
+#define PRINTK_HEADER "TAPE_BLOCK: "
+
+#define TAPEBLOCK_MAX_SEC 100
+#define TAPEBLOCK_MIN_REQUEUE 3
+
+/*
+ * 2003/11/25 Stefan Bader <shbader@de.ibm.com>
+ *
+ * In 2.5/2.6 the block device request function is very likely to be called
+ * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
+ * just call any function that tries to allocate CCW requests from that con-
+ * text since it might sleep. There are two choices to work around this:
+ * a) do not allocate with kmalloc but use its own memory pool
+ * b) take requests from the queue outside that context, knowing that
+ * allocation might sleep
+ */
+
+/*
+ * file operation structure for tape block frontend
+ */
+static int tapeblock_open(struct inode *, struct file *);
+static int tapeblock_release(struct inode *, struct file *);
+static int tapeblock_ioctl(struct inode *, struct file *, unsigned int,
+ unsigned long);
+static int tapeblock_medium_changed(struct gendisk *);
+static int tapeblock_revalidate_disk(struct gendisk *);
+
+static struct block_device_operations tapeblock_fops = {
+ .owner = THIS_MODULE,
+ .open = tapeblock_open,
+ .release = tapeblock_release,
+ .ioctl = tapeblock_ioctl,
+ .media_changed = tapeblock_medium_changed,
+ .revalidate_disk = tapeblock_revalidate_disk,
+};
+
+/* Block device major; 0 requests dynamic allocation in tapeblock_init. */
+static int tapeblock_major = 0;
+
+/*
+ * Schedule the requeue work item unless it is already pending.
+ * NOTE(review): relies on the s390 atomic_compare_and_swap() returning
+ * nonzero when the swap did not happen (flag was already 1) —
+ * confirm against asm/atomic.h of this kernel version.
+ */
+static void
+tapeblock_trigger_requeue(struct tape_device *device)
+{
+ /* Protect against rescheduling. */
+ if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
+ return;
+ schedule_work(&device->blk_data.requeue_task);
+}
+
+/*
+ * Post finished request.
+ * Completes all sectors of <req> at once; end_that_request_first must
+ * consume the whole request (it returning nonzero would mean sectors
+ * are left over, which cannot happen here — hence BUG()).
+ */
+static inline void
+tapeblock_end_request(struct request *req, int uptodate)
+{
+ if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+ BUG();
+ end_that_request_last(req);
+}
+
+/*
+ * Completion callback for a tape CCW request built by bread.
+ * Finishes the block layer request, updates (or invalidates) the
+ * cached tape position and kicks the requeue task if more work is
+ * pending on either queue.
+ */
+static void
+__tapeblock_end_request(struct tape_request *ccw_req, void *data)
+{
+ struct tape_device *device;
+ struct request *req;
+
+ DBF_LH(6, "__tapeblock_end_request()\n");
+
+ device = ccw_req->device;
+ req = (struct request *) data;
+ tapeblock_end_request(req, ccw_req->rc == 0);
+ if (ccw_req->rc == 0)
+ /* Update position. */
+ device->blk_data.block_position =
+ (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
+ else
+ /* We lost the position information due to an error. */
+ device->blk_data.block_position = -1;
+ device->discipline->free_bread(ccw_req);
+ /* More requests waiting on either queue: reschedule the requeue. */
+ if (!list_empty(&device->req_queue) ||
+ elv_next_request(device->blk_data.request_queue))
+ tapeblock_trigger_requeue(device);
+}
+
+/*
+ * Feed the tape device CCW queue with requests supplied in a list.
+ *
+ * Builds the discipline channel program for <req> and enqueues it
+ * asynchronously. On any failure the block request is completed with
+ * an error and the CCW request is released.
+ */
+static inline int
+tapeblock_start_request(struct tape_device *device, struct request *req)
+{
+ struct tape_request * ccw_req;
+ int rc;
+
+ DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);
+
+ ccw_req = device->discipline->bread(device, req);
+ if (IS_ERR(ccw_req)) {
+ DBF_EVENT(1, "TBLOCK: bread failed\n");
+ tapeblock_end_request(req, 0);
+ return PTR_ERR(ccw_req);
+ }
+ /* Completion is delivered via __tapeblock_end_request. */
+ ccw_req->callback = __tapeblock_end_request;
+ ccw_req->callback_data = (void *) req;
+ ccw_req->retries = TAPEBLOCK_RETRIES;
+
+ rc = tape_do_io_async(device, ccw_req);
+ if (rc) {
+ /*
+ * Start/enqueueing failed. No retries in
+ * this case.
+ */
+ tapeblock_end_request(req, 0);
+ device->discipline->free_bread(ccw_req);
+ }
+
+ return rc;
+}
+
+/*
+ * Move requests from the block device request queue to the tape device ccw
+ * queue.
+ *
+ * Runs from the requeue work item scheduled by
+ * tapeblock_trigger_requeue. Note the lock choreography: interrupts
+ * are disabled by the first spin_lock_irq() and stay disabled across
+ * the switch from the ccwdev lock to the request_queue_lock; the
+ * final spin_unlock_irq() re-enables them.
+ */
+static void
+tapeblock_requeue(void *data) {
+ struct tape_device * device;
+ request_queue_t * queue;
+ int nr_queued;
+ struct request * req;
+ struct list_head * l;
+ int rc;
+
+ device = (struct tape_device *) data;
+ if (!device)
+ return;
+
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ queue = device->blk_data.request_queue;
+
+ /* Count number of requests on ccw queue. */
+ nr_queued = 0;
+ list_for_each(l, &device->req_queue)
+ nr_queued++;
+ /* Unlock only; irqs stay off until the final spin_unlock_irq. */
+ spin_unlock(get_ccwdev_lock(device->cdev));
+
+ spin_lock(&device->blk_data.request_queue_lock);
+ /* Feed at most TAPEBLOCK_MIN_REQUEUE requests to the ccw queue. */
+ while (
+ !blk_queue_plugged(queue) &&
+ elv_next_request(queue) &&
+ nr_queued < TAPEBLOCK_MIN_REQUEUE
+ ) {
+ req = elv_next_request(queue);
+ if (rq_data_dir(req) == WRITE) {
+ /* Read-only frontend: fail all write requests. */
+ DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
+ blkdev_dequeue_request(req);
+ tapeblock_end_request(req, 0);
+ continue;
+ }
+ /*
+ * NOTE(review): the request is started (and, on failure,
+ * already completed inside tapeblock_start_request) before
+ * it is dequeued below — looks racy/use-after-completion;
+ * verify against later fixes of this function.
+ */
+ spin_unlock_irq(&device->blk_data.request_queue_lock);
+ rc = tapeblock_start_request(device, req);
+ spin_lock_irq(&device->blk_data.request_queue_lock);
+ blkdev_dequeue_request(req);
+ nr_queued++;
+ }
+ spin_unlock_irq(&device->blk_data.request_queue_lock);
+ /* Allow tapeblock_trigger_requeue to schedule us again. */
+ atomic_set(&device->blk_data.requeue_scheduled, 0);
+}
+
+/*
+ * Tape request queue function. Called from ll_rw_blk.c.
+ * Only defers the work to the requeue task, since building CCW
+ * requests may sleep and this function can run with irqs disabled.
+ */
+static void
+tapeblock_request_fn(request_queue_t *queue)
+{
+	struct tape_device *device = queue->queuedata;
+
+	DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
+	if (!device)
+		BUG();
+
+	tapeblock_trigger_requeue(device);
+}
+
+/*
+ * This function is called for every new tapedevice.
+ *
+ * Creates the block request queue (with the noop elevator, since seek
+ * optimization makes no sense on tape), sets the queue limits, and
+ * registers a gendisk named "btibm<n>". Two device references are
+ * taken: one for the gendisk, one for the queue/work item.
+ */
+int
+tapeblock_setup_device(struct tape_device * device)
+{
+ struct tape_blk_data * blkdat;
+ struct gendisk * disk;
+ int rc;
+
+ blkdat = &device->blk_data;
+ spin_lock_init(&blkdat->request_queue_lock);
+ atomic_set(&blkdat->requeue_scheduled, 0);
+
+ blkdat->request_queue = blk_init_queue(
+ tapeblock_request_fn,
+ &blkdat->request_queue_lock
+ );
+ if (!blkdat->request_queue)
+ return -ENOMEM;
+
+ /* Replace the default elevator: tapes are strictly sequential. */
+ elevator_exit(blkdat->request_queue->elevator);
+ rc = elevator_init(blkdat->request_queue, "noop");
+ if (rc)
+ goto cleanup_queue;
+
+ /* -1L means "no limit" for the segment restrictions. */
+ blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
+ blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
+ blk_queue_max_phys_segments(blkdat->request_queue, -1L);
+ blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+ blk_queue_max_segment_size(blkdat->request_queue, -1L);
+ blk_queue_segment_boundary(blkdat->request_queue, -1L);
+
+ disk = alloc_disk(1);
+ if (!disk) {
+ rc = -ENOMEM;
+ goto cleanup_queue;
+ }
+
+ disk->major = tapeblock_major;
+ disk->first_minor = device->first_minor;
+ disk->fops = &tapeblock_fops;
+ disk->private_data = tape_get_device_reference(device);
+ disk->queue = blkdat->request_queue;
+ set_capacity(disk, 0);
+ sprintf(disk->disk_name, "btibm%d",
+ device->first_minor / TAPE_MINORS_PER_DEV);
+
+ blkdat->disk = disk;
+ blkdat->medium_changed = 1;
+ blkdat->request_queue->queuedata = tape_get_device_reference(device);
+
+ add_disk(disk);
+
+ INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
+ tape_get_device_reference(device));
+
+ return 0;
+
+cleanup_queue:
+ blk_cleanup_queue(blkdat->request_queue);
+ blkdat->request_queue = NULL;
+
+ return rc;
+}
+
+/*
+ * Tear down the block frontend of a tape device: cancel pending
+ * requeue work, remove the gendisk and destroy the request queue,
+ * dropping the device references taken in tapeblock_setup_device.
+ */
+void
+tapeblock_cleanup_device(struct tape_device *device)
+{
+ /* Make sure no requeue work is still running. */
+ flush_scheduled_work();
+ device->blk_data.requeue_task.data = tape_put_device(device);
+
+ if (!device->blk_data.disk) {
+ PRINT_ERR("(%s): No gendisk to clean up!\n",
+ device->cdev->dev.bus_id);
+ goto cleanup_queue;
+ }
+
+ del_gendisk(device->blk_data.disk);
+ /* tape_put_device returns NULL, clearing the stale pointer. */
+ device->blk_data.disk->private_data =
+ tape_put_device(device->blk_data.disk->private_data);
+ put_disk(device->blk_data.disk);
+
+ device->blk_data.disk = NULL;
+cleanup_queue:
+ device->blk_data.request_queue->queuedata = tape_put_device(device);
+
+ blk_cleanup_queue(device->blk_data.request_queue);
+ device->blk_data.request_queue = NULL;
+}
+
+/*
+ * Detect number of blocks of the tape.
+ * FIXME: can we extent this to detect the blocks size as well ?
+ *
+ * Determines the image boundaries by spacing to the next filemark
+ * (end of image) and back (begin of image) and telling the block
+ * position at each point; the difference is the capacity.
+ */
+static int
+tapeblock_revalidate_disk(struct gendisk *disk)
+{
+ struct tape_device * device;
+ unsigned int nr_of_blks;
+ int rc;
+
+ device = (struct tape_device *) disk->private_data;
+ if (!device)
+ BUG();
+
+ /* Capacity is cached until the medium changes. */
+ if (!device->blk_data.medium_changed)
+ return 0;
+
+ PRINT_INFO("Detecting media size...\n");
+ rc = tape_mtop(device, MTFSFM, 1);
+ if (rc)
+ return rc;
+
+ rc = tape_mtop(device, MTTELL, 1);
+ if (rc < 0)
+ return rc;
+
+ DBF_LH(3, "Image file ends at %d\n", rc);
+ nr_of_blks = rc;
+
+ /* This will fail for the first file. Catch the error by checking the
+ * position. */
+ tape_mtop(device, MTBSF, 1);
+
+ rc = tape_mtop(device, MTTELL, 1);
+ if (rc < 0)
+ return rc;
+
+ /* Position moved forward: the backspace above went wrong. */
+ if (rc > nr_of_blks)
+ return -EINVAL;
+
+ DBF_LH(3, "Image file starts at %d\n", rc);
+ device->bof = rc;
+ nr_of_blks -= rc;
+
+ PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
+ /* Capacity is in 512-byte sectors, blocks are hardsectors. */
+ set_capacity(device->blk_data.disk,
+ nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
+
+ device->blk_data.block_position = 0;
+ device->blk_data.medium_changed = 0;
+ return 0;
+}
+
+/* media_changed callback: report (and trace) the cached change flag. */
+static int
+tapeblock_medium_changed(struct gendisk *disk)
+{
+	struct tape_device *device = disk->private_data;
+
+	DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
+		device, device->blk_data.medium_changed);
+	return device->blk_data.medium_changed;
+}
+
+/*
+ * Block frontend tape device open function.
+ *
+ * Takes a device reference, refuses media with missing tapemarks,
+ * opens the tape, determines the capacity and switches the device
+ * into the TS_BLKUSE state. The reference is dropped again in
+ * tapeblock_release.
+ */
+static int
+tapeblock_open(struct inode *inode, struct file *filp)
+{
+ struct gendisk * disk;
+ struct tape_device * device;
+ int rc;
+
+ disk = inode->i_bdev->bd_disk;
+ device = tape_get_device_reference(disk->private_data);
+
+ if (device->required_tapemarks) {
+ DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
+ PRINT_ERR("TBLOCK: Refusing to open tape with missing"
+ " end of file marks.\n");
+ rc = -EPERM;
+ goto put_device;
+ }
+
+ rc = tape_open(device);
+ if (rc)
+ goto put_device;
+
+ rc = tapeblock_revalidate_disk(disk);
+ if (rc)
+ goto release;
+
+ /*
+ * Note: The reference to <device> is hold until the release function
+ * is called.
+ */
+ tape_state_set(device, TS_BLKUSE);
+ return 0;
+
+release:
+ tape_release(device);
+ put_device:
+ tape_put_device(device);
+ return rc;
+}
+
+/*
+ * Block frontend tape device release function.
+ *
+ * The open function took one reference on the tape device; switch the
+ * state back to TS_IN_USE, release the tape and drop that reference.
+ */
+static int
+tapeblock_release(struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+
+	device = inode->i_bdev->bd_disk->private_data;
+	tape_state_set(device, TS_IN_USE);
+	tape_release(device);
+	tape_put_device(device);
+	return 0;
+}
+
+/*
+ * Support of some generic block device IOCTLs.
+ *
+ * Only CDROMMULTISESSION is recognized (and quietly refused, it is
+ * probed by mount); everything else is rejected with a warning.
+ *
+ * Fix: the original fetched disk/device twice — once in the
+ * declaration initializers and again in the body — making the
+ * initializers dead code. Fetch each pointer exactly once.
+ */
+static int
+tapeblock_ioctl(
+	struct inode *		inode,
+	struct file *		file,
+	unsigned int		command,
+	unsigned long		arg
+) {
+	int rc;
+	int minor;
+	struct gendisk *disk;
+	struct tape_device *device;
+
+	rc = 0;
+	disk = inode->i_bdev->bd_disk;
+	if (!disk)
+		BUG();
+	device = disk->private_data;
+	if (!device)
+		BUG();
+	minor = iminor(inode);
+
+	DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
+	DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
+
+	switch (command) {
+	/* Refuse some IOCTL calls without complaining (mount). */
+	case 0x5310: /* CDROMMULTISESSION */
+		rc = -EINVAL;
+		break;
+	default:
+		PRINT_WARN("invalid ioctl 0x%x\n", command);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Initialize block device frontend.
+ *
+ * Registers the "tBLK" block major. With tapeblock_major == 0 the
+ * kernel assigns a dynamic major, which register_blkdev returns.
+ */
+int
+tapeblock_init(void)
+{
+ int rc;
+
+ /* Register the tape major number to the kernel */
+ rc = register_blkdev(tapeblock_major, "tBLK");
+ if (rc < 0)
+ return rc;
+
+ /* Remember the dynamically assigned major. */
+ if (tapeblock_major == 0)
+ tapeblock_major = rc;
+ PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
+ return 0;
+}
+
+/*
+ * Deregister major for block device frontend
+ */
+void
+tapeblock_exit(void)
+{
+ unregister_blkdev(tapeblock_major, "tBLK");
+}
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
new file mode 100644
index 000000000000..86262a13f7c6
--- /dev/null
+++ b/drivers/s390/char/tape_char.c
@@ -0,0 +1,492 @@
+/*
+ * drivers/s390/char/tape_char.c
+ * character device frontend for tape device driver
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Michael Holzheu <holzheu@de.ibm.com>
+ * Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/mtio.h>
+
+#include <asm/uaccess.h>
+
+#define TAPE_DBF_AREA tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+#include "tape_class.h"
+
+#define PRINTK_HEADER "TAPE_CHAR: "
+
+#define TAPECHAR_MAJOR 0 /* get dynamic major */
+
+/*
+ * file operation structure for tape character frontend
+ */
+static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
+static int tapechar_open(struct inode *,struct file *);
+static int tapechar_release(struct inode *,struct file *);
+static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
+			  unsigned long);
+
+/* VFS entry points for both the rewinding and non-rewinding nodes. */
+static struct file_operations tape_fops =
+{
+	.owner = THIS_MODULE,
+	.read = tapechar_read,
+	.write = tapechar_write,
+	.ioctl = tapechar_ioctl,
+	.open = tapechar_open,
+	.release = tapechar_release,
+};
+
+/* Character device major; 0 here means tapechar_init() gets a dynamic one. */
+static int tapechar_major = TAPECHAR_MAJOR;
+
+/*
+ * This function is called for every new tapedevice
+ *
+ * Creates the two character device nodes for a drive: minor 2n is the
+ * non-rewinding node ("ntibmX"), minor 2n+1 the rewinding node
+ * ("rtibmX"). Returns 0 on success or a negative error code; the
+ * original code ignored register_tape_dev() failures (ERR_PTR values).
+ */
+int
+tapechar_setup_device(struct tape_device * device)
+{
+	char	device_name[20];
+	int	rc;
+
+	sprintf(device_name, "ntibm%i", device->first_minor / 2);
+	device->nt = register_tape_dev(
+		&device->cdev->dev,
+		MKDEV(tapechar_major, device->first_minor),
+		&tape_fops,
+		device_name,
+		"non-rewinding"
+	);
+	if (IS_ERR(device->nt)) {
+		rc = PTR_ERR(device->nt);
+		device->nt = NULL;
+		return rc;
+	}
+	/* Same name with a leading 'r' for the rewinding node. */
+	device_name[0] = 'r';
+	device->rt = register_tape_dev(
+		&device->cdev->dev,
+		MKDEV(tapechar_major, device->first_minor + 1),
+		&tape_fops,
+		device_name,
+		"rewinding"
+	);
+	if (IS_ERR(device->rt)) {
+		/* Undo the first registration on failure. */
+		rc = PTR_ERR(device->rt);
+		device->rt = NULL;
+		unregister_tape_dev(device->nt);
+		device->nt = NULL;
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Remove both character device nodes created by tapechar_setup_device(). */
+void
+tapechar_cleanup_device(struct tape_device *device)
+{
+	unregister_tape_dev(device->rt);
+	device->rt = NULL;
+	unregister_tape_dev(device->nt);
+	device->nt = NULL;
+}
+
+/*
+ * Terminate write command (we write two TMs and skip backward over last)
+ * This ensures that the tape is always correctly terminated.
+ * When the user writes afterwards a new file, he will overwrite the
+ * second TM and therefore one TM will remain to separate the
+ * two files on the tape...
+ */
+static inline void
+tapechar_terminate_write(struct tape_device *device)
+{
+	/* Only back up if both tapemarks were written successfully. */
+	if (tape_mtop(device, MTWEOF, 1) == 0 &&
+	    tape_mtop(device, MTWEOF, 1) == 0)
+		tape_mtop(device, MTBSR, 1);
+}
+
+/*
+ * Make sure the device's idal buffer matches the requested block size.
+ *
+ * Reuses the existing buffer when the size already matches, rejects
+ * sizes above MAX_BLOCKSIZE, and otherwise allocates a replacement
+ * before freeing the old buffer. Returns 0, -EINVAL or -ENOMEM.
+ */
+static inline int
+tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
+{
+	struct idal_buffer *new;
+
+	if (device->char_data.idal_buf != NULL &&
+	    device->char_data.idal_buf->size == block_size)
+		return 0;
+
+	if (block_size > MAX_BLOCKSIZE) {
+		DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
+			block_size, MAX_BLOCKSIZE);
+		/* Fixed missing space in the message ("%zd> %d"). */
+		PRINT_ERR("Invalid blocksize (%zd > %d)\n",
+			block_size, MAX_BLOCKSIZE);
+		return -EINVAL;
+	}
+
+	/* The current idal buffer is not correct. Allocate a new one. */
+	new = idal_buffer_alloc(block_size, 0);
+	if (new == NULL)
+		return -ENOMEM;
+
+	if (device->char_data.idal_buf != NULL)
+		idal_buffer_free(device->char_data.idal_buf);
+
+	device->char_data.idal_buf = new;
+
+	return 0;
+}
+
+/*
+ * Tape device read function
+ *
+ * Reads one block from the tape into the user buffer and returns the
+ * number of bytes read, or a negative error code. With a fixed
+ * block_size the caller's buffer must hold at least one block;
+ * otherwise "count" itself is used as the block size.
+ */
+ssize_t
+tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	size_t block_size;
+	int rc;
+
+	DBF_EVENT(6, "TCHAR:read\n");
+	device = (struct tape_device *) filp->private_data;
+
+	/*
+	 * If the tape isn't terminated yet, do it now. And since we then
+	 * are at the end of the tape there wouldn't be anything to read
+	 * anyways. So we return immediatly.
+	 */
+	if(device->required_tapemarks) {
+		return tape_std_terminate_write(device);
+	}
+
+	/* Find out block size to use */
+	if (device->char_data.block_size != 0) {
+		if (count < device->char_data.block_size) {
+			DBF_EVENT(3, "TCHAR:read smaller than block "
+				  "size was requested\n");
+			return -EINVAL;
+		}
+		block_size = device->char_data.block_size;
+	} else {
+		block_size = count;
+	}
+
+	/* (Re)allocate the idal buffer for this block size. */
+	rc = tapechar_check_idalbuffer(device, block_size);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+	/* Changes position. */
+	device->blk_data.medium_changed = 1;
+#endif
+
+	DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
+	/* Let the discipline build the ccw chain. */
+	request = device->discipline->read_block(device, block_size);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	/* Execute it. */
+	rc = tape_do_io(device, request);
+	if (rc == 0) {
+		/* rescnt is the residual count: bytes NOT transferred. */
+		rc = block_size - request->rescnt;
+		DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
+		filp->f_pos += rc;
+		/* Copy data from idal buffer to user space. */
+		if (idal_buffer_to_user(device->char_data.idal_buf,
+					data, rc) != 0)
+			rc = -EFAULT;
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Tape device write function
+ *
+ * Writes the user buffer to tape as one or more fixed-size blocks (or
+ * as one block of "count" bytes when block_size is 0). Returns the
+ * number of bytes written, or a negative error code. A successful
+ * write arms required_tapemarks so the tape gets terminated later.
+ */
+ssize_t
+tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	size_t block_size;
+	size_t written;
+	int nblocks;
+	int i, rc;
+
+	DBF_EVENT(6, "TCHAR:write\n");
+	device = (struct tape_device *) filp->private_data;
+	/* Find out block size and number of blocks */
+	if (device->char_data.block_size != 0) {
+		if (count < device->char_data.block_size) {
+			DBF_EVENT(3, "TCHAR:write smaller than block "
+				  "size was requested\n");
+			return -EINVAL;
+		}
+		block_size = device->char_data.block_size;
+		nblocks = count / block_size;
+	} else {
+		block_size = count;
+		nblocks = 1;
+	}
+
+	/* (Re)allocate the idal buffer for this block size. */
+	rc = tapechar_check_idalbuffer(device, block_size);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+	/* Changes position. */
+	device->blk_data.medium_changed = 1;
+#endif
+
+	DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
+	DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
+	/* Let the discipline build the ccw chain. */
+	request = device->discipline->write_block(device, block_size);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	rc = 0;
+	written = 0;
+	/* The single request is re-executed once per block. */
+	for (i = 0; i < nblocks; i++) {
+		/* Copy data from user space to idal buffer. */
+		if (idal_buffer_from_user(device->char_data.idal_buf,
+					  data, block_size)) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = tape_do_io(device, request);
+		if (rc)
+			break;
+		DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
+			  block_size - request->rescnt);
+		filp->f_pos += block_size - request->rescnt;
+		written += block_size - request->rescnt;
+		/* A short write means the block did not fit; stop here. */
+		if (request->rescnt != 0)
+			break;
+		data += block_size;
+	}
+	tape_free_request(request);
+	if (rc == -ENOSPC) {
+		/*
+		 * Ok, the device has no more space. It has NOT written
+		 * the block.
+		 */
+		if (device->discipline->process_eov)
+			device->discipline->process_eov(device);
+		if (written > 0)
+			rc = 0;
+
+	}
+
+	/*
+	 * After doing a write we always need two tapemarks to correctly
+	 * terminate the tape (one to terminate the file, the second to
+	 * flag the end of recorded data.
+	 * Since process_eov positions the tape in front of the written
+	 * tapemark it doesn't hurt to write two marks again.
+	 */
+	if (!rc)
+		device->required_tapemarks = 2;
+
+	return rc ? rc : written;
+}
+
+/*
+ * Character frontend tape device open function.
+ *
+ * Looks the drive up by minor number, takes a device reference and
+ * brings the drive into use via tape_open(). The reference is dropped
+ * again if the open fails; otherwise tapechar_release() drops it.
+ */
+int
+tapechar_open (struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+	int minor, rc;
+
+	DBF_EVENT(6, "TCHAR:open: %i:%i\n",
+		imajor(filp->f_dentry->d_inode),
+		iminor(filp->f_dentry->d_inode));
+
+	if (imajor(filp->f_dentry->d_inode) != tapechar_major)
+		return -ENODEV;
+
+	/* Each drive owns TAPE_MINORS_PER_DEV consecutive minors. */
+	minor = iminor(filp->f_dentry->d_inode);
+	device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
+	if (IS_ERR(device)) {
+		DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
+		return PTR_ERR(device);
+	}
+
+
+	rc = tape_open(device);
+	if (rc == 0) {
+		filp->private_data = device;
+		return nonseekable_open(inode, filp);
+	}
+	tape_put_device(device);
+
+	return rc;
+}
+
+/*
+ * Character frontend tape device release function.
+ *
+ * Terminates the tape as needed (depending on which minor was open),
+ * frees the idal buffer and drops the reference taken in tapechar_open().
+ */
+
+int
+tapechar_release(struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+
+	DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
+	device = (struct tape_device *) filp->private_data;
+
+	/*
+	 * If this is the rewinding tape minor then rewind. In that case we
+	 * write all required tapemarks. Otherwise only one to terminate the
+	 * file.
+	 */
+	if ((iminor(inode) & 1) != 0) {
+		if (device->required_tapemarks)
+			tape_std_terminate_write(device);
+		tape_mtop(device, MTREW, 1);
+	} else {
+		if (device->required_tapemarks > 1) {
+			if (tape_mtop(device, MTWEOF, 1) == 0)
+				device->required_tapemarks--;
+		}
+	}
+
+	if (device->char_data.idal_buf != NULL) {
+		idal_buffer_free(device->char_data.idal_buf);
+		device->char_data.idal_buf = NULL;
+	}
+	tape_release(device);
+	/* tape_put_device() returns NULL, clearing our pointer. */
+	filp->private_data = tape_put_device(device);
+
+	return 0;
+}
+
+/*
+ * Tape device io controls.
+ *
+ * Handles MTIOCTOP (tape operations), MTIOCPOS (query position) and
+ * MTIOCGET (drive status); everything else is forwarded to the
+ * discipline's ioctl function if one exists.
+ */
+static int
+tapechar_ioctl(struct inode *inp, struct file *filp,
+	       unsigned int no, unsigned long data)
+{
+	struct tape_device *device;
+	int rc;
+
+	DBF_EVENT(6, "TCHAR:ioct\n");
+
+	device = (struct tape_device *) filp->private_data;
+
+	if (no == MTIOCTOP) {
+		struct mtop op;
+
+		if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
+			return -EFAULT;
+		if (op.mt_count < 0)
+			return -EINVAL;
+
+		/*
+		 * Operations that change tape position should write final
+		 * tapemarks.
+		 */
+		switch (op.mt_op) {
+		case MTFSF:
+		case MTBSF:
+		case MTFSR:
+		case MTBSR:
+		case MTREW:
+		case MTOFFL:
+		case MTEOM:
+		case MTRETEN:
+		case MTBSFM:
+		case MTFSFM:
+		case MTSEEK:
+#ifdef CONFIG_S390_TAPE_BLOCK
+			device->blk_data.medium_changed = 1;
+#endif
+			if (device->required_tapemarks)
+				tape_std_terminate_write(device);
+			/* fallthrough - only the cases above need work */
+		default:
+			;
+		}
+		rc = tape_mtop(device, op.mt_op, op.mt_count);
+
+		/* Explicit tapemarks reduce what release must still write. */
+		if (op.mt_op == MTWEOF && rc == 0) {
+			if (op.mt_count > device->required_tapemarks)
+				device->required_tapemarks = 0;
+			else
+				device->required_tapemarks -= op.mt_count;
+		}
+		return rc;
+	}
+	if (no == MTIOCPOS) {
+		/* MTIOCPOS: query the tape position. */
+		struct mtpos pos;
+
+		rc = tape_mtop(device, MTTELL, 1);
+		if (rc < 0)
+			return rc;
+		pos.mt_blkno = rc;
+		if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
+			return -EFAULT;
+		return 0;
+	}
+	if (no == MTIOCGET) {
+		/* MTIOCGET: query the tape drive status. */
+		struct mtget get;
+
+		memset(&get, 0, sizeof(get));
+		get.mt_type = MT_ISUNKNOWN;
+		get.mt_resid = 0 /* device->devstat.rescnt */;
+		get.mt_dsreg = device->tape_state;
+		/* FIXME: mt_erreg, mt_fileno */
+		get.mt_erreg = 0;
+		get.mt_fileno = 0;
+		/* Removed dead store: mt_gstat was zeroed then overwritten. */
+		get.mt_gstat = device->tape_generic_status;
+
+		if (device->medium_state == MS_LOADED) {
+			rc = tape_mtop(device, MTTELL, 1);
+
+			if (rc < 0)
+				return rc;
+
+			if (rc == 0)
+				get.mt_gstat |= GMT_BOT(~0);
+
+			get.mt_blkno = rc;
+		}
+
+		if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
+			return -EFAULT;
+
+		return 0;
+	}
+	/* Try the discipline ioctl function. */
+	if (device->discipline->ioctl_fn == NULL)
+		return -EINVAL;
+	return device->discipline->ioctl_fn(device, no, data);
+}
+
+/*
+ * Initialize character device frontend.
+ *
+ * Allocates 256 minors under a dynamic major and remembers the major
+ * in tapechar_major. Returns 0 or the error from alloc_chrdev_region()
+ * (the original code collapsed every failure into -1, i.e. -EPERM).
+ */
+int
+tapechar_init (void)
+{
+	dev_t	dev;
+	int	rc;
+
+	rc = alloc_chrdev_region(&dev, 0, 256, "tape");
+	if (rc)
+		return rc;
+
+	tapechar_major = MAJOR(dev);
+	PRINT_INFO("tape gets major %d for character devices\n", MAJOR(dev));
+
+	return 0;
+}
+
+/*
+ * cleanup
+ *
+ * Releases the 256 character device minors allocated in tapechar_init().
+ */
+void
+tapechar_exit(void)
+{
+	PRINT_INFO("tape releases major %d for character devices\n",
+		tapechar_major);
+	unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
+}
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
new file mode 100644
index 000000000000..0f8ffd4167ca
--- /dev/null
+++ b/drivers/s390/char/tape_class.c
@@ -0,0 +1,126 @@
+/*
+ * (C) Copyright IBM Corp. 2004
+ * tape_class.c ($Revision: 1.8 $)
+ *
+ * Tape class device support
+ *
+ * Author: Stefan Bader <shbader@de.ibm.com>
+ * Based on simple class device code by Greg K-H
+ */
+#include "tape_class.h"
+
+MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
+MODULE_DESCRIPTION(
+ "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
+ "tape_class.c ($Revision: 1.8 $)"
+);
+MODULE_LICENSE("GPL");
+
+struct class_simple *tape_class;
+
+/*
+ * Register a tape device and return a pointer to the cdev structure.
+ *
+ * device
+ *	The pointer to the struct device of the physical (base) device.
+ * dev
+ *	The intended major/minor number. The major number may be 0 to
+ *	get a dynamic major number.
+ * fops
+ *	The pointer to the drivers file operations for the tape device.
+ * device_name
+ *	The pointer to the name of the character device.
+ * mode_name
+ *	The name of the sysfs link from the base device to the class
+ *	device.
+ *
+ * Returns the new tape_class_device or an ERR_PTR.
+ */
+struct tape_class_device *register_tape_dev(
+	struct device *		device,
+	dev_t			dev,
+	struct file_operations *fops,
+	char *			device_name,
+	char *			mode_name)
+{
+	struct tape_class_device *	tcd;
+	int		rc;
+	char *		s;
+
+	tcd = kmalloc(sizeof(struct tape_class_device), GFP_KERNEL);
+	if (!tcd)
+		return ERR_PTR(-ENOMEM);
+
+	memset(tcd, 0, sizeof(struct tape_class_device));
+	strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+	/* strncpy() does not terminate on truncation - force a NUL. */
+	tcd->device_name[TAPECLASS_NAME_LEN - 1] = '\0';
+	for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
+		*s = '!';
+	strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+	tcd->mode_name[TAPECLASS_NAME_LEN - 1] = '\0';
+	for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
+		*s = '!';
+
+	tcd->char_device = cdev_alloc();
+	if (!tcd->char_device) {
+		rc = -ENOMEM;
+		goto fail_with_tcd;
+	}
+
+	tcd->char_device->owner = fops->owner;
+	tcd->char_device->ops   = fops;
+	tcd->char_device->dev   = dev;
+
+	rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
+	if (rc)
+		goto fail_with_cdev;
+
+	tcd->class_device = class_simple_device_add(
+				tape_class,
+				tcd->char_device->dev,
+				device,
+				"%s", tcd->device_name
+			);
+	/* class_simple_device_add() returns an ERR_PTR on failure. */
+	if (IS_ERR(tcd->class_device)) {
+		rc = PTR_ERR(tcd->class_device);
+		goto fail_with_cdev;
+	}
+	sysfs_create_link(
+		&device->kobj,
+		&tcd->class_device->kobj,
+		tcd->mode_name
+	);
+
+	return tcd;
+
+fail_with_cdev:
+	cdev_del(tcd->char_device);
+
+fail_with_tcd:
+	kfree(tcd);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(register_tape_dev);
+
+/*
+ * Undo register_tape_dev(): remove the sysfs link, the class device and
+ * the cdev, then free the structure. Tolerates NULL and ERR_PTR input
+ * so callers may pass a failed registration result unchecked.
+ */
+void unregister_tape_dev(struct tape_class_device *tcd)
+{
+	if (tcd != NULL && !IS_ERR(tcd)) {
+		sysfs_remove_link(
+			&tcd->class_device->dev->kobj,
+			tcd->mode_name
+		);
+		class_simple_device_remove(tcd->char_device->dev);
+		cdev_del(tcd->char_device);
+		kfree(tcd);
+	}
+}
+EXPORT_SYMBOL(unregister_tape_dev);
+
+
+/*
+ * Create the "tape390" sysfs class used by register_tape_dev().
+ * The original code ignored class_simple_create() failures, leaving
+ * tape_class as an ERR_PTR for later use.
+ */
+static int __init tape_init(void)
+{
+	tape_class = class_simple_create(THIS_MODULE, "tape390");
+	if (IS_ERR(tape_class)) {
+		int rc = PTR_ERR(tape_class);
+
+		tape_class = NULL;
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Destroy the "tape390" class created in tape_init(). */
+static void __exit tape_exit(void)
+{
+	class_simple_destroy(tape_class);
+	tape_class = NULL;
+}
+
+postcore_initcall(tape_init);
+module_exit(tape_exit);
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
new file mode 100644
index 000000000000..33133ad00ba2
--- /dev/null
+++ b/drivers/s390/char/tape_class.h
@@ -0,0 +1,61 @@
+/*
+ * (C) Copyright IBM Corp. 2004 All Rights Reserved.
+ * tape_class.h ($Revision: 1.4 $)
+ *
+ * Tape class device support
+ *
+ * Author: Stefan Bader <shbader@de.ibm.com>
+ * Based on simple class device code by Greg K-H
+ */
+#ifndef __TAPE_CLASS_H__
+#define __TAPE_CLASS_H__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/kobject.h>
+#include <linux/kobj_map.h>
+#include <linux/cdev.h>
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+
+#define TAPECLASS_NAME_LEN	32
+
+/* Bookkeeping for one registered tape character device node. */
+struct tape_class_device {
+	/* cdev allocated and added by register_tape_dev(). */
+	struct cdev *		char_device;
+	/* Sysfs class device in the tape390 class. */
+	struct class_device *	class_device;
+	char			device_name[TAPECLASS_NAME_LEN];
+	char			mode_name[TAPECLASS_NAME_LEN];
+};
+
+/*
+ * Register a tape device and return a pointer to the tape class device
+ * created by the call.
+ *
+ * device
+ * The pointer to the struct device of the physical (base) device.
+ * dev
+ * The intended major/minor number. The major number may be 0 to
+ * get a dynamic major number.
+ * fops
+ * The pointer to the drivers file operations for the tape device.
+ * device_name
+ * Pointer to the logical device name (will also be used as kobject name
+ * of the cdev). This can also be called the name of the tape class
+ * device.
+ * mode_name
+ * Points to the name of the tape mode. This creates a link with that
+ * name from the physical device to the logical device (class).
+ */
+struct tape_class_device *register_tape_dev(
+	struct device *		device,
+	dev_t			dev,
+	struct file_operations *fops,
+	char *			device_name,
+	/* renamed from node_name to match the definition and the
+	 * parameter documentation above */
+	char *			mode_name
+);
+void unregister_tape_dev(struct tape_class_device *tcd);
+
+#endif /* __TAPE_CLASS_H__ */
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
new file mode 100644
index 000000000000..e51046ab8adc
--- /dev/null
+++ b/drivers/s390/char/tape_core.c
@@ -0,0 +1,1242 @@
+/*
+ * drivers/s390/char/tape_core.c
+ * basic function of the tape device driver
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Michael Holzheu <holzheu@de.ibm.com>
+ * Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h> // for kernel parameters
+#include <linux/kmod.h> // for requesting modules
+#include <linux/spinlock.h> // for locks
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+
+#include <asm/types.h> // for variable types
+
+#define TAPE_DBF_AREA tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+#define PRINTK_HEADER "TAPE_CORE: "
+
+static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
+static void __tape_remove_request(struct tape_device *, struct tape_request *);
+
+/*
+ * One list to contain all tape devices of all disciplines, so
+ * we can assign the devices to minor numbers of the same major
+ * The list is protected by the rwlock
+ */
+static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
+static DEFINE_RWLOCK(tape_device_lock);
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+/*
+ * Printable strings for tape enumerations.
+ */
+const char *tape_state_verbose[TS_SIZE] =
+{
+ [TS_UNUSED] = "UNUSED",
+ [TS_IN_USE] = "IN_USE",
+ [TS_BLKUSE] = "BLKUSE",
+ [TS_INIT] = "INIT ",
+ [TS_NOT_OPER] = "NOT_OP"
+};
+
+const char *tape_op_verbose[TO_SIZE] =
+{
+ [TO_BLOCK] = "BLK", [TO_BSB] = "BSB",
+ [TO_BSF] = "BSF", [TO_DSE] = "DSE",
+ [TO_FSB] = "FSB", [TO_FSF] = "FSF",
+ [TO_LBL] = "LBL", [TO_NOP] = "NOP",
+ [TO_RBA] = "RBA", [TO_RBI] = "RBI",
+ [TO_RFO] = "RFO", [TO_REW] = "REW",
+ [TO_RUN] = "RUN", [TO_WRI] = "WRI",
+ [TO_WTM] = "WTM", [TO_MSEN] = "MSN",
+ [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
+ [TO_READ_ATTMSG] = "RAT",
+ [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
+ [TO_UNASSIGN] = "UAS"
+};
+
+/*
+ * Pack a ccw bus id string into a single int for compact logging.
+ *
+ * NOTE(review): assumes the bus id has the form "c.s.dddd" (two decimal
+ * fields separated by dots, then a hex device number) - confirm against
+ * the common I/O layer's bus_id format. No validation is performed.
+ */
+static inline int
+busid_to_int(char *bus_id)
+{
+	int	dec;
+	int	d;
+	char *	s;
+
+	/* First field, decimal. */
+	for(s = bus_id, d = 0; *s != '\0' && *s != '.'; s++)
+		d = (d * 10) + (*s - '0');
+	dec = d;
+	/* Second field, decimal, packed into the next byte. */
+	for(s++, d = 0; *s != '\0' && *s != '.'; s++)
+		d = (d * 10) + (*s - '0');
+	dec = (dec << 8) + d;
+
+	/* Remaining field, hexadecimal, one nibble per character. */
+	for(s++; *s != '\0'; s++) {
+		if (*s >= '0' && *s <= '9') {
+			d = *s - '0';
+		} else if (*s >= 'a' && *s <= 'f') {
+			d = *s - 'a' + 10;
+		} else {
+			d = *s - 'A' + 10;
+		}
+		dec = (dec << 4) + d;
+	}
+
+	return dec;
+}
+
+/*
+ * Some channel attached tape specific attributes.
+ *
+ * FIXME: In the future the first_minor and blocksize attribute should be
+ * replaced by a link to the cdev tree.
+ */
+static ssize_t
+tape_medium_state_show(struct device *dev, char *buf)
+{
+ struct tape_device *tdev;
+
+ tdev = (struct tape_device *) dev->driver_data;
+ return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
+}
+
+static
+DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
+
+static ssize_t
+tape_first_minor_show(struct device *dev, char *buf)
+{
+ struct tape_device *tdev;
+
+ tdev = (struct tape_device *) dev->driver_data;
+ return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
+}
+
+static
+DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
+
+static ssize_t
+tape_state_show(struct device *dev, char *buf)
+{
+ struct tape_device *tdev;
+
+ tdev = (struct tape_device *) dev->driver_data;
+ return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
+ "OFFLINE" : tape_state_verbose[tdev->tape_state]);
+}
+
+static
+DEVICE_ATTR(state, 0444, tape_state_show, NULL);
+
+static ssize_t
+tape_operation_show(struct device *dev, char *buf)
+{
+ struct tape_device *tdev;
+ ssize_t rc;
+
+ tdev = (struct tape_device *) dev->driver_data;
+ if (tdev->first_minor < 0)
+ return scnprintf(buf, PAGE_SIZE, "N/A\n");
+
+ spin_lock_irq(get_ccwdev_lock(tdev->cdev));
+ if (list_empty(&tdev->req_queue))
+ rc = scnprintf(buf, PAGE_SIZE, "---\n");
+ else {
+ struct tape_request *req;
+
+ req = list_entry(tdev->req_queue.next, struct tape_request,
+ list);
+ rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
+ }
+ spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
+ return rc;
+}
+
+static
+DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
+
+static ssize_t
+tape_blocksize_show(struct device *dev, char *buf)
+{
+ struct tape_device *tdev;
+
+ tdev = (struct tape_device *) dev->driver_data;
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
+}
+
+static
+DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
+
+static struct attribute *tape_attrs[] = {
+ &dev_attr_medium_state.attr,
+ &dev_attr_first_minor.attr,
+ &dev_attr_state.attr,
+ &dev_attr_operation.attr,
+ &dev_attr_blocksize.attr,
+ NULL
+};
+
+static struct attribute_group tape_attr_group = {
+ .attrs = tape_attrs,
+};
+
+/*
+ * Tape state functions
+ *
+ * Set a new device state, logging the transition, and wake up anyone
+ * waiting on a state change. A device in TS_NOT_OPER never leaves that
+ * state again.
+ */
+void
+tape_state_set(struct tape_device *device, enum tape_state newstate)
+{
+	const char *str;
+
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(3, "ts_set err: not oper\n");
+		return;
+	}
+	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
+	/*
+	 * Bound-check against TS_SIZE (number of states); the original
+	 * code wrongly compared against TO_SIZE (number of operations)
+	 * and logged the old state twice.
+	 */
+	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
+		str = tape_state_verbose[device->tape_state];
+	else
+		str = "UNKNOWN TS";
+	DBF_EVENT(4, "old ts: %s\n", str);
+	if (newstate < TS_SIZE && newstate >= 0)
+		str = tape_state_verbose[newstate];
+	else
+		str = "UNKNOWN TS";
+	DBF_EVENT(4, "new ts: %s\n", str);
+	device->tape_state = newstate;
+	wake_up(&device->state_change_wq);
+}
+
+/*
+ * Update the medium state, maintain the GMT_DR_OPEN bit of the generic
+ * status accordingly, and wake up state-change waiters. No-op if the
+ * state is unchanged.
+ */
+void
+tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
+{
+	if (device->medium_state == newstate)
+		return;
+	switch(newstate){
+	case MS_UNLOADED:
+		device->tape_generic_status |= GMT_DR_OPEN(~0);
+		PRINT_INFO("(%s): Tape is unloaded\n",
+			   device->cdev->dev.bus_id);
+		break;
+	case MS_LOADED:
+		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
+		PRINT_INFO("(%s): Tape has been mounted\n",
+			   device->cdev->dev.bus_id);
+		break;
+	default:
+		// print nothing
+		break;
+	}
+	device->medium_state = newstate;
+	wake_up(&device->state_change_wq);
+}
+
+/*
+ * Stop running ccw. Has to be called with the device lock held.
+ *
+ * Retries ccw_device_clear() up to five times; returns 0 on success
+ * (or if the request already completed) and the last error otherwise.
+ */
+static inline int
+__tape_halt_io(struct tape_device *device, struct tape_request *request)
+{
+	int retries;
+	int rc;
+
+	/* Check if interrupt has already been processed */
+	if (request->callback == NULL)
+		return 0;
+
+	rc = 0;
+	for (retries = 0; retries < 5; retries++) {
+		rc = ccw_device_clear(device->cdev, (long) request);
+
+		if (rc == 0) {			/* Termination successful */
+			request->rc	= -EIO;
+			request->status	= TAPE_REQUEST_DONE;
+			return 0;
+		}
+
+		/*
+		 * NOTE(review): -EBUSY says "retry late" but the loop
+		 * retries immediately without a delay - confirm intent.
+		 */
+		if (rc == -ENODEV)
+			DBF_EXCEPTION(2, "device gone, retry\n");
+		else if (rc == -EIO)
+			DBF_EXCEPTION(2, "I/O error, retry\n");
+		else if (rc == -EBUSY)
+			DBF_EXCEPTION(2, "device busy, retry late\n");
+		else
+			BUG();
+	}
+
+	return rc;
+}
+
+/*
+ * Add device into the sorted list, giving it the first
+ * available minor number.
+ *
+ * The list is kept sorted by first_minor; this scan finds the first
+ * gap of TAPE_MINORS_PER_DEV minors. Fails with -ENODEV once the
+ * 256-minor space is exhausted.
+ */
+static int
+tape_assign_minor(struct tape_device *device)
+{
+	struct tape_device *tmp;
+	int minor;
+
+	minor = 0;
+	write_lock(&tape_device_lock);
+	list_for_each_entry(tmp, &tape_device_list, node) {
+		if (minor < tmp->first_minor)
+			break;
+		minor += TAPE_MINORS_PER_DEV;
+	}
+	if (minor >= 256) {
+		write_unlock(&tape_device_lock);
+		return -ENODEV;
+	}
+	device->first_minor = minor;
+	/* Insert before tmp to keep the list sorted. */
+	list_add_tail(&device->node, &tmp->node);
+	write_unlock(&tape_device_lock);
+	return 0;
+}
+
+/*
+ * Take the device out of the minor-number list again and mark it as
+ * having no minor assigned (first_minor = -1).
+ */
+static void
+tape_remove_minor(struct tape_device *device)
+{
+	write_lock(&tape_device_lock);
+	device->first_minor = -1;
+	list_del_init(&device->node);
+	write_unlock(&tape_device_lock);
+}
+
+/*
+ * Set a device online.
+ *
+ * This function is called by the common I/O layer to move a device from the
+ * detected but offline into the online state.
+ * If we return an error (RC < 0) the device remains in the offline state. This
+ * can happen if the device is assigned somewhere else, for example.
+ *
+ * Setup order is: discipline, minor, char frontend, block frontend.
+ * The error labels unwind in exactly the reverse order; the original
+ * label ordering skipped discipline cleanup when tapechar_setup_device()
+ * failed.
+ */
+int
+tape_generic_online(struct tape_device *device,
+		   struct tape_discipline *discipline)
+{
+	int rc;
+
+	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
+
+	if (device->tape_state != TS_INIT) {
+		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
+		return -EINVAL;
+	}
+
+	/* Let the discipline have a go at the device. */
+	device->discipline = discipline;
+	if (!try_module_get(discipline->owner)) {
+		PRINT_ERR("Cannot get module. Module gone.\n");
+		return -EINVAL;
+	}
+
+	rc = discipline->setup_device(device);
+	if (rc)
+		goto out;
+	rc = tape_assign_minor(device);
+	if (rc)
+		goto out_discipline;
+
+	rc = tapechar_setup_device(device);
+	if (rc)
+		goto out_minor;
+	rc = tapeblock_setup_device(device);
+	if (rc)
+		goto out_char;
+
+	tape_state_set(device, TS_UNUSED);
+
+	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);
+
+	return 0;
+
+out_char:
+	tapechar_cleanup_device(device);
+out_minor:
+	tape_remove_minor(device);
+out_discipline:
+	device->discipline->cleanup_device(device);
+	device->discipline = NULL;
+out:
+	module_put(discipline->owner);
+	return rc;
+}
+
+/*
+ * Tear down everything set up by tape_generic_online(), in reverse
+ * order, and reset the medium state.
+ */
+static inline void
+tape_cleanup_device(struct tape_device *device)
+{
+	tapeblock_cleanup_device(device);
+	tapechar_cleanup_device(device);
+	device->discipline->cleanup_device(device);
+	module_put(device->discipline->owner);
+	tape_remove_minor(device);
+	tape_med_state_set(device, MS_UNKNOWN);
+}
+
+/*
+ * Set device offline.
+ *
+ * Called by the common I/O layer if the drive should set offline on user
+ * request. We may prevent this by returning an error.
+ * Manual offline is only allowed while the drive is not in use.
+ */
+int
+tape_generic_offline(struct tape_device *device)
+{
+	if (!device) {
+		PRINT_ERR("tape_generic_offline: no such device\n");
+		return -ENODEV;
+	}
+
+	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
+		device->cdev_id, device);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+		case TS_NOT_OPER:
+			/* Already offline or dead: nothing to undo. */
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		case TS_UNUSED:
+			/* Cleanup happens outside the ccw device lock. */
+			tape_state_set(device, TS_INIT);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+			break;
+		default:
+			DBF_EVENT(3, "(%08x): Set offline failed "
+				"- drive in use.\n",
+				device->cdev_id);
+			PRINT_WARN("(%s): Set offline failed "
+				"- drive in use.\n",
+				device->cdev->dev.bus_id);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			return -EBUSY;
+	}
+
+	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
+	return 0;
+}
+
+/*
+ * Allocate memory for a new device structure.
+ *
+ * Returns a fully initialized tape_device with one reference held
+ * (ref_count = 1), or an ERR_PTR on allocation failure. The modeset
+ * byte is allocated separately because it must be DMA-capable.
+ */
+static struct tape_device *
+tape_alloc_device(void)
+{
+	struct tape_device *device;
+
+	device = (struct tape_device *)
+		kmalloc(sizeof(struct tape_device), GFP_KERNEL);
+	if (device == NULL) {
+		DBF_EXCEPTION(2, "ti:no mem\n");
+		PRINT_INFO ("can't allocate memory for "
+			    "tape info structure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	memset(device, 0, sizeof(struct tape_device));
+	device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
+	if (device->modeset_byte == NULL) {
+		DBF_EXCEPTION(2, "ti:no mem\n");
+		PRINT_INFO("can't allocate memory for modeset byte\n");
+		kfree(device);
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&device->req_queue);
+	INIT_LIST_HEAD(&device->node);
+	init_waitqueue_head(&device->state_change_wq);
+	device->tape_state = TS_INIT;
+	device->medium_state = MS_UNKNOWN;
+	*device->modeset_byte = 0;
+	/* No minor assigned yet; tape_assign_minor() sets it later. */
+	device->first_minor = -1;
+	atomic_set(&device->ref_count, 1);
+
+	return device;
+}
+
+/*
+ * Get a reference to an existing device structure. This will automatically
+ * increment the reference count.
+ */
+struct tape_device *
+tape_get_device_reference(struct tape_device *device)
+{
+	int count;
+
+	count = atomic_inc_return(&device->ref_count);
+	DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device, count);
+
+	return device;
+}
+
+/*
+ * Decrease the reference counter of a devices structure. If the
+ * reference counter reaches zero free the device structure.
+ * The function returns a NULL pointer to be used by the caller
+ * for clearing reference pointers.
+ */
+struct tape_device *
+tape_put_device(struct tape_device *device)
+{
+	int remain;
+
+	remain = atomic_dec_return(&device->ref_count);
+	if (remain > 0) {
+		DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
+	} else {
+		/* remain < 0 means a put without a matching get. */
+		if (remain < 0) {
+			DBF_EVENT(4, "put device without reference\n");
+			PRINT_ERR("put device without reference\n");
+		} else {
+			/* Last reference gone: free modeset byte and device. */
+			DBF_EVENT(4, "tape_free_device(%p)\n", device);
+			kfree(device->modeset_byte);
+			kfree(device);
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Find tape device by a device index.
+ *
+ * Returns a referenced device (caller must tape_put_device() it) or
+ * ERR_PTR(-ENODEV) if no device with that index exists.
+ */
+struct tape_device *
+tape_get_device(int devindex)
+{
+	struct tape_device *device, *tmp;
+
+	device = ERR_PTR(-ENODEV);
+	read_lock(&tape_device_lock);
+	list_for_each_entry(tmp, &tape_device_list, node) {
+		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
+			device = tape_get_device_reference(tmp);
+			break;
+		}
+	}
+	read_unlock(&tape_device_lock);
+	return device;
+}
+
+/*
+ * Driverfs tape probe function.
+ *
+ * Allocates the tape_device for a newly detected ccw device, links the
+ * two together and creates the sysfs attribute group.
+ */
+int
+tape_generic_probe(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+
+	device = tape_alloc_device();
+	if (IS_ERR(device))
+		return -ENODEV;
+	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
+	cdev->dev.driver_data = device;
+	device->cdev = cdev;
+	device->cdev_id = busid_to_int(cdev->dev.bus_id);
+	cdev->handler = __tape_do_irq;
+
+	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+	/* NOTE(review): sysfs_create_group() return value is ignored here
+	 * - attribute creation failure is silently tolerated. */
+	sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
+
+	return 0;
+}
+
+/*
+ * Fail every queued request with -EIO and run its callback. Used when
+ * the device vanishes while requests are outstanding. Caller holds the
+ * ccw device lock.
+ */
+static inline void
+__tape_discard_requests(struct tape_device *device)
+{
+	struct tape_request *	request;
+	struct list_head *	l, *n;
+
+	list_for_each_safe(l, n, &device->req_queue) {
+		request = list_entry(l, struct tape_request, list);
+		if (request->status == TAPE_REQUEST_IN_IO)
+			request->status = TAPE_REQUEST_DONE;
+		list_del(&request->list);
+
+		/* Decrease ref_count for removed request. */
+		request->device = tape_put_device(device);
+		request->rc = -EIO;
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+}
+
+/*
+ * Driverfs tape remove function.
+ *
+ * This function is called whenever the common I/O layer detects the device
+ * gone. This can happen at any time and we cannot refuse.
+ */
+void
+tape_generic_remove(struct ccw_device *cdev)
+{
+	struct tape_device *	device;
+
+	device = cdev->dev.driver_data;
+	if (!device) {
+		PRINT_ERR("No device pointer in tape_generic_remove!\n");
+		return;
+	}
+	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+			tape_state_set(device, TS_NOT_OPER);
+			/* fallthrough - intentional */
+		case TS_NOT_OPER:
+			/*
+			 * Nothing to do.
+			 */
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		case TS_UNUSED:
+			/*
+			 * Need only to release the device.
+			 */
+			tape_state_set(device, TS_NOT_OPER);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+			break;
+		default:
+			/*
+			 * There may be requests on the queue. We will not get
+			 * an interrupt for a request that was running. So we
+			 * just post them all as I/O errors.
+			 */
+			DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
+				device->cdev_id);
+			PRINT_WARN("(%s): Drive in use vanished - "
+				"expect trouble!\n",
+				device->cdev->dev.bus_id);
+			PRINT_WARN("State was %i\n", device->tape_state);
+			tape_state_set(device, TS_NOT_OPER);
+			__tape_discard_requests(device);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+	}
+
+	/* Drop the probe-time reference and detach from the ccw device. */
+	if (cdev->dev.driver_data != NULL) {
+		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
+		cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
+	}
+}
+
+/*
+ * Allocate a new tape ccw request with room for 'cplength' channel command
+ * words and a 'datasize' byte data buffer (both DMA-capable memory for the
+ * channel subsystem). Either size may be 0. Returns the request or an
+ * ERR_PTR(-ENOMEM). BUGs if either size exceeds one page.
+ */
+struct tape_request *
+tape_alloc_request(int cplength, int datasize)
+{
+	struct tape_request *request;
+
+	if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
+		BUG();
+
+	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
+
+	request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
+						  GFP_KERNEL);
+	if (request == NULL) {
+		DBF_EXCEPTION(1, "cqra nomem\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	memset(request, 0, sizeof(struct tape_request));
+	/* allocate channel program */
+	if (cplength > 0) {
+		/* NOTE(review): GFP_ATOMIC here vs. GFP_KERNEL for the
+		 * other two allocations looks inconsistent - confirm
+		 * whether a sleeping allocation would be acceptable. */
+		request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
+					  GFP_ATOMIC | GFP_DMA);
+		if (request->cpaddr == NULL) {
+			DBF_EXCEPTION(1, "cqra nomem\n");
+			kfree(request);
+			return ERR_PTR(-ENOMEM);
+		}
+		memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));
+	}
+	/* alloc small kernel buffer */
+	if (datasize > 0) {
+		request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
+		if (request->cpdata == NULL) {
+			DBF_EXCEPTION(1, "cqra nomem\n");
+			if (request->cpaddr != NULL)
+				kfree(request->cpaddr);
+			kfree(request);
+			return ERR_PTR(-ENOMEM);
+		}
+		memset(request->cpdata, 0, datasize);
+	}
+	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
+		request->cpdata);
+
+	return request;
+}
+
+/*
+ * Free a tape ccw request: drop its device reference (if any) and release
+ * the channel program, the data buffer and the request itself.
+ * Note: the NULL checks before kfree are redundant (kfree(NULL) is a no-op)
+ * but harmless.
+ */
+void
+tape_free_request (struct tape_request * request)
+{
+	DBF_LH(6, "Free request %p\n", request);
+
+	if (request->device != NULL) {
+		request->device = tape_put_device(request->device);
+	}
+	if (request->cpdata != NULL)
+		kfree(request->cpdata);
+	if (request->cpaddr != NULL)
+		kfree(request->cpaddr);
+	kfree(request);
+}
+
+/*
+ * Walk the request queue and try to start requests until one starts
+ * successfully; requests that fail to start are completed with the
+ * ccw_device_start error code. Must be called with the ccw device
+ * lock held.
+ */
+static inline void
+__tape_do_io_list(struct tape_device *device)
+{
+	struct list_head *l, *n;
+	struct tape_request *request;
+	int rc;
+
+	DBF_LH(6, "__tape_do_io_list(%p)\n", device);
+	/*
+	 * Try to start each request on request queue until one is
+	 * started successful.
+	 */
+	list_for_each_safe(l, n, &device->req_queue) {
+		request = list_entry(l, struct tape_request, list);
+#ifdef CONFIG_S390_TAPE_BLOCK
+		/* Block requests need their locate CCW adjusted first. */
+		if (request->op == TO_BLOCK)
+			device->discipline->check_locate(device, request);
+#endif
+		rc = ccw_device_start(device->cdev, request->cpaddr,
+				      (unsigned long) request, 0x00,
+				      request->options);
+		if (rc == 0) {
+			request->status = TAPE_REQUEST_IN_IO;
+			break;
+		}
+		/* Start failed. Remove request and indicate failure. */
+		DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
+
+		/* Set ending status and do callback. */
+		request->rc = rc;
+		request->status = TAPE_REQUEST_DONE;
+		__tape_remove_request(device, request);
+	}
+}
+
+/*
+ * Take a finished request off the queue, run its completion callback and
+ * kick the next queued request (if any). Must be called with the ccw
+ * device lock held.
+ */
+static void
+__tape_remove_request(struct tape_device *device, struct tape_request *request)
+{
+	/* Remove from request queue. */
+	list_del(&request->list);
+
+	/* Do callback. */
+	if (request->callback != NULL)
+		request->callback(request, request->callback_data);
+
+	/* Start next request. */
+	if (!list_empty(&device->req_queue))
+		__tape_do_io_list(device);
+}
+
+/*
+ * Write sense data to console/dbf. Dumps the subchannel status, the
+ * affected device and operation, and the 32 bytes of extended control
+ * word (sense) data from the irb.
+ */
+void
+tape_dump_sense(struct tape_device* device, struct tape_request *request,
+		struct irb *irb)
+{
+	unsigned int *sptr;
+
+	PRINT_INFO("-------------------------------------------------\n");
+	PRINT_INFO("DSTAT : %02x  CSTAT: %02x	CPA: %04x\n",
+		   irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
+	PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
+	if (request != NULL)
+		PRINT_INFO("OP	  : %s\n", tape_op_verbose[request->op]);
+
+	sptr = (unsigned int *) irb->ecw;
+	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
+		   sptr[0], sptr[1], sptr[2], sptr[3]);
+	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
+		   sptr[4], sptr[5], sptr[6], sptr[7]);
+	PRINT_INFO("--------------------------------------------------\n");
+}
+
+/*
+ * Write sense data to dbf only (no console output). "---" is logged as
+ * the operation for unsolicited interrupts where no request is known.
+ */
+void
+tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb)
+{
+	unsigned int *sptr;
+	const char* op;
+
+	if (request != NULL)
+		op = tape_op_verbose[request->op];
+	else
+		op = "---";
+	DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
+		  irb->scsw.dstat,irb->scsw.cstat);
+	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
+	sptr = (unsigned int *) irb->ecw;
+	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
+}
+
+/*
+ * I/O helper function. Adds the request to the request queue
+ * and starts it if the tape is idle. Has to be called with
+ * the device lock held. Returns 0 on success, -ENODEV when the
+ * device state does not permit the operation, or the error code
+ * from ccw_device_start.
+ */
+static inline int
+__tape_do_io(struct tape_device *device, struct tape_request *request)
+{
+	/* Some operations are allowed before the device is fully in use. */
+	switch (request->op) {
+		case TO_MSEN:
+		case TO_ASSIGN:
+		case TO_UNASSIGN:
+		case TO_READ_ATTMSG:
+			if (device->tape_state == TS_INIT)
+				break;
+			if (device->tape_state == TS_UNUSED)
+				break;
+			/* fallthrough - all other states get the default check */
+		default:
+			if (device->tape_state == TS_BLKUSE)
+				break;
+			if (device->tape_state != TS_IN_USE)
+				return -ENODEV;
+	}
+
+	/* Increase use count of device for the added request. */
+	request->device = tape_get_device_reference(device);
+
+	if (list_empty(&device->req_queue)) {
+		/* No other requests are on the queue. Start this one. */
+		int rc;
+#ifdef CONFIG_S390_TAPE_BLOCK
+		if (request->op == TO_BLOCK)
+			device->discipline->check_locate(device, request);
+#endif
+		rc = ccw_device_start(device->cdev, request->cpaddr,
+				      (unsigned long) request, 0x00,
+				      request->options);
+		if (rc) {
+			DBF_EVENT(1, "tape: DOIO failed with rc = %i\n", rc);
+			return rc;
+		}
+		DBF_LH(5, "Request %p added for execution.\n", request);
+		list_add(&request->list, &device->req_queue);
+		request->status = TAPE_REQUEST_IN_IO;
+	} else {
+		DBF_LH(5, "Request %p add to queue.\n", request);
+		list_add_tail(&request->list, &device->req_queue);
+		request->status = TAPE_REQUEST_QUEUED;
+	}
+	return 0;
+}
+
+/*
+ * Add the request to the request queue, try to start it if the
+ * tape is idle. Return without waiting for end of i/o; completion
+ * is signalled via the request's callback.
+ */
+int
+tape_do_io_async(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Add request to request queue and try to start it. */
+	rc = __tape_do_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * tape_do_io/__tape_wake_up
+ * Add the request to the request queue, try to start it if the
+ * tape is idle and wait uninterruptible for its completion.
+ *
+ * Completion callback: clearing request->callback is the "done" flag
+ * that tape_do_io's wait_event condition tests.
+ */
+static void
+__tape_wake_up(struct tape_request *request, void *data)
+{
+	request->callback = NULL;
+	wake_up((wait_queue_head_t *) data);
+}
+
+/*
+ * Synchronous I/O: queue the request and sleep (uninterruptible) until
+ * the completion callback clears request->callback. Returns the queueing
+ * error, or the request's final rc.
+ */
+int
+tape_do_io(struct tape_device *device, struct tape_request *request)
+{
+	wait_queue_head_t wq;
+	int rc;
+
+	init_waitqueue_head(&wq);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Setup callback */
+	request->callback = __tape_wake_up;
+	request->callback_data = &wq;
+	/* Add request to request queue and try to start it. */
+	rc = __tape_do_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc)
+		return rc;
+	/* Request added to the queue. Wait for its completion. */
+	wait_event(wq, (request->callback == NULL));
+	/* Get rc from request */
+	return request->rc;
+}
+
+/*
+ * tape_do_io_interruptible/__tape_wake_up_interruptible
+ * Add the request to the request queue, try to start it if the
+ * tape is idle and wait interruptible for its completion.
+ *
+ * Completion callback: clearing request->callback is the "done" flag.
+ */
+static void
+__tape_wake_up_interruptible(struct tape_request *request, void *data)
+{
+	request->callback = NULL;
+	wake_up_interruptible((wait_queue_head_t *) data);
+}
+
+/*
+ * Synchronous interruptible I/O. If the wait is broken by a signal the
+ * running channel program is halted; -ERESTARTSYS is returned when the
+ * halt succeeded, otherwise the halt error code.
+ */
+int
+tape_do_io_interruptible(struct tape_device *device,
+			 struct tape_request *request)
+{
+	wait_queue_head_t wq;
+	int rc;
+
+	init_waitqueue_head(&wq);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Setup callback */
+	request->callback = __tape_wake_up_interruptible;
+	request->callback_data = &wq;
+	rc = __tape_do_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc)
+		return rc;
+	/* Request added to the queue. Wait for its completion. */
+	rc = wait_event_interruptible(wq, (request->callback == NULL));
+	if (rc != -ERESTARTSYS)
+		/* Request finished normally. */
+		return request->rc;
+	/* Interrupted by a signal. We have to stop the current request. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_halt_io(device, request);
+	if (rc == 0) {
+		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
+		rc = -ERESTARTSYS;
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Handle requests that return an i/o error in the irb (timed out or
+ * killed by the cio layer). Finishes the request with -EIO, or - for
+ * unsolicited errors with no request - restarts the queue.
+ */
+static inline void
+tape_handle_killed_request(
+	struct tape_device *device,
+	struct tape_request *request)
+{
+	if(request != NULL) {
+		/* Set ending status. FIXME: Should the request be retried? */
+		request->rc = -EIO;
+		request->status = TAPE_REQUEST_DONE;
+		__tape_remove_request(device, request);
+	} else {
+		__tape_do_io_list(device);
+	}
+}
+
+/*
+ * Tape interrupt routine, called from the ccw_device layer.
+ * Dispatches the interrupt to the discipline's irq handler and finishes,
+ * retries or halts the request depending on the handler's return value.
+ * The ccw device lock is held by the cio layer when this runs.
+ */
+static void
+__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	int final;
+	int rc;
+
+	device = (struct tape_device *) cdev->dev.driver_data;
+	if (device == NULL) {
+		PRINT_ERR("could not get device structure for %s "
+			  "in interrupt\n", cdev->dev.bus_id);
+		return;
+	}
+	/* intparm is the value passed to ccw_device_start in __tape_do_io. */
+	request = (struct tape_request *) intparm;
+
+	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);
+
+	/* On special conditions irb is an error pointer */
+	if (IS_ERR(irb)) {
+		switch (PTR_ERR(irb)) {
+			case -ETIMEDOUT:
+				PRINT_WARN("(%s): Request timed out\n",
+					cdev->dev.bus_id);
+				/* fallthrough - treat like a killed request */
+			case -EIO:
+				tape_handle_killed_request(device, request);
+				break;
+			default:
+				PRINT_ERR("(%s): Unexpected i/o error %li\n",
+					cdev->dev.bus_id,
+					PTR_ERR(irb));
+		}
+		return;
+	}
+
+	/* May be an unsolicited irq */
+	if(request != NULL)
+		request->rescnt = irb->scsw.count;
+
+	/* 0x0c == channel end + device end, i.e. normal completion. */
+	if (irb->scsw.dstat != 0x0c) {
+		/* Set the 'ONLINE' flag depending on sense byte 1 */
+		if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
+			device->tape_generic_status |= GMT_ONLINE(~0);
+		else
+			device->tape_generic_status &= ~GMT_ONLINE(~0);
+
+		/*
+		 * Any request that does not come back with channel end
+		 * and device end is unusual. Log the sense data.
+		 */
+		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
+		tape_dump_sense_dbf(device, request, irb);
+	} else {
+		/* Upon normal completion the device _is_ online */
+		device->tape_generic_status |= GMT_ONLINE(~0);
+	}
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(6, "tape:device is not operational\n");
+		return;
+	}
+
+	/*
+	 * Request that were canceled still come back with an interrupt.
+	 * To detect these request the state will be set to TAPE_REQUEST_DONE.
+	 */
+	if(request != NULL && request->status == TAPE_REQUEST_DONE) {
+		__tape_remove_request(device, request);
+		return;
+	}
+
+	rc = device->discipline->irq(device, request, irb);
+	/*
+	 * rc < 0 : request finished unsuccessfully.
+	 * rc == TAPE_IO_SUCCESS: request finished successfully.
+	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
+	 * rc == TAPE_IO_RETRY: request finished but needs another go.
+	 * rc == TAPE_IO_STOP: request needs to get terminated.
+	 */
+	final = 0;
+	switch (rc) {
+	case TAPE_IO_SUCCESS:
+		/* Upon normal completion the device _is_ online */
+		device->tape_generic_status |= GMT_ONLINE(~0);
+		final = 1;
+		break;
+	case TAPE_IO_PENDING:
+		break;
+	case TAPE_IO_RETRY:
+#ifdef CONFIG_S390_TAPE_BLOCK
+		if (request->op == TO_BLOCK)
+			device->discipline->check_locate(device, request);
+#endif
+		/* Restart the very same channel program. */
+		rc = ccw_device_start(cdev, request->cpaddr,
+				      (unsigned long) request, 0x00,
+				      request->options);
+		if (rc) {
+			DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
+			final = 1;
+		}
+		break;
+	case TAPE_IO_STOP:
+		__tape_halt_io(device, request);
+		break;
+	default:
+		if (rc > 0) {
+			/* Unknown positive return code from the discipline. */
+			DBF_EVENT(6, "xunknownrc\n");
+			PRINT_ERR("Invalid return code from discipline "
+				  "interrupt function.\n");
+			rc = -EIO;
+		}
+		final = 1;
+		break;
+	}
+	if (final) {
+		/* May be an unsolicited irq */
+		if(request != NULL) {
+			/* Set ending status. */
+			request->rc = rc;
+			request->status = TAPE_REQUEST_DONE;
+			__tape_remove_request(device, request);
+		} else {
+			__tape_do_io_list(device);
+		}
+	}
+}
+
+/*
+ * Tape device open function used by tape_char & tape_block frontends.
+ * Transitions the device to TS_IN_USE and pins the discipline module.
+ * Returns 0, -ENODEV (gone / no discipline) or -EBUSY (already in use).
+ *
+ * NOTE(review): uses spin_lock on the ccw device lock while other paths
+ * use spin_lock_irq - confirm interrupts cannot take this lock here.
+ */
+int
+tape_open(struct tape_device *device)
+{
+	int rc;
+
+	spin_lock(get_ccwdev_lock(device->cdev));
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(6, "TAPE:nodev\n");
+		rc = -ENODEV;
+	} else if (device->tape_state == TS_IN_USE) {
+		DBF_EVENT(6, "TAPE:dbusy\n");
+		rc = -EBUSY;
+	} else if (device->tape_state == TS_BLKUSE) {
+		DBF_EVENT(6, "TAPE:dbusy\n");
+		rc = -EBUSY;
+	} else if (device->discipline != NULL &&
+		   !try_module_get(device->discipline->owner)) {
+		DBF_EVENT(6, "TAPE:nodisc\n");
+		rc = -ENODEV;
+	} else {
+		tape_state_set(device, TS_IN_USE);
+		rc = 0;
+	}
+	spin_unlock(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Tape device release function used by tape_char & tape_block frontends.
+ * Counterpart of tape_open: returns the device to TS_UNUSED and drops
+ * the discipline module reference. Always returns 0.
+ */
+int
+tape_release(struct tape_device *device)
+{
+	spin_lock(get_ccwdev_lock(device->cdev));
+	if (device->tape_state == TS_IN_USE)
+		tape_state_set(device, TS_UNUSED);
+	module_put(device->discipline->owner);
+	spin_unlock(get_ccwdev_lock(device->cdev));
+	return 0;
+}
+
+/*
+ * Execute a magnetic tape command a number of times. Dispatches mt_op
+ * through the discipline's mtop_array; space/file-mark operations with
+ * large counts are split into chunks of 500 because the backends are
+ * assumed to handle at most that. Returns -EINVAL for unknown or
+ * unsupported operations, otherwise the backend's return code.
+ */
+int
+tape_mtop(struct tape_device *device, int mt_op, int mt_count)
+{
+	tape_mtop_fn fn;
+	int rc;
+
+	DBF_EVENT(6, "TAPE:mtio\n");
+	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
+	DBF_EVENT(6, "TAPE:arg:	 %x\n", mt_count);
+
+	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
+		return -EINVAL;
+	fn = device->discipline->mtop_array[mt_op];
+	if (fn == NULL)
+		return -EINVAL;
+
+	/* We assume that the backends can handle count up to 500. */
+	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
+	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
+		rc = 0;
+		for (; mt_count > 500; mt_count -= 500)
+			if ((rc = fn(device, 500)) != 0)
+				break;
+		if (rc == 0)
+			rc = fn(device, mt_count);
+	} else
+		rc = fn(device, mt_count);
+	return rc;
+
+}
+
+/*
+ * Tape init function. Sets up the debug feature area and registers the
+ * proc, character and block frontends.
+ * NOTE(review): the return values of debug_register and the frontend
+ * init functions are not checked.
+ */
+static int
+tape_init (void)
+{
+	TAPE_DBF_AREA = debug_register ( "tape", 1, 2, 4*sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+	DBF_EVENT(3, "tape init: ($Revision: 1.51 $)\n");
+	tape_proc_init();
+	tapechar_init ();
+	tapeblock_init ();
+	return 0;
+}
+
+/*
+ * Tape exit function. Unregisters the frontends and the proc entry in
+ * reverse order of tape_init, then releases the debug area.
+ */
+static void
+tape_exit(void)
+{
+	DBF_EVENT(6, "tape exit\n");
+
+	/* Get rid of the frontends */
+	tapechar_exit();
+	tapeblock_exit();
+	tape_proc_cleanup();
+	debug_unregister (TAPE_DBF_AREA);
+}
+
+MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
+ "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
+MODULE_DESCRIPTION("Linux on zSeries channel attached "
+ "tape device driver ($Revision: 1.51 $)");
+MODULE_LICENSE("GPL");
+
+module_init(tape_init);
+module_exit(tape_exit);
+
+EXPORT_SYMBOL(tape_generic_remove);
+EXPORT_SYMBOL(tape_generic_probe);
+EXPORT_SYMBOL(tape_generic_online);
+EXPORT_SYMBOL(tape_generic_offline);
+EXPORT_SYMBOL(tape_put_device);
+EXPORT_SYMBOL(tape_get_device_reference);
+EXPORT_SYMBOL(tape_state_verbose);
+EXPORT_SYMBOL(tape_op_verbose);
+EXPORT_SYMBOL(tape_state_set);
+EXPORT_SYMBOL(tape_med_state_set);
+EXPORT_SYMBOL(tape_alloc_request);
+EXPORT_SYMBOL(tape_free_request);
+EXPORT_SYMBOL(tape_dump_sense);
+EXPORT_SYMBOL(tape_dump_sense_dbf);
+EXPORT_SYMBOL(tape_do_io);
+EXPORT_SYMBOL(tape_do_io_async);
+EXPORT_SYMBOL(tape_do_io_interruptible);
+EXPORT_SYMBOL(tape_mtop);
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
new file mode 100644
index 000000000000..801d17cca34e
--- /dev/null
+++ b/drivers/s390/char/tape_proc.c
@@ -0,0 +1,145 @@
+/*
+ * drivers/s390/char/tape.c
+ * tape device driver for S/390 and zSeries tapes.
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001 IBM Corporation
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Michael Holzheu <holzheu@de.ibm.com>
+ * Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *
+ * PROCFS Functions
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/seq_file.h>
+
+#define TAPE_DBF_AREA tape_core_dbf
+
+#include "tape.h"
+
+#define PRINTK_HEADER "TAPE_PROC: "
+
+static const char *tape_med_st_verbose[MS_SIZE] =
+{
+ [MS_UNKNOWN] = "UNKNOWN ",
+ [MS_LOADED] = "LOADED ",
+ [MS_UNLOADED] = "UNLOADED"
+};
+
+/* our proc tapedevices entry */
+static struct proc_dir_entry *tape_proc_devices;
+
+/*
+ * Show function for /proc/tapedevices: print one table row per tape
+ * device index (v is the 1-based index from tape_proc_start). The header
+ * line is emitted together with row 0. Indices without a device are
+ * silently skipped.
+ */
+static int tape_proc_show(struct seq_file *m, void *v)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	const char *str;
+	unsigned long n;
+
+	n = (unsigned long) v - 1;
+	if (!n) {
+		seq_printf(m, "TapeNo\tBusID      CuType/Model\t"
+			"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
+	}
+	device = tape_get_device(n);
+	if (IS_ERR(device))
+		return 0;
+	/* Take the device lock so state/queue are read consistently. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	seq_printf(m, "%d\t", (int) n);
+	seq_printf(m, "%-10.10s ", device->cdev->dev.bus_id);
+	seq_printf(m, "%04X/", device->cdev->id.cu_type);
+	seq_printf(m, "%02X\t", device->cdev->id.cu_model);
+	seq_printf(m, "%04X/", device->cdev->id.dev_type);
+	seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
+	if (device->char_data.block_size == 0)
+		seq_printf(m, "auto\t");
+	else
+		seq_printf(m, "%i\t", device->char_data.block_size);
+	if (device->tape_state >= 0 &&
+	    device->tape_state < TS_SIZE)
+		str = tape_state_verbose[device->tape_state];
+	else
+		str = "UNKNOWN";
+	seq_printf(m, "%s\t", str);
+	/* Show the operation of the oldest queued request, if any. */
+	if (!list_empty(&device->req_queue)) {
+		request = list_entry(device->req_queue.next,
+			struct tape_request, list);
+		str = tape_op_verbose[request->op];
+	} else
+		str = "---";
+	seq_printf(m, "%s\t", str);
+	seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	tape_put_device(device);
+	return 0;
+}
+
+/* seq_file start: iterate device indices 0..255/TAPE_MINORS_PER_DEV-1;
+ * the iterator cookie is the 1-based index (NULL would mean "stop"). */
+static void *tape_proc_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos >= 256 / TAPE_MINORS_PER_DEV)
+		return NULL;
+	return (void *)((unsigned long) *pos + 1);
+}
+
+/* seq_file next: advance the position and re-check the bound. */
+static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return tape_proc_start(m, pos);
+}
+
+/* seq_file stop: nothing to release, the iterator holds no state. */
+static void tape_proc_stop(struct seq_file *m, void *v)
+{
+}
+
+/* seq_file iterator operations for /proc/tapedevices. */
+static struct seq_operations tape_proc_seq = {
+	.start		= tape_proc_start,
+	.next		= tape_proc_next,
+	.stop		= tape_proc_stop,
+	.show		= tape_proc_show,
+};
+
+/* open handler: bind the seq_file iterator to the proc file. */
+static int tape_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &tape_proc_seq);
+}
+
+/* file operations for /proc/tapedevices - standard seq_file plumbing. */
+static struct file_operations tape_proc_ops =
+{
+	.open		= tape_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * Initialize procfs stuff on startup: create /proc/tapedevices and hook
+ * up the seq_file operations. Failure is logged but not fatal.
+ */
+void
+tape_proc_init(void)
+{
+	tape_proc_devices =
+		create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR,
+				   &proc_root);
+	if (tape_proc_devices == NULL) {
+		PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
+		return;
+	}
+	tape_proc_devices->proc_fops = &tape_proc_ops;
+	tape_proc_devices->owner = THIS_MODULE;
+}
+
+/*
+ * Cleanup all stuff registered to the procfs: remove /proc/tapedevices
+ * if it was successfully created in tape_proc_init.
+ */
+void
+tape_proc_cleanup(void)
+{
+	if (tape_proc_devices != NULL)
+		remove_proc_entry ("tapedevices", &proc_root);
+}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
new file mode 100644
index 000000000000..2f9fe30989a7
--- /dev/null
+++ b/drivers/s390/char/tape_std.c
@@ -0,0 +1,765 @@
+/*
+ * drivers/s390/char/tape_std.c
+ * standard tape device functions for ibm tapes.
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Michael Holzheu <holzheu@de.ibm.com>
+ * Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Stefan Bader <shbader@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/timer.h>
+
+#include <asm/types.h>
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/tape390.h>
+
+#define TAPE_DBF_AREA tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+#define PRINTK_HEADER "TAPE_STD: "
+
+/*
+ * tape_std_assign
+ *
+ * Timer callback for tape_std_assign: if the assign request has not
+ * completed (callback still set) when the timer fires, clear the
+ * channel program so the blocked request terminates.
+ */
+static void
+tape_std_assign_timeout(unsigned long data)
+{
+	struct tape_request *	request;
+	struct tape_device *	device;
+
+	request = (struct tape_request *) data;
+	if ((device = request->device) == NULL)
+		BUG();
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* callback != NULL means the request has not finished yet. */
+	if (request->callback != NULL) {
+		DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
+			device->cdev_id);
+		PRINT_ERR("%s: Assignment timeout. Device busy.\n",
+			device->cdev->dev.bus_id);
+		ccw_device_clear(device->cdev, (long) request);
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+}
+
+/*
+ * Assign the tape drive to this host. The ASSIGN channel command may
+ * block when the drive is assigned to another host, so a 2 second
+ * timeout clears the channel program in that case.
+ */
+int
+tape_std_assign(struct tape_device *device)
+{
+	int                  rc;
+	struct timer_list    timeout;
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 11);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	request->op = TO_ASSIGN;
+	tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	/*
+	 * The assign command sometimes blocks if the device is assigned
+	 * to another host (actually this shouldn't happen but it does).
+	 * So we set up a timeout for this call.
+	 */
+	init_timer(&timeout);
+	timeout.function = tape_std_assign_timeout;
+	timeout.data     = (unsigned long) request;
+	timeout.expires  = jiffies + 2 * HZ;
+	add_timer(&timeout);
+
+	rc = tape_do_io_interruptible(device, request);
+
+	/* NOTE(review): del_timer does not wait for a concurrently
+	 * running handler on another CPU - del_timer_sync would be
+	 * safer before freeing the request below; confirm. */
+	del_timer(&timeout);
+
+	if (rc != 0) {
+		PRINT_WARN("%s: assign failed - device might be busy\n",
+			device->cdev->dev.bus_id);
+		DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
+			device->cdev_id);
+	} else {
+		DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * tape_std_unassign
+ *
+ * Release the host assignment of the drive. Refused with -EIO if the
+ * device is already gone (TS_NOT_OPER).
+ */
+int
+tape_std_unassign (struct tape_device *device)
+{
+	int                  rc;
+	struct tape_request *request;
+
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(3, "(%08x): Can't unassign device\n",
+			device->cdev_id);
+		PRINT_WARN("(%s): Can't unassign device - device gone\n",
+			device->cdev->dev.bus_id);
+		return -EIO;
+	}
+
+	request = tape_alloc_request(2, 11);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	request->op = TO_UNASSIGN;
+	tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	if ((rc = tape_do_io(device, request)) != 0) {
+		DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
+		PRINT_WARN("%s: Unassign failed\n", device->cdev->dev.bus_id);
+	} else {
+		DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * TAPE390_DISPLAY: Show a string on the tape display.
+ * The 17-byte payload is the control byte followed by two 8-character
+ * messages, converted from ASCII to EBCDIC for the device.
+ */
+int
+tape_std_display(struct tape_device *device, struct display_struct *disp)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(2, 17);
+	if (IS_ERR(request)) {
+		DBF_EVENT(3, "TAPE: load display failed\n");
+		return PTR_ERR(request);
+	}
+	request->op = TO_DIS;
+
+	*(unsigned char *) request->cpdata = disp->cntrl;
+	DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
+	memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
+	memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
+	ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
+
+	tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	rc = tape_do_io_interruptible(device, request);
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Read block id. On success the 8-byte block id read from the device is
+ * stored in *id; on failure *id is left untouched.
+ */
+int
+tape_std_read_block_id(struct tape_device *device, __u64 *id)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(3, 8);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RBI;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0)
+		/* Get result from read buffer. */
+		*id = *(__u64 *) request->cpdata;
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Terminate a write sequence: write the outstanding tape marks recorded
+ * in device->required_tapemarks and back up over the last one so a
+ * subsequent write continues in the right position. No-op when no
+ * tapemarks are pending.
+ */
+int
+tape_std_terminate_write(struct tape_device *device)
+{
+	int rc;
+
+	if(device->required_tapemarks == 0)
+		return 0;
+
+	DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
+		device->required_tapemarks);
+
+	rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
+	if (rc)
+		return rc;
+
+	device->required_tapemarks = 0;
+	return tape_mtop(device, MTBSR, 1);
+}
+
+/*
+ * MTLOAD: Loads the tape.
+ * The default implementation just waits (interruptibly) until the tape
+ * medium state changes to MS_LOADED; 'count' is ignored.
+ */
+int
+tape_std_mtload(struct tape_device *device, int count)
+{
+	return wait_event_interruptible(device->state_change_wq,
+		(device->medium_state == MS_LOADED));
+}
+
+/*
+ * MTSETBLK: Set block size. count <= 0 selects automatic block size;
+ * otherwise an idal buffer of 'count' bytes (up to MAX_BLOCKSIZE) is
+ * allocated, replacing any previous one.
+ */
+int
+tape_std_mtsetblk(struct tape_device *device, int count)
+{
+	struct idal_buffer *new;
+
+	DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
+	if (count <= 0) {
+		/*
+		 * Just set block_size to 0. tapechar_read/tapechar_write
+		 * will realloc the idal buffer if a bigger one than the
+		 * current is needed.
+		 */
+		device->char_data.block_size = 0;
+		return 0;
+	}
+	if (device->char_data.idal_buf != NULL &&
+	    device->char_data.idal_buf->size == count)
+		/* We already have a idal buffer of that size. */
+		return 0;
+
+	if (count > MAX_BLOCKSIZE) {
+		DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
+			count, MAX_BLOCKSIZE);
+		PRINT_ERR("Invalid block size (%d > %d) given.\n",
+			count, MAX_BLOCKSIZE);
+		return -EINVAL;
+	}
+
+	/* Allocate a new idal buffer. */
+	new = idal_buffer_alloc(count, 0);
+	if (new == NULL)
+		return -ENOMEM;
+	/* Only free the old buffer after the new one succeeded. */
+	if (device->char_data.idal_buf != NULL)
+		idal_buffer_free(device->char_data.idal_buf);
+	device->char_data.idal_buf = new;
+	device->char_data.block_size = count;
+
+	DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
+
+	return 0;
+}
+
+/*
+ * MTRESET: Set block size to 0 (automatic); 'count' is ignored.
+ */
+int
+tape_std_mtreset(struct tape_device *device, int count)
+{
+	DBF_EVENT(6, "TCHAR:devreset:\n");
+	device->char_data.block_size = 0;
+	return 0;
+}
+
+/*
+ * MTFSF: Forward space over 'count' file marks. The tape is positioned
+ * at the EOT (End of Tape) side of the file mark. The channel program
+ * is mode set + mt_count FORSPACEFILE ccws + NOP.
+ */
+int
+tape_std_mtfsf(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTFSR: Forward space over 'count' tape blocks (blocksize is set
+ * via MTSETBLK). Returns 1 when a tape mark was crossed (residual
+ * count non-zero), 0 on full success, negative on error.
+ */
+int
+tape_std_mtfsr(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSB;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0 && request->rescnt > 0) {
+		DBF_LH(3, "FSR over tapemark\n");
+		rc = 1;
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * MTBSR: Backward space over 'count' tape blocks.
+ * (blocksize is set via MTSETBLK.) Returns 1 when a tape mark was
+ * crossed (residual count non-zero), 0 on full success, negative on
+ * error.
+ */
+int
+tape_std_mtbsr(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSB;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0 && request->rescnt > 0) {
+		DBF_LH(3, "BSR over tapemark\n");
+		rc = 1;
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * MTWEOF: Write 'count' file marks at the current position.
+ */
+int
+tape_std_mtweof(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_WTM;
+	/* setup ccws: mode set + mt_count WRITETAPEMARK + NOP */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTBSFM: Backward space over 'count' file marks.
+ * The tape is positioned at the BOT (Begin Of Tape) side of the
+ * last skipped file mark.
+ */
+int
+tape_std_mtbsfm(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSF;
+	/* setup ccws: mode set + mt_count BACKSPACEFILE + NOP */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTBSF: Backward space over 'count' file marks. The tape is positioned at
+ * the EOT (End of Tape) side of the last skipped file mark: after the
+ * backspace a forward space over one block moves past the mark again.
+ */
+int
+tape_std_mtbsf(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io_free(device, request);
+	if (rc == 0) {
+		rc = tape_mtop(device, MTFSR, 1);
+		/* rc > 0 from MTFSR means "crossed the tapemark" - that is
+		 * exactly the expected position here, so report success. */
+		if (rc > 0)
+			rc = 0;
+	}
+	return rc;
+}
+
+/*
+ * MTFSFM: Forward space over 'count' file marks.
+ * The tape is positioned at the BOT (Begin Of Tape) side
+ * of the last skipped file mark: after the forward space a backward
+ * space over one block moves back before the mark.
+ */
+int
+tape_std_mtfsfm(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io_free(device, request);
+	if (rc == 0) {
+		rc = tape_mtop(device, MTBSR, 1);
+		/* rc > 0 means "crossed the tapemark" - the expected
+		 * position, so report success. */
+		if (rc > 0)
+			rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * MTREW: Rewind the tape.
+ * mt_count is ignored; the signature matches the common mtop table.
+ */
+int
+tape_std_mtrew(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	/* Channel program: mode set, rewind, NOP. */
+	request = tape_alloc_request(3, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_REW;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+		    device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTOFFL: Rewind the tape and put the drive off-line.
+ * Implement 'rewind unload'
+ * mt_count is ignored; the signature matches the common mtop table.
+ */
+int
+tape_std_mtoffl(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(3, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RUN;
+	/* setup ccws: mode set, rewind-unload, NOP */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTNOP: 'No operation'.
+ * Still issues a mode-set + NOP channel program to touch the device;
+ * mt_count is ignored.
+ */
+int
+tape_std_mtnop(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_NOP;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTEOM: positions at the end of the portion of the tape already used
+ * for recording data. MTEOM positions after the last file mark, ready for
+ * appending another file.
+ * mt_count is ignored; the signature matches the common mtop table.
+ */
+int
+tape_std_mteom(struct tape_device *device, int mt_count)
+{
+	int rc;
+
+	/*
+	 * Seek from the beginning of tape (rewind).
+	 */
+	if ((rc = tape_mtop(device, MTREW, 1)) < 0)
+		return rc;
+
+	/*
+	 * The logical end of volume is given by two sequential tapemarks.
+	 * Look for this by skipping to the next file (over one tapemark)
+	 * and then test for another one (fsr returns 1 if a tapemark was
+	 * encountered).
+	 */
+	do {
+		if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
+			return rc;
+		if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
+			return rc;
+	} while (rc == 0);
+
+	/* Step back before the second tapemark so writes append here. */
+	return tape_mtop(device, MTBSR, 1);
+}
+
+/*
+ * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
+ * mt_count is ignored.
+ */
+int
+tape_std_mtreten(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(4, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws; the final TIC loops back to the start of the
+	 * channel program so it runs until end of tape is reached. */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
+	/* execute it, MTRETEN rc gets ignored on purpose: the spacing
+	 * always ends with an error when the end of tape is hit. */
+	rc = tape_do_io_interruptible(device, request);
+	tape_free_request(request);
+	return tape_mtop(device, MTREW, 1);
+}
+
+/*
+ * MTERASE: erases the tape.
+ * Rewinds, erases a gap, performs a data security erase, then rewinds
+ * again. mt_count is ignored.
+ */
+int
+tape_std_mterase(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(6, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_DSE;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
+	tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTUNLOAD: Rewind the tape and unload it.
+ * Simply an alias for MTOFFL (rewind unload).
+ */
+int
+tape_std_mtunload(struct tape_device *device, int mt_count)
+{
+	return tape_mtop(device, MTOFFL, mt_count);
+}
+
+/*
+ * MTCOMPRESSION: used to enable compression.
+ * Sets the IDRC on/off.
+ * mt_count must be 0 (off) or 1 (on); any other value just reports the
+ * current state and returns -EINVAL.
+ */
+int
+tape_std_mtcompression(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	if (mt_count < 0 || mt_count > 1) {
+		DBF_EXCEPTION(6, "xcom parm\n");
+		/* Bit 0x08 of the modeset byte is the IDRC flag. */
+		if (*device->modeset_byte & 0x08)
+			PRINT_INFO("(%s) Compression is currently on\n",
+				   device->cdev->dev.bus_id);
+		else
+			PRINT_INFO("(%s) Compression is currently off\n",
+				   device->cdev->dev.bus_id);
+		PRINT_INFO("Use 1 to switch compression on, 0 to "
+			   "switch it off\n");
+		return -EINVAL;
+	}
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_NOP;
+	/* setup ccws; the updated modeset byte is sent with MODE_SET_DB */
+	*device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * Read Block
+ * Builds (but does not start) a read-forward request into the device's
+ * idal buffer. Returns the request or an ERR_PTR on allocation failure.
+ * 'count' is currently unused; the transfer size comes from the idal buf.
+ */
+struct tape_request *
+tape_std_read_block(struct tape_device *device, size_t count)
+{
+	struct tape_request *request;
+
+	/*
+	 * We have to alloc 4 ccws in order to be able to transform request
+	 * into a read backward request in error case.
+	 */
+	request = tape_alloc_request(4, 0);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "xrbl fail");
+		return request;
+	}
+	request->op = TO_RFO;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
+			  device->char_data.idal_buf);
+	DBF_EVENT(6, "xrbl ccwg\n");
+	return request;
+}
+
+/*
+ * Read Block backward transformation function.
+ *
+ * Rewrites an already-built read-forward request (the 4 CCWs allocated by
+ * tape_std_read_block) into: mode set, read backward into the same idal
+ * buffer, forward space block (to restore the tape position), NOP.
+ */
+void
+tape_std_read_backward(struct tape_device *device, struct tape_request *request)
+{
+	/*
+	 * We have allocated 4 ccws in tape_std_read, so we can now
+	 * transform the request to a read backward, followed by a
+	 * forward space block.
+	 */
+	request->op = TO_RBA;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
+			 device->char_data.idal_buf);
+	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
+	DBF_EVENT(6, "xrop ccwg");
+}
+
+/*
+ * Write Block
+ * Builds (but does not start) a write request from the device's idal
+ * buffer. Returns the request or an ERR_PTR on allocation failure.
+ * 'count' is currently unused; the transfer size comes from the idal buf.
+ */
+struct tape_request *
+tape_std_write_block(struct tape_device *device, size_t count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "xwbl fail\n");
+		return request;
+	}
+	request->op = TO_WRI;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
+			  device->char_data.idal_buf);
+	DBF_EVENT(6, "xwbl ccwg\n");
+	return request;
+}
+
+/*
+ * This routine is called by frontend after an ENOSPC on write
+ */
+void
+tape_std_process_eov(struct tape_device *device)
+{
+	/*
+	 * End of volume: We have to backspace the last written record, then
+	 * we TRY to write a tapemark and then backspace over the written TM
+	 * Errors here are deliberately ignored (best-effort cleanup).
+	 */
+	if (tape_mtop(device, MTBSR, 1) == 0 &&
+	    tape_mtop(device, MTWEOF, 1) == 0) {
+		tape_mtop(device, MTBSR, 1);
+	}
+}
+
+/* Export the standard tape operations for use by the tape_34xx/tape_3590
+ * discipline modules. */
+EXPORT_SYMBOL(tape_std_assign);
+EXPORT_SYMBOL(tape_std_unassign);
+EXPORT_SYMBOL(tape_std_display);
+EXPORT_SYMBOL(tape_std_read_block_id);
+EXPORT_SYMBOL(tape_std_mtload);
+EXPORT_SYMBOL(tape_std_mtsetblk);
+EXPORT_SYMBOL(tape_std_mtreset);
+EXPORT_SYMBOL(tape_std_mtfsf);
+EXPORT_SYMBOL(tape_std_mtfsr);
+EXPORT_SYMBOL(tape_std_mtbsr);
+EXPORT_SYMBOL(tape_std_mtweof);
+EXPORT_SYMBOL(tape_std_mtbsfm);
+EXPORT_SYMBOL(tape_std_mtbsf);
+EXPORT_SYMBOL(tape_std_mtfsfm);
+EXPORT_SYMBOL(tape_std_mtrew);
+EXPORT_SYMBOL(tape_std_mtoffl);
+EXPORT_SYMBOL(tape_std_mtnop);
+EXPORT_SYMBOL(tape_std_mteom);
+EXPORT_SYMBOL(tape_std_mtreten);
+EXPORT_SYMBOL(tape_std_mterase);
+EXPORT_SYMBOL(tape_std_mtunload);
+EXPORT_SYMBOL(tape_std_mtcompression);
+EXPORT_SYMBOL(tape_std_read_block);
+EXPORT_SYMBOL(tape_std_read_backward);
+EXPORT_SYMBOL(tape_std_write_block);
+EXPORT_SYMBOL(tape_std_process_eov);
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
new file mode 100644
index 000000000000..3ab6aafb7343
--- /dev/null
+++ b/drivers/s390/char/tape_std.h
@@ -0,0 +1,152 @@
+/*
+ * drivers/s390/char/tape_std.h
+ * standard tape device functions for ibm tapes.
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _TAPE_STD_H
+#define _TAPE_STD_H
+
+#include <asm/tape390.h>
+
+/*
+ * Biggest block size to handle. Currently 64K because we only build
+ * channel programs without data chaining.
+ */
+#define MAX_BLOCKSIZE 65535
+
+/*
+ * The CCW commands for the Tape type of command.
+ */
+#define INVALID_00 0x00 /* Invalid cmd */
+#define BACKSPACEBLOCK 0x27 /* Back Space block */
+#define BACKSPACEFILE 0x2f /* Back Space file */
+#define DATA_SEC_ERASE 0x97 /* Data security erase */
+#define ERASE_GAP 0x17 /* Erase Gap */
+#define FORSPACEBLOCK 0x37 /* Forward space block */
+#define FORSPACEFILE 0x3F /* Forward Space file */
+#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
+#define NOP 0x03 /* No operation */
+#define READ_FORWARD 0x02 /* Read forward */
+#define REWIND 0x07 /* Rewind */
+#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
+#define SENSE 0x04 /* Sense */
+/* NOTE(review): NEW_MODE_SET shares opcode 0xEB with FORCE_STREAM_CNT
+ * above -- presumably model-dependent; verify against hardware docs. */
+#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
+#define WRITE_CMD 0x01 /* Write */
+#define WRITETAPEMARK 0x1F /* Write Tape Mark */
+
+#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
+#define CONTROL_ACCESS 0xE3 /* Set high speed */
+#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */
+#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
+#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
+#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
+#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
+#define MODE_SET_C3 0xC3 /* for 3420 */
+#define MODE_SET_CB 0xCB /* for 3420 */
+#define MODE_SET_D3 0xD3 /* for 3420 */
+#define READ_BACKWARD 0x0C /* */
+#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
+#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
+#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
+#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */
+#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */
+#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */
+#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
+#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
+#define READ_DEV_CHAR 0x64 /* Read device characteristics */
+#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */
+#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
+/* NOTE(review): shares opcode 0xC3 with MODE_SET_C3 -- model-dependent. */
+#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
+#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
+#define SYNC 0x43 /* Synchronize (flush buffer) */
+#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
+#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
+#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
+#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
+#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
+#define SET_INTERFACE_ID 0x73 /* 3490 CMD */
+
+/* Sense byte flag bits. */
+#define SENSE_COMMAND_REJECT 0x80
+#define SENSE_INTERVENTION_REQUIRED 0x40
+#define SENSE_BUS_OUT_CHECK 0x20
+#define SENSE_EQUIPMENT_CHECK 0x10
+#define SENSE_DATA_CHECK 0x08
+#define SENSE_OVERRUN 0x04
+#define SENSE_DEFERRED_UNIT_CHECK 0x02
+#define SENSE_ASSIGNED_ELSEWHERE 0x01
+
+#define SENSE_LOCATE_FAILURE 0x80
+#define SENSE_DRIVE_ONLINE 0x40
+#define SENSE_RESERVED 0x20
+#define SENSE_RECORD_SEQUENCE_ERR 0x10
+#define SENSE_BEGINNING_OF_TAPE 0x08
+#define SENSE_WRITE_MODE 0x04
+#define SENSE_WRITE_PROTECT 0x02
+#define SENSE_NOT_CAPABLE 0x01
+
+#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
+#define SENSE_CHANNEL_ADAPTER_LOC 0x10
+#define SENSE_REPORTING_CU 0x08
+#define SENSE_AUTOMATIC_LOADER 0x04
+#define SENSE_TAPE_SYNC_MODE 0x02
+#define SENSE_TAPE_POSITIONING 0x01
+
+/* discipline functions */
+struct tape_request *tape_std_read_block(struct tape_device *, size_t);
+void tape_std_read_backward(struct tape_device *device,
+ struct tape_request *request);
+struct tape_request *tape_std_write_block(struct tape_device *, size_t);
+struct tape_request *tape_std_bread(struct tape_device *, struct request *);
+void tape_std_free_bread(struct tape_request *);
+void tape_std_check_locate(struct tape_device *, struct tape_request *);
+struct tape_request *tape_std_bwrite(struct request *,
+ struct tape_device *, int);
+
+/* Some non-mtop commands. */
+int tape_std_assign(struct tape_device *);
+int tape_std_unassign(struct tape_device *);
+int tape_std_read_block_id(struct tape_device *device, __u64 *id);
+int tape_std_display(struct tape_device *, struct display_struct *disp);
+int tape_std_terminate_write(struct tape_device *);
+
+/* Standard magnetic tape commands. */
+int tape_std_mtbsf(struct tape_device *, int);
+int tape_std_mtbsfm(struct tape_device *, int);
+int tape_std_mtbsr(struct tape_device *, int);
+int tape_std_mtcompression(struct tape_device *, int);
+int tape_std_mteom(struct tape_device *, int);
+int tape_std_mterase(struct tape_device *, int);
+int tape_std_mtfsf(struct tape_device *, int);
+int tape_std_mtfsfm(struct tape_device *, int);
+int tape_std_mtfsr(struct tape_device *, int);
+int tape_std_mtload(struct tape_device *, int);
+int tape_std_mtnop(struct tape_device *, int);
+int tape_std_mtoffl(struct tape_device *, int);
+int tape_std_mtreset(struct tape_device *, int);
+int tape_std_mtreten(struct tape_device *, int);
+int tape_std_mtrew(struct tape_device *, int);
+int tape_std_mtsetblk(struct tape_device *, int);
+int tape_std_mtunload(struct tape_device *, int);
+int tape_std_mtweof(struct tape_device *, int);
+
+/* Event handlers */
+void tape_std_default_handler(struct tape_device *);
+void tape_std_unexpect_uchk_handler(struct tape_device *);
+void tape_std_irq(struct tape_device *);
+void tape_std_process_eov(struct tape_device *);
+
+/* The error recovery stuff. */
+void tape_std_error_recovery(struct tape_device *);
+void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
+void tape_std_error_recovery_succeded(struct tape_device *);
+void tape_std_error_recovery_do_retry(struct tape_device *);
+void tape_std_error_recovery_read_opposite(struct tape_device *);
+void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
+
+#endif /* _TAPE_STD_H */
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
new file mode 100644
index 000000000000..7db5ebce7f0f
--- /dev/null
+++ b/drivers/s390/char/tty3270.c
@@ -0,0 +1,1836 @@
+/*
+ * drivers/s390/char/tty3270.c
+ * IBM/3270 Driver - tty functions.
+ *
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+
+
+#include "raw3270.h"
+#include "keyboard.h"
+
+#define TTY3270_CHAR_BUF_SIZE 256	/* put_char staging buffer size */
+#define TTY3270_OUTPUT_BUFFER_SIZE 1024	/* size of the single write request */
+#define TTY3270_STRING_PAGES 5		/* pages backing the string freelist */
+
+struct tty_driver *tty3270_driver;
+static int tty3270_max_index;
+
+struct raw3270_fn tty3270_fn;
+
+/* One screen position: the character plus its display attributes. */
+struct tty3270_cell {
+	unsigned char character;
+	unsigned char highlight;
+	unsigned char f_color;
+};
+
+/* One screen row: an array of cells and the number of cells used. */
+struct tty3270_line {
+	struct tty3270_cell *cells;
+	int len;
+};
+
+/* Maximum number of parameters in an escape sequence. */
+#define ESCAPE_NPAR 8
+
+/*
+ * The main tty view data structure.
+ * FIXME:
+ * 1) describe line orientation & lines list concept against screen
+ * 2) describe conversion of screen to lines
+ * 3) describe line format.
+ */
+struct tty3270 {
+	struct raw3270_view view;
+	struct tty_struct *tty; /* Pointer to tty structure */
+	void **freemem_pages; /* Array of pages used for freemem. */
+	struct list_head freemem; /* List of free memory for strings. */
+
+	/* Output stuff. */
+	struct list_head lines; /* List of lines. */
+	struct list_head update; /* List of lines to update. */
+	unsigned char wcc; /* Write control character. */
+	int nr_lines; /* # lines in list. */
+	int nr_up; /* # lines up in history. */
+	unsigned long update_flags; /* Update indication bits. */
+	struct string *status; /* Lower right of display. */
+	struct raw3270_request *write; /* Single write request. */
+	struct timer_list timer; /* Output delay timer. */
+
+	/* Current tty screen. */
+	unsigned int cx, cy; /* Current output position. */
+	unsigned int highlight; /* Blink/reverse/underscore */
+	unsigned int f_color; /* Foreground color */
+	struct tty3270_line *screen;
+
+	/* Input stuff. */
+	struct string *prompt; /* Output string for input area. */
+	struct string *input; /* Input string for read request. */
+	struct raw3270_request *read; /* Single read request. */
+	struct raw3270_request *kreset; /* Single keyboard reset request. */
+	unsigned char inattr; /* Visible/invisible input. */
+	int throttle, attn; /* tty throttle/unthrottle. */
+	struct tasklet_struct readlet; /* Tasklet to issue read request. */
+	struct kbd_data *kbd; /* key_maps stuff. */
+
+	/* Escape sequence parsing. */
+	int esc_state, esc_ques, esc_npar;
+	int esc_par[ESCAPE_NPAR];
+	unsigned int saved_cx, saved_cy;
+	unsigned int saved_highlight, saved_f_color;
+
+	/* Command recalling. */
+	struct list_head rcl_lines; /* List of recallable lines. */
+	struct list_head *rcl_walk; /* Point in rcl_lines list. */
+	int rcl_nr, rcl_max; /* Number/max number of rcl_lines. */
+
+	/* Character array for put_char/flush_chars. */
+	unsigned int char_count;
+	char char_buf[TTY3270_CHAR_BUF_SIZE];
+};
+
+/* tty3270->update_flags. See tty3270_update for details. Bits may be
+ * OR'ed together; TTY_UPDATE_ALL is the union of all four. */
+#define TTY_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
+#define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */
+#define TTY_UPDATE_INPUT 4 /* Update input line. */
+#define TTY_UPDATE_STATUS 8 /* Update status line. */
+#define TTY_UPDATE_ALL 15
+
+static void tty3270_update(struct tty3270 *);
+
+/*
+ * Setup timeout for a device. On timeout trigger an update.
+ * expires == 0 cancels a pending timer. A view reference is held for
+ * as long as a timer is pending and dropped when it fires or is
+ * cancelled.
+ */
+void
+tty3270_set_timer(struct tty3270 *tp, int expires)
+{
+	if (expires == 0) {
+		/* Drop the reference only if we actually deleted a timer. */
+		if (timer_pending(&tp->timer) && del_timer(&tp->timer))
+			raw3270_put_view(&tp->view);
+		return;
+	}
+	/* If a timer is already pending, just push its deadline out. */
+	if (timer_pending(&tp->timer) &&
+	    mod_timer(&tp->timer, jiffies + expires))
+		return;
+	raw3270_get_view(&tp->view);
+	tp->timer.function = (void (*)(unsigned long)) tty3270_update;
+	tp->timer.data = (unsigned long) tp;
+	tp->timer.expires = jiffies + expires;
+	add_timer(&tp->timer);
+}
+
+/*
+ * The input line are the two last lines of the screen.
+ * Rebuilds the 3270 datastream for the prompt from 'input'/'count'.
+ * Callers pass input == NULL with count == 0 to clear the prompt.
+ */
+static void
+tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
+{
+	struct string *line;
+	unsigned int off;
+
+	line = tp->prompt;
+	/* Non-empty input sets the modified-data tag attribute. */
+	if (count != 0)
+		line->string[5] = TF_INMDT;
+	else
+		line->string[5] = tp->inattr;
+	/* Input area spans two rows minus the fixed 11 control bytes. */
+	if (count > tp->view.cols * 2 - 11)
+		count = tp->view.cols * 2 - 11;
+	memcpy(line->string + 6, input, count);
+	line->string[6 + count] = TO_IC;
+	/* Clear to end of input line. */
+	if (count < tp->view.cols * 2 - 11) {
+		line->string[7 + count] = TO_RA;
+		line->string[10 + count] = 0;
+		off = tp->view.cols * tp->view.rows - 9;
+		raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
+		line->len = 11 + count;
+	} else
+		line->len = 7 + count;
+	tp->update_flags |= TTY_UPDATE_INPUT;
+}
+
+/*
+ * Build the prompt (input area) string and the read buffer for it.
+ * NOTE(review): the alloc_string() results are not checked here --
+ * presumably the freemem pool is guaranteed large enough at setup
+ * time; confirm against the allocation in tty3270_alloc_view.
+ */
+static void
+tty3270_create_prompt(struct tty3270 *tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
+		  /* empty input string */
+		  TO_IC, TO_RA, 0, 0, 0 };
+	struct string *line;
+	unsigned int offset;
+
+	line = alloc_string(&tp->freemem,
+			    sizeof(blueprint) + tp->view.cols * 2 - 9);
+	tp->prompt = line;
+	tp->inattr = TF_INPUT;
+	/* Copy blueprint to status line */
+	memcpy(line->string, blueprint, sizeof(blueprint));
+	line->len = sizeof(blueprint);
+	/* Set output offsets. */
+	offset = tp->view.cols * (tp->view.rows - 2);
+	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
+	offset = tp->view.cols * tp->view.rows - 9;
+	raw3270_buffer_address(tp->view.dev, line->string + 8, offset);
+
+	/* Allocate input string for reading. */
+	tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
+}
+
+/*
+ * The status line is the last line of the screen. It shows the string
+ * "Running"/"History" in the lower right corner of the screen.
+ */
+static void
+tty3270_update_status(struct tty3270 * tp)
+{
+	char *str;
+
+	str = (tp->nr_up != 0) ? "History" : "Running";
+	memcpy(tp->status->string + 8, str, 7);
+	/* Convert ASCII to the display's EBCDIC codepage in place. */
+	codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
+	tp->update_flags |= TTY_UPDATE_STATUS;
+}
+
+/* Build the status line string (green text, lower right of screen). */
+static void
+tty3270_create_status(struct tty3270 * tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
+		  0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
+		  TAC_RESET };
+	struct string *line;
+	unsigned int offset;
+
+	line = alloc_string(&tp->freemem,sizeof(blueprint));
+	tp->status = line;
+	/* Copy blueprint to status line */
+	memcpy(line->string, blueprint, sizeof(blueprint));
+	/* Set address to start of status string (= last 9 characters). */
+	offset = tp->view.cols * tp->view.rows - 9;
+	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
+}
+
+/*
+ * Set output offsets to 3270 datastream fragment of a tty string.
+ * (TO_SBA offset at the start and TO_RA offset at the end of the string)
+ * 'nr' is the screen row the string should be placed on.
+ */
+static void
+tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
+{
+	unsigned char *cp;
+
+	raw3270_buffer_address(tp->view.dev, line->string + 1,
+			       tp->view.cols * nr);
+	/* Only blank-line strings end in TO_RA; patch its address too. */
+	cp = line->string + line->len - 4;
+	if (*cp == TO_RA)
+		raw3270_buffer_address(tp->view.dev, cp + 1,
+				       tp->view.cols * (nr + 1));
+}
+
+/*
+ * Rebuild update list to print all lines.
+ * Walks the line list newest-first, skipping tp->nr_up history lines,
+ * and queues everything that fits on the rows above prompt and status.
+ */
+static void
+tty3270_rebuild_update(struct tty3270 *tp)
+{
+	struct string *s, *n;
+	int line, nr_up;
+
+	/*
+	 * Throw away update list and create a new one,
+	 * containing all lines that will fit on the screen.
+	 */
+	list_for_each_entry_safe(s, n, &tp->update, update)
+		list_del_init(&s->update);
+	/* Bottom-most usable row (rows - 2 are prompt, last is status). */
+	line = tp->view.rows - 3;
+	nr_up = tp->nr_up;
+	list_for_each_entry_reverse(s, &tp->lines, list) {
+		if (nr_up > 0) {
+			nr_up--;
+			continue;
+		}
+		tty3270_update_string(tp, s, line);
+		list_add(&s->update, &tp->update);
+		if (--line < 0)
+			break;
+	}
+	tp->update_flags |= TTY_UPDATE_LIST;
+}
+
+/*
+ * Alloc string for size bytes. If there is not enough room in
+ * freemem, free strings until there is room.
+ * Oldest output lines are recycled first; lines still visible on the
+ * screen are never freed (BUG otherwise).
+ */
+static struct string *
+tty3270_alloc_string(struct tty3270 *tp, size_t size)
+{
+	struct string *s, *n;
+
+	s = alloc_string(&tp->freemem, size);
+	if (s)
+		return s;
+	list_for_each_entry_safe(s, n, &tp->lines, list) {
+		BUG_ON(tp->nr_lines <= tp->view.rows - 2);
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		tp->nr_lines--;
+		if (free_string(&tp->freemem, s) >= size)
+			break;
+	}
+	s = alloc_string(&tp->freemem, size);
+	BUG_ON(!s);
+	/* If history scrolling now points past the shrunken line list,
+	 * clamp it and redraw. */
+	if (tp->nr_up != 0 &&
+	    tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
+		tp->nr_up = tp->nr_lines - tp->view.rows + 2;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+	}
+	return s;
+}
+
+/*
+ * Add an empty line to the list.
+ * The blueprint resets highlight/color and repeats blanks to the next
+ * row start (TO_RA); its addresses are patched in tty3270_update_string.
+ */
+static void
+tty3270_blank_line(struct tty3270 *tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
+		  TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
+	struct string *s;
+
+	s = tty3270_alloc_string(tp, sizeof(blueprint));
+	memcpy(s->string, blueprint, sizeof(blueprint));
+	s->len = sizeof(blueprint);
+	list_add_tail(&s->list, &tp->lines);
+	tp->nr_lines++;
+	if (tp->nr_up != 0)
+		tp->nr_up++;
+}
+
+/*
+ * Write request completion callback.
+ * Returns the single write request to tp->write for reuse.
+ */
+static void
+tty3270_write_callback(struct raw3270_request *rq, void *data)
+{
+	struct tty3270 *tp;
+
+	tp = (struct tty3270 *) rq->view;
+	if (rq->rc != 0) {
+		/* Write wasn't successful. Refresh all. */
+		tty3270_rebuild_update(tp);
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+	raw3270_request_reset(rq);
+	xchg(&tp->write, rq);
+}
+
+/*
+ * Update 3270 display.
+ * Timer function: claims the single write request, assembles one write
+ * channel program from the pending update_flags (erase, status line,
+ * input line, queued output lines) and starts it. Whatever did not fit
+ * keeps its flag bit set and is retried on the next timer tick.
+ */
+static void
+tty3270_update(struct tty3270 *tp)
+{
+	static char invalid_sba[2] = { 0xff, 0xff };
+	struct raw3270_request *wrq;
+	unsigned long updated;
+	struct string *s, *n;
+	char *sba, *str;
+	int rc, len;
+
+	wrq = xchg(&tp->write, 0);
+	if (!wrq) {
+		/* Write request busy; retry on the next tick. */
+		tty3270_set_timer(tp, 1);
+		return;
+	}
+
+	spin_lock(&tp->view.lock);
+	updated = 0;
+	if (tp->update_flags & TTY_UPDATE_ERASE) {
+		/* Use erase write alternate to erase display. */
+		raw3270_request_set_cmd(wrq, TC_EWRITEA);
+		updated |= TTY_UPDATE_ERASE;
+	} else
+		raw3270_request_set_cmd(wrq, TC_WRITE);
+
+	raw3270_request_add_data(wrq, &tp->wcc, 1);
+	tp->wcc = TW_NONE;
+
+	/*
+	 * Update status line.
+	 */
+	if (tp->update_flags & TTY_UPDATE_STATUS)
+		if (raw3270_request_add_data(wrq, tp->status->string,
+					     tp->status->len) == 0)
+			updated |= TTY_UPDATE_STATUS;
+
+	/*
+	 * Write input line.
+	 */
+	if (tp->update_flags & TTY_UPDATE_INPUT)
+		if (raw3270_request_add_data(wrq, tp->prompt->string,
+					     tp->prompt->len) == 0)
+			updated |= TTY_UPDATE_INPUT;
+
+	sba = invalid_sba;
+
+	if (tp->update_flags & TTY_UPDATE_LIST) {
+		/* Write strings in the update list to the screen. */
+		list_for_each_entry_safe(s, n, &tp->update, update) {
+			str = s->string;
+			len = s->len;
+			/*
+			 * Skip TO_SBA at the start of the string if the
+			 * last output position matches the start address
+			 * of this line.
+			 */
+			if (s->string[1] == sba[0] && s->string[2] == sba[1])
+				str += 3, len -= 3;
+			if (raw3270_request_add_data(wrq, str, len) != 0)
+				break;
+			list_del_init(&s->update);
+			sba = s->string + s->len - 3;
+		}
+		if (list_empty(&tp->update))
+			updated |= TTY_UPDATE_LIST;
+	}
+	wrq->callback = tty3270_write_callback;
+	rc = raw3270_start(&tp->view, wrq);
+	if (rc == 0) {
+		tp->update_flags &= ~updated;
+		if (tp->update_flags)
+			tty3270_set_timer(tp, 1);
+	} else {
+		/* Start failed: put the request back for the next try. */
+		raw3270_request_reset(wrq);
+		xchg(&tp->write, wrq);
+	}
+	spin_unlock(&tp->view.lock);
+	raw3270_put_view(&tp->view);
+}
+
+/*
+ * Command recalling.
+ * Append an input line to the recall ring, evicting the oldest entry
+ * when rcl_max is reached, and reset the walk position.
+ */
+static void
+tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
+{
+	struct string *s;
+
+	tp->rcl_walk = 0;
+	if (len <= 0)
+		return;
+	if (tp->rcl_nr >= tp->rcl_max) {
+		s = list_entry(tp->rcl_lines.next, struct string, list);
+		list_del(&s->list);
+		free_string(&tp->freemem, s);
+		tp->rcl_nr--;
+	}
+	s = tty3270_alloc_string(tp, len);
+	memcpy(s->string, input, len);
+	list_add_tail(&s->list, &tp->rcl_lines);
+	tp->rcl_nr++;
+}
+
+/*
+ * Recall key handler: step backwards through the recall ring and show
+ * the selected line in the input prompt. Walking past the oldest entry
+ * wraps around to the newest one; an empty ring clears the prompt.
+ */
+static void
+tty3270_rcl_backward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp;
+	struct string *s;
+
+	tp = kbd->tty->driver_data;
+	spin_lock_bh(&tp->view.lock);
+	if (tp->inattr == TF_INPUT) {
+		if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
+			tp->rcl_walk = tp->rcl_walk->prev;
+		else if (!list_empty(&tp->rcl_lines))
+			tp->rcl_walk = tp->rcl_lines.prev;
+		if (tp->rcl_walk) {
+			s = list_entry(tp->rcl_walk, struct string, list);
+			tty3270_update_prompt(tp, s->string, s->len);
+		} else
+			tty3270_update_prompt(tp, 0, 0);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Deactivate tty view.
+ * Bound to a key in the keyboard tables; switches away from this view.
+ */
+static void
+tty3270_exit_tty(struct kbd_data *kbd)
+{
+	struct tty3270 *tp;
+
+	tp = kbd->tty->driver_data;
+	raw3270_deactivate_view(&tp->view);
+}
+
+/*
+ * Scroll forward in history.
+ * Moves one screenful (rows - 2 lines) towards the newest output,
+ * clamping at the live screen (nr_up == 0).
+ */
+static void
+tty3270_scroll_forward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp;
+	int nr_up;
+
+	tp = kbd->tty->driver_data;
+	spin_lock_bh(&tp->view.lock);
+	nr_up = tp->nr_up - tp->view.rows + 2;
+	if (nr_up < 0)
+		nr_up = 0;
+	if (nr_up != tp->nr_up) {
+		tp->nr_up = nr_up;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Scroll backward in history.
+ * Moves one screenful (rows - 2 lines) towards the oldest output,
+ * clamping so the screen stays filled.
+ */
+static void
+tty3270_scroll_backward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp;
+	int nr_up;
+
+	tp = kbd->tty->driver_data;
+	spin_lock_bh(&tp->view.lock);
+	nr_up = tp->nr_up + tp->view.rows - 2;
+	if (nr_up + tp->view.rows - 2 > tp->nr_lines)
+		nr_up = tp->nr_lines - tp->view.rows + 2;
+	if (nr_up != tp->nr_up) {
+		tp->nr_up = nr_up;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Pass input line to tty.
+ * Tasklet body scheduled from the read completion callback: consumes
+ * the completed read-modified request, feeds the input characters and
+ * the AID byte into the keyboard layer, resets the keyboard lock and
+ * returns the read request for reuse.
+ */
+static void
+tty3270_read_tasklet(struct raw3270_request *rrq)
+{
+	static char kreset_data = TW_KR;
+	struct tty3270 *tp;
+	char *input;
+	int len;
+
+	tp = (struct tty3270 *) rrq->view;
+	spin_lock_bh(&tp->view.lock);
+	/*
+	 * Two AID keys are special: For 0x7d (enter) the input line
+	 * has to be emitted to the tty and for 0x6d the screen
+	 * needs to be redrawn.
+	 */
+	input = 0;
+	len = 0;
+	if (tp->input->string[0] == 0x7d) {
+		/* Enter: write input to tty. */
+		input = tp->input->string + 6;
+		/* rescnt is the residual count: bytes NOT filled in. */
+		len = tp->input->len - 6 - rrq->rescnt;
+		if (tp->inattr != TF_INPUTN)
+			tty3270_rcl_add(tp, input, len);
+		if (tp->nr_up > 0) {
+			tp->nr_up = 0;
+			tty3270_rebuild_update(tp);
+			tty3270_update_status(tp);
+		}
+		/* Clear input area. */
+		tty3270_update_prompt(tp, 0, 0);
+		tty3270_set_timer(tp, 1);
+	} else if (tp->input->string[0] == 0x6d) {
+		/* Display has been cleared. Redraw. */
+		tty3270_rebuild_update(tp);
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+
+	/* Start keyboard reset command. */
+	raw3270_request_reset(tp->kreset);
+	raw3270_request_set_cmd(tp->kreset, TC_WRITE);
+	raw3270_request_add_data(tp->kreset, &kreset_data, 1);
+	raw3270_start(&tp->view, tp->kreset);
+
+	/* Emit input string. */
+	if (tp->tty) {
+		while (len-- > 0)
+			kbd_keycode(tp->kbd, *input++);
+		/* Emit keycode for AID byte. */
+		kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
+	}
+
+	/* Return the read request and drop the reference taken in
+	 * tty3270_read_callback. */
+	raw3270_request_reset(rrq);
+	xchg(&tp->read, rrq);
+	raw3270_put_view(&tp->view);
+}
+
+/*
+ * Read request completion callback.
+ * Runs in interrupt context; takes a view reference (dropped by the
+ * tasklet) and defers the real work to tty3270_read_tasklet.
+ */
+static void
+tty3270_read_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_get_view(rq->view);
+	/* Schedule tasklet to pass input to tty. */
+	tasklet_schedule(&((struct tty3270 *) rq->view)->readlet);
+}
+
+/*
+ * Issue a read request.
+ * NOTE(review): the original header said "Call with device lock", but
+ * 'lock' selects between raw3270_start (takes the lock) and
+ * raw3270_start_irq (caller already holds it) -- confirm callers.
+ */
+static void
+tty3270_issue_read(struct tty3270 *tp, int lock)
+{
+	struct raw3270_request *rrq;
+	int rc;
+
+	rrq = xchg(&tp->read, 0);
+	if (!rrq)
+		/* Read already scheduled. */
+		return;
+	rrq->callback = tty3270_read_callback;
+	rrq->callback_data = tp;
+	raw3270_request_set_cmd(rrq, TC_READMOD);
+	raw3270_request_set_data(rrq, tp->input->string, tp->input->len);
+	/* Issue the read modified request. */
+	if (lock) {
+		rc = raw3270_start(&tp->view, rrq);
+	} else
+		rc = raw3270_start_irq(&tp->view, rrq);
+	if (rc) {
+		/* Start failed: make the request available again. */
+		raw3270_request_reset(rrq);
+		xchg(&tp->read, rrq);
+	}
+}
+
+/*
+ * Switch to the tty view.
+ * raw3270 view callback: redraws the whole screen and restarts the tty.
+ */
+static int
+tty3270_activate(struct raw3270_view *view)
+{
+	struct tty3270 *tp;
+	unsigned long flags;
+
+	tp = (struct tty3270 *) view;
+	spin_lock_irqsave(&tp->view.lock, flags);
+	tp->nr_up = 0;
+	tty3270_rebuild_update(tp);
+	tty3270_update_status(tp);
+	tp->update_flags = TTY_UPDATE_ALL;
+	tty3270_set_timer(tp, 1);
+	spin_unlock_irqrestore(&tp->view.lock, flags);
+	start_tty(tp->tty);
+	return 0;
+}
+
+/*
+ * Leave the tty view: suspend tty output while the view is not the
+ * active one on the device.
+ */
+static void
+tty3270_deactivate(struct raw3270_view *view)
+{
+	struct tty3270 *tp = (struct tty3270 *) view;
+
+	if (!tp || !tp->tty)
+		return;
+	stop_tty(tp->tty);
+}
+
+/*
+ * Interrupt handler for the tty3270 view.
+ * ATTN means the operator hit an AID key and input is pending; if the
+ * tty is throttled the read is deferred via tp->attn (picked up by
+ * tty3270_unthrottle), otherwise it is issued right away.
+ */
+static int
+tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Schedule tasklet to read aid. */
+	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+		if (!tp->throttle)
+			tty3270_issue_read(tp, 0);
+		else
+			tp->attn = 1;
+	}
+
+	if (rq) {
+		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.count;
+	}
+	return RAW3270_IO_DONE;
+}
+
+/*
+ * Allocate tty3270 structure.
+ * Sets up the string free-memory pool (TTY3270_STRING_PAGES DMA pages),
+ * the write/read/keyboard-reset raw3270 requests and the keyboard
+ * state. On any failure the already acquired resources are unwound in
+ * reverse order via the goto chain. Returns ERR_PTR(-ENOMEM) on error.
+ */
+static struct tty3270 *
+tty3270_alloc_view(void)
+{
+	struct tty3270 *tp;
+	int pages;
+
+	tp = kmalloc(sizeof(struct tty3270),GFP_KERNEL);
+	if (!tp)
+		goto out_err;
+	memset(tp, 0, sizeof(struct tty3270));
+	tp->freemem_pages =
+		kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL);
+	if (!tp->freemem_pages)
+		goto out_tp;
+	INIT_LIST_HEAD(&tp->freemem);
+	init_timer(&tp->timer);
+	/* GFP_DMA: these pages are handed to channel I/O as 3270 data. */
+	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
+		tp->freemem_pages[pages] = (void *)
+			__get_free_pages(GFP_KERNEL|GFP_DMA, 0);
+		if (!tp->freemem_pages[pages])
+			goto out_pages;
+		add_string_memory(&tp->freemem,
+				  tp->freemem_pages[pages], PAGE_SIZE);
+	}
+	tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
+	if (!tp->write)
+		goto out_pages;
+	tp->read = raw3270_request_alloc(0);
+	if (!tp->read)
+		goto out_write;
+	tp->kreset = raw3270_request_alloc(1);
+	if (!tp->kreset)
+		goto out_read;
+	tp->kbd = kbd_alloc();
+	if (!tp->kbd)
+		goto out_reset;
+	return tp;
+
+out_reset:
+	raw3270_request_free(tp->kreset);
+out_read:
+	raw3270_request_free(tp->read);
+out_write:
+	raw3270_request_free(tp->write);
+out_pages:
+	/* 'pages' counts the successfully allocated pages. */
+	while (pages--)
+		free_pages((unsigned long) tp->freemem_pages[pages], 0);
+	kfree(tp->freemem_pages);
+out_tp:
+	kfree(tp);
+out_err:
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free tty3270 structure.
+ * Releases everything acquired by tty3270_alloc_view, in reverse
+ * allocation order.
+ */
+static void
+tty3270_free_view(struct tty3270 *tp)
+{
+	int pages;
+
+	kbd_free(tp->kbd);
+	raw3270_request_free(tp->kreset);
+	raw3270_request_free(tp->read);
+	raw3270_request_free(tp->write);
+	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
+		free_pages((unsigned long) tp->freemem_pages[pages], 0);
+	kfree(tp->freemem_pages);
+	kfree(tp);
+}
+
+/*
+ * Allocate tty3270 screen.
+ * The output area is view.rows - 2 lines (the other two rows hold the
+ * input prompt and the status line, see tty3270_create_prompt/status),
+ * each with view.cols cells. Returns -ENOMEM on failure, with all
+ * partially allocated line arrays freed.
+ */
+static int
+tty3270_alloc_screen(struct tty3270 *tp)
+{
+	unsigned long size;
+	int lines;
+
+	size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
+	tp->screen = kmalloc(size, GFP_KERNEL);
+	if (!tp->screen)
+		goto out_err;
+	memset(tp->screen, 0, size);
+	for (lines = 0; lines < tp->view.rows - 2; lines++) {
+		size = sizeof(struct tty3270_cell) * tp->view.cols;
+		tp->screen[lines].cells = kmalloc(size, GFP_KERNEL);
+		if (!tp->screen[lines].cells)
+			goto out_screen;
+		memset(tp->screen[lines].cells, 0, size);
+	}
+	return 0;
+out_screen:
+	while (lines--)
+		kfree(tp->screen[lines].cells);
+	kfree(tp->screen);
+out_err:
+	return -ENOMEM;
+}
+
+/*
+ * Free tty3270 screen: release the cell array of every output line,
+ * then the line array itself.
+ */
+static void
+tty3270_free_screen(struct tty3270 *tp)
+{
+	int i;
+
+	for (i = tp->view.rows - 2; i-- > 0; )
+		kfree(tp->screen[i].cells);
+	kfree(tp->screen);
+}
+
+/*
+ * Unlink tty3270 data structure from tty.
+ * Called by the raw3270 layer when the device goes away: severs the
+ * tty <-> view cross references, hangs up the tty and drops the view
+ * reference that the tty held.
+ */
+static void
+tty3270_release(struct raw3270_view *view)
+{
+	struct tty3270 *tp;
+	struct tty_struct *tty;
+
+	tp = (struct tty3270 *) view;
+	tty = tp->tty;
+	if (tty) {
+		tty->driver_data = 0;
+		tp->tty = tp->kbd->tty = 0;
+		tty_hangup(tty);
+		raw3270_put_view(&tp->view);
+	}
+}
+
+/*
+ * Free tty3270 data structure.
+ * Destructor callback of the raw3270 view: releases the screen image
+ * and then the view structure itself.
+ */
+static void
+tty3270_free(struct raw3270_view *view)
+{
+	struct tty3270 *tp = (struct tty3270 *) view;
+
+	tty3270_free_screen(tp);
+	tty3270_free_view(tp);
+}
+
+/*
+ * Delayed freeing of tty3270 views.
+ * Walks all indices ever used (0 .. tty3270_max_index-1) and deletes
+ * every view that still exists; used at module unload.
+ */
+static void
+tty3270_del_views(void)
+{
+	struct tty3270 *tp;
+	int i;
+
+	for (i = 0; i < tty3270_max_index; i++) {
+		tp = (struct tty3270 *) raw3270_find_view(&tty3270_fn, i);
+		if (!IS_ERR(tp))
+			raw3270_del_view(&tp->view);
+	}
+}
+
+/*
+ * View operations registered with the raw3270 layer.
+ * .intv is cast because tty3270_irq takes a struct tty3270 * where the
+ * generic signature expects a struct raw3270_view * (first member).
+ */
+struct raw3270_fn tty3270_fn = {
+	.activate = tty3270_activate,
+	.deactivate = tty3270_deactivate,
+	.intv = (void *) tty3270_irq,
+	.release = tty3270_release,
+	.free = tty3270_free
+};
+
+/*
+ * This routine is called whenever a 3270 tty is opened.
+ * Only the first open (tty->count == 1) does real work: it either
+ * reattaches an existing view for tty->index or builds a new one
+ * (view structure, screen, prompt/status lines, keyboard bindings)
+ * and activates it.
+ */
+static int
+tty3270_open(struct tty_struct *tty, struct file * filp)
+{
+	struct tty3270 *tp;
+	int i, rc;
+
+	if (tty->count > 1)
+		return 0;
+	/* Check if the tty3270 is already there. */
+	tp = (struct tty3270 *) raw3270_find_view(&tty3270_fn, tty->index);
+	if (!IS_ERR(tp)) {
+		/* Reuse existing view: just rewire tty <-> view. */
+		tty->driver_data = tp;
+		tty->winsize.ws_row = tp->view.rows - 2;
+		tty->winsize.ws_col = tp->view.cols;
+		tty->low_latency = 0;
+		tp->tty = tty;
+		tp->kbd->tty = tty;
+		tp->inattr = TF_INPUT;
+		return 0;
+	}
+	/* Remember the highest index ever opened for tty3270_del_views. */
+	if (tty3270_max_index < tty->index + 1)
+		tty3270_max_index = tty->index + 1;
+
+	/* Quick exit if there is no device for tty->index. */
+	if (PTR_ERR(tp) == -ENODEV)
+		return -ENODEV;
+
+	/* Allocate tty3270 structure on first open. */
+	tp = tty3270_alloc_view();
+	if (IS_ERR(tp))
+		return PTR_ERR(tp);
+
+	INIT_LIST_HEAD(&tp->lines);
+	INIT_LIST_HEAD(&tp->update);
+	INIT_LIST_HEAD(&tp->rcl_lines);
+	tp->rcl_max = 20;
+	init_timer(&tp->timer);
+	/* Tasklet argument is the read request, as consumed by
+	 * tty3270_read_tasklet. */
+	tasklet_init(&tp->readlet,
+		     (void (*)(unsigned long)) tty3270_read_tasklet,
+		     (unsigned long) tp->read);
+
+	rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
+	if (rc) {
+		tty3270_free_view(tp);
+		return rc;
+	}
+
+	/* Needs view.rows/cols, hence after raw3270_add_view. */
+	rc = tty3270_alloc_screen(tp);
+	if (rc) {
+		raw3270_del_view(&tp->view);
+		raw3270_put_view(&tp->view);
+		return rc;
+	}
+
+	tp->tty = tty;
+	tty->low_latency = 0;
+	tty->driver_data = tp;
+	tty->winsize.ws_row = tp->view.rows - 2;
+	tty->winsize.ws_col = tp->view.cols;
+
+	tty3270_create_prompt(tp);
+	tty3270_create_status(tp);
+	tty3270_update_status(tp);
+
+	/* Create blank line for every line in the tty output area. */
+	for (i = 0; i < tp->view.rows - 2; i++)
+		tty3270_blank_line(tp);
+
+	/* Wire keyboard function keys to scroll/recall/exit handlers. */
+	tp->kbd->tty = tty;
+	tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
+	tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
+	tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
+	tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
+	kbd_ascebc(tp->kbd, tp->view.ascebc);
+
+	raw3270_activate_view(&tp->view);
+	return 0;
+}
+
+/*
+ * This routine is called when the 3270 tty is closed. We wait
+ * for the remaining requests to be completed. Then we clean up.
+ * Only the last close detaches the view; the view itself stays
+ * registered and is reused on the next open.
+ */
+static void
+tty3270_close(struct tty_struct *tty, struct file * filp)
+{
+	struct tty3270 *tp;
+
+	if (tty->count > 1)
+		return;
+	tp = (struct tty3270 *) tty->driver_data;
+	if (tp) {
+		tty->driver_data = 0;
+		tp->tty = tp->kbd->tty = 0;
+		raw3270_put_view(&tp->view);
+	}
+}
+
+/*
+ * We always have room.
+ * Output is folded into the in-memory screen image immediately by
+ * tty3270_do_write, so there is no bounded output buffer to fill.
+ */
+static int
+tty3270_write_room(struct tty_struct *tty)
+{
+	return INT_MAX;
+}
+
+/*
+ * Insert character into the screen at the current position with the
+ * current color and highlight. This function does NOT do cursor movement.
+ * Cells store the EBCDIC-translated character (view.ascebc); if the
+ * cursor is beyond the current line length, the gap is padded with
+ * blank cells first.
+ */
+static void
+tty3270_put_character(struct tty3270 *tp, char ch)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+
+	line = tp->screen + tp->cy;
+	if (line->len <= tp->cx) {
+		/* Pad [len, cx) with blanks, then extend len over cx. */
+		while (line->len < tp->cx) {
+			cell = line->cells + line->len;
+			cell->character = tp->view.ascebc[' '];
+			cell->highlight = tp->highlight;
+			cell->f_color = tp->f_color;
+			line->len++;
+		}
+		line->len++;
+	}
+	cell = line->cells + tp->cx;
+	cell->character = tp->view.ascebc[(unsigned int) ch];
+	cell->highlight = tp->highlight;
+	cell->f_color = tp->f_color;
+}
+
+/*
+ * Convert a tty3270_line to a 3270 data fragment usable for output.
+ * Two passes: first compute the exact fragment length (cell bytes plus
+ * TO_SA attribute switches plus prefix/postfix orders), then emit the
+ * orders. The backing string is reallocated only when the length
+ * changed. If the line is currently visible it is also queued on the
+ * update list.
+ */
+static void
+tty3270_convert_line(struct tty3270 *tp, int line_nr)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+	struct string *s, *n;
+	unsigned char highlight;
+	unsigned char f_color;
+	char *cp;
+	int flen, i;
+
+	/* Determine how long the fragment will be. */
+	flen = 3;		/* Prefix (TO_SBA). */
+	line = tp->screen + line_nr;
+	flen += line->len;
+	highlight = TAX_RESET;
+	f_color = TAC_RESET;
+	for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+		if (cell->highlight != highlight) {
+			flen += 3;	/* TO_SA to switch highlight. */
+			highlight = cell->highlight;
+		}
+		if (cell->f_color != f_color) {
+			flen += 3;	/* TO_SA to switch color. */
+			f_color = cell->f_color;
+		}
+	}
+	if (highlight != TAX_RESET)
+		flen += 3;	/* TO_SA to reset highlight. */
+	if (f_color != TAC_RESET)
+		flen += 3;	/* TO_SA to reset color. */
+	if (line->len < tp->view.cols)
+		flen += 4;	/* Postfix (TO_RA). */
+
+	/* Find the line in the list. List order mirrors screen order. */
+	i = tp->view.rows - 2 - line_nr;
+	list_for_each_entry_reverse(s, &tp->lines, list)
+		if (--i <= 0)
+			break;
+	/*
+	 * Check if the line needs to get reallocated.
+	 */
+	if (s->len != flen) {
+		/* Reallocate string. */
+		n = tty3270_alloc_string(tp, flen);
+		list_add(&n->list, &s->list);
+		list_del_init(&s->list);
+		if (!list_empty(&s->update))
+			list_del_init(&s->update);
+		free_string(&tp->freemem, s);
+		s = n;
+	}
+
+	/* Write 3270 data fragment. Buffer address is patched in later. */
+	cp = s->string;
+	*cp++ = TO_SBA;
+	*cp++ = 0;
+	*cp++ = 0;
+
+	highlight = TAX_RESET;
+	f_color = TAC_RESET;
+	for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+		if (cell->highlight != highlight) {
+			*cp++ = TO_SA;
+			*cp++ = TAT_EXTHI;
+			*cp++ = cell->highlight;
+			highlight = cell->highlight;
+		}
+		if (cell->f_color != f_color) {
+			*cp++ = TO_SA;
+			*cp++ = TAT_COLOR;
+			*cp++ = cell->f_color;
+			f_color = cell->f_color;
+		}
+		*cp++ = cell->character;
+	}
+	if (highlight != TAX_RESET) {
+		*cp++ = TO_SA;
+		*cp++ = TAT_EXTHI;
+		*cp++ = TAX_RESET;
+	}
+	if (f_color != TAC_RESET) {
+		*cp++ = TO_SA;
+		*cp++ = TAT_COLOR;
+		*cp++ = TAC_RESET;
+	}
+	if (line->len < tp->view.cols) {
+		/* Repeat-to-address blanks out the rest of the row. */
+		*cp++ = TO_RA;
+		*cp++ = 0;
+		*cp++ = 0;
+		*cp++ = 0;
+	}
+
+	if (tp->nr_up + line_nr < tp->view.rows - 2) {
+		/* Line is currently visible on screen. */
+		tty3270_update_string(tp, s, line_nr);
+		/* Add line to update list. */
+		if (list_empty(&s->update)) {
+			list_add_tail(&s->update, &tp->update);
+			tp->update_flags |= TTY_UPDATE_LIST;
+		}
+	}
+}
+
+/*
+ * Do carriage return: move the cursor to column 0 of the current line.
+ */
+static void
+tty3270_cr(struct tty3270 *tp)
+{
+	tp->cx = 0;
+}
+
+/*
+ * Do line feed.
+ * Converts the current line, then either moves the cursor down or, on
+ * the last output row, scrolls: a fresh blank line is appended and the
+ * screen array is rotated up by one.
+ */
+static void
+tty3270_lf(struct tty3270 *tp)
+{
+	struct tty3270_line temp;
+	int i;
+
+	tty3270_convert_line(tp, tp->cy);
+	if (tp->cy < tp->view.rows - 3) {
+		tp->cy++;
+		return;
+	}
+	/* Last line just filled up. Add new, blank line. */
+	tty3270_blank_line(tp);
+	/* Rotate screen[]: row 0 scrolls off, its cell buffer is reused
+	 * (emptied via len = 0) as the new bottom row. */
+	temp = tp->screen[0];
+	temp.len = 0;
+	for (i = 0; i < tp->view.rows - 3; i++)
+		tp->screen[i] = tp->screen[i+1];
+	tp->screen[tp->view.rows - 3] = temp;
+	tty3270_rebuild_update(tp);
+}
+
+/*
+ * Reverse index: move the cursor up one line (no scroll at the top),
+ * converting the line it leaves.
+ */
+static void
+tty3270_ri(struct tty3270 *tp)
+{
+	if (tp->cy > 0) {
+	    tty3270_convert_line(tp, tp->cy);
+	    tp->cy--;
+	}
+}
+
+/*
+ * Insert characters at current position.
+ * Pads the line up to the cursor if needed, shifts existing cells
+ * right (dropping whatever falls off the right edge) and fills the
+ * opened gap with blanks in the current highlight/color.
+ */
+static void
+tty3270_insert_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	int k;
+
+	line = tp->screen + tp->cy;
+	/* Pad [len, cx) with reset-attribute blanks. */
+	while (line->len < tp->cx) {
+		line->cells[line->len].character = tp->view.ascebc[' '];
+		line->cells[line->len].highlight = TAX_RESET;
+		line->cells[line->len].f_color = TAC_RESET;
+		line->len++;
+	}
+	/* Clamp the insert count to the space right of the cursor. */
+	if (n > tp->view.cols - tp->cx)
+		n = tp->view.cols - tp->cx;
+	/* Shift the surviving tail n cells to the right (backwards copy
+	 * so overlapping ranges are safe). */
+	k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
+	while (k--)
+		line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
+	line->len += n;
+	if (line->len > tp->view.cols)
+		line->len = tp->view.cols;
+	while (n-- > 0) {
+		line->cells[tp->cx + n].character = tp->view.ascebc[' '];
+		line->cells[tp->cx + n].highlight = tp->highlight;
+		line->cells[tp->cx + n].f_color = tp->f_color;
+	}
+}
+
+/*
+ * Delete characters at current position.
+ * Removes up to n cells at the cursor, closing the gap by shifting the
+ * tail of the line left. No-op if the cursor is past the line end.
+ */
+static void
+tty3270_delete_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	int i;
+
+	line = tp->screen + tp->cy;
+	if (line->len <= tp->cx)
+		return;
+	if (line->len - tp->cx <= n) {
+		/* Deleting everything right of the cursor: truncate. */
+		line->len = tp->cx;
+		return;
+	}
+	for (i = tp->cx; i + n < line->len; i++)
+		line->cells[i] = line->cells[i + n];
+	line->len -= n;
+}
+
+/*
+ * Erase characters at current position.
+ * Overwrites up to n cells starting at the cursor with blank cells and
+ * advances the cursor by n, clamped to the last column.
+ */
+static void
+tty3270_erase_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+
+	line = tp->screen + tp->cy;
+	while (line->len > tp->cx && n > 0) {
+		n--;
+		cell = line->cells + tp->cx++;
+		/* Cells hold EBCDIC (see tty3270_put_character), so store
+		 * the translated blank, not the ASCII one. */
+		cell->character = tp->view.ascebc[' '];
+		cell->highlight = TAX_RESET;
+		cell->f_color = TAC_RESET;
+	}
+	/* Advance over any remainder that ran past the end of the line.
+	 * The old 'n-- > 0' loop condition left n == -1 here when the
+	 * erase completed inside the line, moving the cursor one column
+	 * too far to the left. */
+	tp->cx += n;
+	tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
+}
+
+/*
+ * Erase line, 3 different cases:
+ *  Esc [ 0 K	Erase from current position to end of line inclusive
+ *  Esc [ 1 K	Erase from beginning of line to current position inclusive
+ *  Esc [ 2 K	Erase entire line (without moving cursor)
+ */
+static void
+tty3270_erase_line(struct tty3270 *tp, int mode)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+	int i;
+
+	line = tp->screen + tp->cy;
+	if (mode == 0)
+		line->len = tp->cx;
+	else if (mode == 1) {
+		for (i = 0; i < tp->cx; i++) {
+			cell = line->cells + i;
+			/* Cells hold EBCDIC (see tty3270_put_character);
+			 * use the translated blank. */
+			cell->character = tp->view.ascebc[' '];
+			cell->highlight = TAX_RESET;
+			cell->f_color = TAC_RESET;
+		}
+		/* Make sure the blanked cursor cell is within the line. */
+		if (line->len <= tp->cx)
+			line->len = tp->cx + 1;
+	} else if (mode == 2)
+		line->len = 0;
+	tty3270_convert_line(tp, tp->cy);
+}
+
+/*
+ * Erase display, 3 different cases:
+ *  Esc [ 0 J	Erase from current position to bottom of screen inclusive
+ *  Esc [ 1 J	Erase from top of screen to current position inclusive
+ *  Esc [ 2 J	Erase entire screen (without moving the cursor)
+ * Full rows are cleared by zeroing their length; the partial row is
+ * delegated to tty3270_erase_line.
+ */
+static void
+tty3270_erase_display(struct tty3270 *tp, int mode)
+{
+	int i;
+
+	if (mode == 0) {
+		tty3270_erase_line(tp, 0);
+		for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+	} else if (mode == 1) {
+		for (i = 0; i < tp->cy; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+		tty3270_erase_line(tp, 1);
+	} else if (mode == 2) {
+		for (i = 0; i < tp->view.rows - 2; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+	}
+	tty3270_rebuild_update(tp);
+}
+
+/*
+ * Set attributes found in an escape sequence.
+ * Esc [ <attr> ; <attr> ; ... m
+ * Maps ANSI SGR parameters onto 3270 extended highlight (TAX_*) and
+ * color (TAC_*) attributes; unknown parameters are ignored.
+ */
+static void
+tty3270_set_attributes(struct tty3270 *tp)
+{
+	/* Indexed by SGR parameter - 30; entry 9 covers 39 (default). */
+	static unsigned char f_colors[] = {
+		TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
+		TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
+	};
+	int i, attr;
+
+	for (i = 0; i <= tp->esc_npar; i++) {
+		attr = tp->esc_par[i];
+		switch (attr) {
+		case 0:		/* Reset */
+			tp->highlight = TAX_RESET;
+			tp->f_color = TAC_RESET;
+			break;
+		/* Highlight. */
+		case 4:		/* Start underlining. */
+			tp->highlight = TAX_UNDER;
+			break;
+		case 5:		/* Start blink. */
+			tp->highlight = TAX_BLINK;
+			break;
+		case 7:		/* Start reverse. */
+			tp->highlight = TAX_REVER;
+			break;
+		case 24:	/* End underlining */
+			if (tp->highlight == TAX_UNDER)
+				tp->highlight = TAX_RESET;
+			break;
+		case 25:	/* End blink. */
+			if (tp->highlight == TAX_BLINK)
+				tp->highlight = TAX_RESET;
+			break;
+		case 27:	/* End reverse. */
+			if (tp->highlight == TAX_REVER)
+				tp->highlight = TAX_RESET;
+			break;
+		/* Foreground color. */
+		case 30:	/* Black */
+		case 31:	/* Red */
+		case 32:	/* Green */
+		case 33:	/* Yellow */
+		case 34:	/* Blue */
+		case 35:	/* Magenta */
+		case 36:	/* Cyan */
+		case 37:	/* White */
+		case 39:	/* Black */
+			tp->f_color = f_colors[attr - 30];
+			break;
+		}
+	}
+}
+
+/*
+ * Return escape parameter ix, treating a missing/zero parameter as 1
+ * (the ANSI default for cursor-movement counts).
+ */
+static inline int
+tty3270_getpar(struct tty3270 *tp, int ix)
+{
+	int par = tp->esc_par[ix];
+
+	if (par <= 0)
+		par = 1;
+	return par;
+}
+
+/*
+ * Move the cursor to (cx, cy), clamped to the output area. When the
+ * row changes, the line being left is converted to 3270 output first.
+ */
+static void
+tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
+{
+	tp->cx = min_t(int, tp->view.cols - 1, max_t(int, 0, cx));
+	cy = min_t(int, tp->view.rows - 3, max_t(int, 0, cy));
+	if (cy != tp->cy) {
+		tty3270_convert_line(tp, tp->cy);
+		tp->cy = cy;
+	}
+}
+
+/*
+ * Process escape sequences. Known sequences:
+ *  Esc 7			Save Cursor Position
+ *  Esc 8			Restore Cursor Position
+ *  Esc [ Pn ; Pn ; .. m	Set attributes
+ *  Esc [ Pn ; Pn H		Cursor Position
+ *  Esc [ Pn ; Pn f		Cursor Position
+ *  Esc [ Pn A			Cursor Up
+ *  Esc [ Pn B			Cursor Down
+ *  Esc [ Pn C			Cursor Forward
+ *  Esc [ Pn D			Cursor Backward
+ *  Esc [ Pn G			Cursor Horizontal Absolute
+ *  Esc [ Pn X			Erase Characters
+ *  Esc [ Ps J			Erase in Display
+ *  Esc [ Ps K			Erase in Line
+ * // FIXME: add all the new ones.
+ *
+ * Pn is a numeric parameter, a string of zero or more decimal digits.
+ * Ps is a selective parameter.
+ *
+ * Implemented as a per-character state machine kept in tp->esc_state:
+ * ESnormal (waiting for Esc) -> ESesc (saw Esc) -> ESsquare (saw CSI)
+ * -> ESgetpars (collecting numeric parameters) -> final byte dispatch.
+ */
+static void
+tty3270_escape_sequence(struct tty3270 *tp, char ch)
+{
+	enum { ESnormal, ESesc, ESsquare, ESgetpars };
+
+	if (tp->esc_state == ESnormal) {
+		if (ch == 0x1b)
+			/* Starting new escape sequence. */
+			tp->esc_state = ESesc;
+		return;
+	}
+	if (tp->esc_state == ESesc) {
+		/* Two-character sequences; '[' opens a CSI sequence. */
+		tp->esc_state = ESnormal;
+		switch (ch) {
+		case '[':
+			tp->esc_state = ESsquare;
+			break;
+		case 'E':
+			tty3270_cr(tp);
+			tty3270_lf(tp);
+			break;
+		case 'M':
+			tty3270_ri(tp);
+			break;
+		case 'D':
+			tty3270_lf(tp);
+			break;
+		case 'Z':		/* Respond ID. */
+			kbd_puts_queue(tp->tty, "\033[?6c");
+			break;
+		case '7':		/* Save cursor position. */
+			tp->saved_cx = tp->cx;
+			tp->saved_cy = tp->cy;
+			tp->saved_highlight = tp->highlight;
+			tp->saved_f_color = tp->f_color;
+			break;
+		case '8':		/* Restore cursor position. */
+			tty3270_convert_line(tp, tp->cy);
+			tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+			tp->highlight = tp->saved_highlight;
+			tp->f_color = tp->saved_f_color;
+			break;
+		case 'c':		/* Reset terminal. */
+			tp->cx = tp->saved_cx = 0;
+			tp->cy = tp->saved_cy = 0;
+			tp->highlight = tp->saved_highlight = TAX_RESET;
+			tp->f_color = tp->saved_f_color = TAC_RESET;
+			tty3270_erase_display(tp, 2);
+			break;
+		}
+		return;
+	}
+	if (tp->esc_state == ESsquare) {
+		/* Start of parameter collection; '?' flags a private
+		 * (DEC) sequence which is parsed but mostly ignored. */
+		tp->esc_state = ESgetpars;
+		memset(tp->esc_par, 0, sizeof(tp->esc_par));
+		tp->esc_npar = 0;
+		tp->esc_ques = (ch == '?');
+		if (tp->esc_ques)
+			return;
+	}
+	if (tp->esc_state == ESgetpars) {
+		if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
+			tp->esc_npar++;
+			return;
+		}
+		if (ch >= '0' && ch <= '9') {
+			tp->esc_par[tp->esc_npar] *= 10;
+			tp->esc_par[tp->esc_npar] += ch - '0';
+			return;
+		}
+	}
+	/* Final byte: the sequence ends with this character. */
+	tp->esc_state = ESnormal;
+	if (ch == 'n' && !tp->esc_ques) {
+		if (tp->esc_par[0] == 5)		/* Status report. */
+			kbd_puts_queue(tp->tty, "\033[0n");
+		else if (tp->esc_par[0] == 6) {	/* Cursor report. */
+			char buf[40];
+			sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
+			kbd_puts_queue(tp->tty, buf);
+		}
+		return;
+	}
+	if (tp->esc_ques)
+		return;
+	switch (ch) {
+	case 'm':
+		tty3270_set_attributes(tp);
+		break;
+	case 'H':	/* Set cursor position. */
+	case 'f':
+		tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
+				tty3270_getpar(tp, 0) - 1);
+		break;
+	case 'd':	/* Set y position. */
+		tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
+		break;
+	case 'A':	/* Cursor up. */
+	case 'F':
+		tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
+		break;
+	case 'B':	/* Cursor down. */
+	case 'e':
+	case 'E':
+		tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
+		break;
+	case 'C':	/* Cursor forward. */
+	case 'a':
+		tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'D':	/* Cursor backward. */
+		tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'G':	/* Set x position. */
+	case '`':
+		tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'X':	/* Erase Characters. */
+		tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case 'J':	/* Erase display. */
+		tty3270_erase_display(tp, tp->esc_par[0]);
+		break;
+	case 'K':	/* Erase line. */
+		tty3270_erase_line(tp, tp->esc_par[0]);
+		break;
+	case 'P':	/* Delete characters. */
+		tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case '@':	/* Insert characters. */
+		tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case 's':	/* Save cursor position. */
+		tp->saved_cx = tp->cx;
+		tp->saved_cy = tp->cy;
+		tp->saved_highlight = tp->highlight;
+		tp->saved_f_color = tp->f_color;
+		break;
+	case 'u':	/* Restore cursor position. */
+		tty3270_convert_line(tp, tp->cy);
+		tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+		tp->highlight = tp->saved_highlight;
+		tp->f_color = tp->saved_f_color;
+		break;
+	}
+}
+
+/*
+ * String write routine for 3270 ttys.
+ * Interprets control characters and escape sequences, folds printable
+ * characters into the in-memory screen image under the view lock, and
+ * arms the update timer so the real screen is refreshed shortly after.
+ */
+static void
+tty3270_do_write(struct tty3270 *tp, const unsigned char *buf, int count)
+{
+	int i_msg, i;
+
+	spin_lock_bh(&tp->view.lock);
+	for (i_msg = 0; !tp->tty->stopped && i_msg < count; i_msg++) {
+		if (tp->esc_state != 0) {
+			/* Continue escape sequence. */
+			tty3270_escape_sequence(tp, buf[i_msg]);
+			continue;
+		}
+
+		switch (buf[i_msg]) {
+		case 0x07:		/* '\a' -- Alarm */
+			/* Ring the 3270 alarm on the next write. */
+			tp->wcc |= TW_PLUSALARM;
+			break;
+		case 0x08:		/* Backspace. */
+			if (tp->cx > 0) {
+				tp->cx--;
+				tty3270_put_character(tp, ' ');
+			}
+			break;
+		case 0x09:		/* '\t' -- Tabulate */
+			/* Space-fill to the next multiple of 8. */
+			for (i = tp->cx % 8; i < 8; i++) {
+				if (tp->cx >= tp->view.cols) {
+					tty3270_cr(tp);
+					tty3270_lf(tp);
+					break;
+				}
+				tty3270_put_character(tp, ' ');
+				tp->cx++;
+			}
+			break;
+		case 0x0a:		/* '\n' -- New Line */
+			tty3270_cr(tp);
+			tty3270_lf(tp);
+			break;
+		case 0x0c:		/* '\f' -- Form Feed */
+			tty3270_erase_display(tp, 2);
+			tp->cx = tp->cy = 0;
+			break;
+		case 0x0d:		/* '\r' -- Carriage Return */
+			tp->cx = 0;
+			break;
+		case 0x0f:		/* SuSE "exit alternate mode" */
+			break;
+		case 0x1b:		/* Start escape sequence. */
+			tty3270_escape_sequence(tp, buf[i_msg]);
+			break;
+		default:		/* Insert normal character. */
+			/* Wrap at end of line before printing. */
+			if (tp->cx >= tp->view.cols) {
+				tty3270_cr(tp);
+				tty3270_lf(tp);
+			}
+			tty3270_put_character(tp, buf[i_msg]);
+			tp->cx++;
+			break;
+		}
+	}
+	/* Convert current line to 3270 data fragment. */
+	tty3270_convert_line(tp, tp->cy);
+
+	/* Setup timer to update display after 1/10 second */
+	if (!timer_pending(&tp->timer))
+		tty3270_set_timer(tp, HZ/10);
+
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * String write routine for 3270 ttys.
+ * Flushes any characters buffered by tty3270_put_char first so output
+ * order is preserved, then writes the new buffer. Returns count: the
+ * whole buffer is always consumed (see tty3270_write_room).
+ */
+static int
+tty3270_write(struct tty_struct * tty,
+	      const unsigned char *buf, int count)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return 0;
+	if (tp->char_count > 0) {
+		tty3270_do_write(tp, tp->char_buf, tp->char_count);
+		tp->char_count = 0;
+	}
+	tty3270_do_write(tp, buf, count);
+	return count;
+}
+
+/*
+ * Put single characters to the ttys character buffer.
+ * Characters are only buffered here; tty3270_flush_chars or the next
+ * tty3270_write pushes them to the screen. Overflow beyond
+ * TTY3270_CHAR_BUF_SIZE is silently dropped.
+ */
+static void
+tty3270_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	if (tp->char_count < TTY3270_CHAR_BUF_SIZE)
+		tp->char_buf[tp->char_count++] = ch;
+}
+
+/*
+ * Flush all characters from the ttys character buffer put there
+ * by tty3270_put_char.
+ */
+static void
+tty3270_flush_chars(struct tty_struct *tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	if (tp->char_count > 0) {
+		tty3270_do_write(tp, tp->char_buf, tp->char_count);
+		tp->char_count = 0;
+	}
+}
+
+/*
+ * Returns the number of characters in the output buffer. This is
+ * used in tty_wait_until_sent to wait until all characters have
+ * appeared on the screen. Always 0: tty3270_write folds everything
+ * into the screen image synchronously.
+ */
+static int
+tty3270_chars_in_buffer(struct tty_struct *tty)
+{
+	return 0;
+}
+
+/* Nothing to flush; there is no pending output buffer (see
+ * tty3270_chars_in_buffer). */
+static void
+tty3270_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/*
+ * Check for visible/invisible input switches.
+ * In canonical mode the prompt field attribute follows the ECHO flag
+ * (TF_INPUT = visible, TF_INPUTN = non-display, e.g. password entry);
+ * a change redraws the prompt on the next timer tick.
+ */
+static void
+tty3270_set_termios(struct tty_struct *tty, struct termios *old)
+{
+	struct tty3270 *tp;
+	int new;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	spin_lock_bh(&tp->view.lock);
+	if (L_ICANON(tty)) {
+		new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN;
+		if (new != tp->inattr) {
+			tp->inattr = new;
+			tty3270_update_prompt(tp, 0, 0);
+			tty3270_set_timer(tp, 1);
+		}
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Disable reading from a 3270 tty.
+ * While throttled, tty3270_irq records pending attentions in tp->attn
+ * instead of issuing reads.
+ */
+static void
+tty3270_throttle(struct tty_struct * tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	tp->throttle = 1;
+}
+
+/*
+ * Enable reading from a 3270 tty.
+ * If an attention interrupt arrived while throttled (tp->attn), issue
+ * the deferred read now.
+ */
+static void
+tty3270_unthrottle(struct tty_struct * tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	tp->throttle = 0;
+	if (tp->attn)
+		tty3270_issue_read(tp, 1);
+}
+
+/*
+ * Hang up the tty device.
+ * Intentionally empty for now; see the FIXME below.
+ */
+static void
+tty3270_hangup(struct tty_struct *tty)
+{
+	// FIXME: implement
+}
+
+/* Nothing to wait for: output is applied synchronously (see
+ * tty3270_chars_in_buffer returning 0). */
+static void
+tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+}
+
+/*
+ * ioctl for the 3270 tty: everything is delegated to the keyboard
+ * layer (keymap handling etc.). Returns -ENODEV if the tty is not
+ * attached to a view, -EIO if the tty is in an error state.
+ */
+static int
+tty3270_ioctl(struct tty_struct *tty, struct file *file,
+	      unsigned int cmd, unsigned long arg)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return -ENODEV;
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+	return kbd_ioctl(tp->kbd, file, cmd, arg);
+}
+
+/* tty_operations for the 3270 tty driver (see tty3270_init). */
+static struct tty_operations tty3270_ops = {
+	.open = tty3270_open,
+	.close = tty3270_close,
+	.write = tty3270_write,
+	.put_char = tty3270_put_char,
+	.flush_chars = tty3270_flush_chars,
+	.write_room = tty3270_write_room,
+	.chars_in_buffer = tty3270_chars_in_buffer,
+	.flush_buffer = tty3270_flush_buffer,
+	.throttle = tty3270_throttle,
+	.unthrottle = tty3270_unthrottle,
+	.hangup = tty3270_hangup,
+	.wait_until_sent = tty3270_wait_until_sent,
+	.ioctl = tty3270_ioctl,
+	.set_termios = tty3270_set_termios
+};
+
+/*
+ * raw3270 device notifier: register/unregister the tty device node for
+ * a 3270 device as it comes and goes.
+ */
+void
+tty3270_notifier(int index, int active)
+{
+	if (active)
+		tty_register_device(tty3270_driver, index, 0);
+	else
+		tty_unregister_device(tty3270_driver, index);
+}
+
+/*
+ * 3270 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this point.
+ * Allocates and registers the tty driver (256 minors) and hooks into
+ * the raw3270 notifier chain; on failure everything acquired so far
+ * is released again. Returns 0 on success or a negative errno.
+ */
+int __init
+tty3270_init(void)
+{
+	struct tty_driver *driver;
+	int ret;
+
+	driver = alloc_tty_driver(256);
+	if (!driver)
+		return -ENOMEM;
+
+	/*
+	 * Initialize the tty_driver structure
+	 * Entries in tty3270_driver that are NOT initialized:
+	 *   proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
+	 */
+	driver->owner = THIS_MODULE;
+	driver->devfs_name = "ttyTUB/";
+	driver->driver_name = "ttyTUB";
+	driver->name = "ttyTUB";
+	driver->major = IBM_TTY3270_MAJOR;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
+	tty_set_operations(driver, &tty3270_ops);
+	ret = tty_register_driver(driver);
+	if (ret) {
+		printk(KERN_ERR "tty3270 registration failed with %d\n", ret);
+		put_tty_driver(driver);
+		return ret;
+	}
+	tty3270_driver = driver;
+	ret = raw3270_register_notifier(tty3270_notifier);
+	if (ret) {
+		printk(KERN_ERR "tty3270 notifier registration failed "
+		       "with %d\n", ret);
+		/* Undo the registration above and clear the global so
+		 * tty3270_notifier/tty3270_exit cannot see a stale
+		 * driver pointer after put_tty_driver releases it. */
+		tty3270_driver = 0;
+		tty_unregister_driver(driver);
+		put_tty_driver(driver);
+		return ret;
+	}
+	return 0;
+}
+
+/*
+ * Module unload: detach from the raw3270 notifier chain, unregister
+ * the tty driver and delete all views that were created.
+ */
+static void __exit
+tty3270_exit(void)
+{
+	struct tty_driver *driver;
+
+	raw3270_unregister_notifier(tty3270_notifier);
+	driver = tty3270_driver;
+	tty3270_driver = 0;
+	tty_unregister_driver(driver);
+	tty3270_del_views();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
+
+module_init(tty3270_init);
+module_exit(tty3270_exit);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
new file mode 100644
index 000000000000..edf50d2bd10b
--- /dev/null
+++ b/drivers/s390/char/vmlogrdr.c
@@ -0,0 +1,920 @@
+/*
+ * drivers/s390/char/vmlogrdr.c
+ * character device driver for reading z/VM system service records
+ *
+ *
+ * Copyright (C) 2004 IBM Corporation
+ * character device driver for reading z/VM system service records,
+ * Version 1.0
+ * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
+ * Stefan Weinhuber <wein@de.ibm.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+#include <asm/cpcmd.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include "../net/iucv.h"
+#include <linux/kmod.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+
+
+MODULE_AUTHOR
+ ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
+ " Stefan Weinhuber (wein@de.ibm.com)");
+MODULE_DESCRIPTION ("Character device driver for reading z/VM "
+ "system service records.");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * The size of the buffer for iucv data transfer is one page,
+ * but in addition to the data we read from iucv we also
+ * place an integer and some characters into that buffer,
+ * so the maximum size for record data is a little less than
+ * one page.
+ */
+#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
+
+/*
+ * The elements that are concurrently accessed by bottom halves are
+ * connection_established, iucv_path_severed, local_interrupt_buffer
+ * and receive_ready. The first three can be protected by
+ * priv_lock. receive_ready is atomic, so it can be incremented and
+ * decremented without holding a lock.
+ * The variable dev_in_use needs to be protected by the lock, since
+ * it's a flag used by open to make sure that the device is opened only
+ * by one user at the same time.
+ */
+struct vmlogrdr_priv_t {
+	char system_service[8];		/* *LOGREC / *ACCOUNT / *SYMPTOM */
+	char internal_name[8];		/* device name, e.g. "logrec" */
+	char recording_name[8];		/* name used in CP RECORDING cmds */
+	u16 pathid;			/* iucv path to the service */
+	int connection_established;	/* protected by priv_lock */
+	int iucv_path_severed;		/* protected by priv_lock */
+	iucv_MessagePending local_interrupt_buffer; /* protected by priv_lock */
+	atomic_t receive_ready;		/* count of pending iucv messages */
+	iucv_handle_t iucv_handle;
+	int minor_num;
+	char * buffer;			/* one page, filled by iucv_receive */
+	char * current_position;	/* read cursor within buffer */
+	int remaining;			/* bytes left to copy to user space */
+	ulong residual_length;		/* record bytes not yet received */
+	int buffer_free;		/* 1: buffer may be refilled */
+	int dev_in_use; /* 1: already opened, 0: not opened*/
+	spinlock_t priv_lock;
+	struct device *device;
+	struct class_device *class_device;
+	int autorecording;		/* start/stop recording on open/close */
+	int autopurge;			/* purge queued records, too */
+};
+
+
+/*
+ * File operation structure for vmlogrdr devices
+ */
+static int vmlogrdr_open(struct inode *, struct file *);
+static int vmlogrdr_release(struct inode *, struct file *);
+static ssize_t vmlogrdr_read (struct file *filp, char *data, size_t count,
+ loff_t * ppos);
+
+static struct file_operations vmlogrdr_fops = {
+ .owner = THIS_MODULE,
+ .open = vmlogrdr_open,
+ .release = vmlogrdr_release,
+ .read = vmlogrdr_read,
+};
+
+
+static u8 iucvMagic[16] = {
+ 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
+};
+
+
+static u8 mask[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+
+static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+
+static void
+vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data);
+static void
+vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data);
+static void
+vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data);
+
+
+static iucv_interrupt_ops_t vmlogrdr_iucvops = {
+ .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete,
+ .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered,
+ .MessagePending = vmlogrdr_iucv_MessagePending,
+};
+
+
+DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
+DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
+
+/*
+ * pointer to system service private structure
+ * minor number 0 --> logrec
+ * minor number 1 --> account
+ * minor number 2 --> symptom
+ */
+
+static struct vmlogrdr_priv_t sys_ser[] = {
+ { .system_service = "*LOGREC ",
+ .internal_name = "logrec",
+ .recording_name = "EREP",
+ .minor_num = 0,
+ .buffer_free = 1,
+ .priv_lock = SPIN_LOCK_UNLOCKED,
+ .autorecording = 1,
+ .autopurge = 1,
+ },
+ { .system_service = "*ACCOUNT",
+ .internal_name = "account",
+ .recording_name = "ACCOUNT",
+ .minor_num = 1,
+ .buffer_free = 1,
+ .priv_lock = SPIN_LOCK_UNLOCKED,
+ .autorecording = 1,
+ .autopurge = 1,
+ },
+ { .system_service = "*SYMPTOM",
+ .internal_name = "symptom",
+ .recording_name = "SYMPTOM",
+ .minor_num = 2,
+ .buffer_free = 1,
+ .priv_lock = SPIN_LOCK_UNLOCKED,
+ .autorecording = 1,
+ .autopurge = 1,
+ }
+};
+
+#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
+
+static char FENCE[] = {"EOR"};
+static int vmlogrdr_major = 0;
+static struct cdev *vmlogrdr_cdev = NULL;
+static int recording_class_AB;
+
+
+/*
+ * IUCV callback: the connection to the system service is up.
+ * Runs as a bottom half; just record the fact and wake up
+ * whoever is sleeping in vmlogrdr_open().
+ */
+static void
+vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib,
+				   void * pgm_data)
+{
+	struct vmlogrdr_priv_t *priv = pgm_data;
+
+	spin_lock(&priv->priv_lock);
+	priv->connection_established = 1;
+	spin_unlock(&priv->priv_lock);
+	wake_up(&conn_wait_queue);
+}
+
+
+/*
+ * IUCV callback: the path to the system service was severed.
+ * Record the state change and wake both the open() and the read()
+ * waiters so they can notice the dead connection.
+ */
+static void
+vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
+{
+	struct vmlogrdr_priv_t *priv = pgm_data;
+	u8 reason = (u8) eib->ipuser[8];
+
+	printk (KERN_ERR "vmlogrdr: connection severed with"
+		" reason %i\n", reason);
+
+	spin_lock(&priv->priv_lock);
+	priv->connection_established = 0;
+	priv->iucv_path_severed = 1;
+	spin_unlock(&priv->priv_lock);
+
+	wake_up(&conn_wait_queue);
+	/* just in case we're sleeping waiting for a record */
+	wake_up_interruptible(&read_wait_queue);
+}
+
+
+/*
+ * IUCV callback: a message from the system service is pending.
+ * This is bottom-half code, so keep it short: stash the interrupt
+ * data, bump the ready counter and wake up sleeping readers.
+ */
+static void
+vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data)
+{
+	struct vmlogrdr_priv_t *priv = pgm_data;
+
+	spin_lock(&priv->priv_lock);
+	memcpy(&priv->local_interrupt_buffer, eib, sizeof(*eib));
+	atomic_inc(&priv->receive_ready);
+	spin_unlock(&priv->priv_lock);
+	wake_up_interruptible(&read_wait_queue);
+}
+
+
+/*
+ * Ask CP whether this guest may issue the RECORDING command, i.e.
+ * whether it has privilege class A or B. Returns 1 if so, 0
+ * otherwise (including unparsable responses).
+ */
+static int
+vmlogrdr_get_recording_class_AB(void) {
+	char cp_command[]="QUERY COMMAND RECORDING ";
+	char cp_response[80];
+	char *tail;
+	int len,i;
+
+	printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command);
+	cpcmd(cp_command, cp_response, sizeof(cp_response));
+	printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response);
+	len = strnlen(cp_response,sizeof(cp_response));
+	/* now the parsing: the classes follow the '=' in the response */
+	tail=strnchr(cp_response,len,'=');
+	if (!tail)
+		return 0;
+	tail++;
+	if (!strncmp("ANY",tail,3))
+		return 1;
+	if (!strncmp("NONE",tail,4))
+		return 0;
+	/*
+	 * expect comma separated list of classes here, if one of them
+	 * is A or B return 1 otherwise 0
+	 */
+	for (i=tail-cp_response; i<len; i++)
+		if ( cp_response[i]=='A' || cp_response[i]=='B' )
+			return 1;
+	return 0;
+}
+
+
+/*
+ * Issue the CP RECORDING command to switch recording for this
+ * service ON (action == 1) or OFF, optionally purging any queued
+ * records first. Returns 0 on success, -EIO if CP did not answer
+ * with 'Command complete'.
+ */
+static int
+vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) {
+
+	char cp_command[80];
+	char cp_response[160];
+	char *onoff, *qid_string;
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+
+	onoff = ((action == 1) ? "ON" : "OFF");
+	qid_string = ((recording_class_AB == 1) ? " QID * " : "");
+
+	/*
+	 * The recording commands needs to be called with option QID
+	 * for guests that have privilege classes A or B.
+	 * Purging has to be done as separate step, because recording
+	 * can't be switched on as long as records are on the queue.
+	 * Doing both at the same time doesn't work.
+	 */
+
+	if (purge) {
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE %s",
+			 logptr->recording_name,
+			 qid_string);
+
+		printk (KERN_DEBUG "vmlogrdr: recording command: %s\n",
+			cp_command);
+		cpcmd(cp_command, cp_response, sizeof(cp_response));
+		printk (KERN_DEBUG "vmlogrdr: recording response: %s",
+			cp_response);
+	}
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
+		logptr->recording_name,
+		onoff,
+		qid_string);
+
+	printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
+	cpcmd(cp_command, cp_response, sizeof(cp_response));
+	printk (KERN_DEBUG "vmlogrdr: recording response: %s",
+		cp_response);
+	/* The recording command will usually answer with 'Command complete'
+	 * on success, but when the specific service was never connected
+	 * before then there might be an additional informational message
+	 * 'HCPCRC8072I Recording entry not found' before the
+	 * 'Command complete'. So I use strstr rather than the strncmp.
+	 */
+	if (strstr(cp_response,"Command complete"))
+		return 0;
+	else
+		return -EIO;
+
+}
+
+
+/*
+ * Open a vmlogrdr device: reserve the minor (only one opener at a
+ * time), optionally start recording, register with iucv and connect
+ * to the system service. Only blocking reads are supported, so
+ * O_NONBLOCK is rejected. Returns 0 or a negative errno.
+ */
+static int
+vmlogrdr_open (struct inode *inode, struct file *filp)
+{
+	int dev_num = 0;
+	struct vmlogrdr_priv_t * logptr = NULL;
+	int connect_rc = 0;
+	int ret;
+
+	dev_num = iminor(inode);
+	/* sys_ser[] has MAXMINOR entries, so dev_num == MAXMINOR is
+	 * already out of bounds: reject with >=, not > (off-by-one in
+	 * the original check). */
+	if (dev_num >= MAXMINOR)
+		return -ENODEV;
+	logptr = &sys_ser[dev_num];
+
+	/*
+	 * only allow for blocking reads to be open
+	 */
+	if (filp->f_flags & O_NONBLOCK)
+		return -ENOSYS;
+
+	/* Be sure this device hasn't already been opened */
+	spin_lock_bh(&logptr->priv_lock);
+	if (logptr->dev_in_use) {
+		spin_unlock_bh(&logptr->priv_lock);
+		return -EBUSY;
+	}
+	logptr->dev_in_use = 1;
+	spin_unlock_bh(&logptr->priv_lock);
+
+	atomic_set(&logptr->receive_ready, 0);
+	logptr->buffer_free = 1;
+
+	/* set the file options */
+	filp->private_data = logptr;
+	filp->f_op = &vmlogrdr_fops;
+
+	/* start recording for this service*/
+	ret=0;
+	if (logptr->autorecording)
+		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
+	if (ret)
+		printk (KERN_WARNING "vmlogrdr: failed to start "
+			"recording automatically\n");
+
+	/* Register with iucv driver */
+	logptr->iucv_handle = iucv_register_program(iucvMagic,
+			logptr->system_service, mask, &vmlogrdr_iucvops,
+			logptr);
+
+	if (logptr->iucv_handle == NULL) {
+		printk (KERN_ERR "vmlogrdr: failed to register with"
+			"iucv driver\n");
+		goto not_registered;
+	}
+
+	/* create connection to the system service */
+	spin_lock_bh(&logptr->priv_lock);
+	logptr->connection_established = 0;
+	logptr->iucv_path_severed = 0;
+	spin_unlock_bh(&logptr->priv_lock);
+
+	connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic,
+				   logptr->system_service, iucv_host, 0,
+				   NULL, NULL,
+				   logptr->iucv_handle, NULL);
+	if (connect_rc) {
+		printk (KERN_ERR "vmlogrdr: iucv connection to %s "
+			"failed with rc %i \n", logptr->system_service,
+			connect_rc);
+		goto not_connected;
+	}
+
+	/* We've issued the connect and now we must wait for a
+	 * ConnectionComplete or ConnectionSevered Interrupt
+	 * before we can continue to process.
+	 */
+	wait_event(conn_wait_queue, (logptr->connection_established)
+		   || (logptr->iucv_path_severed));
+	if (logptr->iucv_path_severed)
+		goto not_connected;
+
+	return nonseekable_open(inode, filp);
+
+not_connected:
+	iucv_unregister_program(logptr->iucv_handle);
+	logptr->iucv_handle = NULL;
+not_registered:
+	if (logptr->autorecording)
+		vmlogrdr_recording(logptr,0,logptr->autopurge);
+	logptr->dev_in_use = 0;
+	return -EIO;
+}
+
+
+/*
+ * Close a vmlogrdr device: drop the iucv registration (which also
+ * severs the path), optionally stop recording, and mark the minor
+ * as free again. Always succeeds.
+ */
+static int
+vmlogrdr_release (struct inode *inode, struct file *filp)
+{
+	struct vmlogrdr_priv_t *priv = filp->private_data;
+
+	iucv_unregister_program(priv->iucv_handle);
+	priv->iucv_handle = NULL;
+
+	if (priv->autorecording &&
+	    vmlogrdr_recording(priv, 0, priv->autopurge))
+		printk (KERN_WARNING "vmlogrdr: failed to stop "
+			"recording automatically\n");
+	priv->dev_in_use = 0;
+
+	return 0;
+}
+
+
+/*
+ * Pull the next chunk of record data from iucv into priv->buffer.
+ * Returns 0 if data was placed into the buffer, non-zero otherwise
+ * (1 if no message is pending, or an iucv_receive error code).
+ * A record larger than the buffer is received in several pieces;
+ * the outstanding byte count lives in priv->residual_length. For a
+ * new record the first sizeof(int) bytes of the buffer hold its
+ * total length (record + FENCE); the FENCE string ("EOR") is
+ * appended once the record is complete.
+ */
+static int
+vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
+	int rc, *temp;
+	/* we need to keep track of two data sizes here:
+	 * The number of bytes we need to receive from iucv and
+	 * the total number of bytes we actually write into the buffer.
+	 */
+	int user_data_count, iucv_data_count;
+	char * buffer;
+
+	if (atomic_read(&priv->receive_ready)) {
+		spin_lock_bh(&priv->priv_lock);
+		if (priv->residual_length){
+			/* receive second half of a record */
+			iucv_data_count = priv->residual_length;
+			user_data_count = 0;
+			buffer = priv->buffer;
+		} else {
+			/* receive a new record:
+			 * We need to return the total length of the record
+			 * + size of FENCE in the first 4 bytes of the buffer.
+			 */
+			iucv_data_count =
+				priv->local_interrupt_buffer.ln1msg2.ipbfln1f;
+			user_data_count = sizeof(int);
+			temp = (int*)priv->buffer;
+			*temp= iucv_data_count + sizeof(FENCE);
+			buffer = priv->buffer + sizeof(int);
+		}
+		/*
+		 * If the record is bigger than our buffer, we receive only
+		 * a part of it. We can get the rest later.
+		 */
+		if (iucv_data_count > NET_BUFFER_SIZE)
+			iucv_data_count = NET_BUFFER_SIZE;
+		rc = iucv_receive(priv->pathid,
+				  priv->local_interrupt_buffer.ipmsgid,
+				  priv->local_interrupt_buffer.iptrgcls,
+				  buffer,
+				  iucv_data_count,
+				  NULL,
+				  NULL,
+				  &priv->residual_length);
+		spin_unlock_bh(&priv->priv_lock);
+		/* An rc of 5 indicates that the record was bigger than
+		 * the buffer, which is OK for us. A 9 indicates that the
+		 * record was purged before we could receive it.
+		 */
+		if (rc == 5)
+			rc = 0;
+		if (rc == 9)
+			atomic_set(&priv->receive_ready, 0);
+	} else {
+		rc = 1;
+	}
+	if (!rc) {
+		priv->buffer_free = 0;
+		user_data_count += iucv_data_count;
+		priv->current_position = priv->buffer;
+		if (priv->residual_length == 0){
+			/* the whole record has been captured,
+			 * now add the fence */
+			atomic_dec(&priv->receive_ready);
+			buffer = priv->buffer + user_data_count;
+			memcpy(buffer, FENCE, sizeof(FENCE));
+			user_data_count += sizeof(FENCE);
+		}
+		priv->remaining = user_data_count;
+	}
+
+	return rc;
+}
+
+
+/*
+ * Read from a vmlogrdr device. While the buffer is free it is
+ * (re)filled via vmlogrdr_receive_data(), sleeping interruptibly
+ * until a message is pending. At most the remainder of the current
+ * record is copied, so a single read never crosses a record
+ * boundary. Returns the number of bytes copied or a negative errno.
+ */
+static ssize_t
+vmlogrdr_read (struct file *filp, char *data, size_t count, loff_t * ppos)
+{
+	int rc;
+	struct vmlogrdr_priv_t * priv = filp->private_data;
+
+	while (priv->buffer_free) {
+		rc = vmlogrdr_receive_data(priv);
+		if (rc) {
+			/* nothing pending - wait for the next message */
+			rc = wait_event_interruptible(read_wait_queue,
+					atomic_read(&priv->receive_ready));
+			if (rc)
+				return rc;
+		}
+	}
+	/* copy only up to end of record */
+	if (count > priv->remaining)
+		count = priv->remaining;
+
+	if (copy_to_user(data, priv->current_position, count))
+		return -EFAULT;
+
+	*ppos += count;
+	priv->current_position += count;
+	priv->remaining -= count;
+
+	/* if all data has been transferred, set buffer free */
+	if (priv->remaining == 0)
+		priv->buffer_free = 1;
+
+	return count;
+}
+
+/* sysfs: set the autopurge flag from a '0' or '1' written by the user. */
+static ssize_t
+vmlogrdr_autopurge_store(struct device * dev, const char * buf, size_t count) {
+	struct vmlogrdr_priv_t *priv = dev->driver_data;
+
+	if (buf[0] == '0')
+		priv->autopurge = 0;
+	else if (buf[0] == '1')
+		priv->autopurge = 1;
+	else
+		return -EINVAL;
+	return count;
+}
+
+
+/* sysfs: show the current autopurge setting. */
+static ssize_t
+vmlogrdr_autopurge_show(struct device *dev, char *buf) {
+	struct vmlogrdr_priv_t *logptr = dev->driver_data;
+
+	return sprintf(buf, "%u\n", logptr->autopurge);
+}
+
+
+static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
+ vmlogrdr_autopurge_store);
+
+
+/*
+ * sysfs: purge all queued records for this service. Only accepts
+ * the value '1'; returns count on (attempted) purge, -EINVAL
+ * otherwise. Note that the CP response is only logged, not checked.
+ */
+static ssize_t
+vmlogrdr_purge_store(struct device * dev, const char * buf, size_t count) {
+
+	char cp_command[80];
+	char cp_response[80];
+	struct vmlogrdr_priv_t *priv = dev->driver_data;
+
+	if (buf[0] != '1')
+		return -EINVAL;
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+
+	/*
+	 * The recording command needs to be called with option QID
+	 * for guests that have privilege classes A or B.
+	 * Other guests will not recognize the command and we have to
+	 * issue the same command without the QID parameter.
+	 */
+
+	if (recording_class_AB)
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE QID * ",
+			 priv->recording_name);
+	else
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE ",
+			 priv->recording_name);
+
+	printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
+	cpcmd(cp_command, cp_response, sizeof(cp_response));
+	printk (KERN_DEBUG "vmlogrdr: recording response: %s",
+		cp_response);
+
+	return count;
+}
+
+
+static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
+
+
+/* sysfs: enable ('1') or disable ('0') automatic recording control. */
+static ssize_t
+vmlogrdr_autorecording_store(struct device *dev, const char *buf,
+			     size_t count) {
+	struct vmlogrdr_priv_t *priv = dev->driver_data;
+
+	if (buf[0] == '0')
+		priv->autorecording = 0;
+	else if (buf[0] == '1')
+		priv->autorecording = 1;
+	else
+		return -EINVAL;
+	return count;
+}
+
+
+/* sysfs: show whether automatic recording control is enabled. */
+static ssize_t
+vmlogrdr_autorecording_show(struct device *dev, char *buf) {
+	struct vmlogrdr_priv_t *logptr = dev->driver_data;
+
+	return sprintf(buf, "%u\n", logptr->autorecording);
+}
+
+
+static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
+ vmlogrdr_autorecording_store);
+
+
+/* sysfs: switch recording on ('1') or off ('0'), without purging. */
+static ssize_t
+vmlogrdr_recording_store(struct device * dev, const char * buf, size_t count) {
+	struct vmlogrdr_priv_t *priv = dev->driver_data;
+	ssize_t rc;
+
+	if (buf[0] == '0')
+		rc = vmlogrdr_recording(priv,0,0);
+	else if (buf[0] == '1')
+		rc = vmlogrdr_recording(priv,1,0);
+	else
+		rc = -EINVAL;
+
+	return rc ? rc : count;
+}
+
+
+static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
+
+
+/* sysfs (driver attribute): dump the raw CP QUERY RECORDING output. */
+static ssize_t
+vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) {
+	char cp_command[] = "QUERY RECORDING ";
+
+	cpcmd(cp_command, buf, 4096);
+	return strlen(buf);
+}
+
+
+static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
+ NULL);
+
+static struct attribute *vmlogrdr_attrs[] = {
+ &dev_attr_autopurge.attr,
+ &dev_attr_purge.attr,
+ &dev_attr_autorecording.attr,
+ &dev_attr_recording.attr,
+ NULL,
+};
+
+static struct attribute_group vmlogrdr_attr_group = {
+ .attrs = vmlogrdr_attrs,
+};
+
+static struct class_simple *vmlogrdr_class;
+static struct device_driver vmlogrdr_driver = {
+ .name = "vmlogrdr",
+ .bus = &iucv_bus,
+};
+
+
+/*
+ * Register the vmlogrdr driver on the iucv bus, add the
+ * recording_status driver attribute and create the sysfs class.
+ * On error everything registered so far is unwound again via the
+ * goto chain. Returns 0 or a negative errno.
+ */
+static int
+vmlogrdr_register_driver(void) {
+	int ret;
+
+	ret = driver_register(&vmlogrdr_driver);
+	if (ret) {
+		printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
+		return ret;
+	}
+
+	ret = driver_create_file(&vmlogrdr_driver,
+				 &driver_attr_recording_status);
+	if (ret) {
+		printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
+		goto unregdriver;
+	}
+
+	vmlogrdr_class = class_simple_create(THIS_MODULE, "vmlogrdr");
+	if (IS_ERR(vmlogrdr_class)) {
+		printk(KERN_ERR "vmlogrdr: failed to create class.\n");
+		ret=PTR_ERR(vmlogrdr_class);
+		vmlogrdr_class=NULL;
+		goto unregattr;
+	}
+	return 0;
+
+unregattr:
+	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
+unregdriver:
+	driver_unregister(&vmlogrdr_driver);
+	return ret;
+}
+
+
+/* Tear down class, driver attribute and driver registration. */
+static void
+vmlogrdr_unregister_driver(void) {
+	class_simple_destroy(vmlogrdr_class);
+	vmlogrdr_class = NULL;
+	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
+	driver_unregister(&vmlogrdr_driver);
+}
+
+
+/*
+ * Create the struct device for one system service on the iucv bus,
+ * attach the sysfs attribute group and add the class device that
+ * makes the char device node visible. Returns 0 or a negative
+ * errno; on failure everything created so far is undone.
+ */
+static int
+vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) {
+	struct device *dev;
+	int ret;
+
+	dev = kmalloc(sizeof(struct device), GFP_KERNEL);
+	if (dev) {
+		memset(dev, 0, sizeof(struct device));
+		snprintf(dev->bus_id, BUS_ID_SIZE, "%s",
+			 priv->internal_name);
+		dev->bus = &iucv_bus;
+		dev->parent = iucv_root;
+		dev->driver = &vmlogrdr_driver;
+		/*
+		 * The release function could be called after the
+		 * module has been unloaded. Its _only_ task is to
+		 * free the struct. Therefore, we specify kfree()
+		 * directly here. (Probably a little bit obfuscating
+		 * but legitimate ...).
+		 */
+		dev->release = (void (*)(struct device *))kfree;
+	} else
+		return -ENOMEM;
+	ret = device_register(dev);
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
+	if (ret) {
+		device_unregister(dev);
+		return ret;
+	}
+	priv->class_device = class_simple_device_add(
+				vmlogrdr_class,
+				MKDEV(vmlogrdr_major, priv->minor_num),
+				dev,
+				"%s", dev->bus_id );
+	if (IS_ERR(priv->class_device)) {
+		ret = PTR_ERR(priv->class_device);
+		priv->class_device=NULL;
+		sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
+		device_unregister(dev);
+		return ret;
+	}
+	dev->driver_data = priv;
+	priv->device = dev;
+	return 0;
+}
+
+
+/* Remove the class device and, if it exists, the struct device. */
+static int
+vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) {
+	class_simple_device_remove(MKDEV(vmlogrdr_major, priv->minor_num));
+	if (priv->device) {
+		sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
+		device_unregister(priv->device);
+		priv->device = NULL;
+	}
+	return 0;
+}
+
+
+/*
+ * Allocate and register the character device covering all MAXMINOR
+ * minors starting at dev. On failure no cdev is left behind.
+ */
+static int
+vmlogrdr_register_cdev(dev_t dev) {
+	int rc;
+
+	vmlogrdr_cdev = cdev_alloc();
+	if (!vmlogrdr_cdev)
+		return -ENOMEM;
+	vmlogrdr_cdev->owner = THIS_MODULE;
+	vmlogrdr_cdev->ops = &vmlogrdr_fops;
+	vmlogrdr_cdev->dev = dev;
+	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
+	if (rc) {
+		/* cdev is not fully registered, no cdev_del here! */
+		kobject_put(&vmlogrdr_cdev->kobj);
+		vmlogrdr_cdev = NULL;
+	}
+	return rc;
+}
+
+
+/*
+ * Undo everything vmlogrdr_init() has done so far. Safe to call on
+ * a partially initialized driver: each step checks whether its
+ * object actually exists (unregister_device handles a NULL device,
+ * free_page(0) is a no-op).
+ */
+static void
+vmlogrdr_cleanup(void) {
+	int i;
+	if (vmlogrdr_cdev) {
+		cdev_del(vmlogrdr_cdev);
+		vmlogrdr_cdev=NULL;
+	}
+	for (i=0; i < MAXMINOR; ++i ) {
+		vmlogrdr_unregister_device(&sys_ser[i]);
+		free_page((unsigned long)sys_ser[i].buffer);
+	}
+	vmlogrdr_unregister_driver();
+	if (vmlogrdr_major) {
+		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
+		vmlogrdr_major=0;
+	}
+}
+
+
+/*
+ * Module initialization: only runs under z/VM. Allocates the char
+ * dev region, registers the driver, the per-service devices and
+ * finally the cdev. All failures unwind through vmlogrdr_cleanup().
+ * Returns 0 or a negative errno.
+ */
+static int
+vmlogrdr_init(void)
+{
+	int rc;
+	int i;
+	dev_t dev;
+
+	if (! MACHINE_IS_VM) {
+		printk (KERN_ERR "vmlogrdr: not running under VM, "
+			"driver not loaded.\n");
+		return -ENODEV;
+	}
+
+	recording_class_AB = vmlogrdr_get_recording_class_AB();
+
+	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
+	if (rc)
+		return rc;
+	vmlogrdr_major = MAJOR(dev);
+
+	rc=vmlogrdr_register_driver();
+	if (rc)
+		goto cleanup;
+
+	for (i=0; i < MAXMINOR; ++i ) {
+		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
+		if (!sys_ser[i].buffer) {
+			/* must be negative: the positive ENOMEM of the
+			 * original made the failed module load look
+			 * successful to the caller */
+			rc = -ENOMEM;
+			break;
+		}
+		sys_ser[i].current_position = sys_ser[i].buffer;
+		rc=vmlogrdr_register_device(&sys_ser[i]);
+		if (rc)
+			break;
+	}
+	if (rc)
+		goto cleanup;
+
+	rc = vmlogrdr_register_cdev(dev);
+	if (rc)
+		goto cleanup;
+	printk (KERN_INFO "vmlogrdr: driver loaded\n");
+	return 0;
+
+cleanup:
+	vmlogrdr_cleanup();
+	printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
+	return rc;
+}
+
+
+/* Module unload: undo everything vmlogrdr_init() set up. */
+static void
+vmlogrdr_exit(void)
+{
+	vmlogrdr_cleanup();
+	printk (KERN_INFO "vmlogrdr: driver unloaded\n");
+}
+
+
+module_init(vmlogrdr_init);
+module_exit(vmlogrdr_exit);
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
new file mode 100644
index 000000000000..22cf4fec8da9
--- /dev/null
+++ b/drivers/s390/char/vmwatchdog.c
@@ -0,0 +1,292 @@
+/*
+ * Watchdog implementation based on z/VM Watchdog Timer API
+ *
+ * The user space watchdog daemon can use this driver as
+ * /dev/vmwatchdog to have z/VM execute the specified CP
+ * command when the timeout expires. The default command is
+ * "IPL", which will cause an immediate reboot.
+ */
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/watchdog.h>
+
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#define MAX_CMDLEN 240
+#define MIN_INTERVAL 15
+static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
+static int vmwdt_conceal;
+
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+static int vmwdt_nowayout = 1;
+#else
+static int vmwdt_nowayout = 0;
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_DESCRIPTION("z/VM Watchdog Timer");
+module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
+MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
+module_param_named(conceal, vmwdt_conceal, bool, 0644);
+MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog "
+ " is active");
+module_param_named(nowayout, vmwdt_nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+ " (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+
+static unsigned int vmwdt_interval = 60;
+static unsigned long vmwdt_is_open;
+static int vmwdt_expect_close;
+
+enum vmwdt_func {
+ /* function codes */
+ wdt_init = 0,
+ wdt_change = 1,
+ wdt_cancel = 2,
+ /* flags */
+ wdt_conceal = 0x80000000,
+};
+
+/*
+ * Issue diagnose 288 (z/VM watchdog timer functions).
+ * @func:    wdt_init / wdt_change / wdt_cancel, optionally or'ed
+ *           with wdt_conceal
+ * @timeout: timer value in seconds
+ * @cmd:     CP command to run when the timer expires (passed as a
+ *           real address, hence virt_to_phys)
+ * @len:     length of @cmd
+ * Returns 0 on success; -EINVAL if the diag instruction faults
+ * (handled via the fixup/__ex_table sections around it).
+ */
+static int __diag288(enum vmwdt_func func, unsigned int timeout,
+		char *cmd, size_t len)
+{
+	/* diag 288 takes its parameters in registers 2-5 */
+	register unsigned long __func asm("2");
+	register unsigned long __timeout asm("3");
+	register unsigned long __cmdp asm("4");
+	register unsigned long __cmdl asm("5");
+	int err;
+
+	__func = func;
+	__timeout = timeout;
+	__cmdp = virt_to_phys(cmd);
+	__cmdl = len;
+	err = 0;
+	asm volatile (
+#ifdef __s390x__
+		"diag %2,%4,0x288\n"
+	"1:	\n"
+		".section .fixup,\"ax\"\n"
+	"2:	lghi %0,%1\n"
+	"	jg 1b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+	"	.align 8\n"
+	"	.quad 1b,2b\n"
+		".previous\n"
+#else
+		"diag %2,%4,0x288\n"
+	"1:	\n"
+		".section .fixup,\"ax\"\n"
+	"2:	lhi %0,%1\n"
+	"	bras 1,3f\n"
+	"	.long 1b\n"
+	"3:	l 1,0(1)\n"
+	"	br 1\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+	"	.align 4\n"
+	"	.long 1b,2b\n"
+		".previous\n"
+#endif
+		: "+&d"(err)
+		: "i"(-EINVAL), "d"(__func), "d"(__timeout),
+		  "d"(__cmdp), "d"(__cmdl)
+		: "1", "cc");
+	return err;
+}
+
+/*
+ * (Re-)arm the watchdog: translate the configured CP command to
+ * uppercase EBCDIC and issue the diag 288 init function.
+ *
+ * The buffer is allocated freshly on every call because a static
+ * buffer might not be contiguous in real storage in case of a
+ * modular build. The pointer itself is therefore an ordinary local
+ * variable - the original 'static' qualifier served no purpose and
+ * would have let concurrent callers clobber each other's pointer.
+ */
+static int vmwdt_keepalive(void)
+{
+	char *ebc_cmd;
+	size_t len;
+	int ret;
+	unsigned int func;
+
+	ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+	if (!ebc_cmd)
+		return -ENOMEM;
+
+	len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
+	ASCEBC(ebc_cmd, MAX_CMDLEN);
+	EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
+
+	func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
+	ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
+	kfree(ebc_cmd);
+
+	if (ret) {
+		printk(KERN_WARNING "%s: problem setting interval %d, "
+			"cmd %s\n", __FUNCTION__, vmwdt_interval,
+			vmwdt_cmd);
+	}
+	return ret;
+}
+
+/* Cancel the running watchdog timer via diag 288. */
+static int vmwdt_disable(void)
+{
+	int rc;
+
+	rc = __diag288(wdt_cancel, 0, "", 0);
+	if (rc)
+		printk(KERN_WARNING "%s: problem disabling watchdog\n",
+			__FUNCTION__);
+	return rc;
+}
+
+/* Check at init time whether diag 288 is available on this system. */
+static int __init vmwdt_probe(void)
+{
+	/* there is no real way to see if the watchdog is supported,
+	 * so we try initializing it with a NOP command ("BEGIN")
+	 * that won't cause any harm even if the following disable
+	 * fails for some reason */
+	static char __initdata ebc_begin[] = {
+		194, 197, 199, 201, 213	/* "BEGIN" in EBCDIC */
+	};
+	if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) {
+		printk(KERN_INFO "z/VM watchdog not available\n");
+		return -EINVAL;
+	}
+	return vmwdt_disable();
+}
+
+/* Only one opener at a time; opening the device arms the timer. */
+static int vmwdt_open(struct inode *i, struct file *f)
+{
+	int rc;
+
+	if (test_and_set_bit(0, &vmwdt_is_open))
+		return -EBUSY;
+	rc = vmwdt_keepalive();
+	if (rc) {
+		clear_bit(0, &vmwdt_is_open);
+		return rc;
+	}
+	return nonseekable_open(i, f);
+}
+
+/*
+ * Release the device. The watchdog is only disabled if the magic
+ * close character 'V' was seen by the last write (expect_close ==
+ * 42); otherwise it keeps running, so a killed daemon still
+ * triggers the reboot.
+ */
+static int vmwdt_close(struct inode *i, struct file *f)
+{
+	if (vmwdt_expect_close == 42)
+		vmwdt_disable();
+	vmwdt_expect_close = 0;
+	clear_bit(0, &vmwdt_is_open);
+	return 0;
+}
+
+static struct watchdog_info vmwdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = "z/VM Watchdog Timer",
+};
+
+/*
+ * Standard watchdog ioctl interface (linux/watchdog.h).
+ * All user-space pointers are tagged __user - including the
+ * GETSTATUS/GETBOOTSTATUS argument, whose cast was missing the
+ * annotation and inconsistent with every other case here.
+ */
+static int vmwdt_ioctl(struct inode *i, struct file *f,
+		unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		if (copy_to_user((void __user *)arg, &vmwdt_info,
+					sizeof(vmwdt_info)))
+			return -EFAULT;
+		return 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, (int __user *)arg);
+	case WDIOC_GETTEMP:
+		return -EINVAL;
+	case WDIOC_SETOPTIONS:
+	{
+		int options, ret;
+		if (get_user(options, (int __user *)arg))
+			return -EFAULT;
+		ret = -EINVAL;
+		if (options & WDIOS_DISABLECARD) {
+			ret = vmwdt_disable();
+			if (ret)
+				return ret;
+		}
+		if (options & WDIOS_ENABLECARD) {
+			ret = vmwdt_keepalive();
+		}
+		return ret;
+	}
+	case WDIOC_GETTIMEOUT:
+		return put_user(vmwdt_interval, (int __user *)arg);
+	case WDIOC_SETTIMEOUT:
+	{
+		int interval;
+		if (get_user(interval, (int __user *)arg))
+			return -EFAULT;
+		if (interval < MIN_INTERVAL)
+			return -EINVAL;
+		vmwdt_interval = interval;
+	}
+		return vmwdt_keepalive();
+	case WDIOC_KEEPALIVE:
+		return vmwdt_keepalive();
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Any non-empty write restarts the timer. Unless nowayout is set,
+ * the written data is scanned for the magic character 'V', which
+ * arms the magic close (see vmwdt_close).
+ */
+static ssize_t vmwdt_write(struct file *f, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	size_t i;
+	char c;
+
+	if (!count)
+		return 0;
+
+	if (!vmwdt_nowayout) {
+		/* note: just in case someone wrote the magic character
+		 * five months ago... */
+		vmwdt_expect_close = 0;
+
+		for (i = 0; i != count; i++) {
+			if (get_user(c, buf+i))
+				return -EFAULT;
+			if (c == 'V')
+				vmwdt_expect_close = 42;
+		}
+	}
+	/* someone wrote to us, we should restart timer */
+	vmwdt_keepalive();
+	return count;
+}
+
+static struct file_operations vmwdt_fops = {
+ .open = &vmwdt_open,
+ .release = &vmwdt_close,
+ .ioctl = &vmwdt_ioctl,
+ .write = &vmwdt_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice vmwdt_dev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &vmwdt_fops,
+};
+
+/* Probe for diag 288 support, then expose the misc device node. */
+static int __init vmwdt_init(void)
+{
+	int rc;
+
+	rc = vmwdt_probe();
+	if (rc)
+		return rc;
+	return misc_register(&vmwdt_dev);
+}
+module_init(vmwdt_init);
+
+static void __exit vmwdt_exit(void)
+{
+	/* deregistration of a registered misc device should not fail */
+	WARN_ON(misc_deregister(&vmwdt_dev) != 0);
+}
+module_exit(vmwdt_exit);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
new file mode 100644
index 000000000000..c490c2a1c2fc
--- /dev/null
+++ b/drivers/s390/cio/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the S/390 common i/o drivers
+#
+
+obj-y += airq.o blacklist.o chsc.o cio.o css.o
+ccw_device-objs += device.o device_fsm.o device_ops.o
+ccw_device-objs += device_id.o device_pgid.o device_status.o
+obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
new file mode 100644
index 000000000000..3720e77b465f
--- /dev/null
+++ b/drivers/s390/cio/airq.c
@@ -0,0 +1,87 @@
+/*
+ * drivers/s390/cio/airq.c
+ * S/390 common I/O routines -- support for adapter interruptions
+ *
+ * $Revision: 1.12 $
+ *
+ * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cohuck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#include "cio_debug.h"
+#include "airq.h"
+
+static adapter_int_handler_t adapter_handler;
+
+/*
+ * register for adapter interrupts
+ *
+ * With HiperSockets the zSeries architecture provides for
+ * means of adapter interrupts, pseudo I/O interrupts that are
+ * not tied to an I/O subchannel, but to an adapter. However,
+ * it doesn't disclose the info how to enable/disable them, but
+ * to recognize them only. Perhaps we should consider them
+ * being shared interrupts, and thus build a linked list
+ * of adapter handlers ... to be evaluated ...
+ */
+/*
+ * Install @handler as the single adapter interrupt handler.
+ * Returns 0 on success, -EINVAL for a NULL handler, -EBUSY if a
+ * handler is already installed.
+ */
+int
+s390_register_adapter_interrupt (adapter_int_handler_t handler)
+{
+	int ret;
+	char dbf_txt[15];
+
+	CIO_TRACE_EVENT (4, "rgaint");
+
+	if (handler == NULL)
+		ret = -EINVAL;
+	else
+		/* cmpxchg returns the old value: non-NULL means the
+		 * slot was already taken */
+		ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
+	if (!ret)
+		/* wait out concurrent readers of adapter_handler
+		 * before returning to the caller */
+		synchronize_kernel();
+
+	sprintf (dbf_txt, "ret:%d", ret);
+	CIO_TRACE_EVENT (4, dbf_txt);
+
+	return ret;
+}
+
+/*
+ * Remove @handler as the adapter interrupt handler. Mirrors the
+ * cmpxchg in s390_register_adapter_interrupt(): only the handler
+ * that was actually registered may be removed. The original code
+ * cleared adapter_handler unconditionally, so any caller could
+ * silently unregister somebody else's handler.
+ * Returns 0 on success, -EINVAL for a NULL or non-matching handler.
+ */
+int
+s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
+{
+	int ret;
+	char dbf_txt[15];
+
+	CIO_TRACE_EVENT (4, "urgaint");
+
+	if (handler == NULL)
+		ret = -EINVAL;
+	else if (cmpxchg(&adapter_handler, handler, NULL) != handler)
+		ret = -EINVAL;
+	else {
+		/* wait until do_adapter_IO() can no longer see the
+		 * old handler before letting the caller free it */
+		synchronize_kernel();
+		ret = 0;
+	}
+	sprintf (dbf_txt, "ret:%d", ret);
+	CIO_TRACE_EVENT (4, dbf_txt);
+
+	return ret;
+}
+
+/*
+ * Dispatch a pending adapter interrupt to the registered handler,
+ * if there is one.
+ */
+void
+do_adapter_IO (void)
+{
+	CIO_TRACE_EVENT (6, "doaio");
+
+	if (adapter_handler)
+		(*adapter_handler) ();
+}
+
+EXPORT_SYMBOL (s390_register_adapter_interrupt);
+EXPORT_SYMBOL (s390_unregister_adapter_interrupt);
diff --git a/drivers/s390/cio/airq.h b/drivers/s390/cio/airq.h
new file mode 100644
index 000000000000..7d6be3fdcd66
--- /dev/null
+++ b/drivers/s390/cio/airq.h
@@ -0,0 +1,10 @@
+#ifndef S390_AINTERRUPT_H
+#define S390_AINTERRUPT_H
+
+typedef int (*adapter_int_handler_t)(void);
+
+extern int s390_register_adapter_interrupt(adapter_int_handler_t handler);
+extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler);
+extern void do_adapter_IO (void);
+
+#endif
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
new file mode 100644
index 000000000000..4a06c7d0e5e4
--- /dev/null
+++ b/drivers/s390/cio/blacklist.c
@@ -0,0 +1,351 @@
+/*
+ * drivers/s390/cio/blacklist.c
+ * S/390 common I/O routines -- blacklisting of specific devices
+ * $Revision: 1.33 $
+ *
+ * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cohuck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+
+#include <asm/cio.h>
+#include <asm/uaccess.h>
+
+#include "blacklist.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+
+/*
+ * "Blacklisting" of certain devices:
+ * Device numbers given in the commandline as cio_ignore=... won't be known
+ * to Linux.
+ *
+ * These can be single devices or ranges of devices
+ */
+
+/* one bit per devno, 0..__MAX_SUBCHANNELS inclusive ("all" sets the top bit) */
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + 1 + (8*sizeof(long) - 1)) / \
+			 (8*sizeof(long)))
+static unsigned long bl_dev[__BL_DEV_WORDS];
+typedef enum {add, free} range_action;
+
+/*
+ * Function: blacklist_range
+ * (Un-)blacklist the devices from-to
+ */
+static inline void
+blacklist_range (range_action action, unsigned int from, unsigned int to)
+{
+ if (!to)
+ to = from;
+
+ if (from > to || to > __MAX_SUBCHANNELS) {
+ printk (KERN_WARNING "Invalid blacklist range "
+ "0x%04x to 0x%04x, skipping\n", from, to);
+ return;
+ }
+ for (; from <= to; from++) {
+ if (action == add)
+ set_bit (from, bl_dev);
+ else
+ clear_bit (from, bl_dev);
+ }
+}
+
+/*
+ * Function: blacklist_busid
+ * Get devno/busid from given string.
+ * Shamelessly grabbed from dasd_devmap.c.
+ */
+static inline int
+blacklist_busid(char **str, int *id0, int *id1, int *devno)
+{
+ int val, old_style;
+ char *sav;
+
+ sav = *str;
+
+ /* check for leading '0x' */
+ old_style = 0;
+ if ((*str)[0] == '0' && (*str)[1] == 'x') {
+ *str += 2;
+ old_style = 1;
+ }
+ if (!isxdigit((*str)[0])) /* We require at least one hex digit */
+ goto confused;
+ val = simple_strtoul(*str, str, 16);
+ if (old_style || (*str)[0] != '.') {
+ *id0 = *id1 = 0;
+ if (val < 0 || val > 0xffff)
+ goto confused;
+ *devno = val;
+ if ((*str)[0] != ',' && (*str)[0] != '-' &&
+ (*str)[0] != '\n' && (*str)[0] != '\0')
+ goto confused;
+ return 0;
+ }
+ /* New style x.y.z busid */
+ if (val < 0 || val > 0xff)
+ goto confused;
+ *id0 = val;
+ (*str)++;
+ if (!isxdigit((*str)[0])) /* We require at least one hex digit */
+ goto confused;
+ val = simple_strtoul(*str, str, 16);
+ if (val < 0 || val > 0xff || (*str)++[0] != '.')
+ goto confused;
+ *id1 = val;
+ if (!isxdigit((*str)[0])) /* We require at least one hex digit */
+ goto confused;
+ val = simple_strtoul(*str, str, 16);
+ if (val < 0 || val > 0xffff)
+ goto confused;
+ *devno = val;
+ if ((*str)[0] != ',' && (*str)[0] != '-' &&
+ (*str)[0] != '\n' && (*str)[0] != '\0')
+ goto confused;
+ return 0;
+confused:
+ strsep(str, ",\n");
+ printk(KERN_WARNING "Invalid cio_ignore parameter '%s'\n", sav);
+ return 1;
+}
+
+static inline int
+blacklist_parse_parameters (char *str, range_action action)
+{
+ unsigned int from, to, from_id0, to_id0, from_id1, to_id1;
+
+ while (*str != 0 && *str != '\n') {
+ range_action ra = action;
+ while(*str == ',')
+ str++;
+ if (*str == '!') {
+ ra = !action;
+ ++str;
+ }
+
+ /*
+ * Since we have to parse the proc commands and the
+ * kernel arguments we have to check four cases
+ */
+ if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
+ strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
+ from = 0;
+ to = __MAX_SUBCHANNELS;
+ str += 3;
+ } else {
+ int rc;
+
+ rc = blacklist_busid(&str, &from_id0,
+ &from_id1, &from);
+ if (rc)
+ continue;
+ to = from;
+ to_id0 = from_id0;
+ to_id1 = from_id1;
+ if (*str == '-') {
+ str++;
+ rc = blacklist_busid(&str, &to_id0,
+ &to_id1, &to);
+ if (rc)
+ continue;
+ }
+ if (*str == '-') {
+ printk(KERN_WARNING "invalid cio_ignore "
+ "parameter '%s'\n",
+ strsep(&str, ",\n"));
+ continue;
+ }
+ if ((from_id0 != to_id0) || (from_id1 != to_id1)) {
+ printk(KERN_WARNING "invalid cio_ignore range "
+ "%x.%x.%04x-%x.%x.%04x\n",
+ from_id0, from_id1, from,
+ to_id0, to_id1, to);
+ continue;
+ }
+ }
+ /* FIXME: ignoring id0 and id1 here. */
+ pr_debug("blacklist_setup: adding range "
+ "from 0.0.%04x to 0.0.%04x\n", from, to);
+ blacklist_range (ra, from, to);
+ }
+ return 1;
+}
+
+/* Parsing the commandline for blacklist parameters, e.g. to blacklist
+ * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of:
+ * - cio_ignore=1234-1236
+ * - cio_ignore=0x1234-0x1235,1236
+ * - cio_ignore=0x1234,1235-1236
+ * - cio_ignore=1236 cio_ignore=1234-0x1236
+ * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235
+ * - cio_ignore=0.0.1234-0.0.1236
+ * - cio_ignore=0.0.1234,0x1235,1236
+ * - ...
+ */
+static int __init
+blacklist_setup (char *str)
+{
+ CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
+ return blacklist_parse_parameters (str, add);
+}
+
+__setup ("cio_ignore=", blacklist_setup);
+
+/* Checking if devices are blacklisted */
+
+/*
+ * Function: is_blacklisted
+ * Returns 1 if the given devicenumber can be found in the blacklist,
+ * otherwise 0.
+ * Used by validate_subchannel()
+ */
+int
+is_blacklisted (int devno)
+{
+ return test_bit (devno, bl_dev);
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Function: s390_redo_validation
+ * Look for no longer blacklisted devices
+ * FIXME: there must be a better way to do this */
+static inline void
+s390_redo_validation (void)
+{
+ unsigned int irq;
+
+ CIO_TRACE_EVENT (0, "redoval");
+ for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+ int ret;
+ struct subchannel *sch;
+
+ sch = get_subchannel_by_schid(irq);
+ if (sch) {
+ /* Already known. */
+ put_device(&sch->dev);
+ continue;
+ }
+ ret = css_probe_device(irq);
+ if (ret == -ENXIO)
+ break; /* We're through. */
+ if (ret == -ENOMEM)
+ /*
+ * Stop validation for now. Bad, but no need for a
+ * panic.
+ */
+ break;
+ }
+}
+
+/*
+ * Function: blacklist_parse_proc_parameters
+ * parse the stuff which is piped to /proc/cio_ignore
+ */
+/*
+ * Function: blacklist_parse_proc_parameters
+ * parse the stuff which is piped to /proc/cio_ignore
+ */
+static inline void
+blacklist_parse_proc_parameters (char *buf)
+{
+	if (strncmp (buf, "free ", 5) == 0) {
+		blacklist_parse_parameters (buf + 5, free);
+	} else if (strncmp (buf, "add ", 4) == 0) {
+		/*
+		 * We don't need to check for known devices since
+		 * css_probe_device will handle this correctly.
+		 */
+		blacklist_parse_parameters (buf + 4, add);
+	} else {
+		/* KERN_* prefixes are only honoured at the start of a
+		 * printk message, so emit a single warning message. */
+		printk (KERN_WARNING "cio_ignore: Parse error; "
+			"try using 'free all|<devno-range>,<devno-range>,...'"
+			" or 'add <devno-range>,<devno-range>,...'\n");
+		return;
+	}
+
+	s390_redo_validation ();
+}
+
+/* FIXME: These should be real bus ids and not home-grown ones! */
+static int cio_ignore_read (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */
+ long devno;
+ int len;
+
+ len = 0;
+ for (devno = off; /* abuse the page variable
+ * as counter, see fs/proc/generic.c */
+ devno <= __MAX_SUBCHANNELS && len + entry_size < count; devno++) {
+ if (!test_bit(devno, bl_dev))
+ continue;
+ len += sprintf(page + len, "0.0.%04lx", devno);
+ if (test_bit(devno + 1, bl_dev)) { /* print range */
+ while (++devno < __MAX_SUBCHANNELS)
+ if (!test_bit(devno, bl_dev))
+ break;
+ len += sprintf(page + len, "-0.0.%04lx", --devno);
+ }
+ len += sprintf(page + len, "\n");
+ }
+
+ if (devno <= __MAX_SUBCHANNELS)
+ *eof = 1;
+ *start = (char *) (devno - off); /* number of checked entries */
+ return len;
+}
+
+static int cio_ignore_write(struct file *file, const char __user *user_buf,
+ unsigned long user_len, void *data)
+{
+ char *buf;
+
+ if (user_len > 65536)
+ user_len = 65536;
+ buf = vmalloc (user_len + 1); /* maybe better use the stack? */
+ if (buf == NULL)
+ return -ENOMEM;
+ if (strncpy_from_user (buf, user_buf, user_len) < 0) {
+ vfree (buf);
+ return -EFAULT;
+ }
+ buf[user_len] = '\0';
+
+ blacklist_parse_proc_parameters (buf);
+
+ vfree (buf);
+ return user_len;
+}
+
+static int
+cio_ignore_proc_init (void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR,
+ &proc_root);
+ if (!entry)
+ return 0;
+
+ entry->read_proc = cio_ignore_read;
+ entry->write_proc = cio_ignore_write;
+
+ return 1;
+}
+
+__initcall (cio_ignore_proc_init);
+
+#endif /* CONFIG_PROC_FS */
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
new file mode 100644
index 000000000000..fb42cafbe57c
--- /dev/null
+++ b/drivers/s390/cio/blacklist.h
@@ -0,0 +1,6 @@
+#ifndef S390_BLACKLIST_H
+#define S390_BLACKLIST_H
+
+extern int is_blacklisted (int devno);
+
+#endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
new file mode 100644
index 000000000000..21a75ee28b80
--- /dev/null
+++ b/drivers/s390/cio/ccwgroup.c
@@ -0,0 +1,482 @@
+/*
+ * drivers/s390/cio/ccwgroup.c
+ * bus driver for ccwgroup
+ * $Revision: 1.29 $
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cohuck@de.ibm.com)
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/dcache.h>
+
+#include <asm/semaphore.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+/* In Linux 2.4, we had a channel device layer called "chandev"
+ * that did all sorts of obscure stuff for networking devices.
+ * This is another driver that serves as a replacement for just
+ * one of its functions, namely the translation of single subchannels
+ * to devices that use multiple subchannels.
+ */
+
+/* a device matches a driver if all its slave devices match the same
+ * entry of the driver */
+static int
+ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
+{
+ struct ccwgroup_device *gdev;
+ struct ccwgroup_driver *gdrv;
+
+ gdev = container_of(dev, struct ccwgroup_device, dev);
+ gdrv = container_of(drv, struct ccwgroup_driver, driver);
+
+ if (gdev->creator_id == gdrv->driver_id)
+ return 1;
+
+ return 0;
+}
+static int
+ccwgroup_hotplug (struct device *dev, char **envp, int num_envp, char *buffer,
+ int buffer_size)
+{
+ /* TODO */
+ return 0;
+}
+
+static struct bus_type ccwgroup_bus_type = {
+ .name = "ccwgroup",
+ .match = ccwgroup_bus_match,
+ .hotplug = ccwgroup_hotplug,
+};
+
+static inline void
+__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
+{
+ int i;
+ char str[8];
+
+ for (i = 0; i < gdev->count; i++) {
+ sprintf(str, "cdev%d", i);
+ sysfs_remove_link(&gdev->dev.kobj, str);
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+ }
+
+}
+
+/*
+ * Provide an 'ungroup' attribute so the user can remove group devices no
+ * longer needed or accidentally created. Saves memory :)
+ */
+static ssize_t
+ccwgroup_ungroup_store(struct device *dev, const char *buf, size_t count)
+{
+ struct ccwgroup_device *gdev;
+
+ gdev = to_ccwgroupdev(dev);
+
+ if (gdev->state != CCWGROUP_OFFLINE)
+ return -EINVAL;
+
+ __ccwgroup_remove_symlinks(gdev);
+ device_unregister(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+
+static void
+ccwgroup_release (struct device *dev)
+{
+ struct ccwgroup_device *gdev;
+ int i;
+
+ gdev = to_ccwgroupdev(dev);
+
+ for (i = 0; i < gdev->count; i++) {
+ gdev->cdev[i]->dev.driver_data = NULL;
+ put_device(&gdev->cdev[i]->dev);
+ }
+ kfree(gdev);
+}
+
+static inline int
+__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
+{
+ char str[8];
+ int i, rc;
+
+ for (i = 0; i < gdev->count; i++) {
+ rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
+ "group_device");
+ if (rc) {
+ for (--i; i >= 0; i--)
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+ "group_device");
+ return rc;
+ }
+ }
+ for (i = 0; i < gdev->count; i++) {
+ sprintf(str, "cdev%d", i);
+ rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
+ str);
+ if (rc) {
+ for (--i; i >= 0; i--) {
+ sprintf(str, "cdev%d", i);
+ sysfs_remove_link(&gdev->dev.kobj, str);
+ }
+ for (i = 0; i < gdev->count; i++)
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+ "group_device");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+/*
+ * try to add a new ccwgroup device for one driver
+ * argc and argv[] are a list of bus_id's of devices
+ * belonging to the driver.
+ */
+int
+ccwgroup_create(struct device *root,
+ unsigned int creator_id,
+ struct ccw_driver *cdrv,
+ int argc, char *argv[])
+{
+ struct ccwgroup_device *gdev;
+ int i;
+ int rc;
+ int del_drvdata;
+
+ if (argc > 256) /* disallow dumb users */
+ return -EINVAL;
+
+ gdev = kmalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ memset(gdev, 0, sizeof(*gdev) + argc*sizeof(gdev->cdev[0]));
+ atomic_set(&gdev->onoff, 0);
+
+ del_drvdata = 0;
+ for (i = 0; i < argc; i++) {
+ gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
+
+ /* all devices have to be of the same type in
+ * order to be grouped */
+ if (!gdev->cdev[i]
+ || gdev->cdev[i]->id.driver_info !=
+ gdev->cdev[0]->id.driver_info) {
+ rc = -EINVAL;
+ goto free_dev;
+ }
+ /* Don't allow a device to belong to more than one group. */
+ if (gdev->cdev[i]->dev.driver_data) {
+ rc = -EINVAL;
+ goto free_dev;
+ }
+ }
+ for (i = 0; i < argc; i++)
+ gdev->cdev[i]->dev.driver_data = gdev;
+ del_drvdata = 1;
+
+ gdev->creator_id = creator_id;
+ gdev->count = argc;
+ gdev->dev = (struct device ) {
+ .bus = &ccwgroup_bus_type,
+ .parent = root,
+ .release = ccwgroup_release,
+ };
+
+ snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s",
+ gdev->cdev[0]->dev.bus_id);
+
+ rc = device_register(&gdev->dev);
+
+ if (rc)
+ goto free_dev;
+ get_device(&gdev->dev);
+ rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
+
+ if (rc) {
+ device_unregister(&gdev->dev);
+ goto error;
+ }
+
+ rc = __ccwgroup_create_symlinks(gdev);
+ if (!rc) {
+ put_device(&gdev->dev);
+ return 0;
+ }
+ device_remove_file(&gdev->dev, &dev_attr_ungroup);
+ device_unregister(&gdev->dev);
+error:
+ for (i = 0; i < argc; i++)
+ if (gdev->cdev[i]) {
+ put_device(&gdev->cdev[i]->dev);
+ gdev->cdev[i]->dev.driver_data = NULL;
+ }
+ put_device(&gdev->dev);
+ return rc;
+free_dev:
+ for (i = 0; i < argc; i++)
+ if (gdev->cdev[i]) {
+ put_device(&gdev->cdev[i]->dev);
+ if (del_drvdata)
+ gdev->cdev[i]->dev.driver_data = NULL;
+ }
+ kfree(gdev);
+ return rc;
+}
+
+static int __init
+init_ccwgroup (void)
+{
+ return bus_register (&ccwgroup_bus_type);
+}
+
+static void __exit
+cleanup_ccwgroup (void)
+{
+ bus_unregister (&ccwgroup_bus_type);
+}
+
+module_init(init_ccwgroup);
+module_exit(cleanup_ccwgroup);
+
+/************************** driver stuff ******************************/
+
+static int
+ccwgroup_set_online(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv;
+ int ret;
+
+ if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_ONLINE) {
+ ret = 0;
+ goto out;
+ }
+ if (!gdev->dev.driver) {
+ ret = -EINVAL;
+ goto out;
+ }
+ gdrv = to_ccwgroupdrv (gdev->dev.driver);
+ if ((ret = gdrv->set_online(gdev)))
+ goto out;
+
+ gdev->state = CCWGROUP_ONLINE;
+ out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+
+static int
+ccwgroup_set_offline(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv;
+ int ret;
+
+ if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_OFFLINE) {
+ ret = 0;
+ goto out;
+ }
+ if (!gdev->dev.driver) {
+ ret = -EINVAL;
+ goto out;
+ }
+ gdrv = to_ccwgroupdrv (gdev->dev.driver);
+ if ((ret = gdrv->set_offline(gdev)))
+ goto out;
+
+ gdev->state = CCWGROUP_OFFLINE;
+ out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+
+/* sysfs "online" store: "1" brings the group device online, "0" offline.
+ * Errors from the driver's set_online/set_offline callbacks are now
+ * propagated to the writer instead of being silently dropped. */
+static ssize_t
+ccwgroup_online_store (struct device *dev, const char *buf, size_t count)
+{
+	struct ccwgroup_device *gdev;
+	struct ccwgroup_driver *gdrv;
+	unsigned int value;
+	int ret;
+
+	gdev = to_ccwgroupdev(dev);
+	if (!dev->driver)
+		return count;
+
+	gdrv = to_ccwgroupdrv (gdev->dev.driver);
+	if (!try_module_get(gdrv->owner))
+		return -EINVAL;
+
+	value = simple_strtoul(buf, 0, 0);
+	if (value == 1)
+		ret = ccwgroup_set_online(gdev);
+	else if (value == 0)
+		ret = ccwgroup_set_offline(gdev);
+	else
+		ret = -EINVAL;
+	module_put(gdrv->owner);
+	return ret ? ret : count;
+}
+
+static ssize_t
+ccwgroup_online_show (struct device *dev, char *buf)
+{
+ int online;
+
+ online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
+
+ return sprintf(buf, online ? "1\n" : "0\n");
+}
+
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
+
+static int
+ccwgroup_probe (struct device *dev)
+{
+ struct ccwgroup_device *gdev;
+ struct ccwgroup_driver *gdrv;
+
+ int ret;
+
+ gdev = to_ccwgroupdev(dev);
+ gdrv = to_ccwgroupdrv(dev->driver);
+
+ if ((ret = device_create_file(dev, &dev_attr_online)))
+ return ret;
+
+ pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
+ ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
+ if (ret)
+ device_remove_file(dev, &dev_attr_online);
+
+ return ret;
+}
+
+static int
+ccwgroup_remove (struct device *dev)
+{
+ struct ccwgroup_device *gdev;
+ struct ccwgroup_driver *gdrv;
+
+ gdev = to_ccwgroupdev(dev);
+ gdrv = to_ccwgroupdrv(dev->driver);
+
+ pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
+
+ device_remove_file(dev, &dev_attr_online);
+
+ if (gdrv && gdrv->remove)
+ gdrv->remove(gdev);
+ return 0;
+}
+
+int
+ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
+{
+ /* register our new driver with the core */
+ cdriver->driver = (struct device_driver) {
+ .bus = &ccwgroup_bus_type,
+ .name = cdriver->name,
+ .probe = ccwgroup_probe,
+ .remove = ccwgroup_remove,
+ };
+
+ return driver_register(&cdriver->driver);
+}
+
+static inline struct device *
+__get_next_ccwgroup_device(struct device_driver *drv)
+{
+ struct device *dev, *d;
+
+ down_read(&drv->bus->subsys.rwsem);
+ dev = NULL;
+ list_for_each_entry(d, &drv->devices, driver_list) {
+ dev = get_device(d);
+ if (dev)
+ break;
+ }
+ up_read(&drv->bus->subsys.rwsem);
+ return dev;
+}
+
+void
+ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
+{
+ struct device *dev;
+
+ /* We don't want ccwgroup devices to live longer than their driver. */
+ get_driver(&cdriver->driver);
+ while ((dev = __get_next_ccwgroup_device(&cdriver->driver))) {
+ __ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
+ device_unregister(dev);
+ put_device(dev);
+ };
+ put_driver(&cdriver->driver);
+ driver_unregister(&cdriver->driver);
+}
+
+int
+ccwgroup_probe_ccwdev(struct ccw_device *cdev)
+{
+ return 0;
+}
+
+static inline struct ccwgroup_device *
+__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
+{
+ struct ccwgroup_device *gdev;
+
+ if (cdev->dev.driver_data) {
+ gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
+ if (get_device(&gdev->dev)) {
+ if (!list_empty(&gdev->dev.node))
+ return gdev;
+ put_device(&gdev->dev);
+ }
+ return NULL;
+ }
+ return NULL;
+}
+
+void
+ccwgroup_remove_ccwdev(struct ccw_device *cdev)
+{
+ struct ccwgroup_device *gdev;
+
+ /* Ignore offlining errors, device is gone anyway. */
+ ccw_device_set_offline(cdev);
+ /* If one of its devices is gone, the whole group is done for. */
+ gdev = __ccwgroup_get_gdev_by_cdev(cdev);
+ if (gdev) {
+ __ccwgroup_remove_symlinks(gdev);
+ device_unregister(&gdev->dev);
+ put_device(&gdev->dev);
+ }
+}
+
+MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(ccwgroup_driver_register);
+EXPORT_SYMBOL(ccwgroup_driver_unregister);
+EXPORT_SYMBOL(ccwgroup_create);
+EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
+EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
new file mode 100644
index 000000000000..b35fe12e6bfc
--- /dev/null
+++ b/drivers/s390/cio/chsc.c
@@ -0,0 +1,1114 @@
+/*
+ * drivers/s390/cio/chsc.c
+ * S/390 common I/O routines -- channel subsystem call
+ * $Revision: 1.119 $
+ *
+ * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cohuck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <asm/cio.h>
+
+#include "css.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+static struct channel_path *chps[NR_CHPIDS];
+
+static void *sei_page;
+
+static int new_channel_path(int chpid);
+
+static inline void
+set_chp_logically_online(int chp, int onoff)
+{
+ chps[chp]->state = onoff;
+}
+
+static int
+get_chp_status(int chp)
+{
+ return (chps[chp] ? chps[chp]->state : -ENODEV);
+}
+
+void
+chsc_validate_chpids(struct subchannel *sch)
+{
+ int mask, chp;
+
+ for (chp = 0; chp <= 7; chp++) {
+ mask = 0x80 >> chp;
+ if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
+ /* disable using this path */
+ sch->opm &= ~mask;
+ }
+}
+
+void
+chpid_is_actually_online(int chp)
+{
+ int state;
+
+ state = get_chp_status(chp);
+ if (state < 0) {
+ need_rescan = 1;
+ queue_work(slow_path_wq, &slow_path_work);
+ } else
+ WARN_ON(!state);
+}
+
+/* FIXME: this is _always_ called for every subchannel. shouldn't we
+ * process more than one at a time? */
+static int
+chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
+{
+ int ccode, j;
+
+ struct {
+ struct chsc_header request;
+ u16 reserved1;
+ u16 f_sch; /* first subchannel */
+ u16 reserved2;
+ u16 l_sch; /* last subchannel */
+ u32 reserved3;
+ struct chsc_header response;
+ u32 reserved4;
+ u8 sch_valid : 1;
+ u8 dev_valid : 1;
+ u8 st : 3; /* subchannel type */
+ u8 zeroes : 3;
+ u8 unit_addr; /* unit address */
+ u16 devno; /* device number */
+ u8 path_mask;
+ u8 fla_valid_mask;
+ u16 sch; /* subchannel */
+ u8 chpid[8]; /* chpids 0-7 */
+ u16 fla[8]; /* full link addresses 0-7 */
+ } *ssd_area;
+
+ ssd_area = page;
+
+ ssd_area->request = (struct chsc_header) {
+ .length = 0x0010,
+ .code = 0x0004,
+ };
+
+ ssd_area->f_sch = sch->irq;
+ ssd_area->l_sch = sch->irq;
+
+ ccode = chsc(ssd_area);
+ if (ccode > 0) {
+ pr_debug("chsc returned with ccode = %d\n", ccode);
+ return (ccode == 3) ? -ENODEV : -EBUSY;
+ }
+
+ switch (ssd_area->response.code) {
+ case 0x0001: /* everything ok */
+ break;
+ case 0x0002:
+ CIO_CRW_EVENT(2, "Invalid command!\n");
+ return -EINVAL;
+ case 0x0003:
+ CIO_CRW_EVENT(2, "Error in chsc request block!\n");
+ return -EINVAL;
+ case 0x0004:
+ CIO_CRW_EVENT(2, "Model does not provide ssd\n");
+ return -EOPNOTSUPP;
+ default:
+ CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+ ssd_area->response.code);
+ return -EIO;
+ }
+
+ /*
+ * ssd_area->st stores the type of the detected
+ * subchannel, with the following definitions:
+ *
+ * 0: I/O subchannel: All fields have meaning
+ * 1: CHSC subchannel: Only sch_val, st and sch
+ * have meaning
+ * 2: Message subchannel: All fields except unit_addr
+ * have meaning
+ * 3: ADM subchannel: Only sch_val, st and sch
+ * have meaning
+ *
+ * Other types are currently undefined.
+ */
+ if (ssd_area->st > 3) { /* uhm, that looks strange... */
+ CIO_CRW_EVENT(0, "Strange subchannel type %d"
+ " for sch %04x\n", ssd_area->st, sch->irq);
+ /*
+ * There may have been a new subchannel type defined in the
+ * time since this code was written; since we don't know which
+ * fields have meaning and what to do with it we just jump out
+ */
+ return 0;
+ } else {
+ const char *type[4] = {"I/O", "chsc", "message", "ADM"};
+ CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
+ sch->irq, type[ssd_area->st]);
+
+ sch->ssd_info.valid = 1;
+ sch->ssd_info.type = ssd_area->st;
+ }
+
+ if (ssd_area->st == 0 || ssd_area->st == 2) {
+ for (j = 0; j < 8; j++) {
+ if (!((0x80 >> j) & ssd_area->path_mask &
+ ssd_area->fla_valid_mask))
+ continue;
+ sch->ssd_info.chpid[j] = ssd_area->chpid[j];
+ sch->ssd_info.fla[j] = ssd_area->fla[j];
+ }
+ }
+ return 0;
+}
+
+int
+css_get_ssd_info(struct subchannel *sch)
+{
+ int ret;
+ void *page;
+
+ page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page)
+ return -ENOMEM;
+ spin_lock_irq(&sch->lock);
+ ret = chsc_get_sch_desc_irq(sch, page);
+ if (ret) {
+ static int cio_chsc_err_msg;
+
+ if (!cio_chsc_err_msg) {
+ printk(KERN_ERR
+ "chsc_get_sch_descriptions:"
+ " Error %d while doing chsc; "
+ "processing some machine checks may "
+ "not work\n", ret);
+ cio_chsc_err_msg = 1;
+ }
+ }
+ spin_unlock_irq(&sch->lock);
+ free_page((unsigned long)page);
+ if (!ret) {
+ int j, chpid;
+ /* Allocate channel path structures, if needed. */
+ for (j = 0; j < 8; j++) {
+ chpid = sch->ssd_info.chpid[j];
+ if (chpid && (get_chp_status(chpid) < 0))
+ new_channel_path(chpid);
+ }
+ }
+ return ret;
+}
+
+static int
+s390_subchannel_remove_chpid(struct device *dev, void *data)
+{
+ int j;
+ int mask;
+ struct subchannel *sch;
+ __u8 *chpid;
+ struct schib schib;
+
+ sch = to_subchannel(dev);
+ chpid = data;
+ for (j = 0; j < 8; j++)
+ if (sch->schib.pmcw.chpid[j] == *chpid)
+ break;
+ if (j >= 8)
+ return 0;
+
+ mask = 0x80 >> j;
+ spin_lock(&sch->lock);
+
+ stsch(sch->irq, &schib);
+ if (!schib.pmcw.dnv)
+ goto out_unreg;
+ memcpy(&sch->schib, &schib, sizeof(struct schib));
+ /* Check for single path devices. */
+ if (sch->schib.pmcw.pim == 0x80)
+ goto out_unreg;
+ if (sch->vpm == mask)
+ goto out_unreg;
+
+ if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
+ SCSW_ACTL_HALT_PEND |
+ SCSW_ACTL_START_PEND |
+ SCSW_ACTL_RESUME_PEND)) &&
+ (sch->schib.pmcw.lpum == mask)) {
+ int cc = cio_cancel(sch);
+
+ if (cc == -ENODEV)
+ goto out_unreg;
+
+ if (cc == -EINVAL) {
+ cc = cio_clear(sch);
+ if (cc == -ENODEV)
+ goto out_unreg;
+ /* Call handler. */
+ if (sch->driver && sch->driver->termination)
+ sch->driver->termination(&sch->dev);
+ goto out_unlock;
+ }
+ } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
+ (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
+ (sch->schib.pmcw.lpum == mask)) {
+ int cc;
+
+ cc = cio_clear(sch);
+ if (cc == -ENODEV)
+ goto out_unreg;
+ /* Call handler. */
+ if (sch->driver && sch->driver->termination)
+ sch->driver->termination(&sch->dev);
+ goto out_unlock;
+ }
+
+ /* trigger path verification. */
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+out_unlock:
+ spin_unlock(&sch->lock);
+ return 0;
+out_unreg:
+ spin_unlock(&sch->lock);
+ sch->lpm = 0;
+ if (css_enqueue_subchannel_slow(sch->irq)) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ }
+ return 0;
+}
+
+static inline void
+s390_set_chpid_offline( __u8 chpid)
+{
+ char dbf_txt[15];
+
+ sprintf(dbf_txt, "chpr%x", chpid);
+ CIO_TRACE_EVENT(2, dbf_txt);
+
+ if (get_chp_status(chpid) <= 0)
+ return;
+
+ bus_for_each_dev(&css_bus_type, NULL, &chpid,
+ s390_subchannel_remove_chpid);
+
+ if (need_rescan || css_slow_subchannels_exist())
+ queue_work(slow_path_wq, &slow_path_work);
+}
+
+static int
+s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
+ struct subchannel *sch)
+{
+ int found;
+ int chp;
+ int ccode;
+
+ found = 0;
+ for (chp = 0; chp <= 7; chp++)
+ /*
+ * check if chpid is in information updated by ssd
+ */
+ if (sch->ssd_info.valid &&
+ sch->ssd_info.chpid[chp] == chpid &&
+ (sch->ssd_info.fla[chp] & fla_mask) == fla) {
+ found = 1;
+ break;
+ }
+
+ if (found == 0)
+ return 0;
+
+ /*
+ * Do a stsch to update our subchannel structure with the
+ * new path information and eventually check for logically
+ * offline chpids.
+ */
+ ccode = stsch(sch->irq, &sch->schib);
+ if (ccode > 0)
+ return 0;
+
+ return 0x80 >> chp;
+}
+
+static int
+s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
+{
+ struct subchannel *sch;
+ int irq, rc;
+ char dbf_txt[15];
+
+ sprintf(dbf_txt, "accpr%x", chpid);
+ CIO_TRACE_EVENT( 2, dbf_txt);
+ if (fla != 0) {
+ sprintf(dbf_txt, "fla%x", fla);
+ CIO_TRACE_EVENT( 2, dbf_txt);
+ }
+
+ /*
+ * I/O resources may have become accessible.
+ * Scan through all subchannels that may be concerned and
+ * do a validation on those.
+ * The more information we have (info), the less scanning
+ * will we have to do.
+ */
+
+ if (!get_chp_status(chpid))
+ return 0; /* no need to do the rest */
+
+ rc = 0;
+ for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+ int chp_mask, old_lpm;
+
+ sch = get_subchannel_by_schid(irq);
+ if (!sch) {
+ struct schib schib;
+ int ret;
+ /*
+ * We don't know the device yet, but since a path
+ * may be available now to the device we'll have
+ * to do recognition again.
+ * Since we don't have any idea about which chpid
+ * that beast may be on we'll have to do a stsch
+ * on all devices, grr...
+ */
+ if (stsch(irq, &schib)) {
+ /* We're through */
+ if (need_rescan)
+ rc = -EAGAIN;
+ break;
+ }
+ if (need_rescan) {
+ rc = -EAGAIN;
+ continue;
+ }
+ /* Put it on the slow path. */
+ ret = css_enqueue_subchannel_slow(irq);
+ if (ret) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ }
+ rc = -EAGAIN;
+ continue;
+ }
+
+ spin_lock_irq(&sch->lock);
+
+ chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
+
+ if (chp_mask == 0) {
+
+ spin_unlock_irq(&sch->lock);
+
+ if (fla_mask != 0)
+ break;
+ else
+ continue;
+ }
+ old_lpm = sch->lpm;
+ sch->lpm = ((sch->schib.pmcw.pim &
+ sch->schib.pmcw.pam &
+ sch->schib.pmcw.pom)
+ | chp_mask) & sch->opm;
+ if (!old_lpm && sch->lpm)
+ device_trigger_reprobe(sch);
+ else if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+
+ spin_unlock_irq(&sch->lock);
+ put_device(&sch->dev);
+ if (fla_mask != 0)
+ break;
+ }
+ return rc;
+}
+
+static int
+__get_chpid_from_lir(void *data)
+{
+ struct lir {
+ u8 iq;
+ u8 ic;
+ u16 sci;
+ /* incident-node descriptor */
+ u32 indesc[28];
+ /* attached-node descriptor */
+ u32 andesc[28];
+ /* incident-specific information */
+ u32 isinfo[28];
+ } *lir;
+
+ lir = (struct lir*) data;
+ if (!(lir->iq&0x80))
+ /* NULL link incident record */
+ return -EINVAL;
+ if (!(lir->indesc[0]&0xc0000000))
+ /* node descriptor not valid */
+ return -EINVAL;
+ if (!(lir->indesc[0]&0x10000000))
+ /* don't handle device-type nodes - FIXME */
+ return -EINVAL;
+ /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
+
+ return (u16) (lir->indesc[0]&0x000000ff);
+}
+
+int
+chsc_process_crw(void)
+{
+	/*
+	 * Process pending "store event information" (SEI) data after a
+	 * channel-subsystem CRW.  Repeats the CHSC call while the flags
+	 * indicate more events are pending (bit 0x80), dispatching on the
+	 * content code: link incidents set the chpid offline, resource
+	 * accessibility events trigger path re-evaluation.
+	 * Returns 0, or the last s390_process_res_acc() result.
+	 */
+	int chpid, ret;
+	struct {
+		struct chsc_header request;
+		u32 reserved1;
+		u32 reserved2;
+		u32 reserved3;
+		struct chsc_header response;
+		u32 reserved4;
+		u8 flags;
+		u8 vf;		/* validity flags */
+		u8 rs;		/* reporting source */
+		u8 cc;		/* content code */
+		u16 fla;	/* full link address */
+		u16 rsid;	/* reporting source id */
+		u32 reserved5;
+		u32 reserved6;
+		u32 ccdf[96];	/* content-code dependent field */
+		/* ccdf has to be big enough for a link-incident record */
+	} *sei_area;
+
+	if (!sei_page)
+		return 0;
+	/*
+	 * build the chsc request block for store event information
+	 * and do the call
+	 * This function is only called by the machine check handler thread,
+	 * so we don't need locking for the sei_page.
+	 */
+	sei_area = sei_page;
+
+	CIO_TRACE_EVENT( 2, "prcss");
+	ret = 0;
+	do {
+		int ccode, status;
+		memset(sei_area, 0, sizeof(*sei_area));
+
+		/* Request code 0x000e: store event information. */
+		sei_area->request = (struct chsc_header) {
+			.length = 0x0010,
+			.code = 0x000e,
+		};
+
+		ccode = chsc(sei_area);
+		if (ccode > 0)
+			return 0;
+
+		switch (sei_area->response.code) {
+			/* for debug purposes, check for problems */
+		case 0x0001:
+			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
+					"successfully stored\n");
+			break; /* everything ok */
+		case 0x0002:
+			CIO_CRW_EVENT(2,
+				      "chsc_process_crw: invalid command!\n");
+			return 0;
+		case 0x0003:
+			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
+				      "request block!\n");
+			return 0;
+		case 0x0005:
+			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
+				      "information stored\n");
+			return 0;
+		default:
+			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
+				      sei_area->response.code);
+			return 0;
+		}
+
+		/* Check if we might have lost some information. */
+		if (sei_area->flags & 0x40)
+			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
+				       "has been lost due to overflow!\n");
+
+		/* rs == 4 means the reporting source is a chpid. */
+		if (sei_area->rs != 4) {
+			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
+				      "(%04X) isn't a chpid!\n",
+				      sei_area->rsid);
+			continue;
+		}
+
+		/* which kind of information was stored? */
+		switch (sei_area->cc) {
+		case 1: /* link incident*/
+			CIO_CRW_EVENT(4, "chsc_process_crw: "
+				"channel subsystem reports link incident,"
+				" reporting source is chpid %x\n",
+				sei_area->rsid);
+			chpid = __get_chpid_from_lir(sei_area->ccdf);
+			if (chpid < 0)
+				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
+					      __FUNCTION__);
+			else
+				s390_set_chpid_offline(chpid);
+			break;
+			
+		case 2: /* i/o resource accessibiliy */
+			CIO_CRW_EVENT(4, "chsc_process_crw: "
+				"channel subsystem reports some I/O "
+				"devices may have become accessible\n");
+			pr_debug("Data received after sei: \n");
+			pr_debug("Validity flags: %x\n", sei_area->vf);
+			
+			/* allocate a new channel path structure, if needed */
+			status = get_chp_status(sei_area->rsid);
+			if (status < 0)
+				new_channel_path(sei_area->rsid);
+			else if (!status)
+				return 0;
+			/* The vf bits select chpid-only, link-address or
+			 * full-link-address granularity for re-evaluation. */
+			if ((sei_area->vf & 0x80) == 0) {
+				pr_debug("chpid: %x\n", sei_area->rsid);
+				ret = s390_process_res_acc(sei_area->rsid,
+							   0, 0);
+			} else if ((sei_area->vf & 0xc0) == 0x80) {
+				pr_debug("chpid: %x link addr: %x\n",
+					 sei_area->rsid, sei_area->fla);
+				ret = s390_process_res_acc(sei_area->rsid,
+							   sei_area->fla,
+							   0xff00);
+			} else if ((sei_area->vf & 0xc0) == 0xc0) {
+				pr_debug("chpid: %x full link addr: %x\n",
+					 sei_area->rsid, sei_area->fla);
+				ret = s390_process_res_acc(sei_area->rsid,
+							   sei_area->fla,
+							   0xffff);
+			}
+			pr_debug("\n");
+			
+			break;
+			
+		default: /* other stuff */
+			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
+				      sei_area->cc);
+			break;
+		}
+	} while (sei_area->flags & 0x80);
+	return ret;
+}
+
+static int
+chp_add(int chpid)
+{
+	/*
+	 * A channel path has become available: walk all subchannels,
+	 * re-validate the path mask of those using this chpid, and
+	 * enqueue not-yet-known subchannels on the slow path for
+	 * later evaluation.  Returns 0 or -EAGAIN (rescan needed).
+	 */
+	struct subchannel *sch;
+	int irq, ret, rc;
+	char dbf_txt[15];
+
+	if (!get_chp_status(chpid))
+		return 0; /* no need to do the rest */
+	
+	sprintf(dbf_txt, "cadd%x", chpid);
+	CIO_TRACE_EVENT(2, dbf_txt);
+
+	rc = 0;
+	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+		int i;
+
+		sch = get_subchannel_by_schid(irq);
+		if (!sch) {
+			struct schib schib;
+
+			if (stsch(irq, &schib)) {
+				/* We're through */
+				if (need_rescan)
+					rc = -EAGAIN;
+				break;
+			}
+			if (need_rescan) {
+				rc = -EAGAIN;
+				continue;
+			}
+			/* Put it on the slow path. */
+			ret = css_enqueue_subchannel_slow(irq);
+			if (ret) {
+				css_clear_subchannel_slow_list();
+				need_rescan = 1;
+			}
+			rc = -EAGAIN;
+			continue;
+		}
+	
+		spin_lock(&sch->lock);
+		/* Find the position of chpid in the subchannel's chpid
+		 * array; i selects the corresponding path mask bit. */
+		for (i=0; i<8; i++)
+			if (sch->schib.pmcw.chpid[i] == chpid) {
+				if (stsch(sch->irq, &sch->schib) != 0) {
+					/* Endgame. */
+					spin_unlock(&sch->lock);
+					return rc;
+				}
+				break;
+			}
+		if (i==8) {
+			spin_unlock(&sch->lock);
+			return rc;
+		}
+		sch->lpm = ((sch->schib.pmcw.pim &
+			     sch->schib.pmcw.pam &
+			     sch->schib.pmcw.pom)
+			    | 0x80 >> i) & sch->opm;
+
+		if (sch->driver && sch->driver->verify)
+			sch->driver->verify(&sch->dev);
+
+		spin_unlock(&sch->lock);
+		put_device(&sch->dev);
+	}
+	return rc;
+}
+
+/*
+ * Handling of crw machine checks with channel path source.
+ */
+int
+chp_process_crw(int chpid, int on)
+{
+	if (on) {
+		/*
+		 * Path has come.  Make sure a channel path structure
+		 * exists for this chpid, then scan for devices on it.
+		 * chp_add() avoids the extra overhead in process_rec_acc.
+		 */
+		if (get_chp_status(chpid) < 0)
+			new_channel_path(chpid);
+		return chp_add(chpid);
+	}
+	/* Path has gone. We use the link incident routine. */
+	s390_set_chpid_offline(chpid);
+	return 0; /* De-register is async anyway. */
+}
+
+static inline int
+__check_for_io_and_kill(struct subchannel *sch, int index)
+{
+	/*
+	 * Check whether I/O is still active on the path selected by
+	 * @index (bit 0x80 >> index).  If so, mark the device waiting
+	 * and return 1 so the caller grants a grace period before the
+	 * path is finally varied off; return 0 otherwise.
+	 */
+	int cc;
+
+	if (!device_is_online(sch))
+		/* cio could be doing I/O. */
+		return 0;
+	cc = stsch(sch->irq, &sch->schib);
+	if (cc)
+		return 0;
+	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
+		device_set_waiting(sch);
+		return 1;
+	}
+	return 0;
+}
+
+static inline void
+__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
+{
+	/*
+	 * Vary @chpid on or off for subchannel @sch: adjust the opm/lpm
+	 * path masks and trigger reprobe/verification, or — when varying
+	 * off removes the last path — enqueue the subchannel on the slow
+	 * path.  No-op if the subchannel's ssd info is not valid.
+	 */
+	int chp, old_lpm;
+	unsigned long flags;
+
+	if (!sch->ssd_info.valid)
+		return;
+	
+	spin_lock_irqsave(&sch->lock, flags);
+	old_lpm = sch->lpm;
+	for (chp = 0; chp < 8; chp++) {
+		if (sch->ssd_info.chpid[chp] != chpid)
+			continue;
+
+		if (on) {
+			sch->opm |= (0x80 >> chp);
+			sch->lpm |= (0x80 >> chp);
+			/* old_lpm == 0 means the device had no usable path
+			 * before; a full reprobe is needed then. */
+			if (!old_lpm)
+				device_trigger_reprobe(sch);
+			else if (sch->driver && sch->driver->verify)
+				sch->driver->verify(&sch->dev);
+		} else {
+			sch->opm &= ~(0x80 >> chp);
+			sch->lpm &= ~(0x80 >> chp);
+			/*
+			 * Give running I/O a grace period in which it
+			 * can successfully terminate, even using the
+			 * just varied off path. Then kill it.
+			 */
+			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
+				if (css_enqueue_subchannel_slow(sch->irq)) {
+					css_clear_subchannel_slow_list();
+					need_rescan = 1;
+				}
+			} else if (sch->driver && sch->driver->verify)
+				sch->driver->verify(&sch->dev);
+		}
+		break;
+	}
+	spin_unlock_irqrestore(&sch->lock, flags);
+}
+
+static int
+s390_subchannel_vary_chpid_off(struct device *dev, void *data)
+{
+	/* Bus iterator callback: vary the chpid pointed to by @data
+	 * offline for the subchannel embedding @dev. */
+	__u8 *chpid_p = data;
+
+	__s390_subchannel_vary_chpid(to_subchannel(dev), *chpid_p, 0);
+	return 0;
+}
+
+static int
+s390_subchannel_vary_chpid_on(struct device *dev, void *data)
+{
+	/* Bus iterator callback: vary the chpid pointed to by @data
+	 * online for the subchannel embedding @dev. */
+	__u8 *chpid_p = data;
+
+	__s390_subchannel_vary_chpid(to_subchannel(dev), *chpid_p, 1);
+	return 0;
+}
+
+/*
+ * Function: s390_vary_chpid
+ * Varies the specified chpid online or offline
+ */
+static int
+s390_vary_chpid( __u8 chpid, int on)
+{
+	/*
+	 * Vary @chpid online (@on != 0) or offline.  Rejects unknown
+	 * chpids and varying off an already-offline chpid.  Re-runs path
+	 * verification on all affected subchannels; when varying on, also
+	 * scans for new subchannels on the path via the slow path.
+	 */
+	char dbf_text[15];
+	int status, irq, ret;
+	struct subchannel *sch;
+
+	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
+	CIO_TRACE_EVENT( 2, dbf_text);
+
+	status = get_chp_status(chpid);
+	if (status < 0) {
+		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
+		return -EINVAL;
+	}
+
+	if (!on && !status) {
+		printk(KERN_ERR "chpid %x is already offline\n", chpid);
+		return -EINVAL;
+	}
+
+	set_chp_logically_online(chpid, on);
+
+	/*
+	 * Redo PathVerification on the devices the chpid connects to
+	 */
+
+	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
+			 s390_subchannel_vary_chpid_on :
+			 s390_subchannel_vary_chpid_off);
+	if (!on)
+		goto out;
+	/* Scan for new devices on varied on path. */
+	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+		struct schib schib;
+
+		if (need_rescan)
+			break;
+		sch = get_subchannel_by_schid(irq);
+		if (sch) {
+			/* Already known; drop the reference taken above. */
+			put_device(&sch->dev);
+			continue;
+		}
+		if (stsch(irq, &schib))
+			/* We're through */
+			break;
+		/* Put it on the slow path. */
+		ret = css_enqueue_subchannel_slow(irq);
+		if (ret) {
+			css_clear_subchannel_slow_list();
+			need_rescan = 1;
+		}
+	}
+out:
+	if (need_rescan || css_slow_subchannels_exist())
+		queue_work(slow_path_wq, &slow_path_work);
+	return 0;
+}
+
+/*
+ * Files for the channel path entries.
+ */
+static ssize_t
+chp_status_show(struct device *dev, char *buf)
+{
+	/* sysfs "status" attribute: report "online" or "offline". */
+	struct channel_path *chp;
+
+	chp = container_of(dev, struct channel_path, dev);
+	if (!chp)
+		return 0;
+	if (get_chp_status(chp->id))
+		return sprintf(buf, "online\n");
+	return sprintf(buf, "offline\n");
+}
+
+static ssize_t
+chp_status_write(struct device *dev, const char *buf, size_t count)
+{
+	/*
+	 * sysfs "status" attribute store: accepts "on" or "off"
+	 * (case-insensitive prefix match) and varies the channel path
+	 * accordingly.  Returns @count on success or a negative errno.
+	 */
+	struct channel_path *cp = container_of(dev, struct channel_path, dev);
+	char cmd[10];
+	int num_args;
+	int error;
+
+	num_args = sscanf(buf, "%5s", cmd);
+	if (!num_args)
+		return count;
+
+	if (!strnicmp(cmd, "on", 2))
+		error = s390_vary_chpid(cp->id, 1);
+	else if (!strnicmp(cmd, "off", 3))
+		error = s390_vary_chpid(cp->id, 0);
+	else
+		error = -EINVAL;
+
+	return error < 0 ? error : count;
+
+}
+
+static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
+
+static ssize_t
+chp_type_show(struct device *dev, char *buf)
+{
+	/* sysfs "type" attribute: print the descriptor type in hex. */
+	struct channel_path *chp = container_of(dev, struct channel_path, dev);
+
+	return chp ? sprintf(buf, "%x\n", chp->desc.desc) : 0;
+}
+
+static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
+
+static struct attribute * chp_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_type.attr,
+ NULL,
+};
+
+static struct attribute_group chp_attr_group = {
+ .attrs = chp_attrs,
+};
+
+static void
+chp_release(struct device *dev)
+{
+	/* struct device release callback: frees the channel_path that
+	 * embeds @dev once the last reference is dropped. */
+	kfree(container_of(dev, struct channel_path, dev));
+}
+
+static int
+chsc_determine_channel_path_description(int chpid,
+					struct channel_path_desc *desc)
+{
+	/*
+	 * Issue the CHSC "store channel-path description" command
+	 * (code 0x0002) for @chpid and copy the result into @desc.
+	 * Returns 0 on success or a negative errno.
+	 */
+	int ccode, ret;
+
+	struct {
+		struct chsc_header request;
+		u32 : 24;
+		u32 first_chpid : 8;
+		u32 : 24;
+		u32 last_chpid : 8;
+		u32 zeroes1;
+		struct chsc_header response;
+		u32 zeroes2;
+		struct channel_path_desc desc;
+	} *scpd_area;
+
+	/* chsc requires a 4k block below 2G, hence GFP_DMA. */
+	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpd_area)
+		return -ENOMEM;
+
+	scpd_area->request = (struct chsc_header) {
+		.length = 0x0010,
+		.code   = 0x0002,
+	};
+
+	scpd_area->first_chpid = chpid;
+	scpd_area->last_chpid = chpid;
+
+	ccode = chsc(scpd_area);
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+
+	switch (scpd_area->response.code) {
+	case 0x0001: /* Success. */
+		memcpy(desc, &scpd_area->desc,
+		       sizeof(struct channel_path_desc));
+		ret = 0;
+		break;
+	case 0x0003: /* Invalid block. */
+	case 0x0007: /* Invalid format. */
+	case 0x0008: /* Other invalid block. */
+		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
+		ret = -EINVAL;
+		break;
+	case 0x0004: /* Command not provided in model. */
+		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
+		ret = -EOPNOTSUPP;
+		break;
+	default:
+		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+			      scpd_area->response.code);
+		ret = -EIO;
+	}
+out:
+	free_page((unsigned long)scpd_area);
+	return ret;
+}
+
+/*
+ * Entries for chpids on the system bus.
+ * This replaces /proc/chpids.
+ */
+static int
+new_channel_path(int chpid)
+{
+	/*
+	 * Allocate, describe and register a struct channel_path for
+	 * @chpid and publish its sysfs attributes.  On success the
+	 * structure is recorded in chps[chpid].  Returns 0 or a
+	 * negative errno.
+	 */
+	struct channel_path *chp;
+	int ret;
+
+	chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
+	if (!chp)
+		return -ENOMEM;
+	memset(chp, 0, sizeof(struct channel_path));
+
+	/* fill in status, etc. */
+	chp->id = chpid;
+	chp->state = 1;
+	chp->dev = (struct device) {
+		.parent  = &css_bus_device,
+		.release = chp_release,
+	};
+	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
+
+	/* Obtain channel path description and fill it in. */
+	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
+	if (ret)
+		goto out_free;
+
+	/* make it known to the system */
+	ret = device_register(&chp->dev);
+	if (ret) {
+		printk(KERN_WARNING "%s: could not register %02x\n",
+		       __func__, chpid);
+		/*
+		 * Once device_register() has been called, the device
+		 * must be released with put_device() (which invokes
+		 * chp_release() -> kfree()), never with a direct
+		 * kfree(), even if registration failed.
+		 */
+		put_device(&chp->dev);
+		return ret;
+	}
+	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
+	if (ret) {
+		/*
+		 * device_unregister() drops the last reference and thus
+		 * frees chp via chp_release(); do NOT kfree() it again.
+		 */
+		device_unregister(&chp->dev);
+		return ret;
+	}
+	chps[chpid] = chp;
+	return 0;
+out_free:
+	kfree(chp);
+	return ret;
+}
+
+void *
+chsc_get_chp_desc(struct subchannel *sch, int chp_no)
+{
+	/*
+	 * Return a freshly kmalloc'ed copy of the channel path
+	 * descriptor for path number @chp_no of @sch, or NULL if the
+	 * chpid is unknown or allocation fails.  The caller owns the
+	 * returned buffer and must kfree() it.
+	 */
+	struct channel_path *chp;
+	struct channel_path_desc *desc;
+
+	chp = chps[sch->schib.pmcw.chpid[chp_no]];
+	if (!chp)
+		return NULL;
+	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+	return desc;
+}
+
+
+static int __init
+chsc_alloc_sei_area(void)
+{
+	/* Allocate the single page used as the store-event-information
+	 * buffer (sei_page); must be below 2G, hence GFP_DMA. */
+	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sei_page)
+		printk(KERN_WARNING"Can't allocate page for processing of " \
+		       "chsc machine checks!\n");
+	return (sei_page ? 0 : -ENOMEM);
+}
+
+subsys_initcall(chsc_alloc_sei_area);
+
+struct css_general_char css_general_characteristics;
+struct css_chsc_char css_chsc_characteristics;
+
+int __init
+chsc_determine_css_characteristics(void)
+{
+	/*
+	 * Issue the CHSC "store channel-subsystem characteristics"
+	 * command (code 0x0010) and cache the general and chsc
+	 * characteristics in the corresponding global structures.
+	 * Returns 0 on success or a negative errno.
+	 */
+	int result;
+	struct {
+		struct chsc_header request;
+		u32 reserved1;
+		u32 reserved2;
+		u32 reserved3;
+		struct chsc_header response;
+		u32 reserved4;
+		u32 general_char[510];
+		u32 chsc_char[518];
+	} *scsc_area;
+
+	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scsc_area) {
+	        printk(KERN_WARNING"cio: Was not able to determine available" \
+		       "CHSCs due to no memory.\n");
+		return -ENOMEM;
+	}
+
+	scsc_area->request = (struct chsc_header) {
+		.length = 0x0010,
+		.code   = 0x0010,
+	};
+
+	result = chsc(scsc_area);
+	if (result) {
+		printk(KERN_WARNING"cio: Was not able to determine " \
+		       "available CHSCs, cc=%i.\n", result);
+		result = -EIO;
+		goto exit;
+	}
+
+	if (scsc_area->response.code != 1) {
+		printk(KERN_WARNING"cio: Was not able to determine " \
+		       "available CHSCs.\n");
+		result = -EIO;
+		goto exit;
+	}
+	memcpy(&css_general_characteristics, scsc_area->general_char,
+	       sizeof(css_general_characteristics));
+	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
+	       sizeof(css_chsc_characteristics));
+exit:
+	free_page ((unsigned long) scsc_area);
+	return result;
+}
+
+EXPORT_SYMBOL_GPL(css_general_characteristics);
+EXPORT_SYMBOL_GPL(css_chsc_characteristics);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
new file mode 100644
index 000000000000..be20da49d147
--- /dev/null
+++ b/drivers/s390/cio/chsc.h
@@ -0,0 +1,66 @@
+#ifndef S390_CHSC_H
+#define S390_CHSC_H
+
+#define NR_CHPIDS 256
+
+#define CHSC_SEI_ACC_CHPID 1
+#define CHSC_SEI_ACC_LINKADDR 2
+#define CHSC_SEI_ACC_FULLLINKADDR 3
+
+/* Common header of every CHSC command/response block. */
+struct chsc_header {
+	u16 length;
+	u16 code;
+};
+
+/* Channel path descriptor as returned by the CHSC "store channel-path
+ * description" command. */
+struct channel_path_desc {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u8 swla;
+	u8 zeroes;
+	u8 chla;
+	u8 chpp;
+};
+
+/* Per-chpid bookkeeping: id, logical online state (1 = online), the
+ * cached descriptor and the embedded driver-model device. */
+struct channel_path {
+	int id;
+	int state;
+	struct channel_path_desc desc;
+	struct device dev;
+};
+
+extern void s390_process_css( void );
+extern void chsc_validate_chpids(struct subchannel *);
+extern void chpid_is_actually_online(int);
+
+/* General channel-subsystem characteristics bit string as stored by
+ * chsc_determine_css_characteristics(). */
+struct css_general_char {
+	u64 : 41;
+	u32 aif : 1;     /* bit 41 */
+	u32 : 3;
+	u32 mcss : 1;    /* bit 45 */
+	u32 : 2;
+	u32 ext_mb : 1;  /* bit 48 */
+	u32 : 7;
+	u32 aif_tdd : 1; /* bit 56 */
+	u32 : 10;
+	u32 aif_osa : 1; /* bit 67 */
+	u32 : 28;
+}__attribute__((packed));
+
+/* CHSC characteristics bit string as stored by
+ * chsc_determine_css_characteristics(). */
+struct css_chsc_char {
+	u64 res;
+	u64 : 43;
+	u32 scssc : 1;  /* bit 107 */
+	u32 scsscf : 1; /* bit 108 */
+	u32 : 19;
+}__attribute__((packed));
+
+extern struct css_general_char css_general_characteristics;
+extern struct css_chsc_char css_chsc_characteristics;
+
+extern int chsc_determine_css_characteristics(void);
+extern int css_characteristics_avail;
+
+extern void *chsc_get_chp_desc(struct subchannel*, int);
+#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
new file mode 100644
index 000000000000..99ce5a567982
--- /dev/null
+++ b/drivers/s390/cio/cio.c
@@ -0,0 +1,860 @@
+/*
+ * drivers/s390/cio/cio.c
+ * S/390 common I/O routines -- low level i/o calls
+ * $Revision: 1.131 $
+ *
+ * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cohuck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+
+#include <asm/cio.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+
+#include "airq.h"
+#include "cio.h"
+#include "css.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "blacklist.h"
+#include "cio_debug.h"
+
+debug_info_t *cio_debug_msg_id;
+debug_info_t *cio_debug_trace_id;
+debug_info_t *cio_debug_crw_id;
+
+int cio_show_msg;
+
+static int __init
+cio_setup (char *parm)
+{
+	/* Parse the "cio_msg=" kernel parameter ("yes"/"no") which
+	 * controls the cio_show_msg flag. */
+	if (!strcmp (parm, "yes"))
+		cio_show_msg = 1;
+	else if (!strcmp (parm, "no"))
+		cio_show_msg = 0;
+	else
+		printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'",
+			parm);
+	return 1;
+}
+
+__setup ("cio_msg=", cio_setup);
+
+/*
+ * Function: cio_debug_init
+ * Initializes three debug logs (under /proc/s390dbf) for common I/O:
+ * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
+ * - cio_trace logs the calling of different functions
+ * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
+ * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
+ */
+static int __init
+cio_debug_init (void)
+{
+	/* Register the three s390 debug-feature logs (cio_msg, cio_trace,
+	 * cio_crw) at level 2; unregister all on partial failure. */
+	cio_debug_msg_id = debug_register ("cio_msg", 4, 4, 16*sizeof (long));
+	if (!cio_debug_msg_id)
+		goto out_unregister;
+	debug_register_view (cio_debug_msg_id, &debug_sprintf_view);
+	debug_set_level (cio_debug_msg_id, 2);
+	cio_debug_trace_id = debug_register ("cio_trace", 4, 4, 8);
+	if (!cio_debug_trace_id)
+		goto out_unregister;
+	debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view);
+	debug_set_level (cio_debug_trace_id, 2);
+	cio_debug_crw_id = debug_register ("cio_crw", 2, 4, 16*sizeof (long));
+	if (!cio_debug_crw_id)
+		goto out_unregister;
+	debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
+	debug_set_level (cio_debug_crw_id, 2);
+	pr_debug("debugging initialized\n");
+	return 0;
+
+out_unregister:
+	if (cio_debug_msg_id)
+		debug_unregister (cio_debug_msg_id);
+	if (cio_debug_trace_id)
+		debug_unregister (cio_debug_trace_id);
+	if (cio_debug_crw_id)
+		debug_unregister (cio_debug_crw_id);
+	pr_debug("could not initialize debugging\n");
+	return -1;
+}
+
+arch_initcall (cio_debug_init);
+
+int
+cio_set_options (struct subchannel *sch, int flags)
+{
+	/* Decode the DOIO_* flag bits into the subchannel's option
+	 * bitfields.  Always returns 0. */
+	sch->options.suspend = !!(flags & DOIO_ALLOW_SUSPEND);
+	sch->options.prefetch = !!(flags & DOIO_DENY_PREFETCH);
+	sch->options.inter = !!(flags & DOIO_SUPPRESS_INTER);
+	return 0;
+}
+
+/* FIXME: who wants to use this? */
+int
+cio_get_options (struct subchannel *sch)
+{
+	/* Re-encode the subchannel's option bitfields as DOIO_* flags. */
+	return (sch->options.suspend ? DOIO_ALLOW_SUSPEND : 0) |
+	       (sch->options.prefetch ? DOIO_DENY_PREFETCH : 0) |
+	       (sch->options.inter ? DOIO_SUPPRESS_INTER : 0);
+}
+
+/*
+ * Use tpi to get a pending interrupt, call the interrupt handler and
+ * return a pointer to the subchannel structure.
+ */
+static inline int
+cio_tpi(void)
+{
+	/*
+	 * Poll for one pending I/O interrupt with tpi, store its IRB and
+	 * run the owning subchannel's interrupt handler under the
+	 * subchannel lock.  Returns 1 if an interrupt was consumed,
+	 * 0 if none was pending.
+	 */
+	struct tpi_info *tpi_info;
+	struct subchannel *sch;
+	struct irb *irb;
+
+	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
+	if (tpi (NULL) != 1)
+		return 0;
+	irb = (struct irb *) __LC_IRB;
+	/* Store interrupt response block to lowcore. */
+	if (tsch (tpi_info->irq, irb) != 0)
+		/* Not status pending or not operational. */
+		return 1;
+	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
+	if (!sch)
+		return 1;
+	/* Mimic the interrupt context the handler normally runs in. */
+	local_bh_disable();
+	irq_enter ();
+	spin_lock(&sch->lock);
+	memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
+	if (sch->driver && sch->driver->irq)
+		sch->driver->irq(&sch->dev);
+	spin_unlock(&sch->lock);
+	irq_exit ();
+	__local_bh_enable();
+	return 1;
+}
+
+static inline int
+cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
+{
+	/*
+	 * Handle a "not operational" condition code from ssch: remove
+	 * the failed path(s) from sch->lpm, refresh the schib and log.
+	 * Returns -EACCES if other paths remain, -ENODEV otherwise.
+	 */
+	char dbf_text[15];
+
+	if (lpm != 0)
+		sch->lpm &= ~lpm;
+	else
+		sch->lpm = 0;
+
+	stsch (sch->irq, &sch->schib);
+
+	CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
+		      "subchannel %04x!\n", sch->irq);
+	sprintf(dbf_text, "no%s", sch->dev.bus_id);
+	CIO_TRACE_EVENT(0, dbf_text);
+	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
+
+	return (sch->lpm ? -EACCES : -ENODEV);
+}
+
+int
+cio_start_key (struct subchannel *sch,	/* subchannel structure */
+	       struct ccw1 * cpa,	/* logical channel prog addr */
+	       __u8 lpm,		/* logical path mask */
+	       __u8 key)                /* storage key */
+{
+	/*
+	 * Build the ORB from the subchannel options and issue "start
+	 * subchannel" (ssch) for the channel program at @cpa.
+	 * Returns 0 on success, -EBUSY if status pending/busy, or the
+	 * result of cio_start_handle_notoper() when not operational.
+	 */
+	char dbf_txt[15];
+	int ccode;
+
+	CIO_TRACE_EVENT (4, "stIO");
+	CIO_TRACE_EVENT (4, sch->dev.bus_id);
+
+	/* sch is always under 2G. */
+	sch->orb.intparm = (__u32)(unsigned long)sch;
+	sch->orb.fmt = 1;
+
+	sch->orb.pfch = sch->options.prefetch == 0;
+	sch->orb.spnd = sch->options.suspend;
+	sch->orb.ssic = sch->options.suspend && sch->options.inter;
+	sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
+#ifdef CONFIG_ARCH_S390X
+	/*
+	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
+	 */
+	sch->orb.c64 = 1;
+	sch->orb.i2k = 0;
+#endif
+	/* ORB holds the access key in its high-order four bits. */
+	sch->orb.key = key >> 4;
+	/* issue "Start Subchannel" */
+	sch->orb.cpa = (__u32) __pa (cpa);
+	ccode = ssch (sch->irq, &sch->orb);
+
+	/* process condition code */
+	sprintf (dbf_txt, "ccode:%d", ccode);
+	CIO_TRACE_EVENT (4, dbf_txt);
+
+	switch (ccode) {
+	case 0:
+		/*
+		 * initialize device status information
+		 */
+		sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
+		return 0;
+	case 1:		/* status pending */
+	case 2:		/* busy */
+		return -EBUSY;
+	default:		/* device/path not operational */
+		return cio_start_handle_notoper(sch, lpm);
+	}
+}
+
+int
+cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
+{
+	/* Convenience wrapper: start I/O with the default storage key. */
+	return cio_start_key(sch, cpa, lpm, default_storage_key);
+}
+
+/*
+ * resume suspended I/O operation
+ */
+int
+cio_resume (struct subchannel *sch)
+{
+	/*
+	 * Issue "resume subchannel" (rsch) for a suspended operation.
+	 * Returns 0 on success, -EBUSY if status pending, -EINVAL if
+	 * resume is not applicable, -ENODEV if not operational.
+	 */
+	char dbf_txt[15];
+	int ccode;
+
+	CIO_TRACE_EVENT (4, "resIO");
+	CIO_TRACE_EVENT (4, sch->dev.bus_id);
+
+	ccode = rsch (sch->irq);
+
+	sprintf (dbf_txt, "ccode:%d", ccode);
+	CIO_TRACE_EVENT (4, dbf_txt);
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
+		return 0;
+	case 1:
+		return -EBUSY;
+	case 2:
+		return -EINVAL;
+	default:
+		/*
+		 * useless to wait for request completion
+		 *  as device is no longer operational !
+		 */
+		return -ENODEV;
+	}
+}
+
+/*
+ * halt I/O operation
+ */
+int
+cio_halt(struct subchannel *sch)
+{
+	/*
+	 * Issue "halt subchannel" (hsch).  Returns 0 on success,
+	 * -EBUSY if status pending/busy, -ENODEV if not operational
+	 * or @sch is NULL.
+	 */
+	char dbf_txt[15];
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT (2, "haltIO");
+	CIO_TRACE_EVENT (2, sch->dev.bus_id);
+
+	/*
+	 * Issue "Halt subchannel" and process condition code
+	 */
+	ccode = hsch (sch->irq);
+
+	sprintf (dbf_txt, "ccode:%d", ccode);
+	CIO_TRACE_EVENT (2, dbf_txt);
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
+		return 0;
+	case 1:		/* status pending */
+	case 2:		/* busy */
+		return -EBUSY;
+	default:		/* device not operational */
+		return -ENODEV;
+	}
+}
+
+/*
+ * Clear I/O operation
+ */
+int
+cio_clear(struct subchannel *sch)
+{
+	/*
+	 * Issue "clear subchannel" (csch).  Returns 0 on success or
+	 * -ENODEV if the device is not operational or @sch is NULL.
+	 */
+	char dbf_txt[15];
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT (2, "clearIO");
+	CIO_TRACE_EVENT (2, sch->dev.bus_id);
+
+	/*
+	 * Issue "Clear subchannel" and process condition code
+	 */
+	ccode = csch (sch->irq);
+
+	sprintf (dbf_txt, "ccode:%d", ccode);
+	CIO_TRACE_EVENT (2, dbf_txt);
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
+		return 0;
+	default:		/* device not operational */
+		return -ENODEV;
+	}
+}
+
+/*
+ * Function: cio_cancel
+ * Issues a "Cancel Subchannel" on the specified subchannel
+ * Note: We don't need any fancy intparms and flags here
+ * since xsch is executed synchronously.
+ * Only for common I/O internal use as for now.
+ */
+int
+cio_cancel (struct subchannel *sch)
+{
+	/*
+	 * Issue "cancel subchannel" (xsch); executed synchronously, so
+	 * the schib is refreshed on success.  Returns 0, -EBUSY (status
+	 * pending), -EINVAL (not applicable) or -ENODEV.
+	 */
+	char dbf_txt[15];
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT (2, "cancelIO");
+	CIO_TRACE_EVENT (2, sch->dev.bus_id);
+
+	ccode = xsch (sch->irq);
+
+	sprintf (dbf_txt, "ccode:%d", ccode);
+	CIO_TRACE_EVENT (2, dbf_txt);
+
+	switch (ccode) {
+	case 0:		/* success */
+		/* Update information in scsw. */
+		stsch (sch->irq, &sch->schib);
+		return 0;
+	case 1:		/* status pending */
+		return -EBUSY;
+	case 2:		/* not applicable */
+		return -EINVAL;
+	default:	/* not oper */
+		return -ENODEV;
+	}
+}
+
+/*
+ * Function: cio_modify
+ * Issues a "Modify Subchannel" on the specified subchannel
+ */
+int
+cio_modify (struct subchannel *sch)
+{
+	/*
+	 * Issue "modify subchannel" (msch) with the current schib,
+	 * retrying up to five times while the subchannel is busy.
+	 * Returns 0, -EBUSY, -ENODEV, or -EIO on a program check.
+	 */
+	int ccode, retry, ret;
+
+	ret = 0;
+	for (retry = 0; retry < 5; retry++) {
+		ccode = msch_err (sch->irq, &sch->schib);
+		if (ccode < 0)	/* -EIO if msch gets a program check. */
+			return ccode;
+		switch (ccode) {
+		case 0: /* successful */
+			return 0;
+		case 1:	/* status pending */
+			return -EBUSY;
+		case 2:	/* busy */
+			udelay (100);	/* allow for recovery */
+			ret = -EBUSY;
+			break;
+		case 3:	/* not operational */
+			return -ENODEV;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Enable subchannel.
+ */
+int
+cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
+{
+	/*
+	 * Enable @sch for interruption subclass @isc, retrying up to
+	 * five times.  On a program check the concurrent-sense bit is
+	 * dropped; on -EBUSY any pending status is cleared via tsch.
+	 * Returns the last cio_modify() result.
+	 */
+	char dbf_txt[15];
+	int ccode;
+	int retry;
+	int ret;
+
+	CIO_TRACE_EVENT (2, "ensch");
+	CIO_TRACE_EVENT (2, sch->dev.bus_id);
+
+	ccode = stsch (sch->irq, &sch->schib);
+	if (ccode)
+		return -ENODEV;
+
+	for (retry = 5, ret = 0; retry > 0; retry--) {
+		sch->schib.pmcw.ena = 1;
+		sch->schib.pmcw.isc = isc;
+		sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
+		ret = cio_modify(sch);
+		if (ret == -ENODEV)
+			break;
+		if (ret == -EIO)
+			/*
+			 * Got a program check in cio_modify. Try without
+			 * the concurrent sense bit the next time.
+			 */
+			sch->schib.pmcw.csense = 0;
+		if (ret == 0) {
+			stsch (sch->irq, &sch->schib);
+			if (sch->schib.pmcw.ena)
+				break;
+		}
+		if (ret == -EBUSY) {
+			struct irb irb;
+			if (tsch(sch->irq, &irb) != 0)
+				break;
+		}
+	}
+	sprintf (dbf_txt, "ret:%d", ret);
+	CIO_TRACE_EVENT (2, dbf_txt);
+	return ret;
+}
+
+/*
+ * Disable subchannel.
+ */
+int
+cio_disable_subchannel (struct subchannel *sch)
+{
+	/*
+	 * Disable @sch, retrying up to five times.  Refuses with -EBUSY
+	 * while requests are still pending (scsw.actl != 0).  Returns
+	 * the last cio_modify() result or -ENODEV.
+	 */
+	char dbf_txt[15];
+	int ccode;
+	int retry;
+	int ret;
+
+	CIO_TRACE_EVENT (2, "dissch");
+	CIO_TRACE_EVENT (2, sch->dev.bus_id);
+
+	ccode = stsch (sch->irq, &sch->schib);
+	if (ccode == 3)		/* Not operational. */
+		return -ENODEV;
+
+	if (sch->schib.scsw.actl != 0)
+		/*
+		 * the disable function must not be called while there are
+		 *  requests pending for completion !
+		 */
+		return -EBUSY;
+
+	for (retry = 5, ret = 0; retry > 0; retry--) {
+		sch->schib.pmcw.ena = 0;
+		ret = cio_modify(sch);
+		if (ret == -ENODEV)
+			break;
+		if (ret == -EBUSY)
+			/*
+			 * The subchannel is busy or status pending.
+			 * We'll disable when the next interrupt was delivered
+			 * via the state machine.
+			 */
+			break;
+		if (ret == 0) {
+			stsch (sch->irq, &sch->schib);
+			if (!sch->schib.pmcw.ena)
+				break;
+		}
+	}
+	sprintf (dbf_txt, "ret:%d", ret);
+	CIO_TRACE_EVENT (2, dbf_txt);
+	return ret;
+}
+
+/*
+ * cio_validate_subchannel()
+ *
+ * Find out subchannel type and initialize struct subchannel.
+ * Return codes:
+ * SUBCHANNEL_TYPE_IO for a normal io subchannel
+ * SUBCHANNEL_TYPE_CHSC for a chsc subchannel
+ * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
+ * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
+ * -ENXIO for non-defined subchannels
+ * -ENODEV for subchannels with invalid device number or blacklisted devices
+ */
+int
+cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
+{
+	char dbf_txt[15];
+	int ccode;
+
+	sprintf (dbf_txt, "valsch%x", irq);
+	CIO_TRACE_EVENT (4, dbf_txt);
+
+	/* Nuke all fields. */
+	memset(sch, 0, sizeof(struct subchannel));
+
+	spin_lock_init(&sch->lock);
+
+	/* Set a name for the subchannel */
+	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);
+
+	/*
+	 * The first subchannel that is not-operational (ccode==3)
+	 *  indicates that there aren't any more devices available.
+	 */
+	sch->irq = irq;
+	ccode = stsch (irq, &sch->schib);
+	if (ccode)
+		return -ENXIO;
+
+	/* Copy subchannel type from path management control word. */
+	sch->st = sch->schib.pmcw.st;
+
+	/*
+	 * ... just being curious we check for non I/O subchannels
+	 */
+	if (sch->st != 0) {
+		CIO_DEBUG(KERN_INFO, 0,
+			  "Subchannel %04X reports "
+			  "non-I/O subchannel type %04X\n",
+			  sch->irq, sch->st);
+		/* We stop here for non-io subchannels. */
+		return sch->st;
+	}
+
+	/* Initialization for io subchannels. */
+	if (!sch->schib.pmcw.dnv)
+		/* io subchannel but device number is invalid. */
+		return -ENODEV;
+
+	/* Devno is valid. */
+	if (is_blacklisted (sch->schib.pmcw.dev)) {
+		/*
+		 * This device must not be known to Linux. So we simply
+		 * say that there is no device and return ENODEV.
+		 */
+		CIO_MSG_EVENT(0, "Blacklisted device detected "
+			      "at devno %04X\n", sch->schib.pmcw.dev);
+		return -ENODEV;
+	}
+	/* Start with all paths operational, then let chsc restrict. */
+	sch->opm = 0xff;
+	chsc_validate_chpids(sch);
+	/* lpm = installed & available & operational & allowed paths. */
+	sch->lpm = sch->schib.pmcw.pim &
+		sch->schib.pmcw.pam &
+		sch->schib.pmcw.pom &
+		sch->opm;
+
+	CIO_DEBUG(KERN_INFO, 0,
+		  "Detected device %04X on subchannel %04X"
+		  " - PIM = %02X, PAM = %02X, POM = %02X\n",
+		  sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim,
+		  sch->schib.pmcw.pam, sch->schib.pmcw.pom);
+
+	/*
+	 * We now have to initially ...
+	 *  ... set "interruption subclass"
+	 *  ... enable "concurrent sense"
+	 *  ... enable "multipath mode" if more than one
+	 *	  CHPID is available. This is done regardless
+	 *	  whether multiple paths are available for us.
+	 */
+	sch->schib.pmcw.isc = 3;	/* could be smth. else */
+	sch->schib.pmcw.csense = 1;	/* concurrent sense */
+	sch->schib.pmcw.ena = 0;
+	/* lpm has more than one bit set <=> more than one path. */
+	if ((sch->lpm & (sch->lpm - 1)) != 0)
+		sch->schib.pmcw.mp = 1;	/* multipath mode */
+	return 0;
+}
+
+/*
+ * do_IRQ() handles all normal I/O device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ *
+ */
+void
+do_IRQ (struct pt_regs *regs)
+{
+	/*
+	 * Top-level I/O interrupt handler: drains all pending I/O
+	 * interrupts from the lowcore, dispatching each to its
+	 * subchannel's driver irq callback under the subchannel lock.
+	 */
+	struct tpi_info *tpi_info;
+	struct subchannel *sch;
+	struct irb *irb;
+
+	irq_enter ();
+	asm volatile ("mc 0,0");
+	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
+		/**
+		 * Make sure that the i/o interrupt did not "overtake"
+		 * the last HZ timer interrupt.
+		 */
+		account_ticks(regs);
+	/*
+	 * Get interrupt information from lowcore
+	 */
+	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
+	irb = (struct irb *) __LC_IRB;
+	do {
+		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
+		/*
+		 * Non I/O-subchannel thin interrupts are processed differently
+		 */
+		if (tpi_info->adapter_IO == 1 &&
+		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
+			do_adapter_IO();
+			continue;
+		}
+		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
+		if (sch)
+			spin_lock(&sch->lock);
+		/* Store interrupt response block to lowcore. */
+		if (tsch (tpi_info->irq, irb) == 0 && sch) {
+			/* Keep subchannel information word up to date. */
+			memcpy (&sch->schib.scsw, &irb->scsw,
+				sizeof (irb->scsw));
+			/* Call interrupt handler if there is one. */
+			if (sch->driver && sch->driver->irq)
+				sch->driver->irq(&sch->dev);
+		}
+		if (sch)
+			spin_unlock(&sch->lock);
+		/*
+		 * Are more interrupts pending?
+		 * If so, the tpi instruction will update the lowcore
+		 * to hold the info for the next interrupt.
+		 * We don't do this for VM because a tpi drops the cpu
+		 * out of the sie which costs more cycles than it saves.
+		 */
+	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
+	irq_exit ();
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+static struct subchannel console_subchannel;
+static int console_subchannel_in_use;
+
+/*
+ * busy wait for the next interrupt on the console
+ */
+void
+wait_cons_dev (void)
+{
+	/*
+	 * Busy-wait (polling via cio_tpi) until the console subchannel
+	 * has no activity left.  Temporarily restricts interruption
+	 * subclasses to isc 7 (the console) and restores cr6 afterwards.
+	 * Called with the console subchannel lock held.
+	 */
+	unsigned long cr6      __attribute__ ((aligned (8)));
+	unsigned long save_cr6 __attribute__ ((aligned (8)));
+
+	/* 
+	 * before entering the spinlock we may already have
+	 * processed the interrupt on a different CPU...
+	 */
+	if (!console_subchannel_in_use)
+		return;
+
+	/* disable all but isc 7 (console device) */
+	__ctl_store (save_cr6, 6, 6);
+	cr6 = 0x01000000;
+	__ctl_load (cr6, 6, 6);
+
+	do {
+		spin_unlock(&console_subchannel.lock);
+		if (!cio_tpi())
+			cpu_relax();
+		spin_lock(&console_subchannel.lock);
+	} while (console_subchannel.schib.scsw.actl != 0);
+	/*
+	 * restore previous isc value
+	 */
+	__ctl_load (save_cr6, 6, 6);
+}
+
+static int
+cio_console_irq(void)
+{
+	/*
+	 * Determine the subchannel number of the console device, either
+	 * directly from console_irq (VM) or by scanning for the known
+	 * console_devno.  Returns the irq or -1 if no console was found.
+	 */
+	int irq;
+	
+	if (console_irq != -1) {
+		/* VM provided us with the irq number of the console. */
+		if (stsch(console_irq, &console_subchannel.schib) != 0 ||
+		    !console_subchannel.schib.pmcw.dnv)
+			return -1;
+		console_devno = console_subchannel.schib.pmcw.dev;
+	} else if (console_devno != -1) {
+		/* At least the console device number is known. */
+		for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+			if (stsch(irq, &console_subchannel.schib) != 0)
+				break;
+			if (console_subchannel.schib.pmcw.dnv &&
+			    console_subchannel.schib.pmcw.dev ==
+			    console_devno) {
+				console_irq = irq;
+				break;
+			}
+		}
+		if (console_irq == -1)
+			return -1;
+	} else {
+		/* unlike in 2.4, we cannot autoprobe here, since
+		 * the channel subsystem is not fully initialized.
+		 * With some luck, the HWC console can take over */
+		printk(KERN_WARNING "No ccw console found!\n");
+		return -1;
+	}
+	return console_irq;
+}
+
+struct subchannel *
+cio_probe_console(void)
+{
+	/*
+	 * Locate, validate and enable the console subchannel (isc 7).
+	 * Only one user at a time (guarded by console_subchannel_in_use).
+	 * Returns the subchannel or an ERR_PTR on failure.
+	 */
+	int irq, ret;
+
+	if (xchg(&console_subchannel_in_use, 1) != 0)
+		return ERR_PTR(-EBUSY);
+	irq = cio_console_irq();
+	if (irq == -1) {
+		console_subchannel_in_use = 0;
+		return ERR_PTR(-ENODEV);
+	}
+	memset(&console_subchannel, 0, sizeof(struct subchannel));
+	ret = cio_validate_subchannel(&console_subchannel, irq);
+	if (ret) {
+		console_subchannel_in_use = 0;
+		return ERR_PTR(-ENODEV);
+	}
+
+	/*
+	 * enable console I/O-interrupt subclass 7
+	 */
+	ctl_set_bit(6, 24);
+	console_subchannel.schib.pmcw.isc = 7;
+	console_subchannel.schib.pmcw.intparm =
+		(__u32)(unsigned long)&console_subchannel;
+	ret = cio_modify(&console_subchannel);
+	if (ret) {
+		console_subchannel_in_use = 0;
+		return ERR_PTR(ret);
+	}
+	return &console_subchannel;
+}
+
+void
+cio_release_console(void)
+{
+	/* Undo cio_probe_console(): clear the interruption parameter,
+	 * disable isc 7 and mark the console subchannel unused. */
+	console_subchannel.schib.pmcw.intparm = 0;
+	cio_modify(&console_subchannel);
+	ctl_clear_bit(6, 24);
+	console_subchannel_in_use = 0;
+}
+
+/* Bah... hack to catch console special sausages. */
+int
+cio_is_console(int irq)
+{
+	/* Tell whether @irq is the subchannel of the active console. */
+	return console_subchannel_in_use &&
+	       irq == console_subchannel.irq;
+}
+
+struct subchannel *
+cio_get_console_subchannel(void)
+{
+	/* Return the console subchannel, or NULL when the console is
+	 * not in use.  (Use NULL, not integer 0, for a pointer.) */
+	if (!console_subchannel_in_use)
+		return NULL;
+	return &console_subchannel;
+}
+
+#endif
+static inline int
+__disable_subchannel_easy(unsigned int schid, struct schib *schib)
+{
+	/*
+	 * Best-effort disable of a subchannel for shutdown/reipl:
+	 * clear the enable bit via msch, verify with stsch, retry
+	 * three times.  Returns 0, -ENODEV or -EBUSY.
+	 */
+	int retry, cc;
+
+	cc = 0;
+	for (retry=0;retry<3;retry++) {
+		schib->pmcw.ena = 0;
+		cc = msch(schid, schib);
+		if (cc)
+			return (cc==3?-ENODEV:-EBUSY);
+		stsch(schid, schib);
+		if (!schib->pmcw.ena)
+			return 0;
+	}
+	return -EBUSY; /* uhm... */
+}
+
+static inline int
+__clear_subchannel_easy(unsigned int schid)
+{
+	/*
+	 * Best-effort clear of a subchannel for shutdown/reipl: issue
+	 * csch, then poll (up to 20 x 100us) for the resulting
+	 * interrupt and consume it with tsch.  Returns 0, -ENODEV or
+	 * -EBUSY if the interrupt never arrived.
+	 */
+	int retry;
+
+	if (csch(schid))
+		return -ENODEV;
+	for (retry=0;retry<20;retry++) {
+		struct tpi_info ti;
+
+		if (tpi(&ti)) {
+			tsch(schid, (struct irb *)__LC_IRB);
+			return 0;
+		}
+		udelay(100);
+	}
+	return -EBUSY;
+}
+
+extern void do_reipl(unsigned long devno);
+
+/* Clear all subchannels. */
+void
+clear_all_subchannels(void)
+{
+	/*
+	 * Disable (and if necessary clear) every enabled subchannel
+	 * with interrupts off, so the machine is quiescent before
+	 * re-ipl.  Stops at the first non-existing subchannel.
+	 */
+	unsigned int schid;
+
+	local_irq_disable();
+	for (schid=0;schid<=highest_subchannel;schid++) {
+		struct schib schib;
+		if (stsch(schid, &schib))
+			break; /* break out of the loop */
+		if (!schib.pmcw.ena)
+			continue;
+		switch(__disable_subchannel_easy(schid, &schib)) {
+		case 0:
+		case -ENODEV:
+			break;
+		default: /* -EBUSY */
+			if (__clear_subchannel_easy(schid))
+				break; /* give up... jump out of switch */
+			stsch(schid, &schib);
+			__disable_subchannel_easy(schid, &schib);
+		}
+	}
+}
+
+/* Make sure all subchannels are quiet before we re-ipl an lpar. */
+void
+reipl(unsigned long devno)
+{
+	/* Quiesce all subchannels, then re-ipl the lpar from @devno. */
+	clear_all_subchannels();
+	do_reipl(devno);
+}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
new file mode 100644
index 000000000000..c50a9da420a9
--- /dev/null
+++ b/drivers/s390/cio/cio.h
@@ -0,0 +1,143 @@
+#ifndef S390_CIO_H
+#define S390_CIO_H
+
+/*
+ * where we put the ssd info
+ */
+struct ssd_info {
+ __u8 valid:1;
+ __u8 type:7; /* subchannel type */
+ __u8 chpid[8]; /* chpids */
+ __u16 fla[8]; /* full link addresses */
+} __attribute__ ((packed));
+
+/*
+ * path management control word
+ */
+struct pmcw {
+ __u32 intparm; /* interruption parameter */
+ __u32 qf : 1; /* qdio facility */
+ __u32 res0 : 1; /* reserved zeros */
+ __u32 isc : 3; /* interruption sublass */
+ __u32 res5 : 3; /* reserved zeros */
+ __u32 ena : 1; /* enabled */
+ __u32 lm : 2; /* limit mode */
+ __u32 mme : 2; /* measurement-mode enable */
+ __u32 mp : 1; /* multipath mode */
+ __u32 tf : 1; /* timing facility */
+ __u32 dnv : 1; /* device number valid */
+ __u32 dev : 16; /* device number */
+ __u8 lpm; /* logical path mask */
+ __u8 pnom; /* path not operational mask */
+ __u8 lpum; /* last path used mask */
+ __u8 pim; /* path installed mask */
+ __u16 mbi; /* measurement-block index */
+ __u8 pom; /* path operational mask */
+ __u8 pam; /* path available mask */
+ __u8 chpid[8]; /* CHPID 0-7 (if available) */
+ __u32 unused1 : 8; /* reserved zeros */
+ __u32 st : 3; /* subchannel type */
+ __u32 unused2 : 18; /* reserved zeros */
+ __u32 mbfc : 1; /* measurement block format control */
+ __u32 xmwme : 1; /* extended measurement word mode enable */
+ __u32 csense : 1; /* concurrent sense; can be enabled ...*/
+ /* ... per MSCH, however, if facility */
+ /* ... is not installed, this results */
+ /* ... in an operand exception. */
+} __attribute__ ((packed));
+
+/*
+ * subchannel information block
+ */
+struct schib {
+ struct pmcw pmcw; /* path management control word */
+ struct scsw scsw; /* subchannel status word */
+ __u64 mba; /* measurement block address */
+ __u8 mda[4]; /* model dependent area */
+} __attribute__ ((packed,aligned(4)));
+
+/*
+ * operation request block
+ */
+struct orb {
+ __u32 intparm; /* interruption parameter */
+ __u32 key : 4; /* flags, like key, suspend control, etc. */
+ __u32 spnd : 1; /* suspend control */
+ __u32 res1 : 1; /* reserved */
+ __u32 mod : 1; /* modification control */
+ __u32 sync : 1; /* synchronize control */
+ __u32 fmt : 1; /* format control */
+ __u32 pfch : 1; /* prefetch control */
+ __u32 isic : 1; /* initial-status-interruption control */
+ __u32 alcc : 1; /* address-limit-checking control */
+ __u32 ssic : 1; /* suppress-suspended-interr. control */
+ __u32 res2 : 1; /* reserved */
+ __u32 c64 : 1; /* IDAW/QDIO 64 bit control */
+ __u32 i2k : 1; /* IDAW 2/4kB block size control */
+ __u32 lpm : 8; /* logical path mask */
+ __u32 ils : 1; /* incorrect length */
+ __u32 zero : 6; /* reserved zeros */
+ __u32 orbx : 1; /* ORB extension control */
+ __u32 cpa; /* channel program address */
+} __attribute__ ((packed,aligned(4)));
+
+/* subchannel data structure used by I/O subroutines */
+struct subchannel {
+	unsigned int irq;	/* aka. subchannel number */
+	spinlock_t lock;	/* subchannel lock */
+
+	enum {
+		SUBCHANNEL_TYPE_IO = 0,
+		SUBCHANNEL_TYPE_CHSC = 1,
+		SUBCHANNEL_TYPE_MESSAGE = 2,
+		SUBCHANNEL_TYPE_ADM = 3,
+	} st;			/* subchannel type */
+
+	/* per-subchannel I/O options, presumably mirrored into the orb
+	 * flags by cio_set_options() - TODO confirm against cio.c */
+	struct {
+		unsigned int suspend:1; /* allow suspend */
+		unsigned int prefetch:1;/* deny prefetch */
+		unsigned int inter:1;   /* suppress intermediate interrupts */
+	} __attribute__ ((packed)) options;
+
+	__u8 vpm;		/* verified path mask */
+	__u8 lpm;		/* logical path mask */
+	__u8 opm;               /* operational path mask */
+	struct schib schib;	/* subchannel information block */
+	struct orb orb;		/* operation request block */
+	struct ccw1 sense_ccw;	/* static ccw for sense command */
+	struct ssd_info ssd_info;	/* subchannel description */
+	struct device dev;	/* entry in device tree */
+	struct css_driver *driver;
+} __attribute__ ((aligned(8)));
+
+#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
+
+#define to_subchannel(n) container_of(n, struct subchannel, dev)
+
+extern int cio_validate_subchannel (struct subchannel *, unsigned int);
+extern int cio_enable_subchannel (struct subchannel *, unsigned int);
+extern int cio_disable_subchannel (struct subchannel *);
+extern int cio_cancel (struct subchannel *);
+extern int cio_clear (struct subchannel *);
+extern int cio_resume (struct subchannel *);
+extern int cio_halt (struct subchannel *);
+extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
+extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
+extern int cio_set_options (struct subchannel *, int);
+extern int cio_get_options (struct subchannel *);
+extern int cio_modify (struct subchannel *);
+/* Use with care. */
+#ifdef CONFIG_CCW_CONSOLE
+extern struct subchannel *cio_probe_console(void);
+extern void cio_release_console(void);
+extern int cio_is_console(int irq);
+extern struct subchannel *cio_get_console_subchannel(void);
+#else
+#define cio_is_console(irq) 0
+#define cio_get_console_subchannel() NULL
+#endif
+
+/* when nonzero, CIO_DEBUG() also prints to the console log */
+extern int cio_show_msg;
+
+#endif
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
new file mode 100644
index 000000000000..6af8b27d366b
--- /dev/null
+++ b/drivers/s390/cio/cio_debug.h
@@ -0,0 +1,32 @@
+#ifndef CIO_DEBUG_H
+#define CIO_DEBUG_H
+
+#include <asm/debug.h>
+
+/* Log a fixed string to the trace debug feature at importance @imp. */
+#define CIO_TRACE_EVENT(imp, txt) do { \
+		debug_text_event(cio_debug_trace_id, imp, txt); \
+	} while (0)
+
+/* printf-style message into the message debug feature. */
+#define CIO_MSG_EVENT(imp, args...) do { \
+		debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
+	} while (0)
+
+/* printf-style message into the channel-report-word debug feature. */
+#define CIO_CRW_EVENT(imp, args...) do { \
+		debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
+	} while (0)
+
+/* Raw hex dump into the trace debug feature. */
+#define CIO_HEX_EVENT(imp, args...) do { \
+                debug_event(cio_debug_trace_id, imp, ##args); \
+	} while (0)
+
+/* Log to the message debug feature and, if cio_show_msg is set,
+ * additionally printk the same text. */
+#define CIO_DEBUG(printk_level,event_level,msg...) ({ \
+	if (cio_show_msg) printk(printk_level msg); \
+	CIO_MSG_EVENT (event_level, msg); \
+})
+
+/* for use of debug feature */
+extern debug_info_t *cio_debug_msg_id;
+extern debug_info_t *cio_debug_trace_id;
+extern debug_info_t *cio_debug_crw_id;
+
+#endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
new file mode 100644
index 000000000000..49def26ba383
--- /dev/null
+++ b/drivers/s390/cio/cmf.c
@@ -0,0 +1,1042 @@
+/*
+ * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $)
+ *
+ * Linux on zSeries Channel Measurement Facility support
+ *
+ * Copyright 2000,2003 IBM Corporation
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cmb.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+/* parameter to enable cmf during boot, possible uses are:
+ * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
+ * used on any subchannel
+ * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
+ * <num> subchannel, where <num> is an integer
+ * between 1 and 65535, default is 1024
+ */
+#define ARGSTRING "s390cmf"
+
+/* indices for READCMB; used as the "index" argument to cmf_read() */
+enum cmb_index {
+ /* basic and extended format: */
+	cmb_ssch_rsch_count,
+	cmb_sample_count,
+	cmb_device_connect_time,
+	cmb_function_pending_time,
+	cmb_device_disconnect_time,
+	cmb_control_unit_queuing_time,
+	cmb_device_active_only_time,
+ /* extended format only: */
+	cmb_device_busy_time,
+	cmb_initial_command_response_time,
+};
+
+/**
+ * enum cmb_format - types of supported measurement block formats
+ *
+ * @CMF_BASIC: traditional channel measurement blocks supported
+ * by all machines that we run on
+ * @CMF_EXTENDED: improved format that was introduced with the z990
+ * machine
+ * @CMF_AUTODETECT: default: use extended format when running on a z990
+ * or later machine, otherwise fall back to basic format
+ **/
+enum cmb_format {
+ CMF_BASIC,
+ CMF_EXTENDED,
+ CMF_AUTODETECT = -1,
+};
+/**
+ * format - actual format for all measurement blocks
+ *
+ * The format module parameter can be set to a value of 0 (zero)
+ * or 1, indicating basic or extended format as described for
+ * enum cmb_format.
+ */
+static int format = CMF_AUTODETECT;
+module_param(format, bool, 0444);
+
+/**
+ * struct cmb_operations - functions to use depending on cmb_format
+ *
+ * all these functions operate on a struct cmf_device. There is only
+ * one instance of struct cmb_operations because all cmf_device
+ * objects are guaranteed to be of the same type.
+ *
+ * @alloc: allocate memory for a channel measurement block,
+ * either with the help of a special pool or with kmalloc
+ * @free: free memory allocated with @alloc
+ * @set: enable or disable measurement
+ * @readall: read a measurement block in a common format
+ * @reset: clear the data in the associated measurement block and
+ * reset its time stamp
+ */
+struct cmb_operations {
+ int (*alloc) (struct ccw_device*);
+ void(*free) (struct ccw_device*);
+ int (*set) (struct ccw_device*, u32);
+ u64 (*read) (struct ccw_device*, int);
+ int (*readall)(struct ccw_device*, struct cmbdata *);
+ void (*reset) (struct ccw_device*);
+
+ struct attribute_group *attr_group;
+};
+static struct cmb_operations *cmbops;
+
+/*
+ * The hardware accumulates times in its own unit; our interface is in
+ * nanoseconds.  One hardware unit corresponds to 128000 ns.
+ */
+static inline u64 time_to_nsec(u32 value)
+{
+	u64 ns = value;
+
+	ns *= 128000ull;
+	return ns;
+}
+
+/*
+ * Users are usually interested in average times,
+ * not accumulated time.
+ * This also helps us with atomicity problems
+ * when reading sinlge values.
+ */
+static inline u64 time_to_avg_nsec(u32 value, u32 count)
+{
+ u64 ret;
+
+ /* no samples yet, avoid division by 0 */
+ if (count == 0)
+ return 0;
+
+ /* value comes in units of 128 µsec */
+ ret = time_to_nsec(value);
+ do_div(ret, count);
+
+ return ret;
+}
+
+/* activate or deactivate the channel monitor. When area is NULL,
+ * the monitor is deactivated. The channel monitor needs to
+ * be active in order to measure subchannels, which also need
+ * to be enabled. */
+static inline void
+cmf_activate(void *area, unsigned int onoff)
+{
+	/* schm takes its operands in gprs 1 and 2; bind the locals to
+	 * those registers explicitly */
+	register void * __gpr2 asm("2");
+	register long __gpr1 asm("1");
+
+	__gpr2 = area;			/* measurement area, or NULL */
+	__gpr1 = onoff ? 2 : 0;		/* 2 == activate, 0 == deactivate */
+	/* activate channel measurement */
+	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
+}
+
+/*
+ * Program the measurement fields (mme, mbfc, block address/index) of
+ * the subchannel belonging to @cdev via msch.
+ *
+ * Returns 0 on success, -EBUSY if the subchannel has I/O or status
+ * pending, -ENODEV if it vanished, -EINVAL on an msch exception or if
+ * the update never sticks.  Caller holds the ccwlock.
+ */
+static int
+set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
+{
+	int ret;
+	int retry;
+	struct subchannel *sch;
+	struct schib *schib;
+
+	sch = to_subchannel(cdev->dev.parent);
+	schib = &sch->schib;
+	/* msch can silently fail, so do it again if necessary */
+	for (retry = 0; retry < 3; retry++) {
+		/* prepare schib */
+		stsch(sch->irq, schib);
+		schib->pmcw.mme  = mme;
+		schib->pmcw.mbfc = mbfc;
+		/* address can be either a block address or a block index */
+		if (mbfc)
+			schib->mba = address;
+		else
+			schib->pmcw.mbi = address;
+
+		/* try to submit it */
+		switch(ret = msch_err(sch->irq, schib)) {
+		case 0:
+			break;
+		case 1:
+		case 2: /* in I/O or status pending */
+			ret = -EBUSY;
+			break;
+		case 3: /* subchannel is no longer valid */
+			ret = -ENODEV;
+			break;
+		default: /* msch caught an exception */
+			ret = -EINVAL;
+			break;
+		}
+		stsch(sch->irq, schib); /* restore the schib */
+
+		if (ret)
+			break;
+
+		/* check if it worked */
+		if (schib->pmcw.mme  == mme &&
+		    schib->pmcw.mbfc == mbfc &&
+		    (mbfc ? (schib->mba == address)
+			  : (schib->pmcw.mbi == address)))
+			return 0;
+
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* On-stack context handed from set_schib_wait() to retry_set_schib()
+ * through cdev->private->cmb_wait while an msch retry is pending. */
+struct set_schib_struct {
+	u32 mme;		/* measurement-mode enable value */
+	int mbfc;		/* measurement block format control */
+	unsigned long address;	/* block address (mbfc) or index */
+	wait_queue_head_t wait;	/* the requester sleeps here */
+	int ret;		/* 1 while pending, else final result */
+};
+
+/*
+ * Like set_schib(), but if the subchannel is busy, park the request in
+ * cdev->private->cmb_wait and sleep until the device fsm calls
+ * retry_set_schib() (or we are interrupted by a signal).
+ */
+static int set_schib_wait(struct ccw_device *cdev, u32 mme,
+				int mbfc, unsigned long address)
+{
+	struct set_schib_struct s = {
+		.mme = mme,
+		.mbfc = mbfc,
+		.address = address,
+		.wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
+	};
+
+	spin_lock_irq(cdev->ccwlock);
+	s.ret = set_schib(cdev, mme, mbfc, address);
+	if (s.ret != -EBUSY) {
+		goto out_nowait;
+	}
+
+	if (cdev->private->state != DEV_STATE_ONLINE) {
+		s.ret = -EBUSY;
+		/* if the device is not online, don't even try again */
+		goto out_nowait;
+	}
+	cdev->private->state = DEV_STATE_CMFCHANGE;
+	cdev->private->cmb_wait = &s;
+	s.ret = 1;	/* sentinel: retry still pending */
+
+	spin_unlock_irq(cdev->ccwlock);
+	if (wait_event_interruptible(s.wait, s.ret != 1)) {
+		/* interrupted by a signal - withdraw the request if the
+		 * fsm has not completed it in the meantime */
+		spin_lock_irq(cdev->ccwlock);
+		if (s.ret == 1) {
+			s.ret = -ERESTARTSYS;
+			cdev->private->cmb_wait = NULL;
+			if (cdev->private->state == DEV_STATE_CMFCHANGE)
+				cdev->private->state = DEV_STATE_ONLINE;
+		}
+		spin_unlock_irq(cdev->ccwlock);
+	}
+	return s.ret;
+
+out_nowait:
+	spin_unlock_irq(cdev->ccwlock);
+	return s.ret;
+}
+
+/*
+ * Called by the device fsm once the subchannel is idle again: perform
+ * the deferred set_schib() and wake the sleeper in set_schib_wait().
+ */
+void retry_set_schib(struct ccw_device *cdev)
+{
+	struct set_schib_struct *s;
+
+	s = cdev->private->cmb_wait;
+	cdev->private->cmb_wait = NULL;
+	if (!s) {
+		WARN_ON(1);
+		return;
+	}
+	s->ret = set_schib(cdev, s->mme, s->mbfc, s->address);
+	wake_up(&s->wait);
+}
+
+/**
+ * struct cmb_area - container for global cmb data
+ *
+ * @mem: pointer to CMBs (only in basic measurement mode)
+ * @list: contains a linked list of all subchannels
+ * @lock: protect concurrent access to @mem and @list
+ */
+struct cmb_area {
+ struct cmb *mem;
+ struct list_head list;
+ int num_channels;
+ spinlock_t lock;
+};
+
+static struct cmb_area cmb_area = {
+ .lock = SPIN_LOCK_UNLOCKED,
+ .list = LIST_HEAD_INIT(cmb_area.list),
+ .num_channels = 1024,
+};
+
+
+/* ****** old style CMB handling ********/
+
+/** int maxchannels
+ *
+ * Basic channel measurement blocks are allocated in one contiguous
+ * block of memory, which can not be moved as long as any channel
+ * is active. Therefore, a maximum number of subchannels needs to
+ * be defined somewhere. This is a module parameter, defaulting to
+ * a reasonable value of 1024, or 32 kb of memory.
+ * Current kernels don't allow kmalloc with more than 128kb, so the
+ * maximum is 4096
+ */
+
+module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
+
+/**
+ * struct cmb - basic channel measurement block
+ *
+ * cmb as used by the hardware the fields are described in z/Architecture
+ * Principles of Operation, chapter 17.
+ * The area to be a contiguous array and may not be reallocated or freed.
+ * Only one cmb area can be present in the system.
+ */
+struct cmb {
+ u16 ssch_rsch_count;
+ u16 sample_count;
+ u32 device_connect_time;
+ u32 function_pending_time;
+ u32 device_disconnect_time;
+ u32 control_unit_queuing_time;
+ u32 device_active_only_time;
+ u32 reserved[2];
+};
+
+/* insert a single device into the cmb_area list
+ * called with cmb_area.lock held from alloc_cmb
+ *
+ * Picks the first unused slot of cmb_area.mem and links the device
+ * into cmb_area.list so that the list stays sorted by cmb pointer.
+ * Returns -EBUSY if the device already has a cmb, -ENOMEM if all
+ * slots are taken.
+ */
+static inline int
+alloc_cmb_single (struct ccw_device *cdev)
+{
+	struct cmb *cmb;
+	struct ccw_device_private *node;
+	int ret;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (!list_empty(&cdev->private->cmb_list)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* find first unused cmb in cmb_area.mem.
+	 * this is a little tricky: cmb_area.list
+	 * remains sorted by ->cmb pointers */
+	cmb = cmb_area.mem;
+	list_for_each_entry(node, &cmb_area.list, cmb_list) {
+		/* found a gap before this node's slot - take it */
+		if ((struct cmb*)node->cmb > cmb)
+			break;
+		cmb++;
+	}
+	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* insert new cmb */
+	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
+	cdev->private->cmb = cmb;
+	ret = 0;
+out:
+	spin_unlock_irq(cdev->ccwlock);
+	return ret;
+}
+
+/*
+ * Allocate a basic-format cmb for @cdev.  Lazily sets up the global
+ * contiguous cmb area (and activates measurement on it) on first use,
+ * then delegates slot assignment to alloc_cmb_single().
+ */
+static int
+alloc_cmb (struct ccw_device *cdev)
+{
+	int ret;
+	struct cmb *mem;
+	ssize_t size;
+
+	spin_lock(&cmb_area.lock);
+
+	if (!cmb_area.mem) {
+		/* there is no user yet, so we need a new area */
+		size = sizeof(struct cmb) * cmb_area.num_channels;
+		WARN_ON(!list_empty(&cmb_area.list));
+
+		/* drop the lock while sleeping in the allocator; another
+		 * thread may race us and set up the area first */
+		spin_unlock(&cmb_area.lock);
+		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
+				 get_order(size));
+		spin_lock(&cmb_area.lock);
+
+		if (cmb_area.mem) {
+			/* ok, another thread was faster */
+			free_pages((unsigned long)mem, get_order(size));
+		} else if (!mem) {
+			/* no luck */
+			ret = -ENOMEM;
+			goto out;
+		} else {
+			/* everything ok */
+			memset(mem, 0, size);
+			cmb_area.mem = mem;
+			cmf_activate(cmb_area.mem, 1);
+		}
+	}
+
+	/* do the actual allocation */
+	ret = alloc_cmb_single(cdev);
+out:
+	spin_unlock(&cmb_area.lock);
+
+	return ret;
+}
+
+/*
+ * Release the basic-format cmb slot of @cdev.  When the last user is
+ * gone, deactivate measurement and free the whole cmb area.
+ * Lock order: cmb_area.lock outside, ccwlock inside (matches
+ * alloc_cmb/alloc_cmb_single).
+ */
+static void
+free_cmb(struct ccw_device *cdev)
+{
+	struct ccw_device_private *priv;
+
+	priv = cdev->private;
+
+	spin_lock(&cmb_area.lock);
+	spin_lock_irq(cdev->ccwlock);
+
+	if (list_empty(&priv->cmb_list)) {
+		/* already freed */
+		goto out;
+	}
+
+	priv->cmb = NULL;
+	list_del_init(&priv->cmb_list);
+
+	if (list_empty(&cmb_area.list)) {
+		/* last user: deactivate measurement and drop the area */
+		ssize_t size;
+		size = sizeof(struct cmb) * cmb_area.num_channels;
+		cmf_activate(NULL, 0);
+		free_pages((unsigned long)cmb_area.mem, get_order(size));
+		cmb_area.mem = NULL;
+	}
+out:
+	spin_unlock_irq(cdev->ccwlock);
+	spin_unlock(&cmb_area.lock);
+}
+
+/*
+ * Enable (mme != 0) or disable measurement for a basic-format cmb.
+ * The hardware is given the slot index of the device's cmb within the
+ * global area.
+ */
+static int
+set_cmb(struct ccw_device *cdev, u32 mme)
+{
+	u16 index = 0;
+
+	if (!cdev->private->cmb)
+		return -EINVAL;
+
+	if (mme)
+		index = (struct cmb *)cdev->private->cmb - cmb_area.mem;
+
+	return set_schib_wait(cdev, mme, 0, index);
+}
+
+/*
+ * Read one value (see enum cmb_index) from a basic-format cmb.
+ * Counters are returned raw; time values are averaged per sample in
+ * nanoseconds.  Returns 0 if no cmb is allocated or the index is
+ * unknown.
+ */
+static u64
+read_cmb (struct ccw_device *cdev, int index)
+{
+	/* yes, we have to put it on the stack
+	 * because the cmb must only be accessed
+	 * atomically, e.g. with mvc */
+	struct cmb cmb;
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		return 0;
+	}
+
+	cmb = *(struct cmb*)cdev->private->cmb;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+	switch (index) {
+	case cmb_ssch_rsch_count:
+		return cmb.ssch_rsch_count;
+	case cmb_sample_count:
+		return cmb.sample_count;
+	case cmb_device_connect_time:
+		val = cmb.device_connect_time;
+		break;
+	case cmb_function_pending_time:
+		val = cmb.function_pending_time;
+		break;
+	case cmb_device_disconnect_time:
+		val = cmb.device_disconnect_time;
+		break;
+	case cmb_control_unit_queuing_time:
+		val = cmb.control_unit_queuing_time;
+		break;
+	case cmb_device_active_only_time:
+		val = cmb.device_active_only_time;
+		break;
+	default:
+		return 0;
+	}
+	return time_to_avg_nsec(val, cmb.sample_count);
+}
+
+/*
+ * Copy the whole basic-format cmb of @cdev into the common cmbdata
+ * layout, converting times to nanoseconds.  Returns -ENODEV if no cmb
+ * is allocated.
+ */
+static int
+readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
+{
+	/* yes, we have to put it on the stack
+	 * because the cmb must only be accessed
+	 * atomically, e.g. with mvc */
+	struct cmb cmb;
+	unsigned long flags;
+	u64 time;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		return -ENODEV;
+	}
+
+	cmb = *(struct cmb*)cdev->private->cmb;
+	time = get_clock() - cdev->private->cmb_start_time;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+	memset(data, 0, sizeof(struct cmbdata));
+
+	/* we only know values before device_busy_time */
+	data->size = offsetof(struct cmbdata, device_busy_time);
+
+	/* convert to nanoseconds */
+	data->elapsed_time = (time * 1000) >> 12;
+
+	/* copy data to new structure */
+	data->ssch_rsch_count = cmb.ssch_rsch_count;
+	data->sample_count = cmb.sample_count;
+
+	/* time fields are converted to nanoseconds while copying */
+	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
+	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+	data->control_unit_queuing_time
+		= time_to_nsec(cmb.control_unit_queuing_time);
+	data->device_active_only_time
+		= time_to_nsec(cmb.device_active_only_time);
+
+	return 0;
+}
+
+/* Zero the basic-format cmb of @cdev and restart its time stamp. */
+static void
+reset_cmb(struct ccw_device *cdev)
+{
+	struct cmb *mem;
+
+	spin_lock_irq(cdev->ccwlock);
+	mem = cdev->private->cmb;
+	if (mem)
+		memset(mem, 0, sizeof(*mem));
+	cdev->private->cmb_start_time = get_clock();
+	spin_unlock_irq(cdev->ccwlock);
+}
+
+static struct attribute_group cmf_attr_group;
+
+static struct cmb_operations cmbops_basic = {
+ .alloc = alloc_cmb,
+ .free = free_cmb,
+ .set = set_cmb,
+ .read = read_cmb,
+ .readall = readall_cmb,
+ .reset = reset_cmb,
+ .attr_group = &cmf_attr_group,
+};
+
+/* ******** extended cmb handling ********/
+
+/**
+ * struct cmbe - extended channel measurement block
+ *
+ * cmb as used by the hardware, may be in any 64 bit physical location,
+ * the fields are described in z/Architecture Principles of Operation,
+ * third edition, chapter 17.
+ */
+struct cmbe {
+ u32 ssch_rsch_count;
+ u32 sample_count;
+ u32 device_connect_time;
+ u32 function_pending_time;
+ u32 device_disconnect_time;
+ u32 control_unit_queuing_time;
+ u32 device_active_only_time;
+ u32 device_busy_time;
+ u32 initial_command_response_time;
+ u32 reserved[7];
+};
+
+/* kmalloc only guarantees 8 byte alignment, but we need cmbe
+ * pointers to be naturally aligned. Make sure to allocate
+ * enough space for two cmbes.
+ *
+ * NOTE(review): the mask is not a plain power-of-two alignment; it
+ * only clears the bits between sizeof(long) and sizeof(struct cmbe).
+ * This appears to rely on kmalloc's minimum alignment for the low
+ * bits - confirm against the allocator guarantees. */
+static inline struct cmbe* cmbe_align(struct cmbe *c)
+{
+	unsigned long addr;
+	addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
+				   ~(sizeof (struct cmbe) - sizeof(long));
+	return (struct cmbe*)addr;
+}
+
+/*
+ * Allocate an extended-format cmb for @cdev (kmalloc of two cmbes so
+ * that cmbe_align() can pick a naturally aligned one), link the device
+ * into cmb_area.list and activate global measurement for the first
+ * user.  Returns -EBUSY if a cmb is already allocated.
+ */
+static int
+alloc_cmbe (struct ccw_device *cdev)
+{
+	struct cmbe *cmbe;
+	cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
+	if (!cmbe)
+		return -ENOMEM;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (cdev->private->cmb) {
+		/* raced with another allocation for this device */
+		kfree(cmbe);
+		spin_unlock_irq(cdev->ccwlock);
+		return -EBUSY;
+	}
+
+	cdev->private->cmb = cmbe;
+	spin_unlock_irq(cdev->ccwlock);
+
+	/* activate global measurement if this is the first channel */
+	spin_lock(&cmb_area.lock);
+	if (list_empty(&cmb_area.list))
+		cmf_activate(NULL, 1);
+	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
+	spin_unlock(&cmb_area.lock);
+
+	return 0;
+}
+
+/*
+ * Free the extended-format cmb of @cdev and deactivate global
+ * measurement when the last user is gone.
+ */
+static void
+free_cmbe (struct ccw_device *cdev)
+{
+	spin_lock_irq(cdev->ccwlock);
+	/* kfree(NULL) is a no-op, no need for a separate check */
+	kfree(cdev->private->cmb);
+	cdev->private->cmb = NULL;
+	spin_unlock_irq(cdev->ccwlock);
+
+	/* deactivate global measurement if this is the last channel */
+	spin_lock(&cmb_area.lock);
+	list_del_init(&cdev->private->cmb_list);
+	if (list_empty(&cmb_area.list))
+		cmf_activate(NULL, 0);
+	spin_unlock(&cmb_area.lock);
+}
+
+/*
+ * Enable (mme != 0) or disable measurement for an extended-format cmb.
+ * The hardware is given the aligned block address of the device's cmbe.
+ */
+static int
+set_cmbe(struct ccw_device *cdev, u32 mme)
+{
+	unsigned long mba = 0;
+
+	if (!cdev->private->cmb)
+		return -EINVAL;
+
+	if (mme)
+		mba = (unsigned long) cmbe_align(cdev->private->cmb);
+
+	return set_schib_wait(cdev, mme, 1, mba);
+}
+
+
+/*
+ * Read one value (see enum cmb_index) from an extended-format cmb.
+ * Counters are returned raw; time values are averaged per sample in
+ * nanoseconds.  Returns 0 if no cmb is allocated or the index is
+ * unknown.
+ *
+ * Made static for consistency with read_cmb(): it is only reachable
+ * through cmbops_extended.
+ */
+static u64
+read_cmbe (struct ccw_device *cdev, int index)
+{
+	/* yes, we have to put it on the stack
+	 * because the cmb must only be accessed
+	 * atomically, e.g. with mvc */
+	struct cmbe cmb;
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		return 0;
+	}
+
+	cmb = *cmbe_align(cdev->private->cmb);
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+	switch (index) {
+	case cmb_ssch_rsch_count:
+		return cmb.ssch_rsch_count;
+	case cmb_sample_count:
+		return cmb.sample_count;
+	case cmb_device_connect_time:
+		val = cmb.device_connect_time;
+		break;
+	case cmb_function_pending_time:
+		val = cmb.function_pending_time;
+		break;
+	case cmb_device_disconnect_time:
+		val = cmb.device_disconnect_time;
+		break;
+	case cmb_control_unit_queuing_time:
+		val = cmb.control_unit_queuing_time;
+		break;
+	case cmb_device_active_only_time:
+		val = cmb.device_active_only_time;
+		break;
+	case cmb_device_busy_time:
+		val = cmb.device_busy_time;
+		break;
+	case cmb_initial_command_response_time:
+		val = cmb.initial_command_response_time;
+		break;
+	default:
+		return 0;
+	}
+	return time_to_avg_nsec(val, cmb.sample_count);
+}
+
+/*
+ * Copy the whole extended-format cmb of @cdev into the common cmbdata
+ * layout, converting times to nanoseconds.  Returns -ENODEV if no cmb
+ * is allocated.
+ */
+static int
+readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
+{
+	/* yes, we have to put it on the stack
+	 * because the cmb must only be accessed
+	 * atomically, e.g. with mvc */
+	struct cmbe cmb;
+	unsigned long flags;
+	u64 time;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		return -ENODEV;
+	}
+
+	cmb = *cmbe_align(cdev->private->cmb);
+	time = get_clock() - cdev->private->cmb_start_time;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+	memset (data, 0, sizeof(struct cmbdata));
+
+	/* we only know values before device_busy_time
+	 * NOTE(review): the extended fields below (device_busy_time,
+	 * initial_command_response_time) ARE filled in, yet size still
+	 * stops before them - confirm whether size should cover the
+	 * whole structure here. */
+	data->size = offsetof(struct cmbdata, device_busy_time);
+
+	/* convert to nanoseconds */
+	data->elapsed_time = (time * 1000) >> 12;
+
+	/* copy data to new structure */
+	data->ssch_rsch_count = cmb.ssch_rsch_count;
+	data->sample_count = cmb.sample_count;
+
+	/* time fields are converted to nanoseconds while copying */
+	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
+	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+	data->control_unit_queuing_time
+		= time_to_nsec(cmb.control_unit_queuing_time);
+	data->device_active_only_time
+		= time_to_nsec(cmb.device_active_only_time);
+	data->device_busy_time = time_to_nsec(cmb.device_busy_time);
+	data->initial_command_response_time
+		= time_to_nsec(cmb.initial_command_response_time);
+
+	return 0;
+}
+
+/* Zero the extended-format cmb of @cdev and restart its time stamp. */
+static void
+reset_cmbe(struct ccw_device *cdev)
+{
+	spin_lock_irq(cdev->ccwlock);
+	/* Test the raw pointer before aligning: cmbe_align() applied to
+	 * a NULL pointer yields a small non-NULL address, so the old
+	 * "align first, then check" order could memset through a bogus
+	 * pointer when no cmb is allocated. */
+	if (cdev->private->cmb)
+		memset (cmbe_align(cdev->private->cmb), 0,
+			sizeof (struct cmbe));
+	cdev->private->cmb_start_time = get_clock();
+	spin_unlock_irq(cdev->ccwlock);
+}
+
+static struct attribute_group cmf_attr_group_ext;
+
+static struct cmb_operations cmbops_extended = {
+ .alloc = alloc_cmbe,
+ .free = free_cmbe,
+ .set = set_cmbe,
+ .read = read_cmbe,
+ .readall = readall_cmbe,
+ .reset = reset_cmbe,
+ .attr_group = &cmf_attr_group_ext,
+};
+
+
+/* Common sysfs show helper: print one cmf value as a decimal number. */
+static ssize_t
+cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
+{
+	unsigned long long value;
+
+	value = cmf_read(to_ccwdev(dev), idx);
+	return sprintf(buf, "%lld\n", value);
+}
+
+/*
+ * sysfs: average interval between samples since the cmb was last
+ * reset, in TOD clock units; -1 when no samples were taken yet.
+ */
+static ssize_t
+cmb_show_avg_sample_interval(struct device *dev, char *buf)
+{
+	struct ccw_device *cdev;
+	long interval;
+	unsigned long count;
+
+	cdev = to_ccwdev(dev);
+	interval  = get_clock() - cdev->private->cmb_start_time;
+	count = cmf_read(cdev, cmb_sample_count);
+	if (count)
+		interval /= count;
+	else
+		interval = -1;
+	return sprintf(buf, "%ld\n", interval);
+}
+
+/*
+ * sysfs: average channel utilization in 0.1 percent units, computed
+ * as (connect + pending + disconnect time) / elapsed time.
+ */
+static ssize_t
+cmb_show_avg_utilization(struct device *dev, char *buf)
+{
+	struct cmbdata data;
+	u64 utilization;
+	unsigned long t, u;
+	int ret;
+
+	ret = cmf_readall(to_ccwdev(dev), &data);
+	if (ret)
+		return ret;
+
+	utilization = data.device_connect_time +
+		      data.function_pending_time +
+		      data.device_disconnect_time;
+
+	/* shift to avoid long long division */
+	while (-1ul < (data.elapsed_time | utilization)) {
+		utilization >>= 8;
+		data.elapsed_time >>= 8;
+	}
+
+	/* calculate value in 0.1 percent units */
+	t = (unsigned long) data.elapsed_time / 1000;
+	/* guard against division by zero for very short intervals */
+	if (!t)
+		return sprintf(buf, "n/a\n");
+	u = (unsigned long) utilization / t;
+
+	return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
+}
+
+/* Generate a sysfs show routine plus DEVICE_ATTR for one raw counter. */
+#define cmf_attr(name) \
+static ssize_t show_ ## name (struct device * dev, char * buf) \
+{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
+static DEVICE_ATTR(name, 0444, show_ ## name, NULL);
+
+/* Same, for the averaged (per-sample) time values; attribute is
+ * prefixed "avg_". */
+#define cmf_attr_avg(name) \
+static ssize_t show_avg_ ## name (struct device * dev, char * buf) \
+{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
+static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL);
+
+cmf_attr(ssch_rsch_count);
+cmf_attr(sample_count);
+cmf_attr_avg(device_connect_time);
+cmf_attr_avg(function_pending_time);
+cmf_attr_avg(device_disconnect_time);
+cmf_attr_avg(control_unit_queuing_time);
+cmf_attr_avg(device_active_only_time);
+/* the following two exist in the extended format only */
+cmf_attr_avg(device_busy_time);
+cmf_attr_avg(initial_command_response_time);
+
+static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL);
+static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
+
+/* attributes common to basic and extended format ("cmf" sysfs group) */
+static struct attribute *cmf_attributes[] = {
+	&dev_attr_avg_sample_interval.attr,
+	&dev_attr_avg_utilization.attr,
+	&dev_attr_ssch_rsch_count.attr,
+	&dev_attr_sample_count.attr,
+	&dev_attr_avg_device_connect_time.attr,
+	&dev_attr_avg_function_pending_time.attr,
+	&dev_attr_avg_device_disconnect_time.attr,
+	&dev_attr_avg_control_unit_queuing_time.attr,
+	&dev_attr_avg_device_active_only_time.attr,
+	0,
+};
+
+static struct attribute_group cmf_attr_group = {
+	.name  = "cmf",
+	.attrs = cmf_attributes,
+};
+
+/* extended format: same attributes plus the two extended-only values */
+static struct attribute *cmf_attributes_ext[] = {
+	&dev_attr_avg_sample_interval.attr,
+	&dev_attr_avg_utilization.attr,
+	&dev_attr_ssch_rsch_count.attr,
+	&dev_attr_sample_count.attr,
+	&dev_attr_avg_device_connect_time.attr,
+	&dev_attr_avg_function_pending_time.attr,
+	&dev_attr_avg_device_disconnect_time.attr,
+	&dev_attr_avg_control_unit_queuing_time.attr,
+	&dev_attr_avg_device_active_only_time.attr,
+	&dev_attr_avg_device_busy_time.attr,
+	&dev_attr_avg_initial_command_response_time.attr,
+	0,
+};
+
+static struct attribute_group cmf_attr_group_ext = {
+	.name  = "cmf",
+	.attrs = cmf_attributes_ext,
+};
+
+/* sysfs: "1" when a measurement block is allocated, "0" otherwise. */
+static ssize_t cmb_enable_show(struct device *dev, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	return sprintf(buf, "%d\n", cdev->private->cmb ? 1 : 0);
+}
+
+/*
+ * sysfs: write "0" to disable or "1" to enable measurement.  Only the
+ * first character is inspected; any other input is silently ignored.
+ * Errors are logged but the write still counts as consumed.
+ */
+static ssize_t cmb_enable_store(struct device *dev, const char *buf, size_t c)
+{
+	struct ccw_device *cdev;
+	int ret;
+
+	cdev = to_ccwdev(dev);
+
+	switch (buf[0]) {
+	case '0':
+		ret = disable_cmf(cdev);
+		if (ret)
+			printk(KERN_INFO "disable_cmf failed (%d)\n", ret);
+		break;
+	case '1':
+		ret = enable_cmf(cdev);
+		/* -EBUSY: already enabled, not worth reporting */
+		if (ret && ret != -EBUSY)
+			printk(KERN_INFO "enable_cmf failed (%d)\n", ret);
+		break;
+	}
+
+	return c;
+}
+
+DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
+
+/* enable_cmf/disable_cmf: module interface for cmf (de)activation */
+/* enable_cmf/disable_cmf: module interface for cmf (de)activation */
+int
+enable_cmf(struct ccw_device *cdev)
+{
+	int ret;
+
+	ret = cmbops->alloc(cdev);
+	if (ret)
+		return ret;
+	/* reset only after a successful allocation: with the extended
+	 * ops, resetting without a measurement block is not safe */
+	cmbops->reset(cdev);
+	ret = cmbops->set(cdev, 2);
+	if (ret) {
+		cmbops->free(cdev);
+		return ret;
+	}
+	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
+	if (!ret)
+		return 0;
+	/* undo on sysfs failure */
+	cmbops->set(cdev, 0);  //FIXME: this can fail
+	cmbops->free(cdev);
+	return ret;
+}
+
+/*
+ * Disable measurement for @cdev: turn measurement off in the
+ * subchannel, free the measurement block and remove the sysfs group.
+ */
+int
+disable_cmf(struct ccw_device *cdev)
+{
+	int ret;
+
+	ret = cmbops->set(cdev, 0);
+	if (ret)
+		return ret;
+	cmbops->free(cdev);
+	sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
+	return ret;
+}
+
+/* Module interface: read one value (enum cmb_index) for @cdev. */
+u64
+cmf_read(struct ccw_device *cdev, int index)
+{
+	return cmbops->read(cdev, index);
+}
+
+/* Module interface: read the complete measurement block of @cdev. */
+int
+cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
+{
+	return cmbops->readall(cdev, data);
+}
+
+/*
+ * Select basic or extended measurement format (honouring the "format"
+ * module parameter, autodetecting otherwise) and install the matching
+ * cmb_operations.  Returns 0 on success or a negative errno - module
+ * init functions must not return positive values on failure.
+ */
+static int __init
+init_cmf(void)
+{
+	char *format_string;
+	char *detect_string = "parameter";
+
+	/* We cannot really autoprobe this. If the user did not give a parameter,
+	   see if we are running on z990 or up, otherwise fall back to basic mode. */
+
+	if (format == CMF_AUTODETECT) {
+		if (!css_characteristics_avail ||
+		    !css_general_characteristics.ext_mb) {
+			format = CMF_BASIC;
+		} else {
+			format = CMF_EXTENDED;
+		}
+		detect_string = "autodetected";
+	} else {
+		detect_string = "parameter";
+	}
+
+	switch (format) {
+	case CMF_BASIC:
+		format_string = "basic";
+		cmbops = &cmbops_basic;
+		if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
+			printk(KERN_ERR "Basic channel measurement facility"
+					" can only use 1 to 4096 devices\n"
+			       KERN_ERR "when the cmf driver is built"
+					" as a loadable module\n");
+			return -EINVAL;
+		}
+		break;
+	case CMF_EXTENDED:
+ 		format_string = "extended";
+		cmbops = &cmbops_extended;
+		break;
+	default:
+		printk(KERN_ERR "Invalid format %d for channel "
+			"measurement facility\n", format);
+		return -EINVAL;
+	}
+
+	printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
+		format_string, detect_string);
+	return 0;
+}
+
+module_init(init_cmf);
+
+
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("channel measurement facility base driver\n"
+ "Copyright 2003 IBM Corporation\n");
+
+EXPORT_SYMBOL_GPL(enable_cmf);
+EXPORT_SYMBOL_GPL(disable_cmf);
+EXPORT_SYMBOL_GPL(cmf_read);
+EXPORT_SYMBOL_GPL(cmf_readall);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
new file mode 100644
index 000000000000..87bd70eeabed
--- /dev/null
+++ b/drivers/s390/cio/css.c
@@ -0,0 +1,575 @@
+/*
+ * drivers/s390/cio/css.c
+ * driver for channel subsystem
+ * $Revision: 1.85 $
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cohuck@de.ibm.com)
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+
+#include "css.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+/* Highest subchannel number seen during probing (see css_alloc_subchannel). */
+unsigned int highest_subchannel;
+/* Set when lost CRWs force a full rescan of all subchannels. */
+int need_rescan = 0;
+/* Set once init_channel_subsystem has brought the bus up. */
+int css_init_done = 0;
+
+/* Path group ID used for all devices; built once in css_generate_pgid. */
+struct pgid global_pgid;
+/* Nonzero when chsc_determine_css_characteristics succeeded. */
+int css_characteristics_avail = 0;
+
+/* Root device all subchannels hang off of in the device tree. */
+struct device css_bus_device = {
+	.bus_id = "css0",
+};
+
+/*
+ * Allocate and validate a subchannel structure for subchannel number
+ * @irq.  Only I/O subchannels are accepted; the subchannel's intparm is
+ * set to its own address for interrupt dispatch.  Returns the new
+ * subchannel or an ERR_PTR (-ENOMEM, -EINVAL, or a cio error).
+ */
+static struct subchannel *
+css_alloc_subchannel(int irq)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
+	if (sch == NULL)
+		return ERR_PTR(-ENOMEM);
+	ret = cio_validate_subchannel (sch, irq);
+	if (ret < 0) {
+		kfree(sch);
+		return ERR_PTR(ret);
+	}
+	/* Track the highest subchannel number successfully validated. */
+	if (irq > highest_subchannel)
+		highest_subchannel = irq;
+
+	if (sch->st != SUBCHANNEL_TYPE_IO) {
+		/* For now we ignore all non-io subchannels. */
+		kfree(sch);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Set intparm to subchannel address.
+	 * This is fine even on 64bit since the subchannel is always located
+	 * under 2G.
+	 */
+	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
+	ret = cio_modify(sch);
+	if (ret) {
+		kfree(sch);
+		return ERR_PTR(ret);
+	}
+	return sch;
+}
+
+/*
+ * Undo css_alloc_subchannel: clear the intparm in the hardware and free
+ * the structure.  Safe to call with a NULL @sch.
+ */
+static void
+css_free_subchannel(struct subchannel *sch)
+{
+	if (sch) {
+		/* Reset intparm to zeroes. */
+		sch->schib.pmcw.intparm = 0;
+		cio_modify(sch);
+		kfree(sch);
+	}
+
+}
+
+/*
+ * Release callback for the subchannel device.  The console subchannel
+ * is statically allocated (see the cio_is_console check) and therefore
+ * must not be kfree'd.
+ */
+static void
+css_subchannel_release(struct device *dev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(dev);
+	if (!cio_is_console(sch->irq))
+		kfree(sch);
+}
+
+extern int css_get_ssd_info(struct subchannel *sch);
+
+/*
+ * Hook @sch into the driver model under the css bus device and fetch
+ * its subchannel description (ssd) info on success.  Returns the
+ * device_register result.
+ */
+static int
+css_register_subchannel(struct subchannel *sch)
+{
+	int ret;
+
+	/* Initialize the subchannel structure */
+	sch->dev.parent = &css_bus_device;
+	sch->dev.bus = &css_bus_type;
+	sch->dev.release = &css_subchannel_release;
+
+	/* make it known to the system */
+	ret = device_register(&sch->dev);
+	if (ret)
+		printk (KERN_WARNING "%s: could not register %s\n",
+			__func__, sch->dev.bus_id);
+	else
+		css_get_ssd_info(sch);
+	return ret;
+}
+
+/*
+ * Allocate, validate and register the subchannel with number @irq.
+ * On registration failure the subchannel is torn down again.
+ * Returns 0 on success or a negative errno / cio error code.
+ */
+int
+css_probe_device(int irq)
+{
+	int ret;
+	struct subchannel *sch;
+
+	sch = css_alloc_subchannel(irq);
+	if (IS_ERR(sch))
+		return PTR_ERR(sch);
+	ret = css_register_subchannel(sch);
+	if (ret)
+		css_free_subchannel(sch);
+	return ret;
+}
+
+/*
+ * Find the registered subchannel with subchannel number @irq by walking
+ * the css bus device list.  Returns the subchannel with an elevated
+ * device reference (caller must put_device), or NULL if not found.
+ */
+struct subchannel *
+get_subchannel_by_schid(int irq)
+{
+	struct subchannel *sch;
+	struct list_head *entry;
+	struct device *dev;
+
+	if (!get_bus(&css_bus_type))
+		return NULL;
+	down_read(&css_bus_type.subsys.rwsem);
+	sch = NULL;
+	list_for_each(entry, &css_bus_type.devices.list) {
+		dev = get_device(container_of(entry,
+					      struct device, bus_list));
+		if (!dev)
+			continue;
+		sch = to_subchannel(dev);
+		/* Keep the reference on a match; drop it otherwise. */
+		if (sch->irq == irq)
+			break;
+		put_device(dev);
+		sch = NULL;
+	}
+	up_read(&css_bus_type.subsys.rwsem);
+	put_bus(&css_bus_type);
+
+	return sch;
+}
+
+/*
+ * Query the hardware state of subchannel number @schid and classify it
+ * relative to our bookkeeping in @sch (which may be NULL for unknown
+ * subchannels): CIO_GONE (not operational), CIO_REVALIDATE (device
+ * number changed), CIO_NO_PATH (no usable path left) or CIO_OPER.
+ */
+static inline int
+css_get_subchannel_status(struct subchannel *sch, int schid)
+{
+	struct schib schib;
+	int cc;
+
+	cc = stsch(schid, &schib);
+	if (cc)
+		return CIO_GONE;
+	if (!schib.pmcw.dnv)
+		return CIO_GONE;
+	if (sch && sch->schib.pmcw.dnv &&
+	    (schib.pmcw.dev != sch->schib.pmcw.dev))
+		return CIO_REVALIDATE;
+	if (sch && !sch->lpm)
+		return CIO_NO_PATH;
+	return CIO_OPER;
+}
+
+/*
+ * React to a machine check for subchannel number @irq.
+ *
+ * @slow selects the path: fast-path calls (slow == 0) defer work on
+ * disconnected devices to the slow path by returning -EAGAIN; slow-path
+ * calls (slow == 1) do the full evaluation.  Depending on the hardware
+ * state this unregisters a vanished device, triggers reprobing, or
+ * probes a newly appeared subchannel.  Returns 0, -EAGAIN, or an error
+ * from css_probe_device.
+ */
+static int
+css_evaluate_subchannel(int irq, int slow)
+{
+	int event, ret, disc;
+	struct subchannel *sch;
+	unsigned long flags;
+
+	sch = get_subchannel_by_schid(irq);
+	disc = sch ? device_is_disconnected(sch) : 0;
+	if (disc && slow) {
+		if (sch)
+			put_device(&sch->dev);
+		return 0; /* Already processed. */
+	}
+	/*
+	 * We've got a machine check, so running I/O won't get an interrupt.
+	 * Kill any pending timers.
+	 */
+	if (sch)
+		device_kill_pending_timer(sch);
+	if (!disc && !slow) {
+		if (sch)
+			put_device(&sch->dev);
+		return -EAGAIN; /* Will be done on the slow path. */
+	}
+	event = css_get_subchannel_status(sch, irq);
+	CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
+		      irq, event, sch?(disc?"disconnected":"normal"):"unknown",
+		      slow?"slow":"fast");
+	switch (event) {
+	case CIO_NO_PATH:
+	case CIO_GONE:
+		if (!sch) {
+			/* Never used this subchannel. Ignore. */
+			ret = 0;
+			break;
+		}
+		if (disc && (event == CIO_NO_PATH)) {
+			/*
+			 * Uargh, hack again. Because we don't get a machine
+			 * check on configure on, our path bookkeeping can
+			 * be out of date here (it's fine while we only do
+			 * logical varying or get chsc machine checks). We
+			 * need to force reprobing or we might miss devices
+			 * coming operational again. It won't do harm in real
+			 * no path situations.
+			 */
+			spin_lock_irqsave(&sch->lock, flags);
+			device_trigger_reprobe(sch);
+			spin_unlock_irqrestore(&sch->lock, flags);
+			ret = 0;
+			break;
+		}
+		/* Give the driver a chance to keep the device around. */
+		if (sch->driver && sch->driver->notify &&
+		    sch->driver->notify(&sch->dev, event)) {
+			cio_disable_subchannel(sch);
+			device_set_disconnected(sch);
+			ret = 0;
+			break;
+		}
+		/*
+		 * Unregister subchannel.
+		 * The device will be killed automatically.
+		 */
+		cio_disable_subchannel(sch);
+		device_unregister(&sch->dev);
+		/* Reset intparm to zeroes. */
+		sch->schib.pmcw.intparm = 0;
+		cio_modify(sch);
+		put_device(&sch->dev);
+		ret = 0;
+		break;
+	case CIO_REVALIDATE:
+		/*
+		 * Revalidation machine check. Sick.
+		 * We don't notify the driver since we have to throw the device
+		 * away in any case.
+		 */
+		if (!disc) {
+			device_unregister(&sch->dev);
+			/* Reset intparm to zeroes. */
+			sch->schib.pmcw.intparm = 0;
+			cio_modify(sch);
+			put_device(&sch->dev);
+			ret = css_probe_device(irq);
+		} else {
+			/*
+			 * We can't immediately deregister the disconnected
+			 * device since it might block.
+			 */
+			spin_lock_irqsave(&sch->lock, flags);
+			device_trigger_reprobe(sch);
+			spin_unlock_irqrestore(&sch->lock, flags);
+			ret = 0;
+		}
+		break;
+	case CIO_OPER:
+		if (disc) {
+			spin_lock_irqsave(&sch->lock, flags);
+			/* Get device operational again. */
+			device_trigger_reprobe(sch);
+			spin_unlock_irqrestore(&sch->lock, flags);
+		}
+		ret = sch ? 0 : css_probe_device(irq);
+		break;
+	default:
+		BUG();
+		ret = 0;
+	}
+	return ret;
+}
+
+/*
+ * Slow-path evaluation of every possible subchannel; used after CRW
+ * overflows when individual events were lost.
+ */
+static void
+css_rescan_devices(void)
+{
+	int irq, ret;
+
+	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+		ret = css_evaluate_subchannel(irq, 1);
+		/* No more memory. It doesn't make sense to continue. No
+		 * panic because this can happen in midflight and just
+		 * because we can't use a new device is no reason to crash
+		 * the system. */
+		if (ret == -ENOMEM)
+			break;
+		/* -ENXIO indicates that there are no more subchannels. */
+		if (ret == -ENXIO)
+			break;
+	}
+}
+
+struct slow_subchannel {
+ struct list_head slow_list;
+ unsigned long schid;
+};
+
+static LIST_HEAD(slow_subchannels_head);
+static DEFINE_SPINLOCK(slow_subchannel_lock);
+
+/*
+ * Work function for the slow path: either rescan all subchannels (when
+ * need_rescan was set) or drain the queue of individually flagged
+ * subchannels, evaluating each with the lock dropped.
+ */
+static void
+css_trigger_slow_path(void)
+{
+	CIO_TRACE_EVENT(4, "slowpath");
+
+	if (need_rescan) {
+		need_rescan = 0;
+		css_rescan_devices();
+		return;
+	}
+
+	spin_lock_irq(&slow_subchannel_lock);
+	while (!list_empty(&slow_subchannels_head)) {
+		struct slow_subchannel *slow_sch =
+			list_entry(slow_subchannels_head.next,
+				   struct slow_subchannel, slow_list);
+
+		list_del_init(slow_subchannels_head.next);
+		/* Drop the lock while evaluating; may sleep/re-enter. */
+		spin_unlock_irq(&slow_subchannel_lock);
+		css_evaluate_subchannel(slow_sch->schid, 1);
+		spin_lock_irq(&slow_subchannel_lock);
+		kfree(slow_sch);
+	}
+	spin_unlock_irq(&slow_subchannel_lock);
+}
+
+typedef void (*workfunc)(void *);
+DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
+struct workqueue_struct *slow_path_wq;
+
+/*
+ * Rescan for new devices. FIXME: This is slow.
+ * This function is called when we have lost CRWs due to overflows and we have
+ * to do subchannel housekeeping.
+ * Drops any individually queued subchannels and marks a full rescan;
+ * the actual work happens later in css_trigger_slow_path.
+ */
+void
+css_reiterate_subchannels(void)
+{
+	css_clear_subchannel_slow_list();
+	need_rescan = 1;
+}
+
+/*
+ * Called from the machine check handler for subchannel report words.
+ * Tries the fast path first; if that defers (-EAGAIN), the subchannel
+ * is queued for the slow path, falling back to a full rescan when the
+ * queue entry cannot be allocated.
+ */
+int
+css_process_crw(int irq)
+{
+	int ret;
+
+	CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
+
+	if (need_rescan)
+		/* We need to iterate all subchannels anyway. */
+		return -EAGAIN;
+	/*
+	 * Since we are always presented with IPI in the CRW, we have to
+	 * use stsch() to find out if the subchannel in question has come
+	 * or gone.
+	 */
+	ret = css_evaluate_subchannel(irq, 0);
+	if (ret == -EAGAIN) {
+		if (css_enqueue_subchannel_slow(irq)) {
+			css_clear_subchannel_slow_list();
+			need_rescan = 1;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Build the global path group ID from CPU address, CPU id/model and the
+ * TOD clock high word.  On machines with multiple channel subsystem
+ * support (mcss) a fixed pseudo CPU address is used instead.
+ */
+static void __init
+css_generate_pgid(void)
+{
+	/* Let's build our path group ID here. */
+	if (css_characteristics_avail && css_general_characteristics.mcss)
+		global_pgid.cpu_addr = 0x8000;
+	else {
+#ifdef CONFIG_SMP
+		global_pgid.cpu_addr = hard_smp_processor_id();
+#else
+		global_pgid.cpu_addr = 0;
+#endif
+	}
+	global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
+	global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
+	global_pgid.tod_high = (__u32) (get_clock() >> 32);
+}
+
+/*
+ * Now that the driver core is running, we can setup our channel subsystem.
+ * The struct subchannel's are created during probing (except for the
+ * static console subchannel).
+ *
+ * Registers the css bus and root device, enables machine checks for
+ * channel report words (control register 6 bit 28), then probes and
+ * registers every valid subchannel.
+ */
+static int __init
+init_channel_subsystem (void)
+{
+	int ret, irq;
+
+	if (chsc_determine_css_characteristics() == 0)
+		css_characteristics_avail = 1;
+
+	css_generate_pgid();
+
+	if ((ret = bus_register(&css_bus_type)))
+		goto out;
+	if ((ret = device_register (&css_bus_device)))
+		goto out_bus;
+
+	css_init_done = 1;
+
+	/* Enable channel report word machine checks. */
+	ctl_set_bit(6, 28);
+
+	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+		struct subchannel *sch;
+
+		if (cio_is_console(irq))
+			sch = cio_get_console_subchannel();
+		else {
+			sch = css_alloc_subchannel(irq);
+			if (IS_ERR(sch))
+				ret = PTR_ERR(sch);
+			else
+				ret = 0;
+			if (ret == -ENOMEM)
+				panic("Out of memory in "
+				      "init_channel_subsystem\n");
+			/* -ENXIO: no more subchannels. */
+			if (ret == -ENXIO)
+				break;
+			if (ret)
+				continue;
+		}
+		/*
+		 * We register ALL valid subchannels in ioinfo, even those
+		 * that have been present before init_channel_subsystem.
+		 * These subchannels can't have been registered yet (kmalloc
+		 * not working) so we do it now. This is true e.g. for the
+		 * console subchannel.
+		 */
+		css_register_subchannel(sch);
+	}
+	return 0;
+
+out_bus:
+	bus_unregister(&css_bus_type);
+out:
+	return ret;
+}
+
+/*
+ * find a driver for a subchannel. They identify by the subchannel
+ * type with the exception that the console subchannel driver has its own
+ * subchannel type although the device is an i/o subchannel
+ */
+static int
+css_bus_match (struct device *dev, struct device_driver *drv)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *driver = container_of(drv, struct css_driver, drv);
+
+	/* A css driver claims exactly the subchannels of its type. */
+	return (sch->st == driver->subchannel_type) ? 1 : 0;
+}
+
+struct bus_type css_bus_type = {
+ .name = "css",
+ .match = &css_bus_match,
+};
+
+subsys_initcall(init_channel_subsystem);
+
+/*
+ * Register root devices for some drivers. The release function must not be
+ * in the device drivers, so we do it here.
+ */
+static void
+s390_root_dev_release(struct device *dev)
+{
+	/* Matches the kmalloc in s390_root_dev_register. */
+	kfree(dev);
+}
+
+/*
+ * Allocate and register a root device named @name for a driver.
+ * Returns the device or an ERR_PTR (-EINVAL for an empty name,
+ * -ENOMEM, or the device_register error).  The device is freed by
+ * s390_root_dev_release when its last reference is dropped.
+ */
+struct device *
+s390_root_dev_register(const char *name)
+{
+	struct device *dev;
+	int ret;
+
+	if (!strlen(name))
+		return ERR_PTR(-EINVAL);
+	dev = kmalloc(sizeof(struct device), GFP_KERNEL);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+	memset(dev, 0, sizeof(struct device));
+	/* snprintf always NUL-terminates; the previous
+	 * strncpy(dev->bus_id, name, min(strlen(name), BUS_ID_SIZE))
+	 * left bus_id unterminated for names >= BUS_ID_SIZE. */
+	snprintf(dev->bus_id, BUS_ID_SIZE, "%s", name);
+	dev->release = s390_root_dev_release;
+	ret = device_register(dev);
+	if (ret) {
+		kfree(dev);
+		return ERR_PTR(ret);
+	}
+	return dev;
+}
+
+/*
+ * Unregister a root device created by s390_root_dev_register.
+ * NULL is tolerated as a no-op.
+ */
+void
+s390_root_dev_unregister(struct device *dev)
+{
+	if (dev)
+		device_unregister(dev);
+}
+
+/*
+ * Queue subchannel number @schid for later slow-path evaluation.
+ * GFP_ATOMIC because this runs from machine check context.
+ * Returns 0 on success or -ENOMEM.
+ */
+int
+css_enqueue_subchannel_slow(unsigned long schid)
+{
+	struct slow_subchannel *new_slow_sch;
+	unsigned long flags;
+
+	new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
+	if (!new_slow_sch)
+		return -ENOMEM;
+	memset(new_slow_sch, 0, sizeof(struct slow_subchannel));
+	new_slow_sch->schid = schid;
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+	return 0;
+}
+
+/*
+ * Discard all queued slow-path entries, e.g. before a full rescan
+ * makes individual evaluation redundant.
+ */
+void
+css_clear_subchannel_slow_list(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	while (!list_empty(&slow_subchannels_head)) {
+		struct slow_subchannel *slow_sch =
+			list_entry(slow_subchannels_head.next,
+				   struct slow_subchannel, slow_list);
+
+		list_del_init(slow_subchannels_head.next);
+		kfree(slow_sch);
+	}
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+
+
+/* Non-destructive check whether any subchannels await slow-path work. */
+int
+css_slow_subchannels_exist(void)
+{
+	return !list_empty(&slow_subchannels_head);
+}
+
+MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(css_bus_type);
+EXPORT_SYMBOL(s390_root_dev_register);
+EXPORT_SYMBOL(s390_root_dev_unregister);
+EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
new file mode 100644
index 000000000000..2004a6c49388
--- /dev/null
+++ b/drivers/s390/cio/css.h
@@ -0,0 +1,155 @@
+#ifndef _CSS_H
+#define _CSS_H
+
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <asm/cio.h>
+
+/*
+ * path grouping stuff
+ */
+#define SPID_FUNC_SINGLE_PATH 0x00
+#define SPID_FUNC_MULTI_PATH 0x80
+#define SPID_FUNC_ESTABLISH 0x00
+#define SPID_FUNC_RESIGN 0x40
+#define SPID_FUNC_DISBAND 0x20
+
+#define SNID_STATE1_RESET 0
+#define SNID_STATE1_UNGROUPED 2
+#define SNID_STATE1_GROUPED 3
+
+#define SNID_STATE2_NOT_RESVD 0
+#define SNID_STATE2_RESVD_ELSE 2
+#define SNID_STATE2_RESVD_SELF 3
+
+#define SNID_STATE3_MULTI_PATH 1
+#define SNID_STATE3_SINGLE_PATH 0
+
+struct path_state {
+ __u8 state1 : 2; /* path state value 1 */
+ __u8 state2 : 2; /* path state value 2 */
+ __u8 state3 : 1; /* path state value 3 */
+ __u8 resvd : 3; /* reserved */
+} __attribute__ ((packed));
+
+struct pgid {
+ union {
+ __u8 fc; /* SPID function code */
+ struct path_state ps; /* SNID path state */
+ } inf;
+ __u32 cpu_addr : 16; /* CPU address */
+ __u32 cpu_id : 24; /* CPU identification */
+ __u32 cpu_model : 16; /* CPU model */
+ __u32 tod_high; /* high word TOD clock */
+} __attribute__ ((packed));
+
+extern struct pgid global_pgid;
+
+#define MAX_CIWS 8
+
+/*
+ * sense-id response buffer layout
+ */
+struct senseid {
+ /* common part */
+ __u8 reserved; /* always 0x'FF' */
+ __u16 cu_type; /* control unit type */
+ __u8 cu_model; /* control unit model */
+ __u16 dev_type; /* device type */
+ __u8 dev_model; /* device model */
+ __u8 unused; /* padding byte */
+ /* extended part */
+ struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
+} __attribute__ ((packed,aligned(4)));
+
+struct ccw_device_private {
+ int state; /* device state */
+ atomic_t onoff;
+ unsigned long registered;
+ __u16 devno; /* device number */
+ __u16 irq; /* subchannel number */
+ __u8 imask; /* lpm mask for SNID/SID/SPGID */
+ int iretry; /* retry counter SNID/SID/SPGID */
+ struct {
+ unsigned int fast:1; /* post with "channel end" */
+ unsigned int repall:1; /* report every interrupt status */
+ unsigned int pgroup:1; /* do path grouping */
+ unsigned int force:1; /* allow forced online */
+ } __attribute__ ((packed)) options;
+ struct {
+ unsigned int pgid_single:1; /* use single path for Set PGID */
+ unsigned int esid:1; /* Ext. SenseID supported by HW */
+ unsigned int dosense:1; /* delayed SENSE required */
+ unsigned int doverify:1; /* delayed path verification */
+ unsigned int donotify:1; /* call notify function */
+ unsigned int recog_done:1; /* dev. recog. complete */
+ unsigned int fake_irb:1; /* deliver faked irb */
+ } __attribute__((packed)) flags;
+ unsigned long intparm; /* user interruption parameter */
+ struct qdio_irq *qdio_data;
+ struct irb irb; /* device status */
+ struct senseid senseid; /* SenseID info */
+ struct pgid pgid; /* path group ID */
+ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
+ struct work_struct kick_work;
+ wait_queue_head_t wait_q;
+ struct timer_list timer;
+ void *cmb; /* measurement information */
+ struct list_head cmb_list; /* list of measured devices */
+ u64 cmb_start_time; /* clock value of cmb reset */
+ void *cmb_wait; /* deferred cmb enable/disable */
+};
+
+/*
+ * A css driver handles all subchannels of one type.
+ * Currently, we only care about I/O subchannels (type 0), these
+ * have a ccw_device connected to them.
+ */
+struct css_driver {
+ unsigned int subchannel_type;
+ struct device_driver drv;
+ void (*irq)(struct device *);
+ int (*notify)(struct device *, int);
+ void (*verify)(struct device *);
+ void (*termination)(struct device *);
+};
+
+/*
+ * all css_drivers have the css_bus_type
+ */
+extern struct bus_type css_bus_type;
+extern struct css_driver io_subchannel_driver;
+
+int css_probe_device(int irq);
+extern struct subchannel * get_subchannel_by_schid(int irq);
+extern unsigned int highest_subchannel;
+extern int css_init_done;
+
+#define __MAX_SUBCHANNELS 65536
+
+extern struct bus_type css_bus_type;
+extern struct device css_bus_device;
+
+/* Some helper functions for disconnected state. */
+int device_is_disconnected(struct subchannel *);
+void device_set_disconnected(struct subchannel *);
+void device_trigger_reprobe(struct subchannel *);
+
+/* Helper functions for vary on/off. */
+int device_is_online(struct subchannel *);
+void device_set_waiting(struct subchannel *);
+
+/* Machine check helper function. */
+void device_kill_pending_timer(struct subchannel *);
+
+/* Helper functions to build lists for the slow path. */
+int css_enqueue_subchannel_slow(unsigned long schid);
+void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
+void css_clear_subchannel_slow_list(void);
+int css_slow_subchannels_exist(void);
+extern int need_rescan;
+
+extern struct workqueue_struct *slow_path_wq;
+extern struct work_struct slow_path_work;
+#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
new file mode 100644
index 000000000000..df0325505e4e
--- /dev/null
+++ b/drivers/s390/cio/device.c
@@ -0,0 +1,1135 @@
+/*
+ * drivers/s390/cio/device.c
+ * bus driver for ccw devices
+ * $Revision: 1.131 $
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cohuck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+
+/******************* bus type handling ***********************/
+
+/* The Linux driver model distinguishes between a bus type and
+ * the bus itself. Of course we only have one channel
+ * subsystem driver and one channel system per machine, but
+ * we still use the abstraction. T.R. says it's a good idea. */
+static int
+ccw_bus_match (struct device * dev, struct device_driver * drv)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_driver *cdrv = to_ccwdrv(drv);
+	const struct ccw_device_id *ids = cdrv->ids, *found;
+
+	if (!ids)
+		return 0;
+
+	found = ccw_device_id_match(ids, &cdev->id);
+	if (!found)
+		return 0;
+
+	/* Remember the matching table entry for the driver's probe. */
+	cdev->id.driver_info = found->driver_info;
+
+	return 1;
+}
+
+/*
+ * Hotplugging interface for ccw devices.
+ * Heavily modeled on pci and usb hotplug.
+ *
+ * Fills @envp/@buffer with the CU_TYPE, CU_MODEL, DEV_TYPE and
+ * DEV_MODEL environment variables for /sbin/hotplug.  Returns 0 on
+ * success, -ENOMEM when buffer or environment space runs out.
+ */
+static int
+ccw_hotplug (struct device *dev, char **envp, int num_envp,
+	     char *buffer, int buffer_size)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	int i = 0;
+	int len;
+
+	if (!cdev)
+		return -ENODEV;
+
+	/* what we want to pass to /sbin/hotplug */
+
+	envp[i++] = buffer;
+	len = scnprintf(buffer, buffer_size, "CU_TYPE=%04X",
+			cdev->id.cu_type) + 1;
+	buffer_size -= len;
+	if ((buffer_size <= 0) || (i >= num_envp))
+		return -ENOMEM;
+	/* Advance past the string just written plus its NUL.  The old
+	 * code advanced by the *cumulative* length, so every entry
+	 * after the first pointed past its own string. */
+	buffer += len;
+
+	envp[i++] = buffer;
+	len = scnprintf(buffer, buffer_size, "CU_MODEL=%02X",
+			cdev->id.cu_model) + 1;
+	buffer_size -= len;
+	if ((buffer_size <= 0) || (i >= num_envp))
+		return -ENOMEM;
+	buffer += len;
+
+	/* The next two can be zero, that's ok for us */
+	envp[i++] = buffer;
+	len = scnprintf(buffer, buffer_size, "DEV_TYPE=%04X",
+			cdev->id.dev_type) + 1;
+	buffer_size -= len;
+	if ((buffer_size <= 0) || (i >= num_envp))
+		return -ENOMEM;
+	buffer += len;
+
+	envp[i++] = buffer;
+	len = scnprintf(buffer, buffer_size, "DEV_MODEL=%02X",
+			cdev->id.dev_model) + 1;
+	buffer_size -= len;
+	if ((buffer_size <= 0) || (i >= num_envp))
+		return -ENOMEM;
+
+	envp[i] = 0;
+
+	return 0;
+}
+
+struct bus_type ccw_bus_type = {
+ .name = "ccw",
+ .match = &ccw_bus_match,
+ .hotplug = &ccw_hotplug,
+};
+
+static int io_subchannel_probe (struct device *);
+static int io_subchannel_remove (struct device *);
+void io_subchannel_irq (struct device *);
+static int io_subchannel_notify(struct device *, int);
+static void io_subchannel_verify(struct device *);
+static void io_subchannel_ioterm(struct device *);
+static void io_subchannel_shutdown(struct device *);
+
+struct css_driver io_subchannel_driver = {
+ .subchannel_type = SUBCHANNEL_TYPE_IO,
+ .drv = {
+ .name = "io_subchannel",
+ .bus = &css_bus_type,
+ .probe = &io_subchannel_probe,
+ .remove = &io_subchannel_remove,
+ .shutdown = &io_subchannel_shutdown,
+ },
+ .irq = io_subchannel_irq,
+ .notify = io_subchannel_notify,
+ .verify = io_subchannel_verify,
+ .termination = io_subchannel_ioterm,
+};
+
+struct workqueue_struct *ccw_device_work;
+struct workqueue_struct *ccw_device_notify_work;
+static wait_queue_head_t ccw_device_init_wq;
+static atomic_t ccw_device_init_count;
+
+/*
+ * Set up the ccw bus: create the cio work queues, register the bus and
+ * the I/O subchannel driver, then wait until all initial device
+ * recognition has finished.  Returns 0 or a negative errno; all
+ * partially acquired resources are released on failure.
+ */
+static int __init
+init_ccw_bus_type (void)
+{
+	int ret;
+
+	init_waitqueue_head(&ccw_device_init_wq);
+	atomic_set(&ccw_device_init_count, 0);
+
+	ccw_device_work = create_singlethread_workqueue("cio");
+	if (!ccw_device_work)
+		return -ENOMEM; /* FIXME: better errno ? */
+	ccw_device_notify_work = create_singlethread_workqueue("cio_notify");
+	if (!ccw_device_notify_work) {
+		ret = -ENOMEM; /* FIXME: better errno ? */
+		goto out_err;
+	}
+	slow_path_wq = create_singlethread_workqueue("kslowcrw");
+	if (!slow_path_wq) {
+		ret = -ENOMEM; /* FIXME: better errno ? */
+		goto out_err;
+	}
+	if ((ret = bus_register (&ccw_bus_type)))
+		goto out_err;
+
+	if ((ret = driver_register(&io_subchannel_driver.drv)))
+		goto out_bus;
+
+	wait_event(ccw_device_init_wq,
+		   atomic_read(&ccw_device_init_count) == 0);
+	flush_workqueue(ccw_device_work);
+	return 0;
+out_bus:
+	/* The old error path left the bus registered on
+	 * driver_register failure. */
+	bus_unregister(&ccw_bus_type);
+out_err:
+	if (ccw_device_work)
+		destroy_workqueue(ccw_device_work);
+	if (ccw_device_notify_work)
+		destroy_workqueue(ccw_device_notify_work);
+	if (slow_path_wq)
+		destroy_workqueue(slow_path_wq);
+	return ret;
+}
+
+/*
+ * Tear down everything init_ccw_bus_type set up, in reverse order.
+ */
+static void __exit
+cleanup_ccw_bus_type (void)
+{
+	driver_unregister(&io_subchannel_driver.drv);
+	bus_unregister(&ccw_bus_type);
+	/* slow_path_wq was created in init_ccw_bus_type but never
+	 * destroyed here, leaking the workqueue on module unload. */
+	destroy_workqueue(slow_path_wq);
+	destroy_workqueue(ccw_device_notify_work);
+	destroy_workqueue(ccw_device_work);
+}
+
+subsys_initcall(init_ccw_bus_type);
+module_exit(cleanup_ccw_bus_type);
+
+/************************ device handling **************************/
+
+/*
+ * A ccw_device has some interfaces in sysfs in addition to the
+ * standard ones.
+ * The following entries are designed to export the information which
+ * resided in 2.4 in /proc/subchannels. Subchannel and device number
+ * are obvious, so they don't have an entry :)
+ * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
+ */
+/*
+ * sysfs: show the eight channel path ids of a subchannel,
+ * space-separated, as held in its ssd info.
+ */
+static ssize_t
+chpids_show (struct device * dev, char * buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct ssd_info *ssd = &sch->ssd_info;
+	ssize_t ret = 0;
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
+
+	ret += sprintf (buf+ret, "\n");
+	return min((ssize_t)PAGE_SIZE, ret);
+}
+
+/*
+ * sysfs: show the path installed / available / operational masks
+ * (pim pam pom) from the subchannel's pmcw.
+ */
+static ssize_t
+pimpampom_show (struct device * dev, char * buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct pmcw *pmcw = &sch->schib.pmcw;
+
+	return sprintf (buf, "%02x %02x %02x\n",
+			pmcw->pim, pmcw->pam, pmcw->pom);
+}
+
+/*
+ * sysfs: show "devtype/model" from sense id data, or "n/a" when the
+ * device type is zero.
+ */
+static ssize_t
+devtype_show (struct device *dev, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+
+	if (id->dev_type != 0)
+		return sprintf(buf, "%04x/%02x\n",
+				id->dev_type, id->dev_model);
+	else
+		return sprintf(buf, "n/a\n");
+}
+
+/* sysfs: show the control unit "type/model" from sense id data. */
+static ssize_t
+cutype_show (struct device *dev, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+
+	return sprintf(buf, "%04x/%02x\n",
+		       id->cu_type, id->cu_model);
+}
+
+/* sysfs: show "1" when the device is online, "0" otherwise. */
+static ssize_t
+online_show (struct device *dev, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	return sprintf(buf, cdev->online ? "1\n" : "0\n");
+}
+
+/*
+ * Throw away a disconnected ccw device: unregister its subchannel
+ * (which kills the device), clear the intparm and drop the reference.
+ */
+static void
+ccw_device_remove_disconnected(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	/*
+	 * Forced offline in disconnected state means
+	 * 'throw away device'.
+	 */
+	sch = to_subchannel(cdev->dev.parent);
+	device_unregister(&sch->dev);
+	/* Reset intparm to zeroes. */
+	sch->schib.pmcw.intparm = 0;
+	cio_modify(sch);
+	put_device(&sch->dev);
+}
+
+/*
+ * Take @cdev offline: let the driver's set_offline veto first, then
+ * run the offline path of the device fsm and wait for a final state.
+ * Returns 0 on success or a negative errno; on fsm failure the online
+ * flag is restored.
+ */
+int
+ccw_device_set_offline(struct ccw_device *cdev)
+{
+	int ret;
+
+	if (!cdev)
+		return -ENODEV;
+	if (!cdev->online || !cdev->drv)
+		return -EINVAL;
+
+	if (cdev->drv->set_offline) {
+		ret = cdev->drv->set_offline(cdev);
+		if (ret != 0)
+			return ret;
+	}
+	cdev->online = 0;
+	spin_lock_irq(cdev->ccwlock);
+	ret = ccw_device_offline(cdev);
+	if (ret == -ENODEV) {
+		if (cdev->private->state != DEV_STATE_NOT_OPER) {
+			cdev->private->state = DEV_STATE_OFFLINE;
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+		}
+		spin_unlock_irq(cdev->ccwlock);
+		return ret;
+	}
+	spin_unlock_irq(cdev->ccwlock);
+	if (ret == 0)
+		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+	else {
+		pr_debug("ccw_device_offline returned %d, device %s\n",
+			 ret, cdev->dev.bus_id);
+		/* Offline start failed; the device stays online. */
+		cdev->online = 1;
+	}
+	return ret;
+}
+
+/*
+ * Bring @cdev online: run the online path of the device fsm, wait for
+ * a final state, then call the driver's set_online.  If that fails the
+ * device is taken offline again.  Returns 0 on success or a negative
+ * errno.
+ */
+int
+ccw_device_set_online(struct ccw_device *cdev)
+{
+	int ret;
+
+	if (!cdev)
+		return -ENODEV;
+	if (cdev->online || !cdev->drv)
+		return -EINVAL;
+
+	spin_lock_irq(cdev->ccwlock);
+	ret = ccw_device_online(cdev);
+	spin_unlock_irq(cdev->ccwlock);
+	if (ret == 0)
+		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+	else {
+		pr_debug("ccw_device_online returned %d, device %s\n",
+			 ret, cdev->dev.bus_id);
+		return ret;
+	}
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -ENODEV;
+	if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
+		cdev->online = 1;
+		return 0;
+	}
+	spin_lock_irq(cdev->ccwlock);
+	ret = ccw_device_offline(cdev);
+	spin_unlock_irq(cdev->ccwlock);
+	if (ret == 0)
+		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+	else
+		pr_debug("ccw_device_offline returned %d, device %s\n",
+			 ret, cdev->dev.bus_id);
+	/* Was "(ret = 0)": the assignment zeroed ret, so this failure
+	 * path always returned 0 (success).  Compare instead. */
+	return (ret == 0) ? -ENODEV : ret;
+}
+
+/*
+ * sysfs: set the device online ("1"), offline ("0") or force it online
+ * past a boxed state ("force").  Guarded by the per-device onoff
+ * atomic so only one transition runs at a time.
+ * NOTE(review): errors from recognition/stlck jump to out and still
+ * return count — they are not propagated to the writer; confirm this
+ * is intended before changing it.
+ */
+static ssize_t
+online_store (struct device *dev, const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	int i, force, ret;
+	char *tmp;
+
+	/* Serialize online/offline transitions. */
+	if (atomic_compare_and_swap(0, 1, &cdev->private->onoff))
+		return -EAGAIN;
+
+	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
+		atomic_set(&cdev->private->onoff, 0);
+		return -EINVAL;
+	}
+	if (!strncmp(buf, "force\n", count)) {
+		force = 1;
+		i = 1;
+	} else {
+		force = 0;
+		i = simple_strtoul(buf, &tmp, 16);
+	}
+	if (i == 1) {
+		/* Do device recognition, if needed. */
+		if (cdev->id.cu_type == 0) {
+			ret = ccw_device_recognition(cdev);
+			if (ret) {
+				printk(KERN_WARNING"Couldn't start recognition "
+				       "for device %s (ret=%d)\n",
+				       cdev->dev.bus_id, ret);
+				goto out;
+			}
+			wait_event(cdev->private->wait_q,
+				   cdev->private->flags.recog_done);
+		}
+		if (cdev->drv && cdev->drv->set_online)
+			ccw_device_set_online(cdev);
+	} else if (i == 0) {
+		if (cdev->private->state == DEV_STATE_DISCONNECTED)
+			ccw_device_remove_disconnected(cdev);
+		else if (cdev->drv && cdev->drv->set_offline)
+			ccw_device_set_offline(cdev);
+	}
+	if (force && cdev->private->state == DEV_STATE_BOXED) {
+		/* Steal the lock from the reserving system, then retry. */
+		ret = ccw_device_stlck(cdev);
+		if (ret) {
+			printk(KERN_WARNING"ccw_device_stlck for device %s "
+			       "returned %d!\n", cdev->dev.bus_id, ret);
+			goto out;
+		}
+		/* Do device recognition, if needed. */
+		if (cdev->id.cu_type == 0) {
+			cdev->private->state = DEV_STATE_NOT_OPER;
+			ret = ccw_device_recognition(cdev);
+			if (ret) {
+				printk(KERN_WARNING"Couldn't start recognition "
+				       "for device %s (ret=%d)\n",
+				       cdev->dev.bus_id, ret);
+				goto out;
+			}
+			wait_event(cdev->private->wait_q,
+				   cdev->private->flags.recog_done);
+		}
+		if (cdev->drv && cdev->drv->set_online)
+			ccw_device_set_online(cdev);
+	}
+	out:
+	if (cdev->drv)
+		module_put(cdev->drv->owner);
+	atomic_set(&cdev->private->onoff, 0);
+	return count;
+}
+
+/*
+ * sysfs: describe the availability of the device based on its fsm
+ * state: "boxed", "no path", "no device" or "good".
+ */
+static ssize_t
+available_show (struct device *dev, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch;
+
+	switch (cdev->private->state) {
+	case DEV_STATE_BOXED:
+		return sprintf(buf, "boxed\n");
+	case DEV_STATE_DISCONNECTED:
+	case DEV_STATE_DISCONNECTED_SENSE_ID:
+	case DEV_STATE_NOT_OPER:
+		sch = to_subchannel(dev->parent);
+		if (!sch->lpm)
+			return sprintf(buf, "no path\n");
+		else
+			return sprintf(buf, "no device\n");
+	default:
+		/* All other states considered fine. */
+		return sprintf(buf, "good\n");
+	}
+}
+
+static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
+static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
+static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
+static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
+static DEVICE_ATTR(online, 0644, online_show, online_store);
+extern struct device_attribute dev_attr_cmb_enable;
+static DEVICE_ATTR(availability, 0444, available_show, NULL);
+
+static struct attribute * subch_attrs[] = {
+ &dev_attr_chpids.attr,
+ &dev_attr_pimpampom.attr,
+ NULL,
+};
+
+static struct attribute_group subch_attr_group = {
+ .attrs = subch_attrs,
+};
+
+/* Create the subchannel sysfs attribute group (chpids, pimpampom). */
+static inline int
+subchannel_add_files (struct device *dev)
+{
+	return sysfs_create_group(&dev->kobj, &subch_attr_group);
+}
+
+static struct attribute * ccwdev_attrs[] = {
+ &dev_attr_devtype.attr,
+ &dev_attr_cutype.attr,
+ &dev_attr_online.attr,
+ &dev_attr_cmb_enable.attr,
+ &dev_attr_availability.attr,
+ NULL,
+};
+
+static struct attribute_group ccwdev_attr_group = {
+ .attrs = ccwdev_attrs,
+};
+
+/* Create the ccw device sysfs attribute group. */
+static inline int
+device_add_files (struct device *dev)
+{
+	return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
+}
+
+/* Remove the ccw device sysfs attribute group. */
+static inline void
+device_remove_files(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
+}
+
+/* this is a simple abstraction for device_register that sets the
+ * correct bus type and adds the bus specific files */
+int
+ccw_device_register(struct ccw_device *cdev)
+{
+	struct device *dev = &cdev->dev;
+	int ret;
+
+	dev->bus = &ccw_bus_type;
+
+	if ((ret = device_add(dev)))
+		return ret;
+
+	/* Bit 1 of 'registered' tracks visibility in the device tree. */
+	set_bit(1, &cdev->private->registered);
+	if ((ret = device_add_files(dev))) {
+		/* Undo the registration if attribute creation failed. */
+		if (test_and_clear_bit(1, &cdev->private->registered))
+			device_del(dev);
+	}
+	return ret;
+}
+
+/*
+ * Find a *disconnected* ccw device with device number @devno other
+ * than @sibling.  A match is switched to DEV_STATE_NOT_OPER and
+ * returned with an elevated reference (caller must put_device);
+ * returns NULL when nothing matches.
+ */
+static struct ccw_device *
+get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling)
+{
+	struct ccw_device *cdev;
+	struct list_head *entry;
+	struct device *dev;
+
+	if (!get_bus(&ccw_bus_type))
+		return NULL;
+	down_read(&ccw_bus_type.subsys.rwsem);
+	cdev = NULL;
+	list_for_each(entry, &ccw_bus_type.devices.list) {
+		dev = get_device(container_of(entry,
+					      struct device, bus_list));
+		if (!dev)
+			continue;
+		cdev = to_ccwdev(dev);
+		if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
+		    (cdev->private->devno == devno) &&
+		    (cdev != sibling)) {
+			cdev->private->state = DEV_STATE_NOT_OPER;
+			break;
+		}
+		put_device(dev);
+		cdev = NULL;
+	}
+	up_read(&ccw_bus_type.subsys.rwsem);
+	put_bus(&ccw_bus_type);
+
+	return cdev;
+}
+
+/*
+ * Worker function: second half of ccw_device_do_unreg_rereg(). Re-adds
+ * the ccw device (under its possibly renamed bus id) and recreates the
+ * sysfs files. On failure the device reference is dropped.
+ */
+static void
+ccw_device_add_changed(void *data)
+{
+
+ struct ccw_device *cdev;
+
+ cdev = (struct ccw_device *)data;
+ if (device_add(&cdev->dev)) {
+ put_device(&cdev->dev);
+ return;
+ }
+ /* Same registered-bit protocol as in ccw_device_register(). */
+ set_bit(1, &cdev->private->registered);
+ if (device_add_files(&cdev->dev)) {
+ if (test_and_clear_bit(1, &cdev->private->registered))
+ device_unregister(&cdev->dev);
+ }
+}
+
+extern int css_get_ssd_info(struct subchannel *sch);
+
+/*
+ * Worker function: deregister a ccw device and schedule its
+ * re-registration, used when a different device appeared behind the
+ * subchannel (possibly with a changed device number).
+ */
+void
+ccw_device_do_unreg_rereg(void *data)
+{
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ int need_rename;
+
+ cdev = (struct ccw_device *)data;
+ sch = to_subchannel(cdev->dev.parent);
+ if (cdev->private->devno != sch->schib.pmcw.dev) {
+ /*
+ * The device number has changed. This is usually only when
+ * a device has been detached under VM and then re-appeared
+ * on another subchannel because of a different attachment
+ * order than before. Ideally, we should just switch
+ * subchannels, but unfortunately, this is not possible with
+ * the current implementation.
+ * Instead, we search for the old subchannel for this device
+ * number and deregister so there are no collisions with the
+ * newly registered ccw_device.
+ * FIXME: Find another solution so the block layer doesn't
+ * get possibly sick...
+ */
+ struct ccw_device *other_cdev;
+
+ need_rename = 1;
+ other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
+ cdev);
+ if (other_cdev) {
+ struct subchannel *other_sch;
+
+ other_sch = to_subchannel(other_cdev->dev.parent);
+ if (get_device(&other_sch->dev)) {
+ stsch(other_sch->irq, &other_sch->schib);
+ if (other_sch->schib.pmcw.dnv) {
+ /* Disable interrupts on the stale subchannel. */
+ other_sch->schib.pmcw.intparm = 0;
+ cio_modify(other_sch);
+ }
+ device_unregister(&other_sch->dev);
+ }
+ }
+ /* Update ssd info here. */
+ css_get_ssd_info(sch);
+ cdev->private->devno = sch->schib.pmcw.dev;
+ } else
+ need_rename = 0;
+ device_remove_files(&cdev->dev);
+ if (test_and_clear_bit(1, &cdev->private->registered))
+ device_del(&cdev->dev);
+ if (need_rename)
+ snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
+ sch->schib.pmcw.dev);
+ /* Second half (re-add) runs from the ccw workqueue. */
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_add_changed, (void *)cdev);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+}
+
+/*
+ * Release callback for a ccw device: called by the driver core when the
+ * last reference is dropped. Frees the private area and the device.
+ */
+static void
+ccw_device_release(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ kfree(cdev->private);
+ kfree(cdev);
+}
+
+/*
+ * Register recognized device.
+ */
+static void
+io_subchannel_register(void *data)
+{
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ int ret;
+ unsigned long flags;
+
+ cdev = (struct ccw_device *) data;
+ sch = to_subchannel(cdev->dev.parent);
+
+ /* An existing child means the device was registered earlier
+  * (e.g. the console); just trigger driver matching again. */
+ if (!list_empty(&sch->dev.children)) {
+ bus_rescan_devices(&ccw_bus_type);
+ goto out;
+ }
+ /* make it known to the system */
+ ret = ccw_device_register(cdev);
+ if (ret) {
+ printk (KERN_WARNING "%s: could not register %s\n",
+ __func__, cdev->dev.bus_id);
+ put_device(&cdev->dev);
+ spin_lock_irqsave(&sch->lock, flags);
+ sch->dev.driver_data = NULL;
+ spin_unlock_irqrestore(&sch->lock, flags);
+ /* NOTE(review): the put_device() above may already trigger
+  * ccw_device_release() which kfrees cdev - these explicit
+  * kfrees look like a potential double free; verify the
+  * reference counting on this error path. */
+ kfree (cdev->private);
+ kfree (cdev);
+ put_device(&sch->dev);
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+ return;
+ }
+
+ ret = subchannel_add_files(cdev->dev.parent);
+ if (ret)
+ printk(KERN_WARNING "%s: could not add attributes to %s\n",
+ __func__, sch->dev.bus_id);
+ put_device(&cdev->dev);
+out:
+ cdev->private->flags.recog_done = 1;
+ put_device(&sch->dev);
+ wake_up(&cdev->private->wait_q);
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+}
+
+/*
+ * Worker function: unregister the subchannel a ccw device hangs off,
+ * clear its interruption parameter and drop the references taken by
+ * the scheduling code.
+ */
+void
+ccw_device_call_sch_unregister(void *data)
+{
+ struct ccw_device *cdev = data;
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ device_unregister(&sch->dev);
+ /* Reset intparm to zeroes. */
+ sch->schib.pmcw.intparm = 0;
+ cio_modify(sch);
+ put_device(&cdev->dev);
+ put_device(&sch->dev);
+}
+
+/*
+ * subchannel recognition done. Called from the state machine.
+ * Depending on the resulting device state, either schedules device
+ * registration or unregistration of the subchannel (both must run in
+ * process context, hence the work items).
+ */
+void
+io_subchannel_recog_done(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ /* Before css init has finished, just flag recognition as done. */
+ if (css_init_done == 0) {
+ cdev->private->flags.recog_done = 1;
+ return;
+ }
+ switch (cdev->private->state) {
+ case DEV_STATE_NOT_OPER:
+ cdev->private->flags.recog_done = 1;
+ /* Remove device found not operational. */
+ if (!get_device(&cdev->dev))
+ break;
+ sch = to_subchannel(cdev->dev.parent);
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_call_sch_unregister, (void *) cdev);
+ queue_work(slow_path_wq, &cdev->private->kick_work);
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+ break;
+ case DEV_STATE_BOXED:
+ /* Device did not respond in time. */
+ case DEV_STATE_OFFLINE:
+ /*
+ * We can't register the device in interrupt context so
+ * we schedule a work item.
+ */
+ if (!get_device(&cdev->dev))
+ break;
+ PREPARE_WORK(&cdev->private->kick_work,
+ io_subchannel_register, (void *) cdev);
+ queue_work(slow_path_wq, &cdev->private->kick_work);
+ break;
+ }
+}
+
+/*
+ * Attach @cdev to @sch, initialize its private data and kick off the
+ * asynchronous device recognition (sense id). Returns the error from
+ * ccw_device_recognition(); on failure the in-recognition counter is
+ * decremented again.
+ */
+static int
+io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+{
+ int rc;
+ struct ccw_device_private *priv;
+
+ sch->dev.driver_data = cdev;
+ sch->driver = &io_subchannel_driver;
+ cdev->ccwlock = &sch->lock;
+ /* Init private data. */
+ priv = cdev->private;
+ priv->devno = sch->schib.pmcw.dev;
+ priv->irq = sch->irq;
+ priv->state = DEV_STATE_NOT_OPER;
+ INIT_LIST_HEAD(&priv->cmb_list);
+ init_waitqueue_head(&priv->wait_q);
+ init_timer(&priv->timer);
+
+ /* Set an initial name for the device. */
+ snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
+ sch->schib.pmcw.dev);
+
+ /* Increase counter of devices currently in recognition. */
+ atomic_inc(&ccw_device_init_count);
+
+ /* Start async. device sensing. */
+ spin_lock_irq(&sch->lock);
+ rc = ccw_device_recognition(cdev);
+ spin_unlock_irq(&sch->lock);
+ if (rc) {
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+ }
+ return rc;
+}
+
+/*
+ * Probe callback for the io subchannel driver: allocate a fresh ccw
+ * device for the subchannel and start recognition, or - for early
+ * devices such as the console - just register the pre-built device.
+ */
+static int
+io_subchannel_probe (struct device *pdev)
+{
+ struct subchannel *sch;
+ struct ccw_device *cdev;
+ int rc;
+ unsigned long flags;
+
+ sch = to_subchannel(pdev);
+ if (sch->dev.driver_data) {
+ /*
+ * This subchannel already has an associated ccw_device.
+ * Register it and exit. This happens for all early
+ * device, e.g. the console.
+ */
+ cdev = sch->dev.driver_data;
+ device_initialize(&cdev->dev);
+ ccw_device_register(cdev);
+ subchannel_add_files(&sch->dev);
+ /*
+ * Check if the device is already online. If it is
+ * the reference count needs to be corrected
+ * (see ccw_device_online and css_init_done for the
+ * ugly details).
+ */
+ if (cdev->private->state != DEV_STATE_NOT_OPER &&
+ cdev->private->state != DEV_STATE_OFFLINE &&
+ cdev->private->state != DEV_STATE_BOXED)
+ get_device(&cdev->dev);
+ return 0;
+ }
+ cdev = kmalloc (sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+ memset(cdev, 0, sizeof(struct ccw_device));
+ /* Private data must be allocated below 2G (GFP_DMA) for channel
+  * programs. */
+ cdev->private = kmalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (!cdev->private) {
+ kfree(cdev);
+ return -ENOMEM;
+ }
+ memset(cdev->private, 0, sizeof(struct ccw_device_private));
+ atomic_set(&cdev->private->onoff, 0);
+ cdev->dev = (struct device) {
+ .parent = pdev,
+ .release = ccw_device_release,
+ };
+ INIT_LIST_HEAD(&cdev->private->kick_work.entry);
+ /* Do first half of device_register. */
+ device_initialize(&cdev->dev);
+
+ if (!get_device(&sch->dev)) {
+ /* Subchannel is going away - free via the release callback. */
+ if (cdev->dev.release)
+ cdev->dev.release(&cdev->dev);
+ return -ENODEV;
+ }
+
+ rc = io_subchannel_recog(cdev, to_subchannel(pdev));
+ if (rc) {
+ spin_lock_irqsave(&sch->lock, flags);
+ sch->dev.driver_data = NULL;
+ spin_unlock_irqrestore(&sch->lock, flags);
+ if (cdev->dev.release)
+ cdev->dev.release(&cdev->dev);
+ }
+
+ return rc;
+}
+
+/*
+ * Worker function: unregister the ccw device if it is still registered
+ * and drop the reference taken by io_subchannel_remove().
+ */
+static void
+ccw_device_unregister(void *data)
+{
+ struct ccw_device *cdev;
+
+ cdev = (struct ccw_device *)data;
+ if (test_and_clear_bit(1, &cdev->private->registered))
+ device_unregister(&cdev->dev);
+ put_device(&cdev->dev);
+}
+
+/*
+ * Remove callback for the io subchannel driver: detach the ccw device
+ * from the subchannel and schedule its unregistration.
+ */
+static int
+io_subchannel_remove (struct device *dev)
+{
+ struct ccw_device *cdev;
+ unsigned long flags;
+
+ if (!dev->driver_data)
+ return 0;
+ cdev = dev->driver_data;
+ /* Set ccw device to not operational and drop reference. */
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ dev->driver_data = NULL;
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ /*
+ * Put unregistration on workqueue to avoid livelocks on the css bus
+ * semaphore.
+ */
+ if (get_device(&cdev->dev)) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_unregister, (void *) cdev);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ }
+ return 0;
+}
+
+/*
+ * Forward a channel event to the device driver, but only if a driver
+ * is bound, the device is online and the driver has a notify callback.
+ * Returns the driver's answer, 0 otherwise.
+ */
+static int
+io_subchannel_notify(struct device *dev, int event)
+{
+ struct ccw_device *cdev = dev->driver_data;
+
+ if (!cdev || !cdev->drv || !cdev->online)
+ return 0;
+ if (!cdev->drv->notify)
+ return 0;
+ return cdev->drv->notify(cdev, event);
+}
+
+/* Trigger path verification in the device state machine, if a ccw
+ * device is attached to this subchannel. */
+static void
+io_subchannel_verify(struct device *dev)
+{
+ struct ccw_device *cdev = dev->driver_data;
+
+ if (!cdev)
+ return;
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+}
+
+/*
+ * An i/o request on this subchannel was terminated: move the device to
+ * the clear-verify state and signal -EIO to the driver's handler.
+ */
+static void
+io_subchannel_ioterm(struct device *dev)
+{
+ struct ccw_device *cdev;
+
+ cdev = dev->driver_data;
+ if (!cdev)
+ return;
+ cdev->private->state = DEV_STATE_CLEAR_VERIFY;
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(-EIO));
+}
+
+/*
+ * Shutdown callback: quiesce running i/o and disable the subchannel,
+ * waiting (with a retry timer) for cancel/halt/clear to complete.
+ */
+static void
+io_subchannel_shutdown(struct device *dev)
+{
+ struct subchannel *sch;
+ struct ccw_device *cdev;
+ int ret;
+
+ sch = to_subchannel(dev);
+ cdev = dev->driver_data;
+
+ /* The console subchannel must stay enabled. */
+ if (cio_is_console(sch->irq))
+ return;
+ if (!sch->schib.pmcw.ena)
+ /* Nothing to do. */
+ return;
+ ret = cio_disable_subchannel(sch);
+ if (ret != -EBUSY)
+ /* Subchannel is disabled, we're done. */
+ return;
+ /* -EBUSY: i/o still running - quiesce and retry. */
+ cdev->private->state = DEV_STATE_QUIESCE;
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(-EIO));
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ }
+ cio_disable_subchannel(sch);
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+/* Statically allocated console device - there is at most one, guarded
+ * by the console_cdev_in_use flag (claimed via xchg()). */
+static struct ccw_device console_cdev;
+static struct ccw_device_private console_private;
+static int console_cdev_in_use;
+
+/*
+ * Run device recognition for the console device and bring it online,
+ * busy-waiting (wait_cons_dev) for the asynchronous state machine.
+ * Returns 0 on success, the recognition error, or -EIO if the device
+ * did not reach the offline/online state.
+ */
+static int
+ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
+{
+ int rc;
+
+ /* Initialize the ccw_device structure. */
+ cdev->dev = (struct device) {
+ .parent = &sch->dev,
+ };
+ /* Initialize the subchannel structure */
+ sch->dev.parent = &css_bus_device;
+ sch->dev.bus = &css_bus_type;
+
+ rc = io_subchannel_recog(cdev, sch);
+ if (rc)
+ return rc;
+
+ /* Now wait for the async. recognition to come to an end. */
+ spin_lock_irq(cdev->ccwlock);
+ while (!dev_fsm_final_state(cdev))
+ wait_cons_dev();
+ rc = -EIO;
+ if (cdev->private->state != DEV_STATE_OFFLINE)
+ goto out_unlock;
+ ccw_device_online(cdev);
+ while (!dev_fsm_final_state(cdev))
+ wait_cons_dev();
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out_unlock;
+ rc = 0;
+out_unlock:
+ spin_unlock_irq(cdev->ccwlock);
+ /* Bug fix: previously this returned 0 unconditionally, hiding
+  * failures from ccw_device_probe_console(), which would then mark
+  * a non-functional console online. Propagate rc instead. */
+ return rc;
+}
+
+/*
+ * Set up the single console ccw device. Returns the device, NULL if the
+ * console is already claimed, or an ERR_PTR on failure.
+ */
+struct ccw_device *
+ccw_device_probe_console(void)
+{
+ struct subchannel *sch;
+ int ret;
+
+ /* Atomically claim the one static console device. */
+ if (xchg(&console_cdev_in_use, 1) != 0)
+ return NULL;
+ sch = cio_probe_console();
+ if (IS_ERR(sch)) {
+ console_cdev_in_use = 0;
+ return (void *) sch;
+ }
+ memset(&console_cdev, 0, sizeof(struct ccw_device));
+ memset(&console_private, 0, sizeof(struct ccw_device_private));
+ console_cdev.private = &console_private;
+ ret = ccw_device_console_enable(&console_cdev, sch);
+ if (ret) {
+ cio_release_console();
+ console_cdev_in_use = 0;
+ return ERR_PTR(ret);
+ }
+ console_cdev.online = 1;
+ return &console_cdev;
+}
+#endif
+
+/*
+ * get ccw_device matching the busid, but only if owned by cdrv.
+ * Returns the device with a reference held, or NULL if no match
+ * (callers must put_device() the result).
+ */
+struct ccw_device *
+get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id)
+{
+ struct device *d, *dev;
+ struct device_driver *drv;
+
+ drv = get_driver(&cdrv->driver);
+ if (!drv)
+ return NULL; /* was "return 0" - use NULL for pointers */
+
+ down_read(&drv->bus->subsys.rwsem);
+
+ dev = NULL;
+ list_for_each_entry(d, &drv->devices, driver_list) {
+ dev = get_device(d);
+
+ if (dev && !strncmp(bus_id, dev->bus_id, BUS_ID_SIZE))
+ break;
+ else if (dev) {
+ /* Wrong bus id - drop the reference again. */
+ put_device(dev);
+ dev = NULL;
+ }
+ }
+ up_read(&drv->bus->subsys.rwsem);
+ put_driver(drv);
+
+ return dev ? to_ccwdev(dev) : NULL;
+}
+
+/************************** device driver handling ************************/
+
+/* This is the implementation of the ccw_driver class. The probe, remove
+ * and release methods are initially very similar to the device_driver
+ * implementations, with the difference that they have ccw_device
+ * arguments.
+ *
+ * A ccw driver also contains the information that is needed for
+ * device matching.
+ */
+static int
+ccw_device_probe (struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
+ int ret;
+
+ cdev->drv = cdrv; /* to let the driver call _set_online */
+
+ ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
+
+ if (ret) {
+ /* Use NULL rather than integer 0 for the pointer reset. */
+ cdev->drv = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Generic remove callback for the ccw bus: let the driver detach, then
+ * take the device offline if it is still online and cancel any timer.
+ */
+static int
+ccw_device_remove (struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_driver *cdrv = cdev->drv;
+ int ret;
+
+ pr_debug("removing device %s\n", cdev->dev.bus_id);
+ if (cdrv->remove)
+ cdrv->remove(cdev);
+ if (cdev->online) {
+ cdev->online = 0;
+ spin_lock_irq(cdev->ccwlock);
+ ret = ccw_device_offline(cdev);
+ spin_unlock_irq(cdev->ccwlock);
+ if (ret == 0)
+ wait_event(cdev->private->wait_q,
+ dev_fsm_final_state(cdev));
+ else
+ //FIXME: we can't fail!
+ pr_debug("ccw_device_offline returned %d, device %s\n",
+ ret, cdev->dev.bus_id);
+ }
+ ccw_device_set_timeout(cdev, 0);
+ cdev->drv = 0;
+ return 0;
+}
+
+/*
+ * Register a ccw driver with the driver core: fill in the generic
+ * driver fields and hand the embedded device_driver to the bus.
+ */
+int
+ccw_driver_register (struct ccw_driver *cdriver)
+{
+ cdriver->driver.bus = &ccw_bus_type;
+ cdriver->driver.name = cdriver->name;
+ cdriver->driver.probe = ccw_device_probe;
+ cdriver->driver.remove = ccw_device_remove;
+
+ return driver_register(&cdriver->driver);
+}
+
+/* Remove a ccw driver from the bus again. */
+void
+ccw_driver_unregister (struct ccw_driver *cdriver)
+{
+ driver_unregister(&cdriver->driver);
+}
+
+MODULE_LICENSE("GPL");
+/* Interfaces exported to ccw device drivers. */
+EXPORT_SYMBOL(ccw_device_set_online);
+EXPORT_SYMBOL(ccw_device_set_offline);
+EXPORT_SYMBOL(ccw_driver_register);
+EXPORT_SYMBOL(ccw_driver_unregister);
+EXPORT_SYMBOL(get_ccwdev_by_busid);
+EXPORT_SYMBOL(ccw_bus_type);
+EXPORT_SYMBOL(ccw_device_work);
+EXPORT_SYMBOL(ccw_device_notify_work);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
new file mode 100644
index 000000000000..a3aa056d7245
--- /dev/null
+++ b/drivers/s390/cio/device.h
@@ -0,0 +1,115 @@
+#ifndef S390_DEVICE_H
+#define S390_DEVICE_H
+
+/*
+ * states of the device statemachine
+ * (the values index the first dimension of dev_jumptable[];
+ * NR_DEV_STATES must stay the last entry)
+ */
+enum dev_state {
+ DEV_STATE_NOT_OPER,
+ DEV_STATE_SENSE_PGID,
+ DEV_STATE_SENSE_ID,
+ DEV_STATE_OFFLINE,
+ DEV_STATE_VERIFY,
+ DEV_STATE_ONLINE,
+ DEV_STATE_W4SENSE,
+ DEV_STATE_DISBAND_PGID,
+ DEV_STATE_BOXED,
+ /* states to wait for i/o completion before doing something */
+ DEV_STATE_CLEAR_VERIFY,
+ DEV_STATE_TIMEOUT_KILL,
+ DEV_STATE_WAIT4IO,
+ DEV_STATE_QUIESCE,
+ /* special states for devices gone not operational */
+ DEV_STATE_DISCONNECTED,
+ DEV_STATE_DISCONNECTED_SENSE_ID,
+ DEV_STATE_CMFCHANGE,
+ /* last element! */
+ NR_DEV_STATES
+};
+
+/*
+ * asynchronous events of the device statemachine
+ * (the values index the second dimension of dev_jumptable[])
+ */
+enum dev_event {
+ DEV_EVENT_NOTOPER,
+ DEV_EVENT_INTERRUPT,
+ DEV_EVENT_TIMEOUT,
+ DEV_EVENT_VERIFY,
+ /* last element! */
+ NR_DEV_EVENTS
+};
+
+struct ccw_device;
+
+/*
+ * action called through jumptable
+ */
+typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
+extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
+
+/* Dispatch @dev_event to the handler for the device's current state.
+ * NOTE(review): callers appear to hold the ccw device lock when calling
+ * this (see ccw_device_timeout) - confirm for all call sites. */
+static inline void
+dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
+}
+
+/*
+ * Delivers 1 if the device state is final (not_oper, offline, online
+ * or boxed), 0 for any transitional state.
+ */
+static inline int
+dev_fsm_final_state(struct ccw_device *cdev)
+{
+ switch (cdev->private->state) {
+ case DEV_STATE_NOT_OPER:
+ case DEV_STATE_OFFLINE:
+ case DEV_STATE_ONLINE:
+ case DEV_STATE_BOXED:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Internal interfaces between the cio core source files. */
+extern struct workqueue_struct *ccw_device_work;
+extern struct workqueue_struct *ccw_device_notify_work;
+
+void io_subchannel_recog_done(struct ccw_device *cdev);
+
+int ccw_device_cancel_halt_clear(struct ccw_device *);
+
+/* Registration / re-registration helpers (device.c). */
+int ccw_device_register(struct ccw_device *);
+void ccw_device_do_unreg_rereg(void *);
+void ccw_device_call_sch_unregister(void *);
+
+/* State machine entry points (device_fsm.c). */
+int ccw_device_recognition(struct ccw_device *);
+int ccw_device_online(struct ccw_device *);
+int ccw_device_offline(struct ccw_device *);
+
+/* Function prototypes for device status and basic sense stuff. */
+void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
+void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
+int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
+int ccw_device_do_sense(struct ccw_device *, struct irb *);
+
+/* Function prototypes for sense id stuff. */
+void ccw_device_sense_id_start(struct ccw_device *);
+void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
+void ccw_device_sense_id_done(struct ccw_device *, int);
+
+/* Function prototypes for path grouping stuff. */
+void ccw_device_sense_pgid_start(struct ccw_device *);
+void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
+void ccw_device_sense_pgid_done(struct ccw_device *, int);
+
+void ccw_device_verify_start(struct ccw_device *);
+void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
+void ccw_device_verify_done(struct ccw_device *, int);
+
+void ccw_device_disband_start(struct ccw_device *);
+void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
+void ccw_device_disband_done(struct ccw_device *, int);
+
+int ccw_device_call_handler(struct ccw_device *);
+
+int ccw_device_stlck(struct ccw_device *);
+
+/* qdio needs this. */
+void ccw_device_set_timeout(struct ccw_device *, int);
+
+void retry_set_schib(struct ccw_device *cdev);
+#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
new file mode 100644
index 000000000000..9b7f6f548b1d
--- /dev/null
+++ b/drivers/s390/cio/device_fsm.c
@@ -0,0 +1,1250 @@
+/*
+ * drivers/s390/cio/device_fsm.c
+ * finite state machine for device handling
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Cornelia Huck(cohuck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <asm/ccwdev.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "qdio.h"
+
+/* Return 1 if a ccw device is attached to @sch and currently online. */
+int
+device_is_online(struct subchannel *sch)
+{
+ struct ccw_device *cdev = sch->dev.driver_data;
+
+ return cdev && cdev->private->state == DEV_STATE_ONLINE;
+}
+
+/* Return 1 if the attached ccw device is in one of the disconnected
+ * states, 0 if none is attached or it is in another state. */
+int
+device_is_disconnected(struct subchannel *sch)
+{
+ struct ccw_device *cdev = sch->dev.driver_data;
+
+ if (!cdev)
+ return 0;
+ return cdev->private->state == DEV_STATE_DISCONNECTED ||
+ cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID;
+}
+
+/* Park the attached ccw device in the disconnected state: cancel any
+ * timeout and pending fake interrupt first. */
+void
+device_set_disconnected(struct subchannel *sch)
+{
+ struct ccw_device *cdev = sch->dev.driver_data;
+
+ if (!cdev)
+ return;
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->flags.fake_irb = 0;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+}
+
+/* Move the attached ccw device to the wait-for-i/o state, giving
+ * outstanding i/o ten seconds to complete. */
+void
+device_set_waiting(struct subchannel *sch)
+{
+ struct ccw_device *cdev = sch->dev.driver_data;
+
+ if (!cdev)
+ return;
+ ccw_device_set_timeout(cdev, 10*HZ);
+ cdev->private->state = DEV_STATE_WAIT4IO;
+}
+
+/*
+ * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
+ * Runs in timer context; takes the ccw device lock around the
+ * state machine dispatch.
+ */
+static void
+ccw_device_timeout(unsigned long data)
+{
+ struct ccw_device *cdev;
+
+ cdev = (struct ccw_device *) data;
+ spin_lock_irq(cdev->ccwlock);
+ dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
+ spin_unlock_irq(cdev->ccwlock);
+}
+
+/*
+ * Set timeout
+ * @expires is in jiffies; 0 cancels a pending timeout.
+ */
+void
+ccw_device_set_timeout(struct ccw_device *cdev, int expires)
+{
+ if (expires == 0) {
+ del_timer(&cdev->private->timer);
+ return;
+ }
+ if (timer_pending(&cdev->private->timer)) {
+ /* Nonzero from mod_timer() means the still-pending timer was
+  * re-armed; nothing more to do. */
+ if (mod_timer(&cdev->private->timer, jiffies + expires))
+ return;
+ }
+ /* Timer was not pending (or fired meanwhile) - arm it afresh. */
+ cdev->private->timer.function = ccw_device_timeout;
+ cdev->private->timer.data = (unsigned long) cdev;
+ cdev->private->timer.expires = jiffies + expires;
+ add_timer(&cdev->private->timer);
+}
+
+/* Kill any pending timers after machine check. */
+void
+device_kill_pending_timer(struct subchannel *sch)
+{
+ struct ccw_device *cdev = sch->dev.driver_data;
+
+ if (cdev)
+ ccw_device_set_timeout(cdev, 0);
+}
+
+/*
+ * Cancel running i/o. This is called repeatedly since halt/clear are
+ * asynchronous operations. We do one try with cio_cancel, two tries
+ * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
+ * Returns 0 if device now idle, -ENODEV for device not operational and
+ * -EBUSY if an interrupt is expected (either from halt/clear or from a
+ * status pending).
+ */
+int
+ccw_device_cancel_halt_clear(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /* Refresh the subchannel status before deciding anything. */
+ ret = stsch(sch->irq, &sch->schib);
+ if (ret || !sch->schib.pmcw.dnv)
+ return -ENODEV;
+ if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
+ /* Not operational or no activity -> done. */
+ return 0;
+ /* Stage 1: cancel io. */
+ if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
+ !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
+ ret = cio_cancel(sch);
+ if (ret != -EINVAL)
+ return ret;
+ /* cancel io unsuccessful. From now on it is asynchronous. */
+ cdev->private->iretry = 3; /* 3 halt retries. */
+ }
+ if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
+ /* Stage 2: halt io. */
+ if (cdev->private->iretry) {
+ cdev->private->iretry--;
+ ret = cio_halt(sch);
+ /* 0 means halt initiated; an interrupt will follow. */
+ return (ret == 0) ? -EBUSY : ret;
+ }
+ /* halt io unsuccessful. */
+ cdev->private->iretry = 255; /* 255 clear retries. */
+ }
+ /* Stage 3: clear io. */
+ if (cdev->private->iretry) {
+ cdev->private->iretry--;
+ ret = cio_clear (sch);
+ return (ret == 0) ? -EBUSY : ret;
+ }
+ /* All retries exhausted - i/o cannot be stopped. */
+ panic("Can't stop i/o on subchannel.\n");
+}
+
+/*
+ * A formerly disconnected device became operational again: check if it
+ * is still the same device. Returns 1 if so (and arms the oper
+ * notification), 0 if a deregister/re-register cycle was scheduled.
+ */
+static int
+ccw_device_handle_oper(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ cdev->private->flags.recog_done = 1;
+ /*
+ * Check if cu type and device type still match. If
+ * not, it is certainly another device and we have to
+ * de- and re-register. Also check here for non-matching devno.
+ */
+ if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
+ cdev->id.cu_model != cdev->private->senseid.cu_model ||
+ cdev->id.dev_type != cdev->private->senseid.dev_type ||
+ cdev->id.dev_model != cdev->private->senseid.dev_model ||
+ cdev->private->devno != sch->schib.pmcw.dev) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_do_unreg_rereg, (void *)cdev);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ return 0;
+ }
+ cdev->private->flags.donotify = 1;
+ return 1;
+}
+
+/*
+ * The machine won't give us any notification by machine check if a chpid has
+ * been varied online on the SE so we have to find out by magic (i. e. driving
+ * the channel subsystem to device selection and updating our path masks).
+ */
+static inline void
+__recover_lost_chpids(struct subchannel *sch, int old_lpm)
+{
+ int mask, i;
+
+ /* Probe every path that is usable now but was not before. */
+ for (i = 0, mask = 0x80; i < 8; i++, mask >>= 1)
+ if ((sch->lpm & mask) && !(old_lpm & mask))
+ chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
+}
+
+/*
+ * Stop device recognition.
+ * @state is the resulting device state (not_oper, offline or boxed);
+ * path masks are refreshed and the rest of the cio layer is notified
+ * via io_subchannel_recog_done().
+ */
+static void
+ccw_device_recog_done(struct ccw_device *cdev, int state)
+{
+ struct subchannel *sch;
+ int notify, old_lpm, same_dev;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ ccw_device_set_timeout(cdev, 0);
+ cio_disable_subchannel(sch);
+ /*
+ * Now that we tried recognition, we have performed device selection
+ * through ssch() and the path information is up to date.
+ */
+ old_lpm = sch->lpm;
+ stsch(sch->irq, &sch->schib);
+ sch->lpm = sch->schib.pmcw.pim &
+ sch->schib.pmcw.pam &
+ sch->schib.pmcw.pom &
+ sch->opm;
+ if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
+ /* Force reprobe on all chpids. */
+ old_lpm = 0;
+ if (sch->lpm != old_lpm)
+ __recover_lost_chpids(sch, old_lpm);
+ if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
+ if (state == DEV_STATE_NOT_OPER) {
+ /* Still gone - stay disconnected. */
+ cdev->private->flags.recog_done = 1;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ return;
+ }
+ /* Boxed devices don't need extra treatment. */
+ }
+ notify = 0;
+ same_dev = 0; /* Keep the compiler quiet... */
+ switch (state) {
+ case DEV_STATE_NOT_OPER:
+ CIO_DEBUG(KERN_WARNING, 2,
+ "SenseID : unknown device %04x on subchannel %04x\n",
+ cdev->private->devno, sch->irq);
+ break;
+ case DEV_STATE_OFFLINE:
+ if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
+ same_dev = ccw_device_handle_oper(cdev);
+ notify = 1;
+ }
+ /* fill out sense information */
+ cdev->id = (struct ccw_device_id) {
+ .cu_type = cdev->private->senseid.cu_type,
+ .cu_model = cdev->private->senseid.cu_model,
+ .dev_type = cdev->private->senseid.dev_type,
+ .dev_model = cdev->private->senseid.dev_model,
+ };
+ if (notify) {
+ /* Reappeared device: no re-registration needed. */
+ cdev->private->state = DEV_STATE_OFFLINE;
+ if (same_dev) {
+ /* Get device online again. */
+ ccw_device_online(cdev);
+ wake_up(&cdev->private->wait_q);
+ }
+ return;
+ }
+ /* Issue device info message. */
+ CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
+ "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
+ "%04X/%02X\n", cdev->private->devno,
+ cdev->id.cu_type, cdev->id.cu_model,
+ cdev->id.dev_type, cdev->id.dev_model);
+ break;
+ case DEV_STATE_BOXED:
+ CIO_DEBUG(KERN_WARNING, 2,
+ "SenseID : boxed device %04x on subchannel %04x\n",
+ cdev->private->devno, sch->irq);
+ break;
+ }
+ cdev->private->state = state;
+ io_subchannel_recog_done(cdev);
+ if (state != DEV_STATE_NOT_OPER)
+ wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Function called from device_id.c after sense id has completed.
+ * Maps the sense id result onto the device state recognition ends in.
+ */
+void
+ccw_device_sense_id_done(struct ccw_device *cdev, int err)
+{
+ if (err == 0)
+ ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
+ else if (err == -ETIME)
+ /* Sense id stopped by timeout. */
+ ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+ else
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+}
+
+/*
+ * Worker function: ask the subchannel driver whether it wants a
+ * reappeared device back; if not, schedule a deregister/re-register
+ * cycle for it.
+ */
+static void
+ccw_device_oper_notify(void *data)
+{
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ int ret;
+
+ cdev = (struct ccw_device *)data;
+ sch = to_subchannel(cdev->dev.parent);
+ ret = (sch->driver && sch->driver->notify) ?
+ sch->driver->notify(&sch->dev, CIO_OPER) : 0;
+ if (!ret)
+ /* Driver doesn't want device back. */
+ ccw_device_do_unreg_rereg((void *)cdev);
+ else
+ wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Finished with online/offline processing.
+ * Sets the final device state, fires the deferred oper notification
+ * if one was armed, and wakes up waiters.
+ */
+static void
+ccw_device_done(struct ccw_device *cdev, int state)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ if (state != DEV_STATE_ONLINE)
+ cio_disable_subchannel(sch);
+
+ /* Reset device status. */
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+ cdev->private->state = state;
+
+
+ if (state == DEV_STATE_BOXED)
+ CIO_DEBUG(KERN_WARNING, 2,
+ "Boxed device %04x on subchannel %04x\n",
+ cdev->private->devno, sch->irq);
+
+ if (cdev->private->flags.donotify) {
+ cdev->private->flags.donotify = 0;
+ PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
+ (void *)cdev);
+ queue_work(ccw_device_notify_work, &cdev->private->kick_work);
+ }
+ wake_up(&cdev->private->wait_q);
+
+ /* Drop the reference taken in ccw_device_online(). */
+ if (css_init_done && state != DEV_STATE_ONLINE)
+ put_device (&cdev->dev);
+}
+
+/*
+ * Function called from device_pgid.c after sense path ground has completed.
+ */
+void
+ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ switch (err) {
+ case 0:
+ /* Start Path Group verification. */
+ sch->vpm = 0; /* Start with no path groups set. */
+ cdev->private->state = DEV_STATE_VERIFY;
+ ccw_device_verify_start(cdev);
+ break;
+ case -ETIME: /* Sense path group id stopped by timeout. */
+ case -EUSERS: /* device is reserved for someone else. */
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ break;
+ case -EOPNOTSUPP: /* path grouping not supported, just set online. */
+ cdev->private->options.pgroup = 0;
+ ccw_device_done(cdev, DEV_STATE_ONLINE);
+ break;
+ default:
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ }
+}
+
+/*
+ * Start device recognition.
+ * Returns 0 if sense id was started, -EINVAL when called in a wrong
+ * state, or the error from enabling the subchannel.
+ */
+int
+ccw_device_recognition(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
+ (cdev->private->state != DEV_STATE_BOXED))
+ return -EINVAL;
+ sch = to_subchannel(cdev->dev.parent);
+ ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
+ if (ret != 0)
+ /* Couldn't enable the subchannel for i/o. Sick device. */
+ return ret;
+
+ /* After 60s the device recognition is considered to have failed. */
+ ccw_device_set_timeout(cdev, 60*HZ);
+
+ /*
+ * We used to start here with a sense pgid to find out whether a device
+ * is locked by someone else. Unfortunately, the sense pgid command
+ * code has other meanings on devices predating the path grouping
+ * algorithm, so we start with sense id and box the device after an
+ * timeout (or if sense pgid during path verification detects the device
+ * is locked, as may happen on newer devices).
+ */
+ cdev->private->flags.recog_done = 0;
+ cdev->private->state = DEV_STATE_SENSE_ID;
+ ccw_device_sense_id_start(cdev);
+ return 0;
+}
+
+/*
+ * Handle timeout in device recognition.
+ * Tries cancel/halt/clear; while that is still busy (-EBUSY falls into
+ * the default case) the timer is re-armed for another 3 seconds.
+ */
+static void
+ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int ret;
+
+ ret = ccw_device_cancel_halt_clear(cdev);
+ switch (ret) {
+ case 0:
+ ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+ break;
+ case -ENODEV:
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ default:
+ ccw_device_set_timeout(cdev, 3*HZ);
+ }
+}
+
+
+/*
+ * Worker function: no path to the device is left. Ask the subchannel
+ * driver whether it wants to keep the device; if not, unregister the
+ * subchannel, otherwise park the device disconnected.
+ */
+static void
+ccw_device_nopath_notify(void *data)
+{
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ int ret;
+
+ cdev = (struct ccw_device *)data;
+ sch = to_subchannel(cdev->dev.parent);
+ /* Extra sanity. */
+ if (sch->lpm)
+ return;
+ ret = (sch->driver && sch->driver->notify) ?
+ sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
+ if (!ret) {
+ if (get_device(&sch->dev)) {
+ /* Driver doesn't want to keep device. */
+ cio_disable_subchannel(sch);
+ if (get_device(&cdev->dev)) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_call_sch_unregister,
+ (void *)cdev);
+ queue_work(ccw_device_work,
+ &cdev->private->kick_work);
+ } else
+ put_device(&sch->dev);
+ }
+ } else {
+ /* Driver keeps the device: disconnect it for now. */
+ cio_disable_subchannel(sch);
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->flags.fake_irb = 0;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ wake_up(&cdev->private->wait_q);
+ }
+}
+
+/*
+ * Path verification finished: set the final online state and, if a
+ * start request arrived during verification, deliver the deferred
+ * fake interrupt to the driver.
+ */
+void
+ccw_device_verify_done(struct ccw_device *cdev, int err)
+{
+ cdev->private->flags.doverify = 0;
+ switch (err) {
+ case -EOPNOTSUPP: /* path grouping not supported, just set online. */
+ cdev->private->options.pgroup = 0;
+ /* fallthrough */
+ case 0:
+ ccw_device_done(cdev, DEV_STATE_ONLINE);
+ /* Deliver fake irb to device driver, if needed. */
+ if (cdev->private->flags.fake_irb) {
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ cdev->private->irb.scsw = (struct scsw) {
+ .cc = 1,
+ .fctl = SCSW_FCTL_START_FUNC,
+ .actl = SCSW_ACTL_START_PEND,
+ .stctl = SCSW_STCTL_STATUS_PEND,
+ };
+ cdev->private->flags.fake_irb = 0;
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ &cdev->private->irb);
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ }
+ break;
+ case -ETIME:
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ break;
+ default:
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_nopath_notify, (void *)cdev);
+ queue_work(ccw_device_notify_work, &cdev->private->kick_work);
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ }
+}
+
+/*
+ * Get device online.
+ *
+ * Only valid from the offline or boxed states. Enables the subchannel for
+ * I/O and either goes online directly (path grouping disabled) or starts
+ * the SensePGID state machine first. Returns 0 on success or a negative
+ * errno if the transition cannot be started.
+ */
+int
+ccw_device_online(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
+	    (cdev->private->state != DEV_STATE_BOXED))
+		return -EINVAL;
+	sch = to_subchannel(cdev->dev.parent);
+	if (css_init_done && !get_device(&cdev->dev))
+		return -ENODEV;
+	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
+	if (ret != 0) {
+		/* Couldn't enable the subchannel for i/o. Sick device. */
+		if (ret == -ENODEV)
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+		return ret;
+	}
+	/* Do we want to do path grouping? */
+	if (!cdev->private->options.pgroup) {
+		/* No, set state online immediately. */
+		ccw_device_done(cdev, DEV_STATE_ONLINE);
+		return 0;
+	}
+	/* Do a SensePGID first. */
+	cdev->private->state = DEV_STATE_SENSE_PGID;
+	ccw_device_sense_pgid_start(cdev);
+	return 0;
+}
+
+/*
+ * Completion callback for disbanding the path group.
+ * Maps the result to the final device state: 0 -> offline,
+ * timeout -> boxed, anything else -> not operational.
+ */
+void
+ccw_device_disband_done(struct ccw_device *cdev, int err)
+{
+	switch (err) {
+	case 0:
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		break;
+	case -ETIME:
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		break;
+	default:
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		break;
+	}
+}
+
+/*
+ * Shutdown device.
+ *
+ * Only valid when the device is online and idle (no activity pending on
+ * the subchannel). Goes offline directly if path grouping is not used,
+ * otherwise starts the Set Path Group ID disband sequence first.
+ * Returns 0, or -ENODEV/-EBUSY/-EINVAL as appropriate.
+ */
+int
+ccw_device_offline(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
+		return -ENODEV;
+	if (cdev->private->state != DEV_STATE_ONLINE) {
+		if (sch->schib.scsw.actl != 0)
+			return -EBUSY;
+		return -EINVAL;
+	}
+	if (sch->schib.scsw.actl != 0)
+		return -EBUSY;
+	/* Are we doing path grouping? */
+	if (!cdev->private->options.pgroup) {
+		/* No, set state offline immediately. */
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		return 0;
+	}
+	/* Start Set Path Group commands. */
+	cdev->private->state = DEV_STATE_DISBAND_PGID;
+	ccw_device_disband_start(cdev);
+	return 0;
+}
+
+/*
+ * Handle timeout in device online/offline process.
+ *
+ * Same strategy as the recognition timeout: terminate the pending I/O;
+ * 0 -> boxed, -ENODEV -> not operational, otherwise retry in 3 seconds.
+ */
+static void
+ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ret = ccw_device_cancel_halt_clear(cdev);
+	switch (ret) {
+	case 0:
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		break;
+	case -ENODEV:
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		break;
+	default:
+		/* Termination still in progress; re-arm the timer. */
+		ccw_device_set_timeout(cdev, 3*HZ);
+	}
+}
+
+/*
+ * Handle not oper event in device recognition.
+ * Recognition cannot continue; finish it with "not operational".
+ */
+static void
+ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+}
+
+/*
+ * Handle not operational event while offline.
+ *
+ * Marks the device not operational and schedules unregistration of the
+ * subchannel (the reference taken here is released by the work function).
+ */
+static void
+ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	cdev->private->state = DEV_STATE_NOT_OPER;
+	sch = to_subchannel(cdev->dev.parent);
+	if (get_device(&cdev->dev)) {
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_call_sch_unregister, (void *)cdev);
+		queue_work(ccw_device_work, &cdev->private->kick_work);
+	}
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Handle not operational event while online.
+ *
+ * First gives the subchannel driver a chance to keep the device (device
+ * gone vs. all paths gone); if it does, the device goes disconnected.
+ * Otherwise the subchannel is disabled, pending I/O is reported to the
+ * device driver, and the subchannel is scheduled for unregistration.
+ */
+static void
+ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (sch->driver->notify &&
+	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
+		/* Driver wants to keep the device; wait for reconnect. */
+		ccw_device_set_timeout(cdev, 0);
+		cdev->private->flags.fake_irb = 0;
+		cdev->private->state = DEV_STATE_DISCONNECTED;
+		wake_up(&cdev->private->wait_q);
+		return;
+	}
+	cdev->private->state = DEV_STATE_NOT_OPER;
+	cio_disable_subchannel(sch);
+	if (sch->schib.scsw.actl != 0) {
+		// FIXME: not-oper indication to device driver ?
+		ccw_device_call_handler(cdev);
+	}
+	if (get_device(&cdev->dev)) {
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_call_sch_unregister, (void *)cdev);
+		queue_work(ccw_device_work, &cdev->private->kick_work);
+	}
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Handle path verification event.
+ *
+ * Starts path verification if the device is idle; if I/O or unhandled
+ * status is still pending, only sets the doverify flag so that
+ * verification is started once the final status has been delivered.
+ * A no-op when path grouping is disabled.
+ */
+static void
+ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	if (!cdev->private->options.pgroup)
+		return;
+	if (cdev->private->state == DEV_STATE_W4SENSE) {
+		/* Basic sense in progress; verify after it completes. */
+		cdev->private->flags.doverify = 1;
+		return;
+	}
+	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * Since we might not just be coming from an interrupt from the
+	 * subchannel we have to update the schib.
+	 */
+	stsch(sch->irq, &sch->schib);
+
+	if (sch->schib.scsw.actl != 0 ||
+	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
+		/*
+		 * No final status yet or final status not yet delivered
+		 * to the device driver. Can't do path verification now,
+		 * delay until final status was delivered.
+		 */
+		cdev->private->flags.doverify = 1;
+		return;
+	}
+	/* Device is idle, we can do the path verification. */
+	cdev->private->state = DEV_STATE_VERIFY;
+	ccw_device_verify_start(cdev);
+}
+
+/*
+ * Got an interrupt for a normal io (state online).
+ *
+ * Unsolicited interrupts are forwarded to the device driver directly,
+ * except that a unit check without sense data first triggers a basic
+ * sense. Solicited status is accumulated; if the accumulated status
+ * requires sense data, a basic sense is started, otherwise the driver's
+ * handler is called and a delayed path verification may be kicked off.
+ */
+static void
+ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+
+	/* The hardware stores the irb in the lowcore. */
+	irb = (struct irb *) __LC_IRB;
+	/* Check for unsolicited interrupt. */
+	if ((irb->scsw.stctl ==
+	     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
+	    && (!irb->scsw.cc)) {
+		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
+		    !irb->esw.esw0.erw.cons) {
+			/* Unit check but no sense data. Need basic sense. */
+			if (ccw_device_do_sense(cdev, irb) != 0)
+				goto call_handler_unsol;
+			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
+			cdev->private->state = DEV_STATE_W4SENSE;
+			cdev->private->intparm = 0;
+			return;
+		}
+call_handler_unsol:
+		if (cdev->handler)
+			cdev->handler (cdev, 0, irb);
+		return;
+	}
+	/* Accumulate status and find out if a basic sense is needed. */
+	ccw_device_accumulate_irb(cdev, irb);
+	if (cdev->private->flags.dosense) {
+		if (ccw_device_do_sense(cdev, irb) == 0) {
+			cdev->private->state = DEV_STATE_W4SENSE;
+		}
+		return;
+	}
+	/* Call the handler. */
+	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+		/* Start delayed path verification. */
+		ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Got a timeout in online state.
+ *
+ * Tries to terminate the running I/O. If termination is still pending
+ * (-EBUSY) the device moves to the timeout-kill state and we retry in
+ * 3 seconds; if the device is gone (-ENODEV) the no-path/not-oper path
+ * is taken; otherwise the driver is told the request timed out.
+ */
+static void
+ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ccw_device_set_timeout(cdev, 0);
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, 3*HZ);
+		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+		return;
+	}
+	if (ret == -ENODEV) {
+		struct subchannel *sch;
+
+		sch = to_subchannel(cdev->dev.parent);
+		if (!sch->lpm) {
+			/* No path left; notify and eventually remove. */
+			PREPARE_WORK(&cdev->private->kick_work,
+				     ccw_device_nopath_notify, (void *)cdev);
+			queue_work(ccw_device_notify_work,
+				   &cdev->private->kick_work);
+		} else
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+	} else if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-ETIMEDOUT));
+}
+
+/*
+ * Got an interrupt for a basic sense.
+ *
+ * Retries the basic sense if it had not started yet, accumulates the
+ * sense data otherwise, and once no further sense is needed returns to
+ * the online state, calls the driver's handler and possibly starts a
+ * delayed path verification.
+ */
+void
+ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+
+	irb = (struct irb *) __LC_IRB;
+	/* Check for unsolicited interrupt. */
+	if (irb->scsw.stctl ==
+	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
+		if (irb->scsw.cc == 1)
+			/* Basic sense hasn't started. Try again. */
+			ccw_device_do_sense(cdev, irb);
+		else {
+			printk("Huh? %s(%s): unsolicited interrupt...\n",
+			       __FUNCTION__, cdev->dev.bus_id);
+			if (cdev->handler)
+				cdev->handler (cdev, 0, irb);
+		}
+		return;
+	}
+	/* Add basic sense info to irb. */
+	ccw_device_accumulate_basic_sense(cdev, irb);
+	if (cdev->private->flags.dosense) {
+		/* Another basic sense is needed. */
+		ccw_device_do_sense(cdev, irb);
+		return;
+	}
+	cdev->private->state = DEV_STATE_ONLINE;
+	/* Call the handler. */
+	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+		/* Start delayed path verification. */
+		ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Interrupt handler for the clear-then-verify state: accumulate the
+ * status of the cleared I/O (no basic sense) and try to start the
+ * delayed path verification.
+ */
+static void
+ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+
+	irb = (struct irb *) __LC_IRB;
+	/* Accumulate status. We don't do basic sense. */
+	ccw_device_accumulate_irb(cdev, irb);
+	/* Try to start delayed device verification. */
+	ccw_device_online_verify(cdev, 0);
+	/* Note: Don't call handler for cio initiated clear! */
+}
+
+/*
+ * Interrupt after a timed-out I/O was killed: report the timeout to the
+ * device driver, then either notify about a total path loss or start the
+ * delayed path verification.
+ */
+static void
+ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	ccw_device_set_timeout(cdev, 0);
+	/* OK, i/o is dead now. Call interrupt handler. */
+	cdev->private->state = DEV_STATE_ONLINE;
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-ETIMEDOUT));
+	if (!sch->lpm) {
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_nopath_notify, (void *)cdev);
+		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
+	} else if (cdev->private->flags.doverify)
+		/* Start delayed path verification. */
+		ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Timeout while killing a timed-out I/O: retry the termination, handle a
+ * vanished device, or — if termination unexpectedly completed — report
+ * the timeout to the device driver.
+ */
+static void
+ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		/* Termination still pending; try again in 3 seconds. */
+		ccw_device_set_timeout(cdev, 3*HZ);
+		return;
+	}
+	if (ret == -ENODEV) {
+		struct subchannel *sch;
+
+		sch = to_subchannel(cdev->dev.parent);
+		if (!sch->lpm) {
+			PREPARE_WORK(&cdev->private->kick_work,
+				     ccw_device_nopath_notify, (void *)cdev);
+			queue_work(ccw_device_notify_work,
+				   &cdev->private->kick_work);
+		} else
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+		return;
+	}
+	//FIXME: Can we get here?
+	cdev->private->state = DEV_STATE_ONLINE;
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-ETIMEDOUT));
+}
+
+/*
+ * Interrupt while waiting for a running I/O to finish (wait4io state):
+ * accumulate status, start a basic sense if needed, reset the timeout
+ * once the device is idle, call the driver's handler and then either
+ * notify about path loss or start the delayed path verification.
+ */
+static void
+ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+	struct subchannel *sch;
+
+	irb = (struct irb *) __LC_IRB;
+	/*
+	 * Accumulate status and find out if a basic sense is needed.
+	 * This is fine since we have already adapted the lpm.
+	 */
+	ccw_device_accumulate_irb(cdev, irb);
+	if (cdev->private->flags.dosense) {
+		if (ccw_device_do_sense(cdev, irb) == 0) {
+			cdev->private->state = DEV_STATE_W4SENSE;
+		}
+		return;
+	}
+
+	/* Iff device is idle, reset timeout. */
+	sch = to_subchannel(cdev->dev.parent);
+	if (!stsch(sch->irq, &sch->schib))
+		if (sch->schib.scsw.actl == 0)
+			ccw_device_set_timeout(cdev, 0);
+	/* Call the handler. */
+	ccw_device_call_handler(cdev);
+	if (!sch->lpm) {
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_nopath_notify, (void *)cdev);
+		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
+	} else if (cdev->private->flags.doverify)
+		ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Timeout while waiting for a running I/O to finish: terminate the I/O,
+ * handle busy/not-operational results, otherwise report the timeout to
+ * the driver and notify or start delayed path verification.
+ */
+static void
+ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	ccw_device_set_timeout(cdev, 0);
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, 3*HZ);
+		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+		return;
+	}
+	if (ret == -ENODEV) {
+		if (!sch->lpm) {
+			PREPARE_WORK(&cdev->private->kick_work,
+				     ccw_device_nopath_notify, (void *)cdev);
+			queue_work(ccw_device_notify_work,
+				   &cdev->private->kick_work);
+		} else
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+		return;
+	}
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-ETIMEDOUT));
+	if (!sch->lpm) {
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_nopath_notify, (void *)cdev);
+		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
+	} else if (cdev->private->flags.doverify)
+		/* Start delayed path verification. */
+		ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Verify event while waiting for I/O completion: just remember that a
+ * path verification is due once the I/O has terminated.
+ */
+static void
+ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	/* When the I/O has terminated, we have to start verification. */
+	if (cdev->private->options.pgroup)
+		cdev->private->flags.doverify = 1;
+}
+
+/*
+ * Interrupt/timeout handler for the boxed state (steal-lock operation).
+ * Accumulates solicited status and wakes up whoever is waiting for the
+ * steal lock to finish; unsolicited interrupts and timeouts only wake up
+ * the waiter.
+ */
+static void
+ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+
+	switch (dev_event) {
+	case DEV_EVENT_INTERRUPT:
+		irb = (struct irb *) __LC_IRB;
+		/* Check for unsolicited interrupt. */
+		if ((irb->scsw.stctl ==
+		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
+		    (!irb->scsw.cc))
+			/* FIXME: we should restart stlck here, but this
+			 * is extremely unlikely ... */
+			goto out_wakeup;
+
+		ccw_device_accumulate_irb(cdev, irb);
+		/* We don't care about basic sense etc. */
+		break;
+	default: /* timeout */
+		break;
+	}
+out_wakeup:
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Start device recognition for a disconnected device: enable the
+ * subchannel for I/O and kick off Sense ID with a 60 second deadline.
+ */
+static void
+ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
+		/* Couldn't enable the subchannel for i/o. Sick device. */
+		return;
+
+	/* After 60s the device recognition is considered to have failed. */
+	ccw_device_set_timeout(cdev, 60*HZ);
+
+	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
+	ccw_device_sense_id_start(cdev);
+}
+
+/*
+ * Re-probe a disconnected device on the given subchannel.
+ *
+ * Refreshes the schib, re-derives the usable path mask from pim/pam/pom
+ * and the opm, restores pmcw bits that were lost while the subchannel
+ * was disabled, and restarts device recognition.
+ */
+void
+device_trigger_reprobe(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	if (!sch->dev.driver_data)
+		return;
+	cdev = sch->dev.driver_data;
+	if (cdev->private->state != DEV_STATE_DISCONNECTED)
+		return;
+
+	/* Update some values. */
+	if (stsch(sch->irq, &sch->schib))
+		return;
+
+	/*
+	 * The pim, pam, pom values may not be accurate, but they are the best
+	 * we have before performing device selection :/
+	 */
+	sch->lpm = sch->schib.pmcw.pim &
+		sch->schib.pmcw.pam &
+		sch->schib.pmcw.pom &
+		sch->opm;
+	/* Re-set some bits in the pmcw that were lost. */
+	sch->schib.pmcw.isc = 3;
+	sch->schib.pmcw.csense = 1;
+	sch->schib.pmcw.ena = 0;
+	/* More than one path set? Then enable multipath mode. */
+	if ((sch->lpm & (sch->lpm - 1)) != 0)
+		sch->schib.pmcw.mp = 1;
+	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
+	/* We should also update ssd info, but this has to wait. */
+	ccw_device_start_id(cdev, 0);
+}
+
+/*
+ * Interrupt in state offline: a previous disable of the subchannel must
+ * have failed; try disabling it again.
+ */
+static void
+ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * An interrupt in state offline means a previous disable was not
+	 * successful. Try again.
+	 */
+	cio_disable_subchannel(sch);
+}
+
+/*
+ * Any event during a channel-measurement state change: retry setting the
+ * schib, go back online and re-dispatch the event through the FSM.
+ */
+static void
+ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	retry_set_schib(cdev);
+	cdev->private->state = DEV_STATE_ONLINE;
+	dev_fsm_event(cdev, dev_event);
+}
+
+
+/*
+ * Quiesce finished (interrupt or not-oper event): record the resulting
+ * state and wake up the waiter.
+ */
+static void
+ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	ccw_device_set_timeout(cdev, 0);
+	if (dev_event == DEV_EVENT_NOTOPER)
+		cdev->private->state = DEV_STATE_NOT_OPER;
+	else
+		cdev->private->state = DEV_STATE_OFFLINE;
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Timeout during quiesce: terminate pending I/O; on success go offline,
+ * on a vanished device go not-oper, otherwise retry after 100ms.
+ */
+static void
+ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ret = ccw_device_cancel_halt_clear(cdev);
+	switch (ret) {
+	case 0:
+		cdev->private->state = DEV_STATE_OFFLINE;
+		wake_up(&cdev->private->wait_q);
+		break;
+	case -ENODEV:
+		cdev->private->state = DEV_STATE_NOT_OPER;
+		wake_up(&cdev->private->wait_q);
+		break;
+	default:
+		ccw_device_set_timeout(cdev, HZ/10);
+	}
+}
+
+/*
+ * No operation action. This is used e.g. to ignore a timeout event in
+ * state offline.
+ */
+static void
+ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
+{
+}
+
+/*
+ * Bug operation action: reached only via a state/event combination that
+ * must never occur; log it and crash.
+ */
+static void
+ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
+	       cdev->private->state, dev_event);
+	BUG();
+}
+
+/*
+ * device statemachine
+ *
+ * Dispatch table indexed by [device state][event]; dev_fsm_event() calls
+ * dev_jumptable[state][event](cdev, event). Every slot is populated, so
+ * impossible combinations are made explicit via ccw_device_bug/nop.
+ */
+fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
+	[DEV_STATE_NOT_OPER] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_SENSE_PGID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_SENSE_ID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_OFFLINE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_VERIFY] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_ONLINE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
+	},
+	[DEV_STATE_W4SENSE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
+	},
+	[DEV_STATE_DISBAND_PGID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_BOXED] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	/* states to wait for i/o completion before doing something */
+	[DEV_STATE_CLEAR_VERIFY] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_TIMEOUT_KILL] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
+	},
+	[DEV_STATE_WAIT4IO] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
+	},
+	[DEV_STATE_QUIESCE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	/* special states for devices gone not operational */
+	[DEV_STATE_DISCONNECTED] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_CMFCHANGE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
+	},
+};
+
+/*
+ * io_subchannel_irq is called for "real" interrupts or for status
+ * pending conditions on msch. Forwards the interrupt into the device
+ * state machine if a ccw device is attached to the subchannel.
+ */
+void
+io_subchannel_irq (struct device *pdev)
+{
+	struct ccw_device *cdev;
+
+	cdev = to_subchannel(pdev)->dev.driver_data;
+
+	CIO_TRACE_EVENT (3, "IRQ");
+	CIO_TRACE_EVENT (3, pdev->bus_id);
+	if (cdev)
+		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+}
+
+EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
new file mode 100644
index 000000000000..0e68fb511dc9
--- /dev/null
+++ b/drivers/s390/cio/device_id.c
@@ -0,0 +1,355 @@
+/*
+ * drivers/s390/cio/device_id.c
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Cornelia Huck(cohuck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Sense ID functions.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <asm/ccwdev.h>
+#include <asm/delay.h>
+#include <asm/cio.h>
+#include <asm/lowcore.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+
+/*
+ * diag210 is used under VM to get information about a virtual device
+ */
+#ifdef CONFIG_ARCH_S390X
+/*
+ * 64-bit variant: diag 210 is a 31-bit interface, so the inline asm
+ * temporarily switches to 31-bit addressing mode (sam31/sam64) and the
+ * data is copied through a static buffer guaranteed to live below 2GB.
+ * Returns the diagnose condition code, or -1 if the diagnose faulted
+ * (the ex_table entry skips straight to the sam64).
+ */
+int
+diag210(struct diag210 * addr)
+{
+	/*
+	 * diag 210 needs its data below the 2GB border, so we
+	 * use a static data area to be sure
+	 */
+	static struct diag210 diag210_tmp;
+	static DEFINE_SPINLOCK(diag210_lock);
+	unsigned long flags;
+	int ccode;
+
+	/* The lock also serializes access to the shared static buffer. */
+	spin_lock_irqsave(&diag210_lock, flags);
+	diag210_tmp = *addr;
+
+	asm volatile (
+		"	lhi	%0,-1\n"
+		"	sam31\n"
+		"	diag	%1,0,0x210\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:	sam64\n"
+		".section __ex_table,\"a\"\n"
+		"	.align 8\n"
+		"	.quad 0b,1b\n"
+		".previous"
+		: "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" );
+
+	*addr = diag210_tmp;
+	spin_unlock_irqrestore(&diag210_lock, flags);
+
+	return ccode;
+}
+#else
+/*
+ * 31-bit variant: caller's buffer is already addressable, no mode switch
+ * or bounce buffer needed. Returns the diagnose condition code, or -1 if
+ * the diagnose faulted (handled via the exception table entry).
+ */
+int
+diag210(struct diag210 * addr)
+{
+	int ccode;
+
+	asm volatile (
+		"	lhi	%0,-1\n"
+		"	diag	%1,0,0x210\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		".section __ex_table,\"a\"\n"
+		"	.align 4\n"
+		"	.long 0b,1b\n"
+		".previous"
+		: "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" );
+
+	return ccode;
+}
+#endif
+
+/*
+ * Ask z/VM CP (via diagnose 210) about a virtual device and translate the
+ * virtual device class/type into a control unit type in the sense ID data.
+ *
+ * Input :
+ *   devno - device number
+ *   ps    - pointer to sense ID data area (cu_type is filled in on a
+ *           successful match; left untouched otherwise)
+ * Output : none
+ */
+static void
+VM_virtual_device_info (__u16 devno, struct senseid *ps)
+{
+	/* Translation table: VM virtual device class/type -> CU type. */
+	static struct {
+		int vrdcvcla, vrdcvtyp, cu_type;
+	} vm_devices[] = {
+		{ 0x08, 0x01, 0x3480 },
+		{ 0x08, 0x02, 0x3430 },
+		{ 0x08, 0x10, 0x3420 },
+		{ 0x08, 0x42, 0x3424 },
+		{ 0x08, 0x44, 0x9348 },
+		{ 0x08, 0x81, 0x3490 },
+		{ 0x08, 0x82, 0x3422 },
+		{ 0x10, 0x41, 0x1403 },
+		{ 0x10, 0x42, 0x3211 },
+		{ 0x10, 0x43, 0x3203 },
+		{ 0x10, 0x45, 0x3800 },
+		{ 0x10, 0x47, 0x3262 },
+		{ 0x10, 0x48, 0x3820 },
+		{ 0x10, 0x49, 0x3800 },
+		{ 0x10, 0x4a, 0x4245 },
+		{ 0x10, 0x4b, 0x4248 },
+		{ 0x10, 0x4d, 0x3800 },
+		{ 0x10, 0x4e, 0x3820 },
+		{ 0x10, 0x4f, 0x3820 },
+		{ 0x10, 0x82, 0x2540 },
+		{ 0x10, 0x84, 0x3525 },
+		{ 0x20, 0x81, 0x2501 },
+		{ 0x20, 0x82, 0x2540 },
+		{ 0x20, 0x84, 0x3505 },
+		{ 0x40, 0x01, 0x3278 },
+		{ 0x40, 0x04, 0x3277 },
+		{ 0x40, 0x80, 0x2250 },
+		{ 0x40, 0xc0, 0x5080 },
+		{ 0x80, 0x00, 0x3215 },
+	};
+	struct diag210 diag_data;
+	int ccode, i;
+
+	CIO_TRACE_EVENT (4, "VMvdinf");
+
+	diag_data = (struct diag210) {
+		.vrdcdvno = devno,
+		.vrdclen = sizeof (diag_data),
+	};
+
+	ccode = diag210 (&diag_data);
+	ps->reserved = 0xff;
+
+	/* Special case for bloody osa devices. */
+	if (diag_data.vrdcvcla == 0x02 &&
+	    diag_data.vrdcvtyp == 0x20) {
+		ps->cu_type = 0x3088;
+		ps->cu_model = 0x60;
+		return;
+	}
+	for (i = 0; i < sizeof(vm_devices) / sizeof(vm_devices[0]); i++)
+		if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla &&
+		    diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) {
+			ps->cu_type = vm_devices[i].cu_type;
+			return;
+		}
+	/* No match: log the raw diag 210 reply for debugging. */
+	CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
+		      "vdev class : %02X, vdev type : %04X \n ... "
+		      "rdev class : %02X, rdev type : %04X, "
+		      "rdev model: %02X\n",
+		      devno, ccode,
+		      diag_data.vrdcvcla, diag_data.vrdcvtyp,
+		      diag_data.vrdcrccl, diag_data.vrdccrty,
+		      diag_data.vrdccrmd);
+}
+
+/*
+ * Start Sense ID helper function.
+ * Try to obtain the 'control unit'/'device type' information
+ * associated with the subchannel.
+ *
+ * Builds a (Suspend Multipath Reconnection +) Sense ID channel program
+ * and starts it on the first available path in imask, giving each path
+ * up to 'iretry' attempts. Returns 0 on started I/O, or the last error
+ * from cio_start (-EBUSY, -EACCES or -ENODEV).
+ */
+static int
+__ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	struct ccw1 *ccw;
+	int ret;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/* Setup sense channel program. */
+	ccw = cdev->private->iccws;
+	if (sch->schib.pmcw.pim != 0x80) {
+		/* more than one path installed. */
+		ccw->cmd_code = CCW_CMD_SUSPEND_RECONN;
+		ccw->cda = 0;
+		ccw->count = 0;
+		ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+		ccw++;
+	}
+	ccw->cmd_code = CCW_CMD_SENSE_ID;
+	ccw->cda = (__u32) __pa (&cdev->private->senseid);
+	ccw->count = sizeof (struct senseid);
+	ccw->flags = CCW_FLAG_SLI;
+
+	/* Reset device status. */
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+	/* Try on every path. */
+	ret = -ENODEV;
+	while (cdev->private->imask != 0) {
+		if ((sch->opm & cdev->private->imask) != 0 &&
+		    cdev->private->iretry > 0) {
+			cdev->private->iretry--;
+			ret = cio_start (sch, cdev->private->iccws,
+					 cdev->private->imask);
+			/* ret is 0, -EBUSY, -EACCES or -ENODEV */
+			if (ret != -EACCES)
+				return ret;
+		}
+		/* Path not usable; move on to the next one. */
+		cdev->private->imask >>= 1;
+		cdev->private->iretry = 5;
+	}
+	return ret;
+}
+
+/*
+ * Kick off Sense ID: initialize the sense ID buffer (cu_type 0xFFFF
+ * marks "no answer yet"), reset the path mask/retry counters and start
+ * the channel program. Reports immediate failure via the done callback.
+ */
+void
+ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+	int ret;
+
+	memset (&cdev->private->senseid, 0, sizeof (struct senseid));
+	cdev->private->senseid.cu_type = 0xFFFF;
+	cdev->private->imask = 0x80;
+	cdev->private->iretry = 5;
+	ret = __ccw_device_sense_id_start(cdev);
+	if (ret && ret != -EBUSY)
+		ccw_device_sense_id_done(cdev, ret);
+}
+
+/*
+ * Called from interrupt context to check if a valid answer
+ * to Sense ID was received.
+ *
+ * Returns 0 on a proper answer (also records whether the extended sense
+ * ID data is missing), -ETIME if the operation was halted/cleared by the
+ * timeout logic, -EOPNOTSUPP on command reject, -EACCES if the path is
+ * not operational, or -EAGAIN for any other condition worth retrying.
+ */
+static int
+ccw_device_check_sense_id(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	struct irb *irb;
+
+	sch = to_subchannel(cdev->dev.parent);
+	irb = &cdev->private->irb;
+	/* Did we get a proper answer ? */
+	if (cdev->private->senseid.cu_type != 0xFFFF &&
+	    cdev->private->senseid.reserved == 0xFF) {
+		/* Short answer: extended sense id data not available. */
+		if (irb->scsw.count < sizeof (struct senseid) - 8)
+			cdev->private->flags.esid = 1;
+		return 0; /* Success */
+	}
+	/* Check the error cases. */
+	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+		return -ETIME;
+	if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
+		/*
+		 * if the device doesn't support the SenseID
+		 *  command further retries wouldn't help ...
+		 * NB: We don't check here for intervention required like we
+		 *     did before, because tape devices with no tape inserted
+		 *     may present this status *in conjunction with* the
+		 *     sense id information. So, for intervention required,
+		 *     we use the "whack it until it talks" strategy...
+		 */
+		CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x "
+			      "reports cmd reject\n",
+			      cdev->private->devno, sch->irq);
+		return -EOPNOTSUPP;
+	}
+	if (irb->esw.esw0.erw.cons) {
+		/* Unit check with sense data: log it and retry. */
+		CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, "
+			      "lpum %02X, cnt %02d, sns :"
+			      " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
+			      cdev->private->devno,
+			      irb->esw.esw0.sublog.lpum,
+			      irb->esw.esw0.erw.scnt,
+			      irb->ecw[0], irb->ecw[1],
+			      irb->ecw[2], irb->ecw[3],
+			      irb->ecw[4], irb->ecw[5],
+			      irb->ecw[6], irb->ecw[7]);
+		return -EAGAIN;
+	}
+	if (irb->scsw.cc == 3) {
+		/* Path not operational; caller will mask it off. */
+		if ((sch->orb.lpm &
+		     sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
+			CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on"
+				      " subchannel %04x is 'not operational'\n",
+				      sch->orb.lpm, cdev->private->devno,
+				      sch->irq);
+		return -EACCES;
+	}
+	/* Hmm, whatever happened, try again. */
+	CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
+		      "subchannel %04x returns status %02X%02X\n",
+		      cdev->private->devno, sch->irq,
+		      irb->scsw.dstat, irb->scsw.cstat);
+	return -EAGAIN;
+}
+
+/*
+ * Got interrupt for Sense ID.
+ *
+ * Restarts the Sense ID channel program on deferred conditions, checks
+ * the answer, retries on other paths as needed, falls back to asking
+ * z/VM (diag 210) and finally reports success or failure through
+ * ccw_device_sense_id_done().
+ */
+void
+ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+	struct irb *irb;
+	int ret;
+
+	sch = to_subchannel(cdev->dev.parent);
+	irb = (struct irb *) __LC_IRB;
+	/* Retry sense id, if needed. */
+	if (irb->scsw.stctl ==
+	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
+		if ((irb->scsw.cc == 1) || !irb->scsw.actl) {
+			ret = __ccw_device_sense_id_start(cdev);
+			if (ret && ret != -EBUSY)
+				ccw_device_sense_id_done(cdev, ret);
+		}
+		return;
+	}
+	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
+		return;
+	ret = ccw_device_check_sense_id(cdev);
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	switch (ret) {
+	/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */
+	case 0:			/* Sense id succeeded. */
+	case -ETIME:		/* Sense id stopped by timeout. */
+		ccw_device_sense_id_done(cdev, ret);
+		break;
+	case -EACCES:		/* channel is not operational. */
+		sch->lpm &= ~cdev->private->imask;
+		cdev->private->imask >>= 1;
+		cdev->private->iretry = 5;
+		/* fall through. */
+	case -EAGAIN:		/* try again. */
+		ret = __ccw_device_sense_id_start(cdev);
+		if (ret == 0 || ret == -EBUSY)
+			break;
+		/* fall through. */
+	default:		/* Sense ID failed. Try asking VM. */
+		if (MACHINE_IS_VM) {
+			VM_virtual_device_info (cdev->private->devno,
+						&cdev->private->senseid);
+			if (cdev->private->senseid.cu_type != 0xFFFF) {
+				/* Got the device information from VM. */
+				ccw_device_sense_id_done(cdev, 0);
+				return;
+			}
+		}
+		/*
+		 * If we couldn't identify the device type we
+		 *  consider the device "not operational".
+		 */
+		ccw_device_sense_id_done(cdev, -ENODEV);
+		break;
+	}
+}
+
+EXPORT_SYMBOL(diag210);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
new file mode 100644
index 000000000000..11e260e0b9c9
--- /dev/null
+++ b/drivers/s390/cio/device_ops.c
@@ -0,0 +1,603 @@
+/*
+ * drivers/s390/cio/device_ops.c
+ *
+ * $Revision: 1.55 $
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Cornelia Huck (cohuck@de.ibm.com)
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+#include <asm/ccwdev.h>
+#include <asm/idals.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc.h"
+#include "device.h"
+#include "qdio.h"
+
+int
+ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
+{
+ /*
+ * The flag usage is mutal exclusive ...
+ */
+ if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+ (flags & CCWDEV_REPORT_ALL))
+ return -EINVAL;
+ cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+ cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
+ cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
+ cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+ return 0;
+}
+
+int
+ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev)
+ return -ENODEV;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_WAIT4IO &&
+ cdev->private->state != DEV_STATE_W4SENSE)
+ return -EINVAL;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch)
+ return -ENODEV;
+ ret = cio_clear(sch);
+ if (ret == 0)
+ cdev->private->intparm = intparm;
+ return ret;
+}
+
/*
 * ccw_device_start_key() - start a channel program using storage key @key.
 * On success 0 is returned and @intparm is saved for the driver's
 * interrupt handler.  Returns -ENODEV for missing/inoperable devices,
 * -EBUSY while the device cannot accept a request, or the error from
 * cio_set_options()/cio_start_key().
 */
int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, __u8 key,
		     unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	/*
	 * During path verification, pretend the start succeeded and fake
	 * an irb for the caller once verification has finished.
	 */
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = 1;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	/*
	 * Busy when not online, when primary status is pending without
	 * secondary status, or when path verification is still required.
	 */
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options (sch, flags);
	if (ret)
		return ret;
	ret = cio_start_key (sch, cpa, lpm, key);
	if (ret == 0)
		/* The interrupt handler will receive @intparm. */
		cdev->private->intparm = intparm;
	return ret;
}
+
+
+int
+ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags, int expires)
+{
+ int ret;
+
+ if (!cdev)
+ return -ENODEV;
+ ccw_device_set_timeout(cdev, expires);
+ ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
+ if (ret != 0)
+ ccw_device_set_timeout(cdev, 0);
+ return ret;
+}
+
+int
+ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, unsigned long flags)
+{
+ return ccw_device_start_key(cdev, cpa, intparm, lpm,
+ default_storage_key, flags);
+}
+
+int
+ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, unsigned long flags,
+ int expires)
+{
+ return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
+ default_storage_key, flags,
+ expires);
+}
+
+
+int
+ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev)
+ return -ENODEV;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_WAIT4IO &&
+ cdev->private->state != DEV_STATE_W4SENSE)
+ return -EINVAL;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch)
+ return -ENODEV;
+ ret = cio_halt(sch);
+ if (ret == 0)
+ cdev->private->intparm = intparm;
+ return ret;
+}
+
+int
+ccw_device_resume(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ if (!cdev)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch)
+ return -ENODEV;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE ||
+ !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
+ return -EINVAL;
+ return cio_resume(sch);
+}
+
/*
 * Pass interrupt to device driver.  Returns 1 if the driver's handler
 * was (potentially) invoked and the accumulated irb was consumed,
 * 0 if this interrupt is not to be delivered under the current options.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
	struct subchannel *sch;
	unsigned int stctl;
	int ending_status;

	sch = to_subchannel(cdev->dev.parent);

	/*
	 * we allow for the device action handler if .
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = cdev->private->irb.scsw.stctl;
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	return 1;
}
+
+/*
+ * Search for CIW command in extended sense data.
+ */
+struct ciw *
+ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
+{
+ int ciw_cnt;
+
+ if (cdev->private->flags.esid == 0)
+ return NULL;
+ for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
+ if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
+ return cdev->private->senseid.ciw + ciw_cnt;
+ return NULL;
+}
+
+__u8
+ccw_device_get_path_mask(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch)
+ return 0;
+ else
+ return sch->vpm;
+}
+
+static void
+ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
+{
+ if (!ip)
+ /* unsolicited interrupt */
+ return;
+
+ /* Abuse intparm for error reporting. */
+ if (IS_ERR(irb))
+ cdev->private->intparm = -EIO;
+ else if ((irb->scsw.dstat !=
+ (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
+ (irb->scsw.cstat != 0)) {
+ /*
+ * We didn't get channel end / device end. Check if path
+ * verification has been started; we can retry after it has
+ * finished. We also retry unit checks except for command reject
+ * or intervention required.
+ */
+ if (cdev->private->flags.doverify ||
+ cdev->private->state == DEV_STATE_VERIFY)
+ cdev->private->intparm = -EAGAIN;
+ if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
+ !(irb->ecw[0] &
+ (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
+ cdev->private->intparm = -EAGAIN;
+ else
+ cdev->private->intparm = -EIO;
+
+ } else
+ cdev->private->intparm = 0;
+ wake_up(&cdev->private->wait_q);
+}
+
/*
 * Synchronously run channel program @ccw, retrying while the device is
 * busy.  Must be called with the subchannel lock held; the lock is
 * dropped while sleeping and while waiting for completion.  The
 * temporarily installed ccw_device_wake_up handler signals the result
 * by writing 0/-EAGAIN/-EIO into cdev->private->intparm, which is
 * initialized with the caller's @magic cookie.
 */
static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	do {
		ret = cio_start (sch, ccw, lpm);
		if ((ret == -EBUSY) || (ret == -EACCES)) {
			/* Try again later. */
			spin_unlock_irq(&sch->lock);
			msleep(10);
			spin_lock_irq(&sch->lock);
			continue;
		}
		if (ret != 0)
			/* Non-retryable error. */
			break;
		/* Wait for end of request. */
		cdev->private->intparm = magic;
		spin_unlock_irq(&sch->lock);
		wait_event(cdev->private->wait_q,
			   (cdev->private->intparm == -EIO) ||
			   (cdev->private->intparm == -EAGAIN) ||
			   (cdev->private->intparm == 0));
		spin_lock_irq(&sch->lock);
		/* Check at least for channel end / device end */
		if (cdev->private->intparm == -EIO) {
			/* Non-retryable error. */
			ret = -EIO;
			break;
		}
		if (cdev->private->intparm == 0)
			/* Success. */
			break;
		/* Try again later. */
		spin_unlock_irq(&sch->lock);
		msleep(10);
		spin_lock_irq(&sch->lock);
	} while (1);

	return ret;
}
+
/**
 * read_dev_chars() - read device characteristics
 * @param cdev target ccw device
 * @param buffer pointer to buffer for rdc data
 * @param length size of rdc data
 * @returns 0 for success, negative error value on failure
 *
 * Context:
 * called for online device, lock not held
 **/
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	int ret;
	struct ccw1 *rdc_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT (4, "rddevch");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rdc_ccw)
		return -ENOMEM;
	memset(rdc_ccw, 0, sizeof(struct ccw1));
	rdc_ccw->cmd_code = CCW_CMD_RDC;
	rdc_ccw->count = length;
	rdc_ccw->flags = CCW_FLAG_SLI;
	/* Map the caller's buffer into the CCW (see asm/idals.h). */
	ret = set_normalized_cda (rdc_ccw, (*buffer));
	if (ret != 0) {
		kfree(rdc_ccw);
		return ret;
	}

	spin_lock_irq(&sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C4C3 == ebcdic "RDC" */
		ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(&sch->lock);

	clear_normalized_cda (rdc_ccw);
	kfree(rdc_ccw);

	return ret;
}
+
/*
 * Read Configuration data using path mask.
 * On success a freshly allocated buffer (ownership passes to the
 * caller) and its size are stored in *@buffer / *@length; on failure
 * *@buffer is NULL and *@length is 0.
 */
int
read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct ccw1 *rcd_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT (4, "rdconf");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;

	rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rcd_ccw)
		return -ENOMEM;
	memset(rcd_ccw, 0, sizeof(struct ccw1));
	/* The CIW supplies the RCD command code and data count. */
	rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		kfree(rcd_ccw);
		return -ENOMEM;
	}
	memset (rcd_buf, 0, ciw->count);
	rcd_ccw->cmd_code = ciw->cmd;
	rcd_ccw->cda = (__u32) __pa (rcd_buf);
	rcd_ccw->count = ciw->count;
	rcd_ccw->flags = CCW_FLAG_SLI;

	spin_lock_irq(&sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C3C4 == ebcdic "RCD" */
		ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(&sch->lock);

	/*
	 * on success we update the user input parms
	 */
	if (ret) {
		kfree (rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		/* Ownership of rcd_buf passes to the caller. */
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	kfree(rcd_ccw);

	return ret;
}
+
/*
 * read_conf_data() - read configuration data on any available path.
 * Thin wrapper around read_conf_data_lpm() with no path restriction.
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
	return read_conf_data_lpm(cdev, buffer, length, 0);
}
+
+/*
+ * Try to break the lock on a boxed device.
+ */
+int
+ccw_device_stlck(struct ccw_device *cdev)
+{
+ void *buf, *buf2;
+ unsigned long flags;
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev)
+ return -ENODEV;
+
+ if (cdev->drv && !cdev->private->options.force)
+ return -EINVAL;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ CIO_TRACE_EVENT(2, "stl lock");
+ CIO_TRACE_EVENT(2, cdev->dev.bus_id);
+
+ buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
+ if (!buf2) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+ spin_lock_irqsave(&sch->lock, flags);
+ ret = cio_enable_subchannel(sch, 3);
+ if (ret)
+ goto out_unlock;
+ /*
+ * Setup ccw. We chain an unconditional reserve and a release so we
+ * only break the lock.
+ */
+ cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
+ cdev->private->iccws[0].cda = (__u32) __pa(buf);
+ cdev->private->iccws[0].count = 32;
+ cdev->private->iccws[0].flags = CCW_FLAG_CC;
+ cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
+ cdev->private->iccws[1].cda = (__u32) __pa(buf2);
+ cdev->private->iccws[1].count = 32;
+ cdev->private->iccws[1].flags = 0;
+ ret = cio_start(sch, cdev->private->iccws, 0);
+ if (ret) {
+ cio_disable_subchannel(sch); //FIXME: return code?
+ goto out_unlock;
+ }
+ cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
+ spin_unlock_irqrestore(&sch->lock, flags);
+ wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
+ spin_lock_irqsave(&sch->lock, flags);
+ cio_disable_subchannel(sch); //FIXME: return code?
+ if ((cdev->private->irb.scsw.dstat !=
+ (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
+ (cdev->private->irb.scsw.cstat != 0))
+ ret = -EIO;
+ /* Clear irb. */
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+out_unlock:
+ if (buf)
+ kfree(buf);
+ if (buf2)
+ kfree(buf2);
+ spin_unlock_irqrestore(&sch->lock, flags);
+ return ret;
+}
+
+void *
+ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ return chsc_get_chp_desc(sch, chp_no);
+}
+
+// FIXME: these have to go:
+
/* Legacy accessor (see FIXME above): expose the subchannel number. */
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
	return cdev->private->irq;
}
+
/* Legacy accessor (see FIXME above): expose the device number. */
int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
	return cdev->private->devno;
}
+
+
MODULE_LICENSE("GPL");
/* Public interface of the ccw device layer for s390 device drivers. */
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
new file mode 100644
index 000000000000..0adac8a67331
--- /dev/null
+++ b/drivers/s390/cio/device_pgid.c
@@ -0,0 +1,448 @@
+/*
+ * drivers/s390/cio/device_pgid.c
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Cornelia Huck(cohuck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Path Group ID functions.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/delay.h>
+#include <asm/lowcore.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+
/*
 * Start Sense Path Group ID helper function. Used in ccw_device_recog
 * and ccw_device_sense_pgid.
 *
 * Walks the remaining paths (imask) from left to right, giving each
 * path up to 5 tries.  Returns the cio_start() result, or -ENODEV when
 * no path is left.
 */
static int
__ccw_device_sense_pgid_start(struct ccw_device *cdev)
{
	struct subchannel *sch;
	struct ccw1 *ccw;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Setup sense path group id channel program. */
	ccw = cdev->private->iccws;
	ccw->cmd_code = CCW_CMD_SENSE_PGID;
	ccw->cda = (__u32) __pa (&cdev->private->pgid);
	ccw->count = sizeof (struct pgid);
	ccw->flags = CCW_FLAG_SLI;

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	/* Try on every path. */
	ret = -ENODEV;
	while (cdev->private->imask != 0) {
		/* Try every path multiple times. */
		if (cdev->private->iretry > 0) {
			cdev->private->iretry--;
			ret = cio_start (sch, cdev->private->iccws,
					 cdev->private->imask);
			/* ret is 0, -EBUSY, -EACCES or -ENODEV */
			if (ret != -EACCES)
				return ret;
			CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
				      "%04x, lpm %02X, became 'not "
				      "operational'\n",
				      cdev->private->devno, sch->irq,
				      cdev->private->imask);

		}
		/* Path failed or retries exhausted; move to the next one. */
		cdev->private->imask >>= 1;
		cdev->private->iretry = 5;
	}
	return ret;
}
+
+void
+ccw_device_sense_pgid_start(struct ccw_device *cdev)
+{
+ int ret;
+
+ cdev->private->state = DEV_STATE_SENSE_PGID;
+ cdev->private->imask = 0x80;
+ cdev->private->iretry = 5;
+ memset (&cdev->private->pgid, 0, sizeof (struct pgid));
+ ret = __ccw_device_sense_pgid_start(cdev);
+ if (ret && ret != -EBUSY)
+ ccw_device_sense_pgid_done(cdev, ret);
+}
+
/*
 * Called from interrupt context to check if a valid answer
 * to Sense Path Group ID was received.
 *
 * Returns 0 on success, -ETIME if the request was killed by halt/clear,
 * -EOPNOTSUPP on command reject / intervention required, -EAGAIN for
 * other unit checks, -EACCES when the path is gone (cc 3) and -EUSERS
 * when the device is reserved by another system.
 */
static int
__ccw_device_check_sense_pgid(struct ccw_device *cdev)
{
	struct subchannel *sch;
	struct irb *irb;

	sch = to_subchannel(cdev->dev.parent);
	irb = &cdev->private->irb;
	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
		return -ETIME;
	if (irb->esw.esw0.erw.cons &&
	    (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
		/*
		 * If the device doesn't support the Sense Path Group ID
		 * command further retries wouldn't help ...
		 */
		return -EOPNOTSUPP;
	}
	if (irb->esw.esw0.erw.cons) {
		CIO_MSG_EVENT(2, "SNID - device %04x, unit check, "
			      "lpum %02X, cnt %02d, sns : "
			      "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
			      cdev->private->devno,
			      irb->esw.esw0.sublog.lpum,
			      irb->esw.esw0.erw.scnt,
			      irb->ecw[0], irb->ecw[1],
			      irb->ecw[2], irb->ecw[3],
			      irb->ecw[4], irb->ecw[5],
			      irb->ecw[6], irb->ecw[7]);
		return -EAGAIN;
	}
	if (irb->scsw.cc == 3) {
		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
			      "%04x, lpm %02X, became 'not operational'\n",
			      cdev->private->devno, sch->irq, sch->orb.lpm);
		return -EACCES;
	}
	if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x "
			      "is reserved by someone else\n",
			      cdev->private->devno, sch->irq);
		return -EUSERS;
	}
	return 0;
}
+
/*
 * Got interrupt for Sense Path Group ID.
 */
void
ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;
	struct irb *irb;
	int ret;

	/* The interrupt response block is delivered in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Retry sense pgid for cc=1. */
	if (irb->scsw.stctl ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1) {
			ret = __ccw_device_sense_pgid_start(cdev);
			if (ret && ret != -EBUSY)
				ccw_device_sense_pgid_done(cdev, ret);
		}
		return;
	}
	/* Accumulate status; a non-zero return means we are not done yet. */
	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
		return;
	sch = to_subchannel(cdev->dev.parent);
	ret = __ccw_device_check_sense_pgid(cdev);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	switch (ret) {
	/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
	case 0:			/* Sense Path Group ID successful. */
		if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
			/* Device has no pgid yet; take the global one. */
			memcpy(&cdev->private->pgid, &global_pgid,
			       sizeof(struct pgid));
		ccw_device_sense_pgid_done(cdev, 0);
		break;
	case -EOPNOTSUPP:	/* Sense Path Group ID not supported */
		ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
		ccw_device_sense_pgid_done(cdev, -ETIME);
		break;
	case -EACCES:		/* channel is not operational. */
		sch->lpm &= ~cdev->private->imask;
		cdev->private->imask >>= 1;
		cdev->private->iretry = 5;
		/* Fall through. */
	case -EAGAIN:		/* Try again. */
		ret = __ccw_device_sense_pgid_start(cdev);
		if (ret != 0 && ret != -EBUSY)
			ccw_device_sense_pgid_done(cdev, -ENODEV);
		break;
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_sense_pgid_done(cdev, -EUSERS);
		break;
	}
}
+
/*
 * Path Group ID helper function.
 *
 * Build and start a Set Path Group ID channel program with function
 * code @func on the path selected by cdev->private->imask.  When not
 * in single-path mode, a Suspend Multipath Reconnection CCW is chained
 * in front of the SPID CCW.  A path that fails with -EACCES/-ENODEV is
 * removed from the subchannel's lpm and vpm masks.
 */
static int
__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
{
	struct subchannel *sch;
	struct ccw1 *ccw;
	int ret;

	sch = to_subchannel(cdev->dev.parent);

	/* Setup sense path group id channel program. */
	cdev->private->pgid.inf.fc = func;
	ccw = cdev->private->iccws;
	if (!cdev->private->flags.pgid_single) {
		cdev->private->pgid.inf.fc |= SPID_FUNC_MULTI_PATH;
		ccw->cmd_code = CCW_CMD_SUSPEND_RECONN;
		ccw->cda = 0;
		ccw->count = 0;
		ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ccw++;
	} else
		cdev->private->pgid.inf.fc |= SPID_FUNC_SINGLE_PATH;

	ccw->cmd_code = CCW_CMD_SET_PGID;
	ccw->cda = (__u32) __pa (&cdev->private->pgid);
	ccw->count = sizeof (struct pgid);
	ccw->flags = CCW_FLAG_SLI;

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	/* Try multiple times. */
	ret = -ENODEV;
	if (cdev->private->iretry > 0) {
		cdev->private->iretry--;
		ret = cio_start (sch, cdev->private->iccws,
				 cdev->private->imask);
		/* ret is 0, -EBUSY, -EACCES or -ENODEV */
		if ((ret != -EACCES) && (ret != -ENODEV))
			return ret;
	}
	/* PGID command failed on this path. Switch it off. */
	sch->lpm &= ~cdev->private->imask;
	sch->vpm &= ~cdev->private->imask;
	CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
		      "%04x, lpm %02X, became 'not operational'\n",
		      cdev->private->devno, sch->irq, cdev->private->imask);
	return ret;
}
+
/*
 * Called from interrupt context to check if a valid answer
 * to Set Path Group ID was received.
 *
 * Returns 0 on success, -ETIME if the request was terminated by
 * halt/clear, -EOPNOTSUPP on command reject, -EAGAIN for any other
 * unit check, and -EACCES when the path went away (cc 3).
 */
static int
__ccw_device_check_pgid(struct ccw_device *cdev)
{
	struct subchannel *sch;
	struct irb *irb;

	sch = to_subchannel(cdev->dev.parent);
	irb = &cdev->private->irb;
	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
		return -ETIME;
	if (irb->esw.esw0.erw.cons) {
		if (irb->ecw[0] & SNS0_CMD_REJECT)
			return -EOPNOTSUPP;
		/* Hmm, whatever happened, try again. */
		CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, "
			      "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
			      cdev->private->devno, irb->esw.esw0.erw.scnt,
			      irb->ecw[0], irb->ecw[1],
			      irb->ecw[2], irb->ecw[3],
			      irb->ecw[4], irb->ecw[5],
			      irb->ecw[6], irb->ecw[7]);
		return -EAGAIN;
	}
	if (irb->scsw.cc == 3) {
		CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
			      "%04x, lpm %02X, became 'not operational'\n",
			      cdev->private->devno, sch->irq,
			      cdev->private->imask);
		return -EACCES;
	}
	return 0;
}
+
/*
 * Drive path verification: while the verified-path mask (vpm) differs
 * from the logical-path mask (lpm), pick the first differing path and
 * either establish (in lpm only) or resign (in vpm only) the path
 * group on it.  Further progress happens from the interrupt handler;
 * completion is reported via ccw_device_verify_done().
 */
static void
__ccw_device_verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch;
	__u8 imask, func;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	while (sch->vpm != sch->lpm) {
		/* Find first unequal bit in vpm vs. lpm */
		for (imask = 0x80; imask != 0; imask >>= 1)
			if ((sch->vpm & imask) != (sch->lpm & imask))
				break;
		cdev->private->imask = imask;
		/* Already verified but no longer logical -> resign. */
		func = (sch->vpm & imask) ?
			SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH;
		ret = __ccw_device_do_pgid(cdev, func);
		/* Started (or busy): wait for the interrupt. */
		if (ret == 0 || ret == -EBUSY)
			return;
		cdev->private->iretry = 5;
	}
	/* vpm == lpm: verification complete. */
	ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
}
+
/*
 * Got interrupt for Set Path Group ID.
 */
void
ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;
	struct irb *irb;
	int ret;

	/* The interrupt response block is delivered in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Retry set pgid for cc=1. */
	if (irb->scsw.stctl ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			__ccw_device_verify_start(cdev);
		return;
	}
	/* Accumulate status; a non-zero return means we are not done yet. */
	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
		return;
	sch = to_subchannel(cdev->dev.parent);
	ret = __ccw_device_check_pgid(cdev);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	switch (ret) {
	/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
	case 0:
		/* Establish or Resign Path Group done. Update vpm. */
		if ((sch->lpm & cdev->private->imask) != 0)
			sch->vpm |= cdev->private->imask;
		else
			sch->vpm &= ~cdev->private->imask;
		cdev->private->iretry = 5;
		/* Continue with the next differing path, if any. */
		__ccw_device_verify_start(cdev);
		break;
	case -EOPNOTSUPP:
		/*
		 * One of those strange devices which claim to be able
		 * to do multipathing but not for Set Path Group ID.
		 */
		if (cdev->private->flags.pgid_single) {
			ccw_device_verify_done(cdev, -EOPNOTSUPP);
			break;
		}
		/* Retry once more in single-path mode. */
		cdev->private->flags.pgid_single = 1;
		/* fall through. */
	case -EAGAIN:		/* Try again. */
		__ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Set path group id stopped by timeout. */
		ccw_device_verify_done(cdev, -ETIME);
		break;
	case -EACCES:		/* channel is not operational. */
		sch->lpm &= ~cdev->private->imask;
		sch->vpm &= ~cdev->private->imask;
		cdev->private->iretry = 5;
		__ccw_device_verify_start(cdev);
		break;
	}
}
+
+void
+ccw_device_verify_start(struct ccw_device *cdev)
+{
+ cdev->private->flags.pgid_single = 0;
+ cdev->private->iretry = 5;
+ __ccw_device_verify_start(cdev);
+}
+
+static void
+__ccw_device_disband_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = to_subchannel(cdev->dev.parent);
+ while (cdev->private->imask != 0) {
+ if (sch->lpm & cdev->private->imask) {
+ ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
+ if (ret == 0)
+ return;
+ }
+ cdev->private->iretry = 5;
+ cdev->private->imask >>= 1;
+ }
+ ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+}
+
/*
 * Got interrupt for Unset Path Group ID.
 */
void
ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;
	struct irb *irb;
	int ret;

	/* The interrupt response block is delivered in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Retry set pgid for cc=1. */
	if (irb->scsw.stctl ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			__ccw_device_disband_start(cdev);
		return;
	}
	/* Accumulate status; a non-zero return means we are not done yet. */
	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
		return;
	sch = to_subchannel(cdev->dev.parent);
	ret = __ccw_device_check_pgid(cdev);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	switch (ret) {
	/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
	case 0:			/* disband successful. */
		/* No path is verified any more. */
		sch->vpm = 0;
		ccw_device_disband_done(cdev, ret);
		break;
	case -EOPNOTSUPP:
		/*
		 * One of those strange devices which claim to be able
		 * to do multipathing but not for Unset Path Group ID.
		 */
		cdev->private->flags.pgid_single = 1;
		/* fall through. */
	case -EAGAIN:		/* Try again. */
		__ccw_device_disband_start(cdev);
		break;
	case -ETIME:		/* Set path group id stopped by timeout. */
		ccw_device_disband_done(cdev, -ETIME);
		break;
	case -EACCES:		/* channel is not operational. */
		cdev->private->imask >>= 1;
		cdev->private->iretry = 5;
		__ccw_device_disband_start(cdev);
		break;
	}
}
+
+void
+ccw_device_disband_start(struct ccw_device *cdev)
+{
+ cdev->private->flags.pgid_single = 0;
+ cdev->private->iretry = 5;
+ cdev->private->imask = 0x80;
+ __ccw_device_disband_start(cdev);
+}
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
new file mode 100644
index 000000000000..4ab2e0d95009
--- /dev/null
+++ b/drivers/s390/cio/device_status.c
@@ -0,0 +1,385 @@
+/*
+ * drivers/s390/cio/device_status.c
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Cornelia Huck(cohuck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Status accumulation and basic sense functions.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+
+/*
+ * Check for any kind of channel or interface control check but don't
+ * issue the message for the console device
+ *
+ * NOTE(review): the check tests the freshly received irb, while the
+ * logged dstat/cstat come from the accumulated cdev->private->irb —
+ * presumably intentional, so the message shows the accumulated state.
+ */
+static inline void
+ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
+{
+	if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
+				 SCHN_STAT_CHN_CTRL_CHK |
+				 SCHN_STAT_INTF_CTRL_CHK)))
+		return;
+
+	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
+		      "received"
+		      " ... device %04X on subchannel %04X, dev_stat "
+		      ": %02X sch_stat : %02X\n",
+		      cdev->private->devno, cdev->private->irq,
+		      cdev->private->irb.scsw.dstat,
+		      cdev->private->irb.scsw.cstat);
+
+	/* Trace the full irb unless the subchannel is gone (cc=3). */
+	if (irb->scsw.cc != 3) {
+		char dbf_text[15];
+
+		sprintf(dbf_text, "chk%x", cdev->private->irq);
+		CIO_TRACE_EVENT(0, dbf_text);
+		CIO_HEX_EVENT(0, &cdev->private->irb, sizeof (struct irb));
+	}
+}
+
+/*
+ * Some paths became not operational (pno bit in scsw is set).
+ * Refresh the schib, log the not-operational path mask, remove those
+ * paths from the logical path mask and request path verification for
+ * path-grouped devices.
+ */
+static void
+ccw_device_path_notoper(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	stsch (sch->irq, &sch->schib);
+
+	CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are "
+		      "not operational \n", __FUNCTION__, sch->irq,
+		      sch->schib.pmcw.pnom);
+
+	/* Drop the paths reported not operational by the channel. */
+	sch->lpm &= ~sch->schib.pmcw.pnom;
+	if (cdev->private->options.pgroup)
+		cdev->private->flags.doverify = 1;
+}
+
+/*
+ * Copy valid bits from the extended control word to device irb.
+ */
+static inline void
+ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
+{
+	/*
+	 * Copy extended control bit if it is valid... yes there
+	 * are condition that have to be met for the extended control
+	 * bit to have meaning. Sick.
+	 */
+	cdev->private->irb.scsw.ectl = 0;
+	if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) &&
+	    !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS))
+		cdev->private->irb.scsw.ectl = irb->scsw.ectl;
+	/* Check if extended control word is valid. */
+	if (!cdev->private->irb.scsw.ectl)
+		return;
+	/* Copy concurrent sense / model dependent information. */
+	memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
+}
+
+/*
+ * Check if extended status word is valid.
+ * Returns 1 if the esw may be evaluated, 0 otherwise.
+ */
+static inline int
+ccw_device_accumulate_esw_valid(struct irb *irb)
+{
+	/* Status pending alone without the esw-format flag: no esw. */
+	if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
+		return 0;
+	/* Intermediate status is only meaningful for suspended channel programs. */
+	if (irb->scsw.stctl == 
+	    	(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
+	    !(irb->scsw.actl & SCSW_ACTL_SUSPENDED))
+		return 0;
+	return 1;
+}
+
+/*
+ * Copy valid bits from the extended status word to device irb.
+ *
+ * Subchannel logout and erw fields guarded by eswf are only copied for
+ * format-0 esw; lpum, auth, pvrf and the concurrent-sense fields are
+ * taken unconditionally once the esw is known to be valid.
+ */
+static inline void
+ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
+{
+	struct irb *cdev_irb;
+	struct sublog *cdev_sublog, *sublog;
+
+	if (!ccw_device_accumulate_esw_valid(irb))
+		return;
+
+	cdev_irb = &cdev->private->irb;
+
+	/* Copy last path used mask. */
+	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
+
+	/* Copy subchannel logout information if esw is of format 0. */
+	if (irb->scsw.eswf) {
+		cdev_sublog = &cdev_irb->esw.esw0.sublog;
+		sublog = &irb->esw.esw0.sublog;
+		/* Copy extended status flags. */
+		cdev_sublog->esf = sublog->esf;
+		/*
+		 * Copy fields that have a meaning for channel data check
+		 * channel control check and interface control check.
+		 */
+		if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
+				       SCHN_STAT_CHN_CTRL_CHK |
+				       SCHN_STAT_INTF_CTRL_CHK)) {
+			/* Copy ancillary report bit. */
+			cdev_sublog->arep = sublog->arep;
+			/* Copy field-validity-flags. */
+			cdev_sublog->fvf = sublog->fvf;
+			/* Copy storage access code. */
+			cdev_sublog->sacc = sublog->sacc;
+			/* Copy termination code. */
+			cdev_sublog->termc = sublog->termc;
+			/* Copy sequence code. */
+			cdev_sublog->seqc = sublog->seqc;
+		}
+		/* Copy device status check. */
+		cdev_sublog->devsc = sublog->devsc;
+		/* Copy secondary error. */
+		cdev_sublog->serr = sublog->serr;
+		/* Copy i/o-error alert. */
+		cdev_sublog->ioerr = sublog->ioerr;
+		/* Copy channel path timeout bit. */
+		if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK)
+			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
+		/* Copy failing storage address validity flag. */
+		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
+		if (cdev_irb->esw.esw0.erw.fsavf) {
+			/* ... and copy the failing storage address. */
+			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
+			       sizeof (irb->esw.esw0.faddr));
+			/* ... and copy the failing storage address format. */
+			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
+		}
+		/* Copy secondary ccw address validity bit. */
+		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
+		if (irb->esw.esw0.erw.scavf)
+			/* ... and copy the secondary ccw address. */
+			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
+		
+	}
+	/* FIXME: DCTI for format 2? */
+
+	/* Copy authorization bit. */
+	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
+	/* Copy path verification required flag. */
+	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
+	if (irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup)
+		cdev->private->flags.doverify = 1;
+	/* Copy concurrent sense bit. */
+	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
+	if (irb->esw.esw0.erw.cons)
+		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
+}
+
+/*
+ * Accumulate status from irb to devstat.
+ *
+ * Merges a freshly received irb into the per-device accumulated irb
+ * (cdev->private->irb), copying only fields that are architecturally
+ * valid for the presented status, and sets flags.dosense when a unit
+ * check arrived without concurrent sense data.
+ */
+void
+ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
+{
+	struct irb *cdev_irb;
+
+	/*
+	 * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bit have no meaning and we must ignore them.
+	 * The esw is not meaningful as well...
+	 */
+	if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
+		return;
+
+	/* Check for channel checks and interface control checks. */
+	ccw_device_msg_control_check(cdev, irb);
+
+	/* Check for path not operational. */
+	if (irb->scsw.pno && irb->scsw.fctl != 0 &&
+	    (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
+	     (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
+		ccw_device_path_notoper(cdev);
+
+	/*
+	 * Don't accumulate unsolicited interrupts.
+	 */
+	if ((irb->scsw.stctl ==
+	     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
+	    (!irb->scsw.cc))
+		return;
+
+	cdev_irb = &cdev->private->irb;
+
+	/* Copy bits which are valid only for the start function. */
+	if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) {
+		/* Copy key. */
+		cdev_irb->scsw.key = irb->scsw.key;
+		/* Copy suspend control bit. */
+		cdev_irb->scsw.sctl = irb->scsw.sctl;
+		/* Accumulate deferred condition code. */
+		cdev_irb->scsw.cc |= irb->scsw.cc;
+		/* Copy ccw format bit. */
+		cdev_irb->scsw.fmt = irb->scsw.fmt;
+		/* Copy prefetch bit. */
+		cdev_irb->scsw.pfch = irb->scsw.pfch;
+		/* Copy initial-status-interruption-control. */
+		cdev_irb->scsw.isic = irb->scsw.isic;
+		/* Copy address limit checking control. */
+		cdev_irb->scsw.alcc = irb->scsw.alcc;
+		/* Copy suppress suspend bit. */
+		cdev_irb->scsw.ssi = irb->scsw.ssi;
+	}
+
+	/* Take care of the extended control bit and extended control word. */
+	ccw_device_accumulate_ecw(cdev, irb);
+	    
+	/* Accumulate function control. */
+	cdev_irb->scsw.fctl |= irb->scsw.fctl;
+	/* Copy activity control. */
+	cdev_irb->scsw.actl= irb->scsw.actl;
+	/* Accumulate status control. */
+	cdev_irb->scsw.stctl |= irb->scsw.stctl;
+	/*
+	 * Copy ccw address if it is valid. This is a bit simplified
+	 * but should be close enough for all practical purposes.
+	 */
+	if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) ||
+	    ((irb->scsw.stctl == 
+	      (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
+	     (irb->scsw.actl & SCSW_ACTL_DEVACT) &&
+	     (irb->scsw.actl & SCSW_ACTL_SCHACT)) ||
+	    (irb->scsw.actl & SCSW_ACTL_SUSPENDED))
+		cdev_irb->scsw.cpa = irb->scsw.cpa;
+	/* Accumulate device status, but not the device busy flag. */
+	cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY;
+	cdev_irb->scsw.dstat |= irb->scsw.dstat;
+	/* Accumulate subchannel status. */
+	cdev_irb->scsw.cstat |= irb->scsw.cstat;
+	/* Copy residual count if it is valid. */
+	if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
+	    (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0)
+		cdev_irb->scsw.count = irb->scsw.count;
+
+	/* Take care of bits in the extended status word. */
+	ccw_device_accumulate_esw(cdev, irb);
+
+	/* 
+	 * Check whether we must issue a SENSE CCW ourselves if there is no
+	 * concurrent sense facility installed for the subchannel.
+	 * No sense is required if no delayed sense is pending
+	 * and we did not get a unit check without sense information.
+	 *
+	 * Note: We should check for ioinfo[irq]->flags.consns but VM
+	 *	 violates the ESA/390 architecture and doesn't present an
+	 *	 operand exception for virtual devices without concurrent
+	 *	 sense facility available/supported when enabling the
+	 *	 concurrent sense facility.
+	 */
+	if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
+	    !(cdev_irb->esw.esw0.erw.cons))
+		cdev->private->flags.dosense = 1;
+}
+
+/*
+ * Do a basic sense.
+ *
+ * Issue a BASIC SENSE channel program for a device that presented a
+ * unit check without concurrent sense data. Returns -EBUSY if the
+ * device/subchannel is still active (no final status yet, sense must
+ * be delayed), otherwise the result of cio_start.
+ */
+int
+ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	struct subchannel *sch;
+
+	/* A sense is required, can we do it now ? */
+	if ((irb->scsw.actl  & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
+		/*
+		 * we received an Unit Check but we have no final
+		 * status yet, therefore we must delay the SENSE
+		 * processing. We must not report this intermediate
+		 * status to the device interrupt handler.
+		 */
+		return -EBUSY;
+
+	/*
+	 * We have ending status but no sense information. Do a basic sense.
+	 * (The duplicate to_subchannel() lookup that used to precede the
+	 * activity check was dead and has been removed.)
+	 */
+	sch = to_subchannel(cdev->dev.parent);
+	sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;
+	/* Sense data goes into the ecw area of the accumulated irb. */
+	sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw);
+	sch->sense_ccw.count = SENSE_MAX_COUNT;
+	sch->sense_ccw.flags = CCW_FLAG_SLI;
+
+	/* 0xff: try the sense on any available path. */
+	return cio_start (sch, &sch->sense_ccw, 0xff);
+}
+
+/*
+ * Add information from basic sense to devstat.
+ *
+ * Called with the irb delivered for the BASIC SENSE channel program;
+ * marks the accumulated irb as containing sense data (erw.cons) once
+ * the sense completed cleanly.
+ */
+void
+ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	/*
+	 * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bit have no meaning and we must ignore them.
+	 * The esw is not meaningful as well...
+	 */
+	if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
+		return;
+
+	/* Check for channel checks and interface control checks. */
+	ccw_device_msg_control_check(cdev, irb);
+
+	/* Check for path not operational. */
+	if (irb->scsw.pno && irb->scsw.fctl != 0 &&
+	    (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
+	     (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
+		ccw_device_path_notoper(cdev);
+
+	/* Sense finished without another unit check: sense data is valid. */
+	if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
+	    (irb->scsw.dstat & DEV_STAT_CHN_END)) {
+		cdev->private->irb.esw.esw0.erw.cons = 1;
+		cdev->private->flags.dosense = 0;
+	}
+	/* Check if path verification is required. */
+	if (ccw_device_accumulate_esw_valid(irb) &&
+	    irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) 
+		cdev->private->flags.doverify = 1;
+}
+
+/*
+ * This function accumulates the status into the private devstat and
+ * starts a basic sense if one is needed.
+ *
+ * Returns 0 when final status (and sense data, if required) has been
+ * collected, -EBUSY while the device is still active or a basic sense
+ * has just been started.
+ */
+int
+ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	ccw_device_accumulate_irb(cdev, irb);
+	if ((irb->scsw.actl  & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
+		return -EBUSY;
+	/* Check for basic sense. */
+	if (cdev->private->flags.dosense &&
+	    !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) {
+		/* No new unit check: treat accumulated sense as valid. */
+		cdev->private->irb.esw.esw0.erw.cons = 1;
+		cdev->private->flags.dosense = 0;
+		return 0;
+	}
+	if (cdev->private->flags.dosense) {
+		ccw_device_do_sense(cdev, irb);
+		return -EBUSY;
+	}
+	return 0;
+}
+
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
new file mode 100644
index 000000000000..c874607d9a80
--- /dev/null
+++ b/drivers/s390/cio/ioasm.h
@@ -0,0 +1,228 @@
+#ifndef S390_CIO_IOASM_H
+#define S390_CIO_IOASM_H
+
+/*
+ * TPI info structure
+ *
+ * Layout of the interruption code stored by TEST PENDING INTERRUPTION;
+ * bit-fields mirror the architected format, hence the packed attribute.
+ */
+struct tpi_info {
+	__u32 reserved1  : 16;   /* reserved 0x00000001 */
+	__u32 irq	 : 16;   /* aka. subchannel number */
+	__u32 intparm;		 /* interruption parameter */
+	__u32 adapter_IO : 1;	 /* set for adapter (thin) interrupts */
+	__u32 reserved2	 : 1;
+	__u32 isc	 : 3;	 /* interruption subclass */
+	__u32 reserved3	 : 12;
+	__u32 int_type	 : 3;
+	__u32 reserved4	 : 12;
+} __attribute__ ((packed));
+
+
+/*
+ * Some S390 specific IO instructions as inline
+ */
+
+/*
+ * STORE SUBCHANNEL: store the schib of the given subchannel.
+ * Returns the instruction's condition code (0..3).
+ * 0x10000L: the architected one-bit in the subchannel id; the L suffix
+ * matches the other wrappers in this header for consistency.
+ */
+extern __inline__ int stsch(int irq, volatile struct schib *addr)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   stsch 0(%2)\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (irq | 0x10000L), "a" (addr)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * MODIFY SUBCHANNEL: update the subchannel from the given schib.
+ * Returns the instruction's condition code (0..3).
+ */
+extern __inline__ int msch(int irq, volatile struct schib *addr)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   msch  0(%2)\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (irq | 0x10000L), "a" (addr)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * MODIFY SUBCHANNEL with program-check protection: like msch(), but a
+ * fault on the instruction is caught via an __ex_table entry and
+ * reported as -EIO instead of taking the machine down.
+ */
+extern __inline__ int msch_err(int irq, volatile struct schib *addr)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"    lhi  %0,%3\n"
+		"    lr	  1,%1\n"
+		"    msch 0(%2)\n"
+		"0:  ipm  %0\n"
+		"    srl  %0,28\n"
+		"1:\n"
+#ifdef CONFIG_ARCH_S390X
+		".section __ex_table,\"a\"\n"
+		"   .align 8\n"
+		"   .quad 0b,1b\n"
+		".previous"
+#else
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+		"   .long 0b,1b\n"
+		".previous"
+#endif
+		: "=&d" (ccode)
+		: "d" (irq | 0x10000L), "a" (addr), "K" (-EIO)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * TEST SUBCHANNEL: store the irb for the given subchannel and clear
+ * its status. Returns the instruction's condition code (0..3).
+ */
+extern __inline__ int tsch(int irq, volatile struct irb *addr)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   tsch  0(%2)\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (irq | 0x10000L), "a" (addr)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * TEST PENDING INTERRUPTION: store pending-interruption information
+ * at *addr. Returns the condition code (0 = none pending).
+ */
+extern __inline__ int tpi( volatile struct tpi_info *addr)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   tpi	  0(%1)\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "a" (addr)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * START SUBCHANNEL: start the channel program described by the orb.
+ * Returns the instruction's condition code (0..3).
+ */
+extern __inline__ int ssch(int irq, volatile struct orb *addr)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   ssch  0(%2)\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (irq | 0x10000L), "a" (addr)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * RESUME SUBCHANNEL: resume a suspended channel program.
+ * Returns the instruction's condition code (0..3).
+ */
+extern __inline__ int rsch(int irq)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   rsch\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (irq | 0x10000L)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * CLEAR SUBCHANNEL: terminate the current I/O and clear the subchannel.
+ * Returns the instruction's condition code (0..3).
+ */
+extern __inline__ int csch(int irq)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   csch\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (irq | 0x10000L)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * HALT SUBCHANNEL: halt the current I/O on the subchannel.
+ * Returns the instruction's condition code (0..3).
+ */
+extern __inline__ int hsch(int irq)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   hsch\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (irq | 0x10000L)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * CANCEL SUBCHANNEL: cancel a start function before it began execution.
+ * Emitted via .insn (opcode 0xb276), presumably because older
+ * assemblers lack the xsch mnemonic. Returns the condition code.
+ */
+extern __inline__ int xsch(int irq)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   .insn rre,0xb2760000,%1,0\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (irq | 0x10000L)
+		: "cc", "1" );
+	return ccode;
+}
+
+/*
+ * CHANNEL SUBSYSTEM CALL: execute the request in chsc_area (opcode
+ * 0xb25f via .insn). Returns the instruction's condition code.
+ */
+extern __inline__ int chsc(void *chsc_area)
+{
+	int cc;
+
+	__asm__ __volatile__ (
+		".insn	rre,0xb25f0000,%1,0	\n\t"
+		"ipm	%0	\n\t"
+		"srl	%0,28	\n\t"
+		: "=d" (cc)
+		: "d" (chsc_area)
+		: "cc" );
+
+	return cc;
+}
+
+/*
+ * INSERT ADDRESS SPACE CONTROL: store the addressing mode in r1.
+ * Returns the instruction's condition code.
+ */
+extern __inline__ int iac( void)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   iac	  1\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode) : : "cc", "1" );
+	return ccode;
+}
+
+/*
+ * RESET CHANNEL PATH: initiate a reset of the given channel path.
+ * Note: takes a chpid, not a subchannel number, so no 0x10000 one-bit.
+ * Returns the instruction's condition code.
+ */
+extern __inline__ int rchp(int chpid)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"   lr	  1,%1\n"
+		"   rchp\n"
+		"   ipm	  %0\n"
+		"   srl	  %0,28"
+		: "=d" (ccode)
+		: "d" (chpid)
+		: "cc", "1" );
+	return ccode;
+}
+
+#endif
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
new file mode 100644
index 000000000000..bbe9f45d1438
--- /dev/null
+++ b/drivers/s390/cio/qdio.c
@@ -0,0 +1,3468 @@
+/*
+ *
+ * linux/drivers/s390/cio/qdio.c
+ *
+ * Linux for S/390 QDIO base support, Hipersocket base support
+ * version 2
+ *
+ * Copyright 2000,2002 IBM Corporation
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * 2.6 cio integration by Cornelia Huck <cohuck@de.ibm.com>
+ *
+ * Restriction: only 63 iqdio subchannels would have its own indicator,
+ * after that, subsequent subchannels share one indicator
+ *
+ *
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+
+#include <asm/ccwdev.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <asm/timex.h>
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "airq.h"
+#include "qdio.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+#define VERSION_QDIO_C "$Revision: 1.98 $"
+
+/****************** MODULE PARAMETER VARIABLES ********************/
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
+MODULE_DESCRIPTION("QDIO base support version 2, " \
+ "Copyright 2000 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+/******************** HERE WE GO ***********************************/
+
+static const char version[] = "QDIO base support version 2 ("
+ VERSION_QDIO_C "/" VERSION_QDIO_H "/" VERSION_CIO_QDIO_H ")";
+
+#ifdef QDIO_PERFORMANCE_STATS
+/* /proc registration state and counters used by the perf statistics. */
+static int proc_perf_file_registration;
+static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc; 
+static struct qdio_perf_stats perf_stats;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+/* Adapter capabilities detected at init time — presumably thin-
+ * interrupt support and whether set-vs can be omitted; confirm against
+ * the initialization code. */
+static int hydra_thinints;
+static int omit_svs;
+
+/* Device-state-change indicators handed out to thin-interrupt queues;
+ * once the per-cacheline slots are used up, queues share the spare
+ * indicator (usage tracked by spare_indicator_usecount). */
+static int indicator_used[INDICATORS_PER_CACHELINE];
+static __u32 * volatile indicators;
+static __u32 volatile spare_indicator;
+static atomic_t spare_indicator_usecount;
+
+/* s390 debug-feature logs: setup, sbal dumps, trace and sense data. */
+static debug_info_t *qdio_dbf_setup;
+static debug_info_t *qdio_dbf_sbal;
+static debug_info_t *qdio_dbf_trace;
+static debug_info_t *qdio_dbf_sense;
+#ifdef CONFIG_QDIO_DEBUG
+static debug_info_t *qdio_dbf_slsb_out;
+static debug_info_t *qdio_dbf_slsb_in;
+#endif /* CONFIG_QDIO_DEBUG */
+
+/* iQDIO stuff: */
+static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
+						 during a while loop */
+static DEFINE_SPINLOCK(ttiq_list_lock);
+static int register_thinint_result;
+static void tiqdio_tl(unsigned long);
+static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
+
+/* Minimum of two ints. Kept as a function rather than a macro because
+ * callers pass atomic_read(), which must not be evaluated twice. */
+static inline int
+qdio_min(int a,int b)
+{
+	return (a < b) ? a : b;
+}
+
+/***************** SCRUBBER HELPER ROUTINES **********************/
+
+static inline volatile __u64 
+qdio_get_micros(void)
+{
+	/* TOD clock >>12 is microseconds; >>10 yields 1/4-microsecond
+	 * units — presumably intentional for cheaper comparisons against
+	 * the QDIO timing constants; confirm before changing. */
+	return (get_clock() >> 10); /* time>>12 is microseconds */
+}
+
+/* 
+ * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
+ * the q in any case, so that we'll not be interrupted when we are in
+ * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
+ * ever works (last famous words) 
+ */
+/* Returns the previous use count: 0 means we got the (sole) reservation. */
+static inline int 
+qdio_reserve_q(struct qdio_q *q)
+{
+	return atomic_add_return(1,&q->use_count) - 1;
+}
+
+/* Drop a reservation taken by qdio_reserve_q. */
+static inline void 
+qdio_release_q(struct qdio_q *q)
+{
+	atomic_dec(&q->use_count);
+}
+
+/*
+ * Atomically set one slsb (buffer state) byte via xchg.
+ * Note: the former "volatile" qualifier on the void return type was
+ * meaningless (and triggers compiler warnings); it has been dropped.
+ */
+static inline void
+qdio_set_slsb(volatile char *slsb, unsigned char value)
+{
+	xchg((char*)slsb,value);
+}
+
+/*
+ * SIGA-s: synchronize the queue state between adapter and memory.
+ * gpr2/gpr3 carry the output/input queue masks; returns the cc.
+ */
+static inline int 
+qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
+	       unsigned int gpr3)
+{
+	int cc;
+
+	QDIO_DBF_TEXT4(0,trace,"sigasync");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.siga_syncs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	cc = do_siga_sync(q->irq, gpr2, gpr3);
+	if (cc)
+		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
+
+	return cc;
+}
+
+/* Sync a single queue, putting its mask in the proper gpr for its
+ * direction (input vs. output). */
+static inline int
+qdio_siga_sync_q(struct qdio_q *q)
+{
+	if (q->is_input_q)
+		return qdio_siga_sync(q, 0, q->mask);
+	return qdio_siga_sync(q, q->mask, 0);
+}
+
+/*
+ * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
+ * an access exception 
+ *
+ * For iqdio queues, cc=2 with the busy bit is retried for up to
+ * QDIO_BUSY_BIT_PATIENCE before giving up; a persistent busy bit is
+ * reported by or-ing QDIO_SIGA_ERROR_B_BIT_SET into the cc.
+ */
+static inline int 
+qdio_siga_output(struct qdio_q *q)
+{
+	int cc;
+	__u32 busy_bit;
+	__u64 start_time=0;
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.siga_outs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	QDIO_DBF_TEXT4(0,trace,"sigaout");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	for (;;) {
+		cc = do_siga_output(q->irq, q->mask, &busy_bit);
+//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
+		if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
+			if (!start_time) 
+				start_time=NOW;
+			if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
+				break;
+		} else
+			break;
+	}
+	
+	if ((cc==2) && (busy_bit)) 
+		cc |= QDIO_SIGA_ERROR_B_BIT_SET;
+
+	if (cc)
+		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
+
+	return cc;
+}
+
+/*
+ * SIGA-r: tell the adapter to process the input queue. Returns the cc.
+ */
+static inline int 
+qdio_siga_input(struct qdio_q *q)
+{
+	int cc;
+
+	QDIO_DBF_TEXT4(0,trace,"sigain");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.siga_ins++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	cc = do_siga_input(q->irq, q->mask);
+	
+	if (cc)
+		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
+
+	return cc;
+}
+
+/* locked by the locks in qdio_activate and qdio_cleanup */
+/* Hand out a dedicated indicator slot (index 1..n-1; slot 0 is never
+ * used); fall back to the shared spare indicator when all are taken. */
+static __u32 * volatile
+qdio_get_indicator(void)
+{
+	int i;
+
+	for (i=1;i<INDICATORS_PER_CACHELINE;i++)
+		if (!indicator_used[i]) {
+			indicator_used[i]=1;
+			return indicators+i;
+		}
+	atomic_inc(&spare_indicator_usecount);
+	return (__u32 * volatile) &spare_indicator;
+}
+
+/* locked by the locks in qdio_activate and qdio_cleanup */
+/* Return an indicator obtained from qdio_get_indicator: free the
+ * dedicated slot, or drop a reference on the shared spare indicator. */
+static void 
+qdio_put_indicator(__u32 *addr)
+{
+	int i;
+
+	if ( (addr) && (addr!=&spare_indicator) ) {
+		i=addr-indicators;
+		indicator_used[i]=0;
+	}
+	if (addr == &spare_indicator)
+		atomic_dec(&spare_indicator_usecount);
+}
+
+/*
+ * Atomically clear a device-state-change indicator.
+ * Note: the former "volatile" qualifier on the void return type was
+ * meaningless (and triggers compiler warnings); it has been dropped.
+ */
+static inline void
+tiqdio_clear_summary_bit(__u32 *location)
+{
+	QDIO_DBF_TEXT5(0,trace,"clrsummb");
+	QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
+
+	xchg(location,0);
+}
+
+/*
+ * Atomically set a device-state-change indicator (all bits on).
+ * Note: the former "volatile" qualifier on the void return type was
+ * meaningless (and triggers compiler warnings); it has been dropped.
+ */
+static inline void
+tiqdio_set_summary_bit(__u32 *location)
+{
+	QDIO_DBF_TEXT5(0,trace,"setsummb");
+	QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
+
+	xchg(location,-1);
+}
+
+/* Schedule the global thin-interrupt tasklet (high priority). */
+static inline void 
+tiqdio_sched_tl(void)
+{
+	tasklet_hi_schedule(&tiqdio_tasklet);
+}
+
+/*
+ * Insert a thin-interrupt input queue into the circular tiq_list
+ * (under ttiq_list_lock), set its summary bit and kick the tasklet.
+ * No-ops if the queue is shutting down, is not an input queue, or is
+ * already on the list.
+ */
+static inline void
+qdio_mark_tiq(struct qdio_q *q)
+{
+	unsigned long flags;
+
+	QDIO_DBF_TEXT4(0,trace,"mark iq");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	spin_lock_irqsave(&ttiq_list_lock,flags);
+	if (unlikely(atomic_read(&q->is_in_shutdown)))
+		goto out_unlock;
+
+	if (!q->is_input_q)
+		goto out_unlock;
+
+	if ((q->list_prev) || (q->list_next)) 
+		goto out_unlock;
+
+	if (!tiq_list) {
+		/* First entry: queue forms a one-element ring. */
+		tiq_list=q;
+		q->list_prev=q;
+		q->list_next=q;
+	} else {
+		q->list_next=tiq_list;
+		q->list_prev=tiq_list->list_prev;
+		tiq_list->list_prev->list_next=q;
+		tiq_list->list_prev=q;
+	}
+	spin_unlock_irqrestore(&ttiq_list_lock,flags);
+
+	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+	tiqdio_sched_tl();
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&ttiq_list_lock,flags);
+	return;
+}
+
+/* Schedule the per-queue tasklet unless the queue is shutting down. */
+static inline void
+qdio_mark_q(struct qdio_q *q)
+{
+	QDIO_DBF_TEXT4(0,trace,"mark q");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	if (unlikely(atomic_read(&q->is_in_shutdown)))
+		return;
+
+	tasklet_schedule(&q->tasklet);
+}
+
+/*
+ * Leave the polling (PROCESSING) state on an input queue.
+ * Returns 1 when polling was already off or can simply end; returns 0
+ * when the saved-frontier buffer turned PRIMED meanwhile, i.e. the
+ * caller must process the queue again.
+ */
+static inline int
+qdio_stop_polling(struct qdio_q *q)
+{
+#ifdef QDIO_USE_PROCESSING_STATE
+	int gsf;
+
+	if (!atomic_swap(&q->polling,0)) 
+		return 1;
+
+	QDIO_DBF_TEXT4(0,trace,"stoppoll");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	/* show the card that we are not polling anymore */
+	if (!q->is_input_q)
+		return 1;
+
+	/* Reset the buffer before the current frontier to NOT_INIT. */
+	gsf=GET_SAVED_FRONTIER(q);
+	set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
+				  (QDIO_MAX_BUFFERS_PER_Q-1)],
+		 SLSB_P_INPUT_NOT_INIT);
+	/* 
+	 * we don't issue this SYNC_MEMORY, as we trust Rick T and
+	 * moreover will not use the PROCESSING state under VM, so
+	 * q->polling was 0 anyway
+	 */
+	/*SYNC_MEMORY;*/
+	if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED)
+		return 1;
+	/* 
+	 * set our summary bit again, as otherwise there is a
+	 * small window we can miss between resetting it and
+	 * checking for PRIMED state 
+	 */
+	if (q->is_thinint_q)
+		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+	return 0;
+
+#else /* QDIO_USE_PROCESSING_STATE */
+	return 1;
+#endif /* QDIO_USE_PROCESSING_STATE */
+}
+
+/* 
+ * see the comment in do_QDIO and before qdio_reserve_q about the
+ * sophisticated locking outside of unmark_q, so that we don't need to
+ * disable the interrupts :-) 
+ *
+ * Remove a thin-interrupt input queue from the circular tiq_list.
+*/
+static inline void
+qdio_unmark_q(struct qdio_q *q)
+{
+	unsigned long flags;
+
+	QDIO_DBF_TEXT4(0,trace,"unmark q");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	/* Cheap unlocked check first; rechecked under the lock below. */
+	if ((!q->list_prev)||(!q->list_next)) 
+		return;
+
+	if ((q->is_thinint_q)&&(q->is_input_q)) {
+		/* iQDIO */
+		spin_lock_irqsave(&ttiq_list_lock,flags);
+		/* in case cleanup has done this already and simultanously
+		 * qdio_unmark_q is called from the interrupt handler, we've
+		 * got to check this in this specific case again */
+		if ((!q->list_prev)||(!q->list_next))
+			goto out;
+		if (q->list_next==q) {
+			/* q was the only interesting q */
+			tiq_list=NULL;
+			q->list_next=NULL;
+			q->list_prev=NULL;
+		} else {
+			q->list_next->list_prev=q->list_prev;
+			q->list_prev->list_next=q->list_next;
+			tiq_list=q->list_next;
+			q->list_next=NULL;
+			q->list_prev=NULL;
+		}
+out:
+		spin_unlock_irqrestore(&ttiq_list_lock,flags);
+	}
+}
+
+/* Clear the global summary indicator; returns the time value reported
+ * by do_clear_global_summary (traced for debugging). */
+static inline unsigned long 
+tiqdio_clear_global_summary(void)
+{
+	unsigned long time;
+
+	QDIO_DBF_TEXT5(0,trace,"clrglobl");
+	
+	time = do_clear_global_summary();
+
+	QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
+
+	return time;
+}
+
+
+/************************* OUTBOUND ROUTINES *******************************/
+
+/*
+ * Scan the outbound queue's slsb from first_to_check, advancing over
+ * buffers the adapter has consumed (EMPTY) or failed (ERROR, which
+ * also records qdio_error/error_status_flags). Stops at the first
+ * still-primed or uninitialized buffer. Updates and returns the new
+ * first_to_check (modulo QDIO_MAX_BUFFERS_PER_Q).
+ */
+inline static int
+qdio_get_outbound_buffer_frontier(struct qdio_q *q)
+{
+	int f,f_mod_no;
+	volatile char *slsb;
+	int first_not_to_check;
+	char dbf_text[15];
+
+	QDIO_DBF_TEXT4(0,trace,"getobfro");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	slsb=&q->slsb.acc.val[0];
+	f_mod_no=f=q->first_to_check;
+	/* 
+	 * f points to already processed elements, so f+no_used is correct...
+	 * ... but: we don't check 128 buffers, as otherwise
+	 * qdio_has_outbound_q_moved would return 0 
+	 */
+	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
+				      (QDIO_MAX_BUFFERS_PER_Q-1));
+
+	if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
+		SYNC_MEMORY;
+
+check_next:
+	if (f==first_not_to_check) 
+		goto out;
+
+	switch(slsb[f_mod_no]) {
+
+        /* the adapter has not fetched the output yet */
+	case SLSB_CU_OUTPUT_PRIMED:
+		QDIO_DBF_TEXT5(0,trace,"outpprim");
+		break;
+
+	/* the adapter got it */
+	case SLSB_P_OUTPUT_EMPTY:
+		atomic_dec(&q->number_of_buffers_used);
+		f++;
+		f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
+		QDIO_DBF_TEXT5(0,trace,"outpempt");
+		goto check_next;
+
+	case SLSB_P_OUTPUT_ERROR:
+		QDIO_DBF_TEXT3(0,trace,"outperr");
+		sprintf(dbf_text,"%x-%x-%x",f_mod_no,
+			q->sbal[f_mod_no]->element[14].sbalf.value,
+			q->sbal[f_mod_no]->element[15].sbalf.value);
+		QDIO_DBF_TEXT3(1,trace,dbf_text);
+		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
+
+		/* kind of process the buffer */
+		set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT);
+
+		/* 
+		 * we increment the frontier, as this buffer
+		 * was processed obviously 
+		 */
+		atomic_dec(&q->number_of_buffers_used);
+		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+
+		if (q->qdio_error)
+			q->error_status_flags|=
+				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+		q->qdio_error=SLSB_P_OUTPUT_ERROR;
+		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
+
+		break;
+
+	/* no new buffers */
+	default:
+		QDIO_DBF_TEXT5(0,trace,"outpni");
+	}
+out:
+	return (q->first_to_check=f_mod_no);
+}
+
+/* all buffers are processed */
+/* Returns non-zero when the outbound queue has no buffers in use. */
+inline static int
+qdio_is_outbound_q_done(struct qdio_q *q)
+{
+	int no_used;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	no_used=atomic_read(&q->number_of_buffers_used);
+
+#ifdef CONFIG_QDIO_DEBUG
+	if (no_used) {
+		sprintf(dbf_text,"oqisnt%02x",no_used);
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+	} else {
+		QDIO_DBF_TEXT4(0,trace,"oqisdone");
+	}
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
+	return (no_used==0);
+}
+
+/*
+ * Re-scan the outbound frontier; returns 1 (and saves the frontier)
+ * when the queue advanced or an error needs reporting, else 0.
+ */
+inline static int
+qdio_has_outbound_q_moved(struct qdio_q *q)
+{
+	int i;
+
+	i=qdio_get_outbound_buffer_frontier(q);
+
+	if ( (i!=GET_SAVED_FRONTIER(q)) ||
+	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
+		SAVE_FRONTIER(q,i);
+		QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 1;
+	} else {
+		QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 0;
+	}
+}
+
+/*
+ * Issue SIGA-w for the queue (if required) and fold the result into
+ * qdio_error/error_status_flags; handles the cc=2/busy-bit condition
+ * as described in the comment below.
+ */
+inline static void
+qdio_kick_outbound_q(struct qdio_q *q)
+{
+	int result;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+
+	QDIO_DBF_TEXT4(0,trace,"kickoutq");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (!q->siga_out)
+		return;
+
+	/* here's the story with cc=2 and busy bit set (thanks, Rick):
+	 * VM's CP could present us cc=2 and busy bit set on SIGA-write
+	 * during reconfiguration of their Guest LAN (only in HIPERS mode,
+	 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
+	 * the queues down immediately; and not being under VM we have a
+	 * problem on cc=2 and busy bit set right away).
+	 *
+	 * Therefore qdio_siga_output will try for a short time constantly,
+	 * if such a condition occurs. If it doesn't change, it will
+	 * increase the busy_siga_counter and save the timestamp, and
+	 * schedule the queue for later processing (via mark_q, using the
+	 * queue tasklet). __qdio_outbound_processing will check out the
+	 * counter. If non-zero, it will call qdio_kick_outbound_q as often
+	 * as the value of the counter. This will attempt further SIGA
+	 * instructions. For each successful SIGA, the counter is
+	 * decreased, for failing SIGAs the counter remains the same, after
+	 * all.
+	 * After some time of no movement, qdio_kick_outbound_q will
+	 * finally fail and reflect corresponding error codes to call
+	 * the upper layer module and have it take the queues down.
+	 *
+	 * Note that this is a change from the original HiperSockets design
+	 * (saying cc=2 and busy bit means take the queues down), but in
+	 * these days Guest LAN didn't exist... excessive cc=2 with busy bit
+	 * conditions will still take the queues down, but the threshold is
+	 * higher due to the Guest LAN environment.
+	 */
+
+
+	result=qdio_siga_output(q);
+
+		switch (result) {
+		case 0:
+			/* went smooth this time, reset timestamp */
+#ifdef CONFIG_QDIO_DEBUG
+			QDIO_DBF_TEXT3(0,trace,"cc2reslv");
+			sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
+				atomic_read(&q->busy_siga_counter));
+			QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+			q->timing.busy_start=0;
+			break;
+		case (2|QDIO_SIGA_ERROR_B_BIT_SET):
+			/* cc=2 and busy bit: */
+			atomic_inc(&q->busy_siga_counter);
+
+			/* if the last siga was successful, save
+			 * timestamp here */
+			if (!q->timing.busy_start)
+				q->timing.busy_start=NOW;
+
+			/* if we're in time, don't touch error_status_flags
+			 * and siga_error */
+			if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
+				qdio_mark_q(q);
+				break;
+			}
+			QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
+#ifdef CONFIG_QDIO_DEBUG
+			sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
+				atomic_read(&q->busy_siga_counter));
+			QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+			/* else fallthrough and report error */
+		default:
+			/* for plain cc=1, 2 or 3: */
+			if (q->siga_error)
+				q->error_status_flags|=
+					QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+			q->error_status_flags|=
+				QDIO_STATUS_LOOK_FOR_ERROR;
+			q->siga_error=result;
+		}
+}
+
+/*
+ * Report processed outbound buffers to the upper-layer module.
+ *
+ * Delivers the window [first_element_to_kick, saved frontier) to
+ * q->handler, then resets first_element_to_kick and the accumulated
+ * qdio/siga error state for the next round.  Buffer indices wrap at
+ * QDIO_MAX_BUFFERS_PER_Q (a power of two, hence the masking).
+ */
+inline static void
+qdio_kick_outbound_handler(struct qdio_q *q)
+{
+	int start, end, real_end, count;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	start = q->first_element_to_kick;
+	/* last_move_ftc was just updated */
+	real_end = GET_SAVED_FRONTIER(q);
+	/* end = last buffer inside the window, i.e. frontier-1 mod ring size */
+	end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
+		(QDIO_MAX_BUFFERS_PER_Q-1);
+	count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
+		(QDIO_MAX_BUFFERS_PER_Q-1);
+
+#ifdef CONFIG_QDIO_DEBUG
+	QDIO_DBF_TEXT4(0,trace,"kickouth");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	sprintf(dbf_text,"s=%2xc=%2x",start,count);
+	QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	/* only deliver while the queues are active (suppressed e.g.
+	 * while shutting down) */
+	if (q->state==QDIO_IRQ_STATE_ACTIVE)
+		q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
+			   q->error_status_flags,
+			   q->qdio_error,q->siga_error,q->q_no,start,count,
+			   q->int_parm);
+
+	/* for the next time: */
+	q->first_element_to_kick=real_end;
+	q->qdio_error=0;
+	q->siga_error=0;
+	q->error_status_flags=0;
+}
+
+/*
+ * Outbound tasklet body: retries SIGAs deferred due to the busy
+ * condition (see the busy_siga_counter discussion above), delivers
+ * processed buffers to the handler and reschedules the queue while
+ * work remains.  Runs under qdio_reserve_q to exclude concurrent
+ * processing of the same queue.
+ */
+static inline void
+__qdio_outbound_processing(struct qdio_q *q)
+{
+	int siga_attempts;
+
+	QDIO_DBF_TEXT4(0,trace,"qoutproc");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	if (unlikely(qdio_reserve_q(q))) {
+		qdio_release_q(q);
+#ifdef QDIO_PERFORMANCE_STATS
+		o_p_c++;
+#endif /* QDIO_PERFORMANCE_STATS */
+		/* as we're sissies, we'll check next time */
+		if (likely(!atomic_read(&q->is_in_shutdown))) {
+			qdio_mark_q(q);
+			QDIO_DBF_TEXT4(0,trace,"busy,agn");
+		}
+		return;
+	}
+#ifdef QDIO_PERFORMANCE_STATS
+	o_p_nc++;
+	perf_stats.tl_runs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	/* see comment in qdio_kick_outbound_q */
+	siga_attempts=atomic_read(&q->busy_siga_counter);
+	while (siga_attempts) {
+		atomic_dec(&q->busy_siga_counter);
+		qdio_kick_outbound_q(q);
+		siga_attempts--;
+	}
+
+	if (qdio_has_outbound_q_moved(q))
+		qdio_kick_outbound_handler(q);
+
+	if (q->is_iqdio_q) {
+		/*
+		 * for asynchronous queues, we better check, if the fill
+		 * level is too high. for synchronous queues, the fill
+		 * level will never be that high.
+		 */
+		if (atomic_read(&q->number_of_buffers_used)>
+		    IQDIO_FILL_LEVEL_TO_POLL)
+			qdio_mark_q(q);
+
+	} else if (!q->hydra_gives_outbound_pcis)
+		if (!qdio_is_outbound_q_done(q))
+			qdio_mark_q(q);
+
+	qdio_release_q(q);
+}
+
+/* tasklet entry point for outbound queues (wired up in qdio_fill_qs) */
+static void
+qdio_outbound_processing(struct qdio_q *q)
+{
+	__qdio_outbound_processing(q);
+}
+
+/************************* INBOUND ROUTINES *******************************/
+
+
+/*
+ * Walk the inbound SLSB states starting at first_to_check and advance
+ * the frontier over all PRIMED (and ERROR) buffers; stop at EMPTY,
+ * NOT_INIT, PROCESSING or any other state.  Updates q->first_to_check
+ * and number_of_buffers_used and returns the new frontier.  With
+ * QDIO_USE_PROCESSING_STATE the last consumed buffer is left in
+ * PROCESSING state to suppress further interrupts (not under VM,
+ * where that would cause CP polling overhead).
+ */
+inline static int
+qdio_get_inbound_buffer_frontier(struct qdio_q *q)
+{
+	int f,f_mod_no;
+	volatile char *slsb;
+	int first_not_to_check;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif /* CONFIG_QDIO_DEBUG */
+#ifdef QDIO_USE_PROCESSING_STATE
+	int last_position=-1;
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+	QDIO_DBF_TEXT4(0,trace,"getibfro");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	slsb=&q->slsb.acc.val[0];
+	f_mod_no=f=q->first_to_check;
+	/*
+	 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
+	 * would return 0
+	 */
+	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
+				      (QDIO_MAX_BUFFERS_PER_Q-1));
+
+	/*
+	 * we don't use this one, as a PCI or we after a thin interrupt
+	 * will sync the queues
+	 */
+	/* SYNC_MEMORY;*/
+
+check_next:
+	f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
+	if (f==first_not_to_check)
+		goto out;
+	switch (slsb[f_mod_no]) {
+
+	/* CU_EMPTY means frontier is reached */
+	case SLSB_CU_INPUT_EMPTY:
+		QDIO_DBF_TEXT5(0,trace,"inptempt");
+		break;
+
+	/* P_PRIMED means set slsb to P_PROCESSING and move on */
+	case SLSB_P_INPUT_PRIMED:
+		QDIO_DBF_TEXT5(0,trace,"inptprim");
+
+#ifdef QDIO_USE_PROCESSING_STATE
+		/*
+		 * as soon as running under VM, polling the input queues will
+		 * kill VM in terms of CP overhead
+		 */
+		if (q->siga_sync) {
+			set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+		} else {
+			/* set the previous buffer to NOT_INIT. The current
+			 * buffer will be set to PROCESSING at the end of
+			 * this function to avoid further interrupts. */
+			if (last_position>=0)
+				set_slsb(&slsb[last_position],
+					 SLSB_P_INPUT_NOT_INIT);
+			atomic_set(&q->polling,1);
+			last_position=f_mod_no;
+		}
+#else /* QDIO_USE_PROCESSING_STATE */
+		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+#endif /* QDIO_USE_PROCESSING_STATE */
+		/*
+		 * not needed, as the inbound queue will be synced on the next
+		 * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
+		 */
+		/*SYNC_MEMORY;*/
+		f++;
+		atomic_dec(&q->number_of_buffers_used);
+		goto check_next;
+
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_PROCESSING:
+		QDIO_DBF_TEXT5(0,trace,"inpnipro");
+		break;
+
+	/* P_ERROR means frontier is reached, break and report error */
+	case SLSB_P_INPUT_ERROR:
+#ifdef CONFIG_QDIO_DEBUG
+		sprintf(dbf_text,"inperr%2x",f_mod_no);
+		QDIO_DBF_TEXT3(1,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
+
+		/* kind of process the buffer */
+		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+
+		/* remember the error in the queue's accumulated error
+		 * state; reported on the next handler kick */
+		if (q->qdio_error)
+			q->error_status_flags|=
+				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+		q->qdio_error=SLSB_P_INPUT_ERROR;
+		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
+
+		/* we increment the frontier, as this buffer
+		 * was processed obviously */
+		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+		atomic_dec(&q->number_of_buffers_used);
+
+#ifdef QDIO_USE_PROCESSING_STATE
+		last_position=-1;
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+		break;
+
+	/* everything else means frontier not changed (HALTED or so) */
+	default:
+		break;
+	}
+out:
+	q->first_to_check=f_mod_no;
+
+#ifdef QDIO_USE_PROCESSING_STATE
+	if (last_position>=0)
+		set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+
+	return q->first_to_check;
+}
+
+/*
+ * Returns 1 if the inbound frontier moved (or an error was flagged)
+ * since the last call, 0 otherwise.  Saves the new frontier and, for
+ * polled (non-VM, non-PCI) queues, the timestamp consumed by
+ * qdio_is_inbound_q_done's threshold check.
+ */
+inline static int
+qdio_has_inbound_q_moved(struct qdio_q *q)
+{
+	int i;
+
+#ifdef QDIO_PERFORMANCE_STATS
+	static int old_pcis=0;
+	static int old_thinints=0;
+
+	/* NOTE(review): old_thinints is compared but never refreshed in
+	 * the else branch -- looks like an oversight; impact is limited
+	 * to the performance statistics. */
+	if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
+		perf_stats.start_time_inbound=NOW;
+	else
+		old_pcis=perf_stats.pcis;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	i=qdio_get_inbound_buffer_frontier(q);
+	if ( (i!=GET_SAVED_FRONTIER(q)) ||
+	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
+		SAVE_FRONTIER(q,i);
+		if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
+			SAVE_TIMESTAMP(q);
+
+		QDIO_DBF_TEXT4(0,trace,"inhasmvd");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 1;
+	} else {
+		QDIO_DBF_TEXT4(0,trace,"inhsntmv");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 0;
+	}
+}
+
+/*
+ * Thinint variant of "is the inbound queue done": returns 1 when no
+ * buffers are in use, 0 otherwise.  When the next buffer is already
+ * PRIMED again, re-arms the summary indicator and reschedules the
+ * thinint tasklet before returning 0.
+ */
+inline static int
+tiqdio_is_inbound_q_done(struct qdio_q *q)
+{
+	int no_used;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	no_used=atomic_read(&q->number_of_buffers_used);
+
+	/* propagate the change from 82 to 80 through VM */
+	SYNC_MEMORY;
+
+#ifdef CONFIG_QDIO_DEBUG
+	if (no_used) {
+		sprintf(dbf_text,"iqisnt%02x",no_used);
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+	} else {
+		QDIO_DBF_TEXT4(0,trace,"iniqisdo");
+	}
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (!no_used)
+		return 1;
+
+	if (!q->siga_sync)
+		/* we'll check for more primed buffers in qeth_stop_polling */
+		return 0;
+
+	if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED)
+		/*
+		 * nothing more to do, if next buffer is not PRIMED.
+		 * note that we did a SYNC_MEMORY before, that there
+		 * has been a sychnronization.
+		 * we will return 0 below, as there is nothing to do
+		 * (stop_polling not necessary, as we have not been
+		 * using the PROCESSING state
+		 */
+		return 0;
+
+	/*
+	 * ok, the next input buffer is primed. that means, that device state
+	 * change indicator and adapter local summary are set, so we will find
+	 * it next time.
+	 * we will return 0 below, as there is nothing to do, except scheduling
+	 * ourselves for the next time.
+	 */
+	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+	tiqdio_sched_tl();
+	return 0;
+}
+
+/*
+ * Non-thinint variant of "is the inbound queue done": returns 1 when
+ * no buffers are in use, when running under VM (siga_sync) or with
+ * outbound PCIs, or when the polling threshold since the last frontier
+ * move has expired; returns 0 while there is (potential) work or the
+ * poll window is still open.
+ *
+ * Bugfix: the !no_used branch used to log dbf_text, which is never
+ * initialized on that path (and is only declared under
+ * CONFIG_QDIO_DEBUG, while the logging call was unguarded).  The
+ * bogus trace entry is simply dropped.
+ */
+inline static int
+qdio_is_inbound_q_done(struct qdio_q *q)
+{
+	int no_used;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	no_used=atomic_read(&q->number_of_buffers_used);
+
+	/*
+	 * we need that one for synchronization with the adapter, as it
+	 * does a kind of PCI avoidance
+	 */
+	SYNC_MEMORY;
+
+	if (!no_used) {
+		QDIO_DBF_TEXT4(0,trace,"inqisdnA");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 1;
+	}
+
+	if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) {
+		/* we got something to do */
+		QDIO_DBF_TEXT4(0,trace,"inqisntA");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 0;
+	}
+
+	/* on VM, we don't poll, so the q is always done here */
+	if (q->siga_sync)
+		return 1;
+	if (q->hydra_gives_outbound_pcis)
+		return 1;
+
+	/*
+	 * at this point we know, that inbound first_to_check
+	 * has (probably) not moved (see qdio_inbound_processing)
+	 */
+	if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT4(0,trace,"inqisdon");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		return 1;
+	} else {
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT4(0,trace,"inqisntd");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		return 0;
+	}
+}
+
+/*
+ * Deliver the inbound buffers processed since the last call to the
+ * upper-layer handler and reset the accumulated error state.  The
+ * count loop walks inclusively from start to end (frontier-1) modulo
+ * the ring size, so a full-ring window yields 128 rather than 0.
+ */
+inline static void
+qdio_kick_inbound_handler(struct qdio_q *q)
+{
+	int count, start, end, real_end, i;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	QDIO_DBF_TEXT4(0,trace,"kickinh");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	start=q->first_element_to_kick;
+	real_end=q->first_to_check;
+	end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+
+	i=start;
+	count=0;
+	while (1) {
+		count++;
+		if (i==end)
+			break;
+		i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+	}
+
+#ifdef CONFIG_QDIO_DEBUG
+	sprintf(dbf_text,"s=%2xc=%2x",start,count);
+	QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	/* only deliver while the queues are active */
+	if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
+		q->handler(q->cdev,
+			   QDIO_STATUS_INBOUND_INT|q->error_status_flags,
+			   q->qdio_error,q->siga_error,q->q_no,start,count,
+			   q->int_parm);
+
+	/* for the next time: */
+	q->first_element_to_kick=real_end;
+	q->qdio_error=0;
+	q->siga_error=0;
+	q->error_status_flags=0;
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
+	perf_stats.inbound_cnt++;
+#endif /* QDIO_PERFORMANCE_STATS */
+}
+
+/*
+ * Thinint inbound tasklet body.  Checks (and, for non-shared
+ * indicators, clears) the device state change indicator, syncs the
+ * queues as the adapter capabilities require, services any
+ * thinint-capable outbound queues and finally delivers inbound
+ * buffers, stopping polling when the queue has drained.
+ * spare_ind_was_set tells us whether the shared spare indicator was
+ * set when the caller sampled it.
+ */
+static inline void
+__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
+{
+	struct qdio_irq *irq_ptr;
+	struct qdio_q *oq;
+	int i;
+
+	QDIO_DBF_TEXT4(0,trace,"iqinproc");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	/*
+	 * we first want to reserve the q, so that we know, that we don't
+	 * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
+	 * be set
+	 */
+	if (unlikely(qdio_reserve_q(q))) {
+		qdio_release_q(q);
+#ifdef QDIO_PERFORMANCE_STATS
+		ii_p_c++;
+#endif /* QDIO_PERFORMANCE_STATS */
+		/*
+		 * as we might just be about to stop polling, we make
+		 * sure that we check again at least once more
+		 */
+		tiqdio_sched_tl();
+		return;
+	}
+#ifdef QDIO_PERFORMANCE_STATS
+	ii_p_nc++;
+#endif /* QDIO_PERFORMANCE_STATS */
+	if (unlikely(atomic_read(&q->is_in_shutdown))) {
+		qdio_unmark_q(q);
+		goto out;
+	}
+
+	/*
+	 * we reset spare_ind_was_set, when the queue does not use the
+	 * spare indicator
+	 */
+	if (spare_ind_was_set)
+		spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
+
+	/* nothing signalled for this queue -- nothing to do */
+	if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
+		goto out;
+	/*
+	 * q->dev_st_chg_ind is the indicator, be it shared or not.
+	 * only clear it, if indicator is non-shared
+	 */
+	if (!spare_ind_was_set)
+		tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
+
+	/* sync only what the adapter does not sync for us already */
+	if (q->hydra_gives_outbound_pcis) {
+		if (!q->siga_sync_done_on_thinints) {
+			SYNC_MEMORY_ALL;
+		} else if ((!q->siga_sync_done_on_outb_tis)&&
+			   (q->hydra_gives_outbound_pcis)) {
+			SYNC_MEMORY_ALL_OUTB;
+		}
+	} else {
+		SYNC_MEMORY;
+	}
+	/*
+	 * maybe we have to do work on our outbound queues... at least
+	 * we have to check the outbound-int-capable thinint-capable
+	 * queues
+	 */
+	if (q->hydra_gives_outbound_pcis) {
+		irq_ptr = (struct qdio_irq*)q->irq_ptr;
+		for (i=0;i<irq_ptr->no_output_qs;i++) {
+			oq = irq_ptr->output_qs[i];
+#ifdef QDIO_PERFORMANCE_STATS
+			perf_stats.tl_runs--;
+#endif /* QDIO_PERFORMANCE_STATS */
+			if (!qdio_is_outbound_q_done(oq))
+				__qdio_outbound_processing(oq);
+		}
+	}
+
+	if (!qdio_has_inbound_q_moved(q))
+		goto out;
+
+	qdio_kick_inbound_handler(q);
+	if (tiqdio_is_inbound_q_done(q))
+		if (!qdio_stop_polling(q)) {
+			/*
+			 * we set the flags to get into the stuff next time,
+			 * see also comment in qdio_stop_polling
+			 */
+			tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+			tiqdio_sched_tl();
+		}
+out:
+	qdio_release_q(q);
+}
+
+/* tasklet entry point for thinint inbound queues (see qdio_fill_qs) */
+static void
+tiqdio_inbound_processing(struct qdio_q *q)
+{
+	__tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
+}
+
+/*
+ * Non-thinint inbound tasklet body: delivers newly primed buffers to
+ * the handler, looping up to QDIO_Q_LAPS times while polling is
+ * active, and reschedules the queue while the poll window is open.
+ */
+static inline void
+__qdio_inbound_processing(struct qdio_q *q)
+{
+	int q_laps=0;
+
+	QDIO_DBF_TEXT4(0,trace,"qinproc");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	if (unlikely(qdio_reserve_q(q))) {
+		qdio_release_q(q);
+#ifdef QDIO_PERFORMANCE_STATS
+		i_p_c++;
+#endif /* QDIO_PERFORMANCE_STATS */
+		/* as we're sissies, we'll check next time */
+		if (likely(!atomic_read(&q->is_in_shutdown))) {
+			qdio_mark_q(q);
+			QDIO_DBF_TEXT4(0,trace,"busy,agn");
+		}
+		return;
+	}
+#ifdef QDIO_PERFORMANCE_STATS
+	i_p_nc++;
+	perf_stats.tl_runs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+again:
+	if (qdio_has_inbound_q_moved(q)) {
+		qdio_kick_inbound_handler(q);
+		if (!qdio_stop_polling(q)) {
+			q_laps++;
+			if (q_laps<QDIO_Q_LAPS)
+				goto again;
+		}
+		qdio_mark_q(q);
+	} else {
+		if (!qdio_is_inbound_q_done(q))
+			/* means poll time is not yet over */
+			qdio_mark_q(q);
+	}
+
+	qdio_release_q(q);
+}
+
+/* tasklet entry point for non-thinint inbound queues (see qdio_fill_qs) */
+static void
+qdio_inbound_processing(struct qdio_q *q)
+{
+	__qdio_inbound_processing(q);
+}
+
+/************************* MAIN ROUTINES *******************************/
+
+#ifdef QDIO_USE_PROCESSING_STATE
+/*
+ * Leave the PROCESSING state on q after a thinint scan pass.
+ * Return codes drive the loop in tiqdio_inbound_checks:
+ *   0 - bail out of the loop (q was NULL, or reserving it failed;
+ *       tasklet rescheduled)
+ *   1 - stop_polling left work behind and laps exhausted: indicator
+ *       re-armed, tasklet rescheduled, advance to next queue
+ *   2 - nothing to do for q (VM/siga_sync, or polling stopped
+ *       cleanly), advance to next queue
+ *   3 - stop_polling left work behind, lap budget remains: restart
+ *       the whole scan
+ */
+static inline int
+tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
+{
+	if (!q) {
+		tiqdio_sched_tl();
+		return 0;
+	}
+
+	/*
+	 * under VM, we have not used the PROCESSING state, so no
+	 * need to stop polling
+	 */
+	if (q->siga_sync)
+		return 2;
+
+	if (unlikely(qdio_reserve_q(q))) {
+		qdio_release_q(q);
+#ifdef QDIO_PERFORMANCE_STATS
+		ii_p_c++;
+#endif /* QDIO_PERFORMANCE_STATS */
+		/*
+		 * as we might just be about to stop polling, we make
+		 * sure that we check again at least once more
+		 */
+
+		/*
+		 * sanity -- we'd get here without setting the
+		 * dev st chg ind
+		 */
+		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+		tiqdio_sched_tl();
+		return 0;
+	}
+	if (qdio_stop_polling(q)) {
+		qdio_release_q(q);
+		return 2;
+	}
+	if (q_laps<QDIO_Q_LAPS-1) {
+		qdio_release_q(q);
+		return 3;
+	}
+	/*
+	 * we set the flags to get into the stuff
+	 * next time, see also comment in qdio_stop_polling
+	 */
+	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+	tiqdio_sched_tl();
+	qdio_release_q(q);
+	return 1;
+
+}
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+/*
+ * Scan all thinint inbound queues on tiq_list: service each one, then
+ * (with QDIO_USE_PROCESSING_STATE) walk the list again to leave the
+ * PROCESSING state, restarting the scan while the lap budget allows.
+ */
+static inline void
+tiqdio_inbound_checks(void)
+{
+	struct qdio_q *q;
+	int spare_ind_was_set=0;
+#ifdef QDIO_USE_PROCESSING_STATE
+	int q_laps=0;
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+	QDIO_DBF_TEXT4(0,trace,"iqdinbck");
+	QDIO_DBF_TEXT5(0,trace,"iqlocsum");
+
+#ifdef QDIO_USE_PROCESSING_STATE
+again:
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+	/* when the spare indicator is used and set, save that and clear it */
+	if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
+		spare_ind_was_set = 1;
+		tiqdio_clear_summary_bit((__u32*)&spare_indicator);
+	}
+
+	q=(struct qdio_q*)tiq_list;
+	do {
+		if (!q)
+			break;
+		__tiqdio_inbound_processing(q, spare_ind_was_set);
+		q=(struct qdio_q*)q->list_next;
+	} while (q!=(struct qdio_q*)tiq_list);
+
+#ifdef QDIO_USE_PROCESSING_STATE
+	q=(struct qdio_q*)tiq_list;
+	do {
+		int ret;
+
+		ret = tiqdio_reset_processing_state(q, q_laps);
+		switch (ret) {
+		case 0:
+			return;
+		case 1:
+			q_laps++;
+			/* fallthrough: advance to the next queue */
+		case 2:
+			q = (struct qdio_q*)q->list_next;
+			break;
+		default:
+			q_laps++;
+			goto again;
+		}
+	} while (q!=(struct qdio_q*)tiq_list);
+#endif /* QDIO_USE_PROCESSING_STATE */
+}
+
+/* thinint tasklet function: scan all thinint inbound queues */
+static void
+tiqdio_tl(unsigned long data)
+{
+	QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.tl_runs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	tiqdio_inbound_checks();
+}
+
+/********************* GENERAL HELPER_ROUTINES ***********************/
+
+/*
+ * Free all memory qdio_alloc_qs and friends hung off irq_ptr: every
+ * allocated input/output queue together with its slib page, the qdr
+ * and finally irq_ptr itself.  Queue slots that were never allocated
+ * are NULL and are skipped.
+ *
+ * kfree(NULL) is a no-op, so the former "if (ptr) kfree(ptr)" guards
+ * and the goto-based skipping were redundant and have been dropped.
+ */
+static void
+qdio_release_irq_memory(struct qdio_irq *irq_ptr)
+{
+	int i;
+
+	for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
+		if (irq_ptr->input_qs[i]) {
+			kfree(irq_ptr->input_qs[i]->slib);
+			kfree(irq_ptr->input_qs[i]);
+		}
+		if (irq_ptr->output_qs[i]) {
+			kfree(irq_ptr->output_qs[i]->slib);
+			kfree(irq_ptr->output_qs[i]);
+		}
+	}
+	kfree(irq_ptr->qdr);
+	kfree(irq_ptr);
+}
+
+/*
+ * Copy the caller-supplied implementation parameters into the qib and
+ * the per-buffer slib entries of all input and output queues.  Any of
+ * qib_param_field, input_slib_elements and output_slib_elements may
+ * be NULL, in which case the corresponding data is left untouched.
+ */
+static void
+qdio_set_impl_params(struct qdio_irq *irq_ptr,
+		     unsigned int qib_param_field_format,
+		     /* pointer to 128 bytes or NULL, if no param field */
+		     unsigned char *qib_param_field,
+		     /* pointer to no_queues*128 words of data or NULL */
+		     unsigned int no_input_qs,
+		     unsigned int no_output_qs,
+		     unsigned long *input_slib_elements,
+		     unsigned long *output_slib_elements)
+{
+	int q_no, elem;
+
+	if (!irq_ptr)
+		return;
+
+	irq_ptr->qib.pfmt = qib_param_field_format;
+	if (qib_param_field)
+		memcpy(irq_ptr->qib.parm, qib_param_field,
+		       QDIO_MAX_BUFFERS_PER_Q);
+
+	if (input_slib_elements)
+		for (q_no = 0; q_no < no_input_qs; q_no++)
+			for (elem = 0; elem < QDIO_MAX_BUFFERS_PER_Q; elem++)
+				irq_ptr->input_qs[q_no]->slib->slibe[elem].parms =
+					input_slib_elements[
+						q_no*QDIO_MAX_BUFFERS_PER_Q+elem];
+
+	if (output_slib_elements)
+		for (q_no = 0; q_no < no_output_qs; q_no++)
+			for (elem = 0; elem < QDIO_MAX_BUFFERS_PER_Q; elem++)
+				irq_ptr->output_qs[q_no]->slib->slibe[elem].parms =
+					output_slib_elements[
+						q_no*QDIO_MAX_BUFFERS_PER_Q+elem];
+}
+
+/*
+ * Allocate the qdio_q structures and their slib pages for all input
+ * and output queues of irq_ptr.  Returns 0 on success or -ENOMEM; on
+ * failure, queues already linked into irq_ptr remain for the caller
+ * to release via qdio_release_irq_memory.
+ *
+ * Bugfix: a queue whose slib allocation failed was never linked into
+ * irq_ptr and thus leaked; it is now freed locally.  The output-queue
+ * allocation failure also reports an error now, like the input path.
+ */
+static int
+qdio_alloc_qs(struct qdio_irq *irq_ptr,
+	       int no_input_qs, int no_output_qs)
+{
+	int i;
+	struct qdio_q *q;
+	int result=-ENOMEM;
+
+	for (i=0;i<no_input_qs;i++) {
+		q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL);
+
+		if (!q) {
+			QDIO_PRINT_ERR("kmalloc of q failed!\n");
+			goto out;
+		}
+
+		memset(q,0,sizeof(struct qdio_q));
+
+		q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
+		if (!q->slib) {
+			QDIO_PRINT_ERR("kmalloc of slib failed!\n");
+			/* q is not yet visible in input_qs[], so the
+			 * caller's cleanup cannot free it -- do it here */
+			kfree(q);
+			goto out;
+		}
+
+		irq_ptr->input_qs[i]=q;
+	}
+
+	for (i=0;i<no_output_qs;i++) {
+		q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL);
+
+		if (!q) {
+			QDIO_PRINT_ERR("kmalloc of q failed!\n");
+			goto out;
+		}
+
+		memset(q,0,sizeof(struct qdio_q));
+
+		q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
+		if (!q->slib) {
+			QDIO_PRINT_ERR("kmalloc of slib failed!\n");
+			/* same leak fix as in the input-queue loop above */
+			kfree(q);
+			goto out;
+		}
+
+		irq_ptr->output_qs[i]=q;
+	}
+
+	result=0;
+out:
+	return result;
+}
+
+/*
+ * Initialize all queue structures previously allocated by
+ * qdio_alloc_qs: copy the caller's SBAL pointers, set up per-queue
+ * bookkeeping, wire the tasklets (thinint vs. plain inbound, plain
+ * outbound), chain the slibs and fill sl and slsb.  All input buffers
+ * start in SLSB_P_INPUT_NOT_INIT, all output buffers in
+ * SLSB_P_OUTPUT_NOT_INIT.
+ */
+static void
+qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
+	     int no_input_qs, int no_output_qs,
+	     qdio_handler_t *input_handler,
+	     qdio_handler_t *output_handler,
+	     unsigned long int_parm,int q_format,
+	     unsigned long flags,
+	     void **inbound_sbals_array,
+	     void **outbound_sbals_array)
+{
+	struct qdio_q *q;
+	int i,j;
+	char dbf_text[20]; /* see qdio_initialize */
+	void *ptr;
+	int available;	/* NOTE(review): set but never read -- leftover */
+
+	sprintf(dbf_text,"qfqs%4x",cdev->private->irq);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	for (i=0;i<no_input_qs;i++) {
+		q=irq_ptr->input_qs[i];
+
+		/* wipe everything up to (not including) the slib pointer,
+		 * which must survive from qdio_alloc_qs */
+		memset(q,0,((char*)&q->slib)-((char*)q));
+		sprintf(dbf_text,"in-q%4x",i);
+		QDIO_DBF_TEXT0(0,setup,dbf_text);
+		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
+
+		memset(q->slib,0,PAGE_SIZE);
+		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
+
+		available=0;
+
+		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
+			q->sbal[j]=*(inbound_sbals_array++);
+
+                q->queue_type=q_format;
+		q->int_parm=int_parm;
+		q->irq=irq_ptr->irq;
+		q->irq_ptr = irq_ptr;
+		q->cdev = cdev;
+		q->mask=1<<(31-i);
+		q->q_no=i;
+		q->is_input_q=1;
+		q->first_to_check=0;
+		q->last_move_ftc=0;
+		q->handler=input_handler;
+		q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
+
+		q->tasklet.data=(unsigned long)q;
+		/* q->is_thinint_q isn't valid at this time, but
+		 * irq_ptr->is_thinint_irq is */
+		q->tasklet.func=(void(*)(unsigned long))
+			((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
+			 &qdio_inbound_processing);
+
+		/* actually this is not used for inbound queues. yet. */
+		atomic_set(&q->busy_siga_counter,0);
+		q->timing.busy_start=0;
+
+/*		for (j=0;j<QDIO_STATS_NUMBER;j++)
+			q->timing.last_transfer_times[j]=(qdio_get_micros()/
+							  QDIO_STATS_NUMBER)*j;
+		q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
+*/
+
+		/* fill in slib */
+		if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
+				 (unsigned long)(q->slib);
+		q->slib->sla=(unsigned long)(q->sl);
+		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
+
+		/* fill in sl */
+		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
+			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
+
+		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
+		ptr=(void*)q->sl;
+		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
+		ptr=(void*)&q->slsb;
+		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
+		ptr=(void*)q->sbal[0];
+		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
+
+		/* fill in slsb */
+		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
+			set_slsb(&q->slsb.acc.val[j],
+		   		 SLSB_P_INPUT_NOT_INIT);
+/*			q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
+		}
+	}
+
+	for (i=0;i<no_output_qs;i++) {
+		q=irq_ptr->output_qs[i];
+		memset(q,0,((char*)&q->slib)-((char*)q));
+
+		sprintf(dbf_text,"outq%4x",i);
+		QDIO_DBF_TEXT0(0,setup,dbf_text);
+		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
+
+		memset(q->slib,0,PAGE_SIZE);
+		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
+
+		available=0;
+
+		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
+			q->sbal[j]=*(outbound_sbals_array++);
+
+                q->queue_type=q_format;
+		q->int_parm=int_parm;
+		q->is_input_q=0;
+		q->irq=irq_ptr->irq;
+		q->cdev = cdev;
+		q->irq_ptr = irq_ptr;
+		q->mask=1<<(31-i);
+		q->q_no=i;
+		q->first_to_check=0;
+		q->last_move_ftc=0;
+		q->handler=output_handler;
+
+		q->tasklet.data=(unsigned long)q;
+		q->tasklet.func=(void(*)(unsigned long))
+			&qdio_outbound_processing;
+
+		atomic_set(&q->busy_siga_counter,0);
+		q->timing.busy_start=0;
+
+		/* fill in slib */
+		if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
+				 (unsigned long)(q->slib);
+		q->slib->sla=(unsigned long)(q->sl);
+		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
+
+		/* fill in sl */
+		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
+			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
+
+		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
+		ptr=(void*)q->sl;
+		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
+		ptr=(void*)&q->slsb;
+		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
+		ptr=(void*)q->sbal[0];
+		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
+
+		/* fill in slsb */
+		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
+			set_slsb(&q->slsb.acc.val[j],
+		   		 SLSB_P_OUTPUT_NOT_INIT);
+/*			q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
+		}
+	}
+}
+
+/*
+ * Set the polling threshold of every input and output queue.  Only
+ * the max thresholds are currently applied; the min thresholds are
+ * accepted for interface compatibility but unused.
+ */
+static void
+qdio_fill_thresholds(struct qdio_irq *irq_ptr,
+		     unsigned int no_input_qs,
+		     unsigned int no_output_qs,
+		     unsigned int min_input_threshold,
+		     unsigned int max_input_threshold,
+		     unsigned int min_output_threshold,
+		     unsigned int max_output_threshold)
+{
+	int q_no;
+
+	for (q_no = 0; q_no < no_input_qs; q_no++)
+		irq_ptr->input_qs[q_no]->timing.threshold =
+			max_input_threshold;
+	for (q_no = 0; q_no < no_output_qs; q_no++)
+		irq_ptr->output_qs[q_no]->timing.threshold =
+			max_output_threshold;
+}
+
+/*
+ * Adapter (thin) interrupt handler: optionally clear the global
+ * summary (SVS) for interrupt avoidance, then scan all thinint
+ * inbound queues.  Always returns 0.
+ */
+static int
+tiqdio_thinint_handler(void)
+{
+	QDIO_DBF_TEXT4(0,trace,"thin_int");
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.thinints++;
+	perf_stats.start_time_inbound=NOW;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	/* SVS only when needed:
+	 * issue SVS to benefit from iqdio interrupt avoidance
+	 * (SVS clears AISOI)*/
+	if (!omit_svs)
+		tiqdio_clear_global_summary();
+
+	tiqdio_inbound_checks();
+	return 0;
+}
+
+/*
+ * Move the irq and every one of its queues to the given state, then
+ * issue a memory barrier so the new state is globally visible before
+ * anyone acts on it.
+ */
+static void
+qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
+{
+	int q_no;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+
+	QDIO_DBF_TEXT5(0,trace,"newstate");
+	sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
+	QDIO_DBF_TEXT5(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	irq_ptr->state = state;
+	for (q_no = 0; q_no < irq_ptr->no_input_qs; q_no++)
+		irq_ptr->input_qs[q_no]->state = state;
+	for (q_no = 0; q_no < irq_ptr->no_output_qs; q_no++)
+		irq_ptr->output_qs[q_no]->state = state;
+	mb();
+}
+
+/*
+ * Log concurrent sense data attached to an irb, if any: trace it into
+ * the debug feature and dump irb and sense bytes to the console.
+ */
+static inline void
+qdio_irq_check_sense(int irq, struct irb *irb)
+{
+	char dbf_text[15];
+
+	/* nothing to report without concurrent sense */
+	if (!irb->esw.esw0.erw.cons)
+		return;
+
+	sprintf(dbf_text,"sens%4x",irq);
+	QDIO_DBF_TEXT2(1,trace,dbf_text);
+	QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
+
+	QDIO_PRINT_WARN("sense data available on qdio channel.\n");
+	HEXDUMP16(WARN,"irb: ",irb);
+	HEXDUMP16(WARN,"sense data: ",irb->ecw);
+}
+
+/*
+ * PCI (program-controlled interrupt) handler: service all inbound
+ * queues (directly, or via tasklet if the upper layer asked to stay
+ * out of interrupt context) and, if the adapter signals outbound
+ * completion via PCIs, the outbound queues as well.
+ */
+static inline void
+qdio_handle_pci(struct qdio_irq *irq_ptr)
+{
+	int i;
+	struct qdio_q *q;
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.pcis++;
+	perf_stats.start_time_inbound=NOW;
+#endif /* QDIO_PERFORMANCE_STATS */
+	for (i=0;i<irq_ptr->no_input_qs;i++) {
+		q=irq_ptr->input_qs[i];
+		if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
+			qdio_mark_q(q);
+		else {
+#ifdef QDIO_PERFORMANCE_STATS
+			perf_stats.tl_runs--;
+#endif /* QDIO_PERFORMANCE_STATS */
+			__qdio_inbound_processing(q);
+		}
+	}
+	if (!irq_ptr->hydra_gives_outbound_pcis)
+		return;
+	for (i=0;i<irq_ptr->no_output_qs;i++) {
+		q=irq_ptr->output_qs[i];
+#ifdef QDIO_PERFORMANCE_STATS
+		perf_stats.tl_runs--;
+#endif /* QDIO_PERFORMANCE_STATS */
+		if (qdio_is_outbound_q_done(q))
+			continue;
+		if (!irq_ptr->sync_done_on_outb_pcis)
+			SYNC_MEMORY;
+		__qdio_outbound_processing(q);
+	}
+}
+
+static void qdio_establish_handle_irq(struct ccw_device*, int, int);
+
+/*
+ * A check condition arrived while the queues were active: log it,
+ * notify the upper layer once via the handler of the first available
+ * queue (input preferred) and stop the queues.
+ */
+static inline void
+qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
+			   int cstat, int dstat)
+{
+	struct qdio_irq *irq_ptr;
+	struct qdio_q *q;
+	char dbf_text[15];
+
+	irq_ptr = cdev->private->qdio_data;
+
+	QDIO_DBF_TEXT2(1, trace, "ick2");
+	sprintf(dbf_text,"%s", cdev->dev.bus_id);
+	QDIO_DBF_TEXT2(1,trace,dbf_text);
+	QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
+	QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
+	QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
+	QDIO_PRINT_ERR("received check condition on activate " \
+		       "queues on device %s (cs=x%x, ds=x%x).\n",
+		       cdev->dev.bus_id, cstat, dstat);
+	if (irq_ptr->no_input_qs) {
+		q=irq_ptr->input_qs[0];
+	} else if (irq_ptr->no_output_qs) {
+		q=irq_ptr->output_qs[0];
+	} else {
+		QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
+			       cdev->dev.bus_id);
+		goto omit_handler_call;
+	}
+	/* start/count of -1 mark this as a status-only notification */
+	q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
+		   QDIO_STATUS_LOOK_FOR_ERROR,
+		   0,0,0,-1,-1,q->int_parm);
+omit_handler_call:
+	qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
+
+}
+
+/*
+ * Work-queue callback: shut the device's queues down outside interrupt
+ * context and drop the device reference taken by the scheduler
+ * (see qdio_timeout_handler).
+ */
+static void
+qdio_call_shutdown(void *data)
+{
+	struct ccw_device *cdev;
+
+	cdev = (struct ccw_device *)data;
+	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+	put_device(&cdev->dev);
+}
+
+/*
+ * I/O timeout (-ETIMEDOUT from the cio layer): react according to the
+ * current irq state -- report a failed establish/cleanup, or, for
+ * active queues, schedule a shutdown via the work queue (shutdown
+ * must not run in interrupt context).
+ */
+static void
+qdio_timeout_handler(struct ccw_device *cdev)
+{
+	struct qdio_irq *irq_ptr;
+	char dbf_text[15];
+
+	QDIO_DBF_TEXT2(0, trace, "qtoh");
+	sprintf(dbf_text, "%s", cdev->dev.bus_id);
+	QDIO_DBF_TEXT2(0, trace, dbf_text);
+
+	irq_ptr = cdev->private->qdio_data;
+	sprintf(dbf_text, "state:%d", irq_ptr->state);
+	QDIO_DBF_TEXT2(0, trace, dbf_text);
+
+	switch (irq_ptr->state) {
+	case QDIO_IRQ_STATE_INACTIVE:
+		QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
+			       irq_ptr->irq);
+		QDIO_DBF_TEXT2(1,setup,"eq:timeo");
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+		break;
+	case QDIO_IRQ_STATE_CLEANUP:
+		QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
+				irq_ptr->irq);
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+		break;
+	case QDIO_IRQ_STATE_ESTABLISHED:
+	case QDIO_IRQ_STATE_ACTIVE:
+		/* I/O has been terminated by common I/O layer. */
+		QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n",
+				irq_ptr->irq);
+		QDIO_DBF_TEXT2(1, trace, "cio:term");
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+		if (get_device(&cdev->dev)) {
+			/* Can't call shutdown from interrupt context. */
+			PREPARE_WORK(&cdev->private->kick_work,
+				     qdio_call_shutdown, (void *)cdev);
+			queue_work(ccw_device_work, &cdev->private->kick_work);
+		}
+		break;
+	default:
+		BUG();
+	}
+	ccw_device_set_timeout(cdev, 0);
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Central interrupt handler registered with the ccw device: validate
+ * the interrupt, handle error irbs (-EIO/-ETIMEDOUT), then dispatch
+ * by irq state -- establish completion, cleanup completion, or, for
+ * active queues, PCI vs. activate-check handling.
+ */
+static void
+qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct qdio_irq *irq_ptr;
+	int cstat,dstat;
+	char dbf_text[15];
+
+#ifdef CONFIG_QDIO_DEBUG
+	QDIO_DBF_TEXT4(0, trace, "qint");
+	sprintf(dbf_text, "%s", cdev->dev.bus_id);
+	QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+	
+	if (!intparm) {
+		QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
+			       "handler, device %s\n", cdev->dev.bus_id);
+		return;
+	}
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr) {
+		QDIO_DBF_TEXT2(1, trace, "uint");
+		sprintf(dbf_text,"%s", cdev->dev.bus_id);
+		QDIO_DBF_TEXT2(1,trace,dbf_text);
+		QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
+			       cdev->dev.bus_id);
+		return;
+	}
+
+	if (IS_ERR(irb)) {
+		/* Currently running i/o is in error. */
+		switch (PTR_ERR(irb)) {
+		case -EIO:
+			QDIO_PRINT_ERR("i/o error on device %s\n",
+				       cdev->dev.bus_id);
+			return;
+		case -ETIMEDOUT:
+			qdio_timeout_handler(cdev);
+			return;
+		default:
+			QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
+				       PTR_ERR(irb), cdev->dev.bus_id);
+			return;
+		}
+	}
+
+	qdio_irq_check_sense(irq_ptr->irq, irb);
+
+#ifdef CONFIG_QDIO_DEBUG
+	sprintf(dbf_text, "state:%d", irq_ptr->state);
+	QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+        cstat = irb->scsw.cstat;
+        dstat = irb->scsw.dstat;
+
+	switch (irq_ptr->state) {
+	case QDIO_IRQ_STATE_INACTIVE:
+		qdio_establish_handle_irq(cdev, cstat, dstat);
+		break;
+
+	case QDIO_IRQ_STATE_CLEANUP:
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+		break;
+
+	case QDIO_IRQ_STATE_ESTABLISHED:
+	case QDIO_IRQ_STATE_ACTIVE:
+		if (cstat & SCHN_STAT_PCI) {
+			qdio_handle_pci(irq_ptr);
+			break;
+		}
+
+		if ((cstat&~SCHN_STAT_PCI)||dstat) {
+			qdio_handle_activate_check(cdev, intparm, cstat, dstat);
+			break;
+		}
+		/* fallthrough: NOTE(review): a clean interrupt (no PCI,
+		 * no check condition) falls into the default reporting
+		 * below -- presumably intentional; confirm */
+	default:
+		QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
+			       "device %s?!\n",
+			       irq_ptr->state, cdev->dev.bus_id);
+	}
+	wake_up(&cdev->private->wait_q);
+
+}
+
+/*
+ * Issue a SIGA-sync for one input or output queue of cdev, selected
+ * via flags (QDIO_FLAG_SYNC_INPUT/OUTPUT) and queue_number.  Returns
+ * the SIGA condition code, -ENODEV if the device has no qdio data, or
+ * -EINVAL for a bad flags/queue combination.
+ * NOTE(review): queue_number is not range-checked against
+ * no_input_qs/no_output_qs -- presumably the caller guarantees it;
+ * confirm.
+ */
+int
+qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
+		 unsigned int queue_number)
+{
+	int cc;
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr;
+	void *ptr;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15]="SyncXXXX";
+#endif
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+#ifdef CONFIG_QDIO_DEBUG
+	*((int*)(&dbf_text[4])) = irq_ptr->irq;
+	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
+	*((int*)(&dbf_text[0]))=flags;
+	*((int*)(&dbf_text[4]))=queue_number;
+	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (flags&QDIO_FLAG_SYNC_INPUT) {
+		q=irq_ptr->input_qs[queue_number];
+		if (!q)
+			return -EINVAL;
+		cc = do_siga_sync(q->irq, 0, q->mask);
+	} else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
+		q=irq_ptr->output_qs[queue_number];
+		if (!q)
+			return -EINVAL;
+		cc = do_siga_sync(q->irq, q->mask, 0);
+	} else
+		return -EINVAL;
+
+	/* NOTE(review): this traces the address held in ptr, not the
+	 * value of cc itself -- looks odd; confirm intent */
+	ptr=&cc;
+	if (cc)
+		QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
+
+	return cc;
+}
+
+/*
+ * Query the channel subsystem (CHSC store-subchannel-QDIO-data, command
+ * code 0x0024) for the SIGA requirements of the given subchannel and
+ * return its qdioac flag byte.  On any failure we assume the worst case
+ * and report all SIGA operations as necessary.
+ *
+ * Fix: the failure paths combined the CHSC_FLAG_SIGA_* constants with
+ * the logical || operator, which evaluates to 1 instead of the union of
+ * the three flag bits that the "all flags set" comments announce.  They
+ * now use bitwise |, matching the "worst case" path further down.
+ */
+static unsigned char
+qdio_check_siga_needs(int sch)
+{
+	int result;
+	unsigned char qdioac;
+
+	/* request/response block layout defined by the CHSC command */
+	struct {
+		struct chsc_header request;
+		u16 reserved1;
+		u16 first_sch;
+		u16 reserved2;
+		u16 last_sch;
+		u32 reserved3;
+		struct chsc_header response;
+		u32 reserved4;
+		u8 flags;
+		u8 reserved5;
+		u16 sch;
+		u8 qfmt;
+		u8 reserved6;
+		u8 qdioac;
+		u8 sch_class;
+		u8 reserved7;
+		u8 icnt;
+		u8 reserved8;
+		u8 ocnt;
+	} *ssqd_area;
+
+	/* CHSC needs a 31-bit addressable page */
+	ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!ssqd_area) {
+		QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
+				"SIGAs for sch x%x.\n", sch);
+		return CHSC_FLAG_SIGA_INPUT_NECESSARY |
+			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
+			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+	}
+	ssqd_area->request = (struct chsc_header) {
+		.length = 0x0010,
+		.code   = 0x0024,
+	};
+
+	/* query exactly one subchannel */
+	ssqd_area->first_sch = sch;
+	ssqd_area->last_sch = sch;
+
+	result = chsc(ssqd_area);
+
+	if (result) {
+		QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
+				"SIGAs for sch x%x.\n",
+				result, sch);
+		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
+			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+		goto out;
+	}
+
+	if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
+		QDIO_PRINT_WARN("response upon checking SIGA needs " \
+				"is 0x%x. Using all SIGAs for sch x%x.\n",
+				ssqd_area->response.code, sch);
+		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
+			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+		goto out;
+	}
+	/* sanity check the response: must be valid, qdio-capable and
+	 * describe the subchannel we asked about */
+	if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
+	    !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
+	    (ssqd_area->sch != sch)) {
+		QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \
+				"using all SIGAs.\n", sch);
+		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
+			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
+		goto out;
+	}
+
+	qdioac = ssqd_area->qdioac;
+out:
+	free_page((unsigned long) ssqd_area);
+	return qdioac;
+}
+
+/*
+ * Check whether the facilities needed for thin-interrupt (tiqdio)
+ * operation are present in the channel subsystem characteristics, and
+ * latch the hydra_thinints and omit_svs module flags as a side effect.
+ *
+ * Returns 0 on success or a negative errno on a missing facility.
+ * NOTE(review): the return type is unsigned int although negative errno
+ * values are returned; callers presumably only test for non-zero --
+ * confirm before changing the type.
+ */
+static unsigned int
+tiqdio_check_chsc_availability(void)
+{
+	char dbf_text[15];
+
+	if (!css_characteristics_avail)
+		return -EIO;
+
+	/* Check for bit 41. */
+	if (!css_general_characteristics.aif) {
+		QDIO_PRINT_WARN("Adapter interruption facility not " \
+				"installed.\n");
+		return -ENOENT;
+	}
+
+	/* Check for bits 107 and 108. */
+	if (!css_chsc_characteristics.scssc ||
+	    !css_chsc_characteristics.scsscf) {
+		QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
+				"not available.\n");
+		return -ENOENT;
+	}
+
+	/* Check for OSA/FCP thin interrupts (bit 67). */
+	hydra_thinints = css_general_characteristics.aif_osa;
+	sprintf(dbf_text,"hydrati%1x", hydra_thinints);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+
+	/* Check for aif time delay disablement fac (bit 56). If installed,
+	 * omit svs even under lpar (good point by rick again) */
+	omit_svs = css_general_characteristics.aif_tdd;
+	sprintf(dbf_text,"omitsvs%1x", omit_svs);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	return 0;
+}
+
+
+/*
+ * Set (or, with reset_to_zero, clear) the adapter-interruption summary
+ * and device-state-change indicator addresses for a thin-interrupt
+ * subchannel via the CHSC set-channel-subsystem-characteristics command
+ * (code 0x0021).
+ *
+ * Returns 0 on success, -ENODEV if the subchannel is not a thinint one,
+ * -ENOMEM if no page could be allocated, or -EIO on a CHSC failure.
+ */
+static unsigned int
+tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
+{
+	unsigned long real_addr_local_summary_bit;
+	unsigned long real_addr_dev_st_chg_ind;
+	void *ptr;
+	char dbf_text[15];
+
+	unsigned int resp_code;
+	int result;
+
+	/* request/response block layout defined by the CHSC command;
+	 * field order and sizes are hardware ABI -- do not reorder */
+	struct {
+		struct chsc_header request;
+		u16 operation_code;
+		u16 reserved1;
+		u32 reserved2;
+		u32 reserved3;
+		u64 summary_indicator_addr;
+		u64 subchannel_indicator_addr;
+		u32 ks:4;
+		u32 kc:4;
+		u32 reserved4:21;
+		u32 isc:3;
+		u32 word_with_d_bit;
+		/* set to 0x10000000 to enable
+		 * time delay disablement facility */
+		u32 reserved5;
+		u32 subsystem_id;
+		u32 reserved6[1004];
+		struct chsc_header response;
+		u32 reserved7;
+	} *scssc_area;
+
+	if (!irq_ptr->is_thinint_irq)
+		return -ENODEV;
+
+	/* zero addresses de-register the indicators (shutdown path) */
+	if (reset_to_zero) {
+		real_addr_local_summary_bit=0;
+		real_addr_dev_st_chg_ind=0;
+	} else {
+		real_addr_local_summary_bit=
+			virt_to_phys((volatile void *)indicators);
+		real_addr_dev_st_chg_ind=
+			virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
+	}
+
+	scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scssc_area) {
+		QDIO_PRINT_WARN("No memory for setting indicators on " \
+				"subchannel x%x.\n", irq_ptr->irq);
+		return -ENOMEM;
+	}
+	scssc_area->request = (struct chsc_header) {
+		.length = 0x0fe0,
+		.code   = 0x0021,
+	};
+	scssc_area->operation_code = 0;
+
+	scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
+	scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
+	scssc_area->ks = QDIO_STORAGE_KEY;
+	scssc_area->kc = QDIO_STORAGE_KEY;
+	scssc_area->isc = TIQDIO_THININT_ISC;
+	scssc_area->subsystem_id = (1<<16) + irq_ptr->irq;
+	/* enables the time delay disablement facility. Don't care
+	 * whether it is really there (i.e. we haven't checked for
+	 * it) */
+	if (css_general_characteristics.aif_tdd)
+		scssc_area->word_with_d_bit = 0x10000000;
+	else
+		QDIO_PRINT_WARN("Time delay disablement facility " \
+				"not available\n");
+
+	result = chsc(scssc_area);
+	if (result) {
+		QDIO_PRINT_WARN("could not set indicators on irq x%x, " \
+				"cc=%i.\n",irq_ptr->irq,result);
+		result = -EIO;
+		goto out;
+	}
+
+	resp_code = scssc_area->response.code;
+	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
+		QDIO_PRINT_WARN("response upon setting indicators " \
+				"is 0x%x.\n",resp_code);
+		sprintf(dbf_text,"sidR%4x",resp_code);
+		QDIO_DBF_TEXT1(0,trace,dbf_text);
+		QDIO_DBF_TEXT1(0,setup,dbf_text);
+		ptr=&scssc_area->response;
+		QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
+		result = -EIO;
+		goto out;
+	}
+
+	/* trace the addresses that were registered */
+	QDIO_DBF_TEXT2(0,setup,"setscind");
+	QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
+		      sizeof(unsigned long));
+	QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
+	result = 0;
+out:
+	free_page ((unsigned long) scssc_area);
+	return result;
+
+}
+
+/*
+ * Program the adapter-interruption delay target for a thin-interrupt
+ * subchannel via the CHSC fast command 0x1027.  A failing response is
+ * only logged -- the delay target is an optimization, not a functional
+ * requirement, so the function still returns 0 in that case.
+ *
+ * Returns 0, -ENODEV for non-thinint subchannels, -ENOMEM or -EIO.
+ */
+static unsigned int
+tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
+{
+	unsigned int resp_code;
+	int result;
+	void *ptr;
+	char dbf_text[15];
+
+	/* request/response block layout defined by the CHSC command */
+	struct {
+		struct chsc_header request;
+		u16 operation_code;
+		u16 reserved1;
+		u32 reserved2;
+		u32 reserved3;
+		u32 reserved4[2];
+		u32 delay_target;
+		u32 reserved5[1009];
+		struct chsc_header response;
+		u32 reserved6;
+	} *scsscf_area;
+
+	if (!irq_ptr->is_thinint_irq)
+		return -ENODEV;
+
+	scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scsscf_area) {
+		QDIO_PRINT_WARN("No memory for setting delay target on " \
+				"subchannel x%x.\n", irq_ptr->irq);
+		return -ENOMEM;
+	}
+	scsscf_area->request = (struct chsc_header) {
+		.length = 0x0fe0,
+		.code   = 0x1027,
+	};
+
+	/* the target value lives in the upper halfword of the field */
+	scsscf_area->delay_target = delay_target<<16;
+
+	result=chsc(scsscf_area);
+	if (result) {
+		QDIO_PRINT_WARN("could not set delay target on irq x%x, " \
+				"cc=%i. Continuing.\n",irq_ptr->irq,result);
+		result = -EIO;
+		goto out;
+	}
+
+	resp_code = scsscf_area->response.code;
+	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
+		QDIO_PRINT_WARN("response upon setting delay target " \
+				"is 0x%x. Continuing.\n",resp_code);
+		sprintf(dbf_text,"sdtR%4x",resp_code);
+		QDIO_DBF_TEXT1(0,trace,dbf_text);
+		QDIO_DBF_TEXT1(0,setup,dbf_text);
+		ptr=&scsscf_area->response;
+		QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
+	}
+	QDIO_DBF_TEXT2(0,trace,"delytrgt");
+	QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
+	result = 0; /* not critical */
+out:
+	free_page ((unsigned long) scsscf_area);
+	return result;
+}
+
+/*
+ * Convenience wrapper: shut down the queues of a subchannel and, if the
+ * shutdown succeeded (or is still draining), free its qdio memory.
+ */
+int
+qdio_cleanup(struct ccw_device *cdev, int how)
+{
+	struct qdio_irq *irq_ptr;
+	char dbf_text[15];
+	int rc;
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	sprintf(dbf_text, "qcln%4x", irq_ptr->irq);
+	QDIO_DBF_TEXT1(0, trace, dbf_text);
+	QDIO_DBF_TEXT0(0, setup, dbf_text);
+
+	rc = qdio_shutdown(cdev, how);
+	if (rc == 0 || rc == -EINPROGRESS)
+		rc = qdio_free(cdev);
+	return rc;
+}
+
+/*
+ * Shut down the queues of a subchannel: mark all queues as going away,
+ * kill the tasklets, wait for the queues to drain, terminate the I/O
+ * with clear or halt (per 'how') and wait for the final interrupt.
+ *
+ * Returns 0 on success, -ENODEV if no qdio data is attached,
+ * -EINPROGRESS if a queue could not be drained in time, or the error
+ * from ccw_device_halt/ccw_device_clear.
+ *
+ * Fix: the error message for a failing ccw_device_{halt,clear} printed
+ * 'result' (still 0 at that point) instead of the actual return
+ * code 'rc'.
+ */
+int
+qdio_shutdown(struct ccw_device *cdev, int how)
+{
+	struct qdio_irq *irq_ptr;
+	int i;
+	int result = 0;
+	int rc;
+	unsigned long flags;
+	int timeout;
+	char dbf_text[15];
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	down(&irq_ptr->setting_up_sema);
+
+	sprintf(dbf_text,"qsqs%4x",irq_ptr->irq);
+	QDIO_DBF_TEXT1(0,trace,dbf_text);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+
+	/* mark all qs as uninteresting */
+	for (i=0;i<irq_ptr->no_input_qs;i++)
+		atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
+
+	for (i=0;i<irq_ptr->no_output_qs;i++)
+		atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
+
+	tasklet_kill(&tiqdio_tasklet);
+
+	/* wait for each input queue to become unused; flag -EINPROGRESS
+	 * if one is still busy after the timeout */
+	for (i=0;i<irq_ptr->no_input_qs;i++) {
+		qdio_unmark_q(irq_ptr->input_qs[i]);
+		tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
+		wait_event_interruptible_timeout(cdev->private->wait_q,
+						 !atomic_read(&irq_ptr->
+							      input_qs[i]->
+							      use_count),
+						 QDIO_NO_USE_COUNT_TIMEOUT);
+		if (atomic_read(&irq_ptr->input_qs[i]->use_count))
+			result=-EINPROGRESS;
+	}
+
+	/* same for the output queues */
+	for (i=0;i<irq_ptr->no_output_qs;i++) {
+		tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
+		wait_event_interruptible_timeout(cdev->private->wait_q,
+						 !atomic_read(&irq_ptr->
+							      output_qs[i]->
+							      use_count),
+						 QDIO_NO_USE_COUNT_TIMEOUT);
+		if (atomic_read(&irq_ptr->output_qs[i]->use_count))
+			result=-EINPROGRESS;
+	}
+
+	/* cleanup subchannel */
+	spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
+	if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
+		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+		timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
+	} else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
+		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+		timeout=QDIO_CLEANUP_HALT_TIMEOUT;
+	} else { /* default behaviour */
+		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+		timeout=QDIO_CLEANUP_HALT_TIMEOUT;
+	}
+	if (rc == -ENODEV) {
+		/* No need to wait for device no longer present. */
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	} else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
+		/*
+		 * Whoever put another handler there, has to cope with the
+		 * interrupt theirself. Might happen if qdio_shutdown was
+		 * called on already shutdown queues, but this shouldn't have
+		 * bad side effects.
+		 */
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	} else if (rc == 0) {
+		/* wait for the interrupt that finishes the halt/clear */
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
+		ccw_device_set_timeout(cdev, timeout);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
+
+		wait_event(cdev->private->wait_q,
+			   irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
+			   irq_ptr->state == QDIO_IRQ_STATE_ERR);
+	} else {
+		QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
+				"device %s\n", rc, cdev->dev.bus_id);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		result = rc;
+		goto out;
+	}
+	if (irq_ptr->is_thinint_irq) {
+		qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
+		tiqdio_set_subchannel_ind(irq_ptr,1);
+		/* reset adapter interrupt indicators */
+	}
+
+	/* exchange int handlers, if necessary */
+	if ((void*)cdev->handler == (void*)qdio_handler)
+		cdev->handler=irq_ptr->original_int_handler;
+
+	/* Ignore errors. */
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+	ccw_device_set_timeout(cdev, 0);
+out:
+	up(&irq_ptr->setting_up_sema);
+	return result;
+}
+
+/*
+ * Detach the qdio data from the device, release all memory associated
+ * with it and drop the module reference taken during setup.
+ */
+int
+qdio_free(struct ccw_device *cdev)
+{
+	struct qdio_irq *irq_ptr;
+	char dbf_text[15];
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	down(&irq_ptr->setting_up_sema);
+
+	sprintf(dbf_text, "qfqs%4x", irq_ptr->irq);
+	QDIO_DBF_TEXT1(0, trace, dbf_text);
+	QDIO_DBF_TEXT0(0, setup, dbf_text);
+
+	/* detach from the device before freeing the memory */
+	cdev->private->qdio_data = NULL;
+
+	up(&irq_ptr->setting_up_sema);
+
+	qdio_release_irq_memory(irq_ptr);
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+/*
+ * Dump all fields of the caller-supplied qdio_initialize block into the
+ * setup debug feature, so a failing setup can be reconstructed from the
+ * s390dbf traces.  Pure tracing -- no side effects on init_data.
+ */
+static inline void
+qdio_allocate_do_dbf(struct qdio_initialize *init_data)
+{
+	char dbf_text[20]; /* if a printf printed out more than 8 chars */
+
+	sprintf(dbf_text,"qfmt:%x",init_data->q_format);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
+	sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
+	QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
+	QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
+	sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
+	QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
+	QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
+	QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
+	QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
+	QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
+}
+
+/*
+ * Fill the i-th input queue descriptor in the QDR from the already
+ * allocated queue structures (SLIB, SL, SLSB addresses and keys).
+ */
+static inline void
+qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
+{
+	struct qdio_q *q = irq_ptr->input_qs[i];
+
+	q->is_iqdio_q = iqfmt;
+	q->is_thinint_q = irq_ptr->is_thinint_irq;
+
+	irq_ptr->qdr->qdf0[i].sliba = (unsigned long)(q->slib);
+	irq_ptr->qdr->qdf0[i].sla = (unsigned long)(q->sl);
+	irq_ptr->qdr->qdf0[i].slsba =
+		(unsigned long)(&q->slsb.acc.val[0]);
+
+	irq_ptr->qdr->qdf0[i].akey = QDIO_STORAGE_KEY;
+	irq_ptr->qdr->qdf0[i].bkey = QDIO_STORAGE_KEY;
+	irq_ptr->qdr->qdf0[i].ckey = QDIO_STORAGE_KEY;
+	irq_ptr->qdr->qdf0[i].dkey = QDIO_STORAGE_KEY;
+}
+
+/*
+ * Fill the i-th output queue descriptor in the QDR.  Output descriptors
+ * follow the j (= number of input queues) input descriptors.
+ */
+static inline void
+qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
+			       int j, int iqfmt)
+{
+	struct qdio_q *q = irq_ptr->output_qs[i];
+
+	q->is_iqdio_q = iqfmt;
+	q->is_thinint_q = irq_ptr->is_thinint_irq;
+
+	irq_ptr->qdr->qdf0[i+j].sliba = (unsigned long)(q->slib);
+	irq_ptr->qdr->qdf0[i+j].sla = (unsigned long)(q->sl);
+	irq_ptr->qdr->qdf0[i+j].slsba =
+		(unsigned long)(&q->slsb.acc.val[0]);
+
+	irq_ptr->qdr->qdf0[i+j].akey = QDIO_STORAGE_KEY;
+	irq_ptr->qdr->qdf0[i+j].bkey = QDIO_STORAGE_KEY;
+	irq_ptr->qdr->qdf0[i+j].ckey = QDIO_STORAGE_KEY;
+	irq_ptr->qdr->qdf0[i+j].dkey = QDIO_STORAGE_KEY;
+}
+
+
+/*
+ * Propagate the per-subchannel SIGA requirement bits (qdioac) into the
+ * per-queue flags of every input queue.
+ */
+static inline void
+qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
+{
+	int i;
+
+	for (i = 0; i < irq_ptr->no_input_qs; i++) {
+		struct qdio_q *q = irq_ptr->input_qs[i];
+
+		q->siga_sync =
+			irq_ptr->qdioac & CHSC_FLAG_SIGA_SYNC_NECESSARY;
+		q->siga_in =
+			irq_ptr->qdioac & CHSC_FLAG_SIGA_INPUT_NECESSARY;
+		q->siga_out =
+			irq_ptr->qdioac & CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
+		q->siga_sync_done_on_thinints =
+			irq_ptr->qdioac & CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
+		q->hydra_gives_outbound_pcis =
+			irq_ptr->hydra_gives_outbound_pcis;
+		/* set only if BOTH "sync done" conditions hold */
+		q->siga_sync_done_on_outb_tis =
+			((irq_ptr->qdioac &
+			  (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS |
+			   CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)) ==
+			 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS |
+			  CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
+	}
+}
+
+/*
+ * Propagate the per-subchannel SIGA requirement bits (qdioac) into the
+ * per-queue flags of every output queue.
+ */
+static inline void
+qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
+{
+	int i;
+
+	for (i = 0; i < irq_ptr->no_output_qs; i++) {
+		struct qdio_q *q = irq_ptr->output_qs[i];
+
+		q->siga_sync =
+			irq_ptr->qdioac & CHSC_FLAG_SIGA_SYNC_NECESSARY;
+		q->siga_in =
+			irq_ptr->qdioac & CHSC_FLAG_SIGA_INPUT_NECESSARY;
+		q->siga_out =
+			irq_ptr->qdioac & CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
+		q->siga_sync_done_on_thinints =
+			irq_ptr->qdioac & CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
+		q->hydra_gives_outbound_pcis =
+			irq_ptr->hydra_gives_outbound_pcis;
+		/* set only if BOTH "sync done" conditions hold */
+		q->siga_sync_done_on_outb_tis =
+			((irq_ptr->qdioac &
+			  (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS |
+			   CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)) ==
+			 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS |
+			  CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
+	}
+}
+
+/*
+ * Inspect the channel/device status of the establish-queues interrupt.
+ * Returns 1 (and sets the irq state to ERR) if the establish failed,
+ * 0 if it completed with the expected channel end / device end.
+ *
+ * NOTE(review): the first check overlaps the third one (both test
+ * dstat & ~(CHN_END|DEV_END)); the first additionally covers cstat but
+ * only sets the ERR state without returning -- presumably intentional
+ * so the dstat checks below still produce their specific messages;
+ * confirm before simplifying.
+ */
+static inline int
+qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
+				    int dstat)
+{
+	char dbf_text[15];
+	struct qdio_irq *irq_ptr;
+
+	irq_ptr = cdev->private->qdio_data;
+
+	/* any channel check or unexpected device status? */
+	if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
+		sprintf(dbf_text,"ick1%4x",irq_ptr->irq);
+		QDIO_DBF_TEXT2(1,trace,dbf_text);
+		QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
+		QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
+		QDIO_PRINT_ERR("received check condition on establish " \
+			       "queues on irq 0x%x (cs=x%x, ds=x%x).\n",
+			       irq_ptr->irq,cstat,dstat);
+		qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
+	}
+
+	/* establish must complete with device end */
+	if (!(dstat & DEV_STAT_DEV_END)) {
+		QDIO_DBF_TEXT2(1,setup,"eq:no de");
+		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
+		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
+		QDIO_PRINT_ERR("establish queues on irq %04x: didn't get "
+			       "device end: dstat=%02x, cstat=%02x\n",
+			       irq_ptr->irq, dstat, cstat);
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+		return 1;
+	}
+
+	/* ...and with nothing besides channel end / device end */
+	if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
+		QDIO_DBF_TEXT2(1,setup,"eq:badio");
+		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
+		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
+		QDIO_PRINT_ERR("establish queues on irq %04x: got "
+			       "the following devstat: dstat=%02x, "
+			       "cstat=%02x\n",
+			       irq_ptr->irq, dstat, cstat);
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Interrupt handler for the establish-queues I/O: either flag the error
+ * or move the irq to the ESTABLISHED state.  In both cases the
+ * establish timeout is cancelled.
+ */
+static void
+qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	char dbf_text[15];
+
+	sprintf(dbf_text, "qehi%4x", cdev->private->irq);
+	QDIO_DBF_TEXT0(0, setup, dbf_text);
+	QDIO_DBF_TEXT0(0, trace, dbf_text);
+
+	if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
+		/* state was already set to ERR by the check above */
+		ccw_device_set_timeout(cdev, 0);
+		return;
+	}
+
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+	ccw_device_set_timeout(cdev, 0);
+}
+
+/*
+ * Convenience wrapper: allocate and establish the queues described by
+ * init_data in one call.  If the establish fails, the memory allocated
+ * for the queues is released again.
+ */
+int
+qdio_initialize(struct qdio_initialize *init_data)
+{
+	char dbf_text[15];
+	int rc;
+
+	sprintf(dbf_text, "qini%4x", init_data->cdev->private->irq);
+	QDIO_DBF_TEXT0(0, setup, dbf_text);
+	QDIO_DBF_TEXT0(0, trace, dbf_text);
+
+	rc = qdio_allocate(init_data);
+	if (rc)
+		return rc;
+
+	rc = qdio_establish(init_data);
+	if (rc)
+		qdio_free(init_data->cdev);
+
+	return rc;
+}
+
+
+/*
+ * Validate the caller-supplied initialization data and allocate the
+ * per-subchannel qdio_irq structure, the QDR and the queue structures.
+ * The caller continues with qdio_establish().
+ */
+int
+qdio_allocate(struct qdio_initialize *init_data)
+{
+	struct qdio_irq *irq_ptr;
+	char dbf_text[15];
+
+	sprintf(dbf_text, "qalc%4x", init_data->cdev->private->irq);
+	QDIO_DBF_TEXT0(0, setup, dbf_text);
+	QDIO_DBF_TEXT0(0, trace, dbf_text);
+
+	/* sanity checks on the requested queue configuration */
+	if (init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
+	    init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
+		return -EINVAL;
+	if (init_data->no_input_qs && !init_data->input_handler)
+		return -EINVAL;
+	if (init_data->no_output_qs && !init_data->output_handler)
+		return -EINVAL;
+	if (!init_data->input_sbal_addr_array ||
+	    !init_data->output_sbal_addr_array)
+		return -EINVAL;
+
+	qdio_allocate_do_dbf(init_data);
+
+	/* create irq */
+	irq_ptr = kmalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA);
+
+	QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
+	QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void*));
+
+	if (!irq_ptr) {
+		QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n");
+		return -ENOMEM;
+	}
+	memset(irq_ptr, 0, sizeof(struct qdio_irq));
+
+	init_MUTEX(&irq_ptr->setting_up_sema);
+
+	/* QDR must be in DMA area since CCW data address is only 32 bit */
+	irq_ptr->qdr = kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
+	if (!irq_ptr->qdr) {
+		kfree(irq_ptr);
+		QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
+		return -ENOMEM;
+	}
+	QDIO_DBF_TEXT0(0, setup, "qdr:");
+	QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void*));
+
+	if (qdio_alloc_qs(irq_ptr,
+			  init_data->no_input_qs,
+			  init_data->no_output_qs)) {
+		qdio_release_irq_memory(irq_ptr);
+		return -ENOMEM;
+	}
+
+	init_data->cdev->private->qdio_data = irq_ptr;
+
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+
+	return 0;
+}
+
+/*
+ * Fill a previously allocated qdio_irq (see qdio_allocate) with the
+ * data from init_data: queue structures, QDR, QIB, thin-interrupt
+ * indicator and the establish/activate CCW commands; finally install
+ * qdio_handler as the device's interrupt handler.
+ *
+ * Returns 0, -ENOBUFS if no thinint indicator is available, or -EINVAL
+ * if the module reference cannot be taken.
+ */
+int qdio_fill_irq(struct qdio_initialize *init_data)
+{
+	int i;
+	char dbf_text[15];
+	struct ciw *ciw;
+	int is_iqdio;
+	struct qdio_irq *irq_ptr;
+
+	irq_ptr = init_data->cdev->private->qdio_data;
+
+	/* wipe everything up to (but excluding) the qdr pointer, which
+	 * was allocated separately and must survive */
+	memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
+
+	/* wipes qib.ac, required by ar7063 */
+	memset(irq_ptr->qdr,0,sizeof(struct qdr));
+
+	irq_ptr->int_parm=init_data->int_parm;
+
+	irq_ptr->irq = init_data->cdev->private->irq;
+	irq_ptr->no_input_qs=init_data->no_input_qs;
+	irq_ptr->no_output_qs=init_data->no_output_qs;
+
+	/* IQDIO (HiperSockets) always uses thin interrupts; otherwise
+	 * it depends on the adapter capability found at module init */
+	if (init_data->q_format==QDIO_IQDIO_QFMT) {
+		irq_ptr->is_iqdio_irq=1;
+		irq_ptr->is_thinint_irq=1;
+	} else {
+		irq_ptr->is_iqdio_irq=0;
+		irq_ptr->is_thinint_irq=hydra_thinints;
+	}
+	sprintf(dbf_text,"is_i_t%1x%1x",
+		irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
+	QDIO_DBF_TEXT2(0,setup,dbf_text);
+
+	/* reserve a device-state-change indicator bit for thinint */
+	if (irq_ptr->is_thinint_irq) {
+		irq_ptr->dev_st_chg_ind=qdio_get_indicator();
+		QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
+		if (!irq_ptr->dev_st_chg_ind) {
+			QDIO_PRINT_WARN("no indicator location available " \
+					"for irq 0x%x\n",irq_ptr->irq);
+			qdio_release_irq_memory(irq_ptr);
+			return -ENOBUFS;
+		}
+	}
+
+	/* defaults */
+	irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
+	irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
+	irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
+	irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
+
+	qdio_fill_qs(irq_ptr, init_data->cdev,
+		     init_data->no_input_qs,
+		     init_data->no_output_qs,
+		     init_data->input_handler,
+		     init_data->output_handler,init_data->int_parm,
+		     init_data->q_format,init_data->flags,
+		     init_data->input_sbal_addr_array,
+		     init_data->output_sbal_addr_array);
+
+	if (!try_module_get(THIS_MODULE)) {
+		QDIO_PRINT_CRIT("try_module_get() failed!\n");
+		qdio_release_irq_memory(irq_ptr);
+		return -EINVAL;
+	}
+
+	qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
+			     init_data->no_output_qs,
+			     init_data->min_input_threshold,
+			     init_data->max_input_threshold,
+			     init_data->min_output_threshold,
+			     init_data->max_output_threshold);
+
+	/* fill in qdr */
+	irq_ptr->qdr->qfmt=init_data->q_format;
+	irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
+	irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
+	irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
+	irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
+
+	irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
+	irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
+
+	/* fill in qib */
+	irq_ptr->qib.qfmt=init_data->q_format;
+	if (init_data->no_input_qs)
+		irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
+	if (init_data->no_output_qs)
+		irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
+	memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
+
+	qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
+			     init_data->qib_param_field,
+			     init_data->no_input_qs,
+			     init_data->no_output_qs,
+			     init_data->input_slib_elements,
+			     init_data->output_slib_elements);
+
+	/* first input descriptors, then output descriptors */
+	is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
+	for (i=0;i<init_data->no_input_qs;i++)
+		qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
+
+	for (i=0;i<init_data->no_output_qs;i++)
+		qdio_allocate_fill_output_desc(irq_ptr, i,
+					       init_data->no_input_qs,
+					       is_iqdio);
+
+	/* qdr, qib, sls, slsbs, slibs, sbales filled. */
+
+	/* get qdio commands */
+	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+	if (!ciw) {
+		QDIO_DBF_TEXT2(1,setup,"no eq");
+		QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
+				"Trying to use default.\n");
+	} else
+		irq_ptr->equeue = *ciw;
+	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+	if (!ciw) {
+		QDIO_DBF_TEXT2(1,setup,"no aq");
+		QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
+				"Trying to use default.\n");
+	} else
+		irq_ptr->aqueue = *ciw;
+
+	/* Set new interrupt handler. */
+	irq_ptr->original_int_handler = init_data->cdev->handler;
+	init_data->cdev->handler = qdio_handler;
+
+	return 0;
+}
+
+/*
+ * Establish the queues on the subchannel: fill the qdio_irq, register
+ * the thinint indicators if applicable, start the establish CCW and
+ * wait for it to complete; then query and distribute the SIGA
+ * requirement flags.  On any failure the queues are shut down again.
+ *
+ * Fix: the debug-feature entry logged after the second (retry)
+ * ccw_device_start_timeout failure recorded 'result' instead of
+ * 'result2', hiding the retry's actual return code.
+ */
+int
+qdio_establish(struct qdio_initialize *init_data)
+{
+	struct qdio_irq *irq_ptr;
+	unsigned long saveflags;
+	int result, result2;
+	struct ccw_device *cdev;
+	char dbf_text[20];
+
+	cdev=init_data->cdev;
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -EINVAL;
+
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EINVAL;
+
+	down(&irq_ptr->setting_up_sema);
+
+	qdio_fill_irq(init_data);
+
+	/* the thinint CHSC stuff */
+	if (irq_ptr->is_thinint_irq) {
+
+		result = tiqdio_set_subchannel_ind(irq_ptr,0);
+		if (result) {
+			up(&irq_ptr->setting_up_sema);
+			qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+			return result;
+		}
+		tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
+	}
+
+	sprintf(dbf_text,"qest%4x",cdev->private->irq);
+	QDIO_DBF_TEXT0(0,setup,dbf_text);
+	QDIO_DBF_TEXT0(0,trace,dbf_text);
+
+	/* establish q */
+	irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
+	irq_ptr->ccw.flags=CCW_FLAG_SLI;
+	irq_ptr->ccw.count=irq_ptr->equeue.count;
+	irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
+
+	ccw_device_set_options(cdev, 0);
+	result=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
+					QDIO_DOING_ESTABLISH,0, 0,
+					QDIO_ESTABLISH_TIMEOUT);
+	if (result) {
+		/* retry once before giving up */
+		result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
+						 QDIO_DOING_ESTABLISH,0,0,
+						 QDIO_ESTABLISH_TIMEOUT);
+		sprintf(dbf_text,"eq:io%4x",result);
+		QDIO_DBF_TEXT2(1,setup,dbf_text);
+		if (result2) {
+			sprintf(dbf_text,"eq:io%4x",result2);
+			QDIO_DBF_TEXT2(1,setup,dbf_text);
+		}
+		QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \
+				"returned %i, next try returned %i\n",
+				irq_ptr->irq,result,result2);
+		result=result2;
+		if (result)
+			ccw_device_set_timeout(cdev, 0);
+	}
+
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
+
+	if (result) {
+		up(&irq_ptr->setting_up_sema);
+		qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
+		return result;
+	}
+
+	/* the interrupt handler moves the state to ESTABLISHED or ERR */
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+		irq_ptr->state == QDIO_IRQ_STATE_ERR,
+		QDIO_ESTABLISH_TIMEOUT);
+
+	if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
+		result = 0;
+	else {
+		up(&irq_ptr->setting_up_sema);
+		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+		return -EIO;
+	}
+
+	irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq);
+	/* if this gets set once, we're running under VM and can omit SVSes */
+	if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
+		omit_svs=1;
+
+	sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
+	QDIO_DBF_TEXT2(0,setup,dbf_text);
+
+	sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
+	QDIO_DBF_TEXT2(0,setup,dbf_text);
+
+	irq_ptr->hydra_gives_outbound_pcis=
+		irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
+	irq_ptr->sync_done_on_outb_pcis=
+		irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
+
+	qdio_initialize_set_siga_flags_input(irq_ptr);
+	qdio_initialize_set_siga_flags_output(irq_ptr);
+
+	up(&irq_ptr->setting_up_sema);
+
+	return result;
+
+}
+
+/*
+ * Activate previously established queues: start the activate CCW,
+ * register the input queues for thin interrupts if applicable and wait
+ * until the queues either run or report an error.
+ *
+ * Fix: the debug-feature entry logged after the second (retry)
+ * ccw_device_start failure recorded 'result' instead of 'result2',
+ * hiding the retry's actual return code.
+ */
+int
+qdio_activate(struct ccw_device *cdev, int flags)
+{
+	struct qdio_irq *irq_ptr;
+	int i,result=0,result2;
+	unsigned long saveflags;
+	char dbf_text[20]; /* see qdio_initialize */
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EINVAL;
+
+	down(&irq_ptr->setting_up_sema);
+	/* NOTE(review): rejecting the INACTIVE state with -EBUSY looks
+	 * inverted (one would expect activation to require ESTABLISHED
+	 * and refuse an already active subchannel) -- kept as-is,
+	 * confirm against the callers before changing. */
+	if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
+		result=-EBUSY;
+		goto out;
+	}
+
+	sprintf(dbf_text,"qact%4x", irq_ptr->irq);
+	QDIO_DBF_TEXT2(0,setup,dbf_text);
+	QDIO_DBF_TEXT2(0,trace,dbf_text);
+
+	/* activate q */
+	irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
+	irq_ptr->ccw.flags=CCW_FLAG_SLI;
+	irq_ptr->ccw.count=irq_ptr->aqueue.count;
+	irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
+
+	ccw_device_set_timeout(cdev, 0);
+	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
+	result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
+				0, DOIO_DENY_PREFETCH);
+	if (result) {
+		/* retry once before giving up */
+		result2=ccw_device_start(cdev,&irq_ptr->ccw,
+					 QDIO_DOING_ACTIVATE,0,0);
+		sprintf(dbf_text,"aq:io%4x",result);
+		QDIO_DBF_TEXT2(1,setup,dbf_text);
+		if (result2) {
+			sprintf(dbf_text,"aq:io%4x",result2);
+			QDIO_DBF_TEXT2(1,setup,dbf_text);
+		}
+		QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \
+				"returned %i, next try returned %i\n",
+				irq_ptr->irq,result,result2);
+		result=result2;
+	}
+
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
+	if (result)
+		goto out;
+
+	for (i=0;i<irq_ptr->no_input_qs;i++) {
+		if (irq_ptr->is_thinint_irq) {
+			/*
+			 * that way we know, that, if we will get interrupted
+			 * by tiqdio_inbound_processing, qdio_unmark_q will
+			 * not be called
+			 */
+			qdio_reserve_q(irq_ptr->input_qs[i]);
+			qdio_mark_tiq(irq_ptr->input_qs[i]);
+			qdio_release_q(irq_ptr->input_qs[i]);
+		}
+	}
+
+	if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
+		for (i=0;i<irq_ptr->no_input_qs;i++) {
+			irq_ptr->input_qs[i]->is_input_q|=
+				QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
+		}
+	}
+
+	/* wait until the queues are stopped (error) or simply running */
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+					 ((irq_ptr->state ==
+					  QDIO_IRQ_STATE_STOPPED) ||
+					  (irq_ptr->state ==
+					   QDIO_IRQ_STATE_ERR)),
+					 QDIO_ACTIVATE_TIMEOUT);
+
+	switch (irq_ptr->state) {
+	case QDIO_IRQ_STATE_STOPPED:
+	case QDIO_IRQ_STATE_ERR:
+		/* release the semaphore for qdio_shutdown, re-take it
+		 * so the out: label's up() stays balanced */
+		up(&irq_ptr->setting_up_sema);
+		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+		down(&irq_ptr->setting_up_sema);
+		result = -EIO;
+		break;
+	default:
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
+		result = 0;
+	}
+ out:
+	up(&irq_ptr->setting_up_sema);
+
+	return result;
+}
+
+/* buffers filled forwards again to make Rick happy */
+/*
+ * Mark 'count' input buffers, starting at qidx, as empty (owned by the
+ * control unit) in the SLSB, wrapping around the queue as needed.
+ */
+static inline void
+qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
+			unsigned int count, struct qdio_buffer *buffers)
+{
+	do {
+		set_slsb(&q->slsb.acc.val[qidx], SLSB_CU_INPUT_EMPTY);
+		qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
+	} while (--count);
+
+	/* not necessary, as the queues are synced during the SIGA read */
+	/*SYNC_MEMORY;*/
+}
+
+/*
+ * Mark 'count' output buffers, starting at qidx, as primed (ready for
+ * the control unit) in the SLSB, wrapping around the queue as needed.
+ */
+static inline void
+qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
+			 unsigned int count, struct qdio_buffer *buffers)
+{
+	do {
+		set_slsb(&q->slsb.acc.val[qidx], SLSB_CU_OUTPUT_PRIMED);
+		qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
+	} while (--count);
+
+	/* SIGA write will sync the queues */
+	/*SYNC_MEMORY;*/
+}
+
+/*
+ * do_QDIO helper for the inbound path: hand 'count' empty buffers back
+ * to the adapter, issue a SIGA-input if the queue had run empty (and
+ * the caller didn't forbid SIGAs) and mark the queue for processing.
+ * The atomic add must stay before/around the SLSB update as written --
+ * ordering matters for concurrent interrupt processing.
+ */
+static inline void
+do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
+		       unsigned int qidx, unsigned int count,
+		       struct qdio_buffer *buffers)
+{
+	int used_elements;
+
+	/* This is the inbound handling of queues */
+	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
+
+	qdio_do_qdio_fill_input(q,qidx,count,buffers);
+
+	/* queue is completely refilled from interrupt context: stop the
+	 * polling state */
+	if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
+	    (callflags&QDIO_FLAG_UNDER_INTERRUPT))
+		atomic_swap(&q->polling,0);
+
+	if (used_elements)
+		return;
+	if (callflags&QDIO_FLAG_DONT_SIGA)
+		return;
+	if (q->siga_in) {
+		int result;
+
+		result=qdio_siga_input(q);
+		if (result) {
+			/* remember the error for the upper layer's status */
+			if (q->siga_error)
+				q->error_status_flags|=
+					QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+			q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
+			q->siga_error=result;
+		}
+	}
+
+	qdio_mark_q(q);
+}
+
+/*
+ * do_QDIO helper for the outbound path: prime 'count' buffers for the
+ * adapter and kick the queue with SIGA-write as required by the queue
+ * format and SIGA flags, then trigger outbound processing.
+ *
+ * CAUTION: the bare "SYNC_MEMORY;" followed by "else" below is NOT a
+ * syntax error -- SYNC_MEMORY is a macro expanding to an if statement,
+ * and the else branch belongs to that hidden if (see the ":-/" comment
+ * left by the original authors).  Do not "clean this up" without
+ * checking the macro definition.
+ */
+static inline void
+do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
+			unsigned int qidx, unsigned int count,
+			struct qdio_buffer *buffers)
+{
+	int used_elements;
+
+	/* This is the outbound handling of queues */
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.start_time_outbound=NOW;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	qdio_do_qdio_fill_output(q,qidx,count,buffers);
+
+	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
+
+	if (callflags&QDIO_FLAG_DONT_SIGA) {
+#ifdef QDIO_PERFORMANCE_STATS
+		perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
+		perf_stats.outbound_cnt++;
+#endif /* QDIO_PERFORMANCE_STATS */
+		return;
+	}
+	if (q->is_iqdio_q) {
+		/* one siga for every sbal */
+		while (count--)
+			qdio_kick_outbound_q(q);
+
+		__qdio_outbound_processing(q);
+	} else {
+		/* under VM, we do a SIGA sync unconditionally */
+		SYNC_MEMORY;
+		else {
+			/*
+			 * w/o shadow queues (else branch of
+			 * SYNC_MEMORY :-/ ), we try to
+			 * fast-requeue buffers
+			 */
+			if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
+					    &(QDIO_MAX_BUFFERS_PER_Q-1)]!=
+			    SLSB_CU_OUTPUT_PRIMED) {
+				qdio_kick_outbound_q(q);
+			} else {
+				QDIO_DBF_TEXT3(0,trace, "fast-req");
+#ifdef QDIO_PERFORMANCE_STATS
+				perf_stats.fast_reqs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+			}
+		}
+		/*
+		 * only marking the q could take too long,
+		 * the upper layer module could do a lot of
+		 * traffic in that time
+		 */
+		__qdio_outbound_processing(q);
+	}
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
+	perf_stats.outbound_cnt++;
+#endif /* QDIO_PERFORMANCE_STATS */
+}
+
+/*
+ * do_QDIO - pass buffers to (QDIO_FLAG_SYNC_INPUT) or hand buffers
+ * over for transmission on (QDIO_FLAG_SYNC_OUTPUT) a queue of the
+ * given subchannel.  queue_number selects the queue, qidx the first
+ * buffer, count the number of buffers.
+ * count must be 1 in iqdio.
+ * Returns 0 on success, -EINVAL on invalid parameters or a missing
+ * direction flag, -ENODEV if no qdio data is attached to the device,
+ * -EBUSY if the subchannel is not in the ACTIVE state.
+ */
+int
+do_QDIO(struct ccw_device *cdev,unsigned int callflags,
+	unsigned int queue_number, unsigned int qidx,
+	unsigned int count,struct qdio_buffer *buffers)
+{
+	struct qdio_irq *irq_ptr;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[20];
+
+	sprintf(dbf_text,"doQD%04x",cdev->private->irq);
+	QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	/*
+	 * qidx and queue_number are array indices, so the maximum value
+	 * itself is out of range (was '>', an off-by-one that allowed
+	 * an out-of-bounds access); count may legitimately equal
+	 * QDIO_MAX_BUFFERS_PER_Q.
+	 */
+	if ( (qidx>=QDIO_MAX_BUFFERS_PER_Q) ||
+	     (count>QDIO_MAX_BUFFERS_PER_Q) ||
+	     (queue_number>=QDIO_MAX_QUEUES_PER_IRQ) )
+		return -EINVAL;
+
+	/* nothing to do */
+	if (count==0)
+		return 0;
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+#ifdef CONFIG_QDIO_DEBUG
+	if (callflags&QDIO_FLAG_SYNC_INPUT)
+		QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
+			      sizeof(void*));
+	else
+		QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
+			      sizeof(void*));
+	sprintf(dbf_text,"flag%04x",callflags);
+	QDIO_DBF_TEXT3(0,trace,dbf_text);
+	sprintf(dbf_text,"qi%02xct%02x",qidx,count);
+	QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
+		return -EBUSY;
+
+	if (callflags&QDIO_FLAG_SYNC_INPUT)
+		do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
+				       callflags, qidx, count, buffers);
+	else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
+		do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
+					callflags, qidx, count, buffers);
+	else {
+		QDIO_DBF_TEXT3(1,trace,"doQD:inv");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#ifdef QDIO_PERFORMANCE_STATS
+/*
+ * read_proc handler for /proc/qdio_perf: dump all performance
+ * counters as text.  The whole report fits into the 4k page the proc
+ * layer hands us, so any read at offset > 0 simply reports EOF.
+ */
+static int
+qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
+			int buffer_length, int *eof, void *data)
+{
+	int c=0;
+
+	/* we are always called with buffer_length=4k, so we all
+	   deliver on the first read */
+	if (offset>0)
+		return 0;
+
+/* append one formatted line, advancing the running byte count c */
+#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
+	_OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
+	_OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
+	_OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
+	_OUTP_IT("Number of tasklet runs (total)                  : %u\n",
+		 perf_stats.tl_runs);
+	_OUTP_IT("\n");
+	_OUTP_IT("Number of SIGA sync's issued                    : %u\n",
+		 perf_stats.siga_syncs);
+	_OUTP_IT("Number of SIGA in's issued                      : %u\n",
+		 perf_stats.siga_ins);
+	_OUTP_IT("Number of SIGA out's issued                     : %u\n",
+		 perf_stats.siga_outs);
+	_OUTP_IT("Number of PCIs caught                           : %u\n",
+		 perf_stats.pcis);
+	_OUTP_IT("Number of adapter interrupts caught             : %u\n",
+		 perf_stats.thinints);
+	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %u\n",
+		 perf_stats.fast_reqs);
+	_OUTP_IT("\n");
+	_OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n",
+		 perf_stats.inbound_time);
+	_OUTP_IT("Number of inbound transfers                     : %u\n",
+		 perf_stats.inbound_cnt);
+	_OUTP_IT("Total time of all outbound do_QDIOs (us)        : %u\n",
+		 perf_stats.outbound_time);
+	_OUTP_IT("Number of do_QDIOs outbound                     : %u\n",
+		 perf_stats.outbound_cnt);
+	_OUTP_IT("\n");
+
+	return c;
+}
+
+static struct proc_dir_entry *qdio_perf_proc_file;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+/*
+ * Create the read-only /proc/qdio_perf entry.  A failure is recorded
+ * in proc_perf_file_registration and only warned about -- the
+ * statistics file is not essential for operation.  No-op unless
+ * QDIO_PERFORMANCE_STATS is configured.
+ */
+static void
+qdio_add_procfs_entry(void)
+{
+#ifdef QDIO_PERFORMANCE_STATS
+	proc_perf_file_registration=0;
+	qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
+					      S_IFREG|0444,&proc_root);
+	if (qdio_perf_proc_file) {
+		qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
+	} else proc_perf_file_registration=-1;
+
+        if (proc_perf_file_registration)
+                QDIO_PRINT_WARN("was not able to register perf. " \
+				"proc-file (%i).\n",
+				proc_perf_file_registration);
+#endif /* QDIO_PERFORMANCE_STATS */
+}
+
+/*
+ * Remove /proc/qdio_perf if (and only if) qdio_add_procfs_entry()
+ * succeeded earlier.  NOTE(review): the tl_runs counter is zeroed
+ * here as well -- presumably so a reloaded module starts from a clean
+ * count; confirm before relying on it.
+ */
+static void
+qdio_remove_procfs_entry(void)
+{
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.tl_runs=0;
+
+	if (!proc_perf_file_registration) /* means if it went ok earlier */
+		remove_proc_entry(QDIO_PERF,&proc_root);
+#endif /* QDIO_PERFORMANCE_STATS */
+}
+
+/*
+ * Register the global adapter (thin) interrupt handler.  The outcome
+ * is kept in register_thinint_result so tiqdio_unregister_thinints()
+ * knows whether there is anything to unregister.  Failure is not
+ * fatal: qdio keeps working, just without adapter interrupts.
+ */
+static void
+tiqdio_register_thinints(void)
+{
+	char dbf_text[20];
+	register_thinint_result=
+		s390_register_adapter_interrupt(&tiqdio_thinint_handler);
+	if (register_thinint_result) {
+		sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff));
+		QDIO_DBF_TEXT0(0,setup,dbf_text);
+		QDIO_PRINT_ERR("failed to register adapter handler " \
+			       "(rc=%i).\nAdapter interrupts might " \
+			       "not work. Continuing.\n",
+			       register_thinint_result);
+	}
+}
+
+/* Undo tiqdio_register_thinints(), but only if it had succeeded. */
+static void
+tiqdio_unregister_thinints(void)
+{
+	if (!register_thinint_result)
+		s390_unregister_adapter_interrupt(&tiqdio_thinint_handler);
+}
+
+/*
+ * Allocate and zero the cacheline of device state change indicators
+ * shared by the thin-interrupt queues.  Indicator 0 is reserved
+ * (marked used) up front.  Returns 0 on success, -ENOMEM on
+ * allocation failure.
+ */
+static int
+qdio_get_qdio_memory(void)
+{
+	int i;
+	indicator_used[0]=1;
+
+	for (i=1;i<INDICATORS_PER_CACHELINE;i++)
+		indicator_used[i]=0;
+	/* no cast needed -- kmalloc returns void * */
+	indicators = kmalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
+			     GFP_KERNEL);
+	if (!indicators)
+		return -ENOMEM;
+	memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE));
+	return 0;
+}
+
+/*
+ * Free the indicator cacheline allocated by qdio_get_qdio_memory().
+ * kfree(NULL) is a no-op, so no pointer check is needed.
+ */
+static void
+qdio_release_qdio_memory(void)
+{
+	kfree(indicators);
+}
+
+/*
+ * Unregister every debug feature area that was successfully
+ * registered; safe to call from the partial-failure path of
+ * qdio_register_dbf_views() since unregistered areas are NULL.
+ */
+static void
+qdio_unregister_dbf_views(void)
+{
+	if (qdio_dbf_setup)
+		debug_unregister(qdio_dbf_setup);
+	if (qdio_dbf_sbal)
+		debug_unregister(qdio_dbf_sbal);
+	if (qdio_dbf_sense)
+		debug_unregister(qdio_dbf_sense);
+	if (qdio_dbf_trace)
+		debug_unregister(qdio_dbf_trace);
+#ifdef CONFIG_QDIO_DEBUG
+        if (qdio_dbf_slsb_out)
+                debug_unregister(qdio_dbf_slsb_out);
+        if (qdio_dbf_slsb_in)
+                debug_unregister(qdio_dbf_slsb_in);
+#endif /* CONFIG_QDIO_DEBUG */
+}
+
+/*
+ * Register all s390 debug feature (dbf) areas used by qdio and attach
+ * a hex/ascii view to each.  If any registration fails, everything
+ * registered so far is torn down again and -ENOMEM is returned.
+ */
+static int
+qdio_register_dbf_views(void)
+{
+	qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
+				      QDIO_DBF_SETUP_INDEX,
+				      QDIO_DBF_SETUP_NR_AREAS,
+				      QDIO_DBF_SETUP_LEN);
+	if (!qdio_dbf_setup)
+		goto oom;
+	debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
+
+	qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
+				     QDIO_DBF_SBAL_INDEX,
+				     QDIO_DBF_SBAL_NR_AREAS,
+				     QDIO_DBF_SBAL_LEN);
+	if (!qdio_dbf_sbal)
+		goto oom;
+
+	debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
+
+	qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
+				      QDIO_DBF_SENSE_INDEX,
+				      QDIO_DBF_SENSE_NR_AREAS,
+				      QDIO_DBF_SENSE_LEN);
+	if (!qdio_dbf_sense)
+		goto oom;
+
+	debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
+
+	qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
+				      QDIO_DBF_TRACE_INDEX,
+				      QDIO_DBF_TRACE_NR_AREAS,
+				      QDIO_DBF_TRACE_LEN);
+	if (!qdio_dbf_trace)
+		goto oom;
+
+	debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
+
+#ifdef CONFIG_QDIO_DEBUG
+        qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
+                                         QDIO_DBF_SLSB_OUT_INDEX,
+                                         QDIO_DBF_SLSB_OUT_NR_AREAS,
+                                         QDIO_DBF_SLSB_OUT_LEN);
+        if (!qdio_dbf_slsb_out)
+		goto oom;
+        debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
+        debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
+
+        qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
+                                        QDIO_DBF_SLSB_IN_INDEX,
+                                        QDIO_DBF_SLSB_IN_NR_AREAS,
+                                        QDIO_DBF_SLSB_IN_LEN);
+        if (!qdio_dbf_slsb_in)
+		goto oom;
+        debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
+        debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
+#endif /* CONFIG_QDIO_DEBUG */
+	return 0;
+oom:
+	QDIO_PRINT_ERR("not enough memory for dbf.\n");
+	qdio_unregister_dbf_views();
+	return -ENOMEM;
+}
+
+/*
+ * Module/built-in initialization: allocate the indicator cacheline,
+ * register the debug views and the performance proc file, check CHSC
+ * availability and register the adapter interrupt handler (the last
+ * two are best-effort and only logged on failure).
+ */
+static int __init
+init_QDIO(void)
+{
+	int res;
+#ifdef QDIO_PERFORMANCE_STATS
+	void *ptr;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	printk("qdio: loading %s\n",version);
+
+	res=qdio_get_qdio_memory();
+	if (res)
+		return res;
+
+	res = qdio_register_dbf_views();
+	if (res) {
+		/* don't leak the indicator cacheline on failure */
+		qdio_release_qdio_memory();
+		return res;
+	}
+
+	QDIO_DBF_TEXT0(0,setup,"initQDIO");
+
+#ifdef QDIO_PERFORMANCE_STATS
+	memset((void*)&perf_stats,0,sizeof(perf_stats));
+	QDIO_DBF_TEXT0(0,setup,"perfstat");
+	ptr=&perf_stats;
+	QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	qdio_add_procfs_entry();
+
+	if (tiqdio_check_chsc_availability())
+		QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
+
+	tiqdio_register_thinints();
+
+	return 0;
+}
+
+/* Module exit: tear down everything set up by init_QDIO(). */
+static void __exit
+cleanup_QDIO(void)
+{
+	tiqdio_unregister_thinints();
+	qdio_remove_procfs_entry();
+	qdio_release_qdio_memory();
+	qdio_unregister_dbf_views();
+
+	printk("qdio: %s: module removed\n",version);
+}
+
+module_init(init_QDIO);
+module_exit(cleanup_QDIO);
+
+EXPORT_SYMBOL(qdio_allocate);
+EXPORT_SYMBOL(qdio_establish);
+EXPORT_SYMBOL(qdio_initialize);
+EXPORT_SYMBOL(qdio_activate);
+EXPORT_SYMBOL(do_QDIO);
+EXPORT_SYMBOL(qdio_shutdown);
+EXPORT_SYMBOL(qdio_free);
+EXPORT_SYMBOL(qdio_cleanup);
+EXPORT_SYMBOL(qdio_synchronize);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
new file mode 100644
index 000000000000..9ad14db24143
--- /dev/null
+++ b/drivers/s390/cio/qdio.h
@@ -0,0 +1,648 @@
+#ifndef _CIO_QDIO_H
+#define _CIO_QDIO_H
+
+#define VERSION_CIO_QDIO_H "$Revision: 1.26 $"
+
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_VERBOSE_LEVEL 9
+#else /* CONFIG_QDIO_DEBUG */
+#define QDIO_VERBOSE_LEVEL 5
+#endif /* CONFIG_QDIO_DEBUG */
+
+#define QDIO_USE_PROCESSING_STATE
+
+#ifdef CONFIG_QDIO_PERF_STATS
+#define QDIO_PERFORMANCE_STATS
+#endif /* CONFIG_QDIO_PERF_STATS */
+
+#define QDIO_MINIMAL_BH_RELIEF_TIME 16
+#define QDIO_TIMER_POLL_VALUE 1
+#define IQDIO_TIMER_POLL_VALUE 1
+
+/*
+ * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as
+ * we never know, whether we'll get initiative again, e.g. to give the
+ * transmit skb's back to the stack, however the stack may be waiting for
+ * them... therefore we define 4 as threshold to start polling (which
+ * will stop as soon as the asynchronous queue catches up)
+ * btw, this only applies to the asynchronous HiperSockets queue
+ */
+#define IQDIO_FILL_LEVEL_TO_POLL 4
+
+#define TIQDIO_THININT_ISC 3
+#define TIQDIO_DELAY_TARGET 0
+#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
+#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
+#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
+#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */
+#define IQDIO_LOCAL_LAPS 4
+#define IQDIO_LOCAL_LAPS_INT 1
+#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
+/*#define IQDIO_IQDC_INT_PARM 0x1234*/
+
+#define QDIO_Q_LAPS 5
+
+#define QDIO_STORAGE_KEY 0
+
+#define L2_CACHELINE_SIZE 256
+#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
+
+#define QDIO_PERF "qdio_perf"
+
+/* must be a power of 2 */
+/*#define QDIO_STATS_NUMBER 4
+
+#define QDIO_STATS_CLASSES 2
+#define QDIO_STATS_COUNT_NEEDED 2*/
+
+#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
+ exiting without having use_count
+ of the queue to 0 */
+
+#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
+#define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10)
+#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
+#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
+
+enum qdio_irq_states {
+ QDIO_IRQ_STATE_INACTIVE,
+ QDIO_IRQ_STATE_ESTABLISHED,
+ QDIO_IRQ_STATE_ACTIVE,
+ QDIO_IRQ_STATE_STOPPED,
+ QDIO_IRQ_STATE_CLEANUP,
+ QDIO_IRQ_STATE_ERR,
+ NR_QDIO_IRQ_STATES,
+};
+
+/* used as intparm in do_IO: */
+#define QDIO_DOING_SENSEID 0
+#define QDIO_DOING_ESTABLISH 1
+#define QDIO_DOING_ACTIVATE 2
+#define QDIO_DOING_CLEANUP 3
+
+/************************* DEBUG FACILITY STUFF *********************/
+
+#define QDIO_DBF_HEX(ex,name,level,addr,len) \
+ do { \
+ if (ex) \
+ debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
+ else \
+ debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
+ } while (0)
+#define QDIO_DBF_TEXT(ex,name,level,text) \
+ do { \
+ if (ex) \
+ debug_text_exception(qdio_dbf_##name,level,text); \
+ else \
+ debug_text_event(qdio_dbf_##name,level,text); \
+ } while (0)
+
+
+#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
+#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
+#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
+#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
+#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
+#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
+#else /* CONFIG_QDIO_DEBUG */
+#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
+#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
+#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
+#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
+#endif /* CONFIG_QDIO_DEBUG */
+
+#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
+#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
+#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
+#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
+#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
+#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
+#else /* CONFIG_QDIO_DEBUG */
+#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
+#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
+#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
+#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
+#endif /* CONFIG_QDIO_DEBUG */
+
+#define QDIO_DBF_SETUP_NAME "qdio_setup"
+#define QDIO_DBF_SETUP_LEN 8
+#define QDIO_DBF_SETUP_INDEX 2
+#define QDIO_DBF_SETUP_NR_AREAS 1
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_SETUP_LEVEL 6
+#else /* CONFIG_QDIO_DEBUG */
+#define QDIO_DBF_SETUP_LEVEL 2
+#endif /* CONFIG_QDIO_DEBUG */
+
+#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
+#define QDIO_DBF_SBAL_LEN 256
+#define QDIO_DBF_SBAL_INDEX 2
+#define QDIO_DBF_SBAL_NR_AREAS 2
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_SBAL_LEVEL 6
+#else /* CONFIG_QDIO_DEBUG */
+#define QDIO_DBF_SBAL_LEVEL 2
+#endif /* CONFIG_QDIO_DEBUG */
+
+#define QDIO_DBF_TRACE_NAME "qdio_trace"
+#define QDIO_DBF_TRACE_LEN 8
+#define QDIO_DBF_TRACE_NR_AREAS 2
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_TRACE_INDEX 4
+#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
+#else /* CONFIG_QDIO_DEBUG */
+#define QDIO_DBF_TRACE_INDEX 2
+#define QDIO_DBF_TRACE_LEVEL 2
+#endif /* CONFIG_QDIO_DEBUG */
+
+#define QDIO_DBF_SENSE_NAME "qdio_sense"
+#define QDIO_DBF_SENSE_LEN 64
+#define QDIO_DBF_SENSE_INDEX 1
+#define QDIO_DBF_SENSE_NR_AREAS 1
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_SENSE_LEVEL 6
+#else /* CONFIG_QDIO_DEBUG */
+#define QDIO_DBF_SENSE_LEVEL 2
+#endif /* CONFIG_QDIO_DEBUG */
+
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
+
+#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
+#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
+#define QDIO_DBF_SLSB_OUT_INDEX 8
+#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
+#define QDIO_DBF_SLSB_OUT_LEVEL 6
+
+#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
+#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
+#define QDIO_DBF_SLSB_IN_INDEX 8
+#define QDIO_DBF_SLSB_IN_NR_AREAS 1
+#define QDIO_DBF_SLSB_IN_LEVEL 6
+#endif /* CONFIG_QDIO_DEBUG */
+
+#define QDIO_PRINTK_HEADER QDIO_NAME ": "
+
+#if QDIO_VERBOSE_LEVEL>8
+#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
+#else
+#define QDIO_PRINT_STUPID(x...)
+#endif
+
+#if QDIO_VERBOSE_LEVEL>7
+#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
+#else
+#define QDIO_PRINT_ALL(x...)
+#endif
+
+#if QDIO_VERBOSE_LEVEL>6
+#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
+#else
+#define QDIO_PRINT_INFO(x...)
+#endif
+
+#if QDIO_VERBOSE_LEVEL>5
+#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
+#else
+#define QDIO_PRINT_WARN(x...)
+#endif
+
+#if QDIO_VERBOSE_LEVEL>4
+#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
+#else
+#define QDIO_PRINT_ERR(x...)
+#endif
+
+#if QDIO_VERBOSE_LEVEL>3
+#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
+#else
+#define QDIO_PRINT_CRIT(x...)
+#endif
+
+#if QDIO_VERBOSE_LEVEL>2
+#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
+#else
+#define QDIO_PRINT_ALERT(x...)
+#endif
+
+#if QDIO_VERBOSE_LEVEL>1
+#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
+#else
+#define QDIO_PRINT_EMERG(x...)
+#endif
+
+#define HEXDUMP16(importance,header,ptr) \
+QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x\n",*(((char*)ptr)), \
+ *(((char*)ptr)+1),*(((char*)ptr)+2), \
+ *(((char*)ptr)+3),*(((char*)ptr)+4), \
+ *(((char*)ptr)+5),*(((char*)ptr)+6), \
+ *(((char*)ptr)+7),*(((char*)ptr)+8), \
+ *(((char*)ptr)+9),*(((char*)ptr)+10), \
+ *(((char*)ptr)+11),*(((char*)ptr)+12), \
+ *(((char*)ptr)+13),*(((char*)ptr)+14), \
+ *(((char*)ptr)+15)); \
+QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)+16),*(((char*)ptr)+17), \
+ *(((char*)ptr)+18),*(((char*)ptr)+19), \
+ *(((char*)ptr)+20),*(((char*)ptr)+21), \
+ *(((char*)ptr)+22),*(((char*)ptr)+23), \
+ *(((char*)ptr)+24),*(((char*)ptr)+25), \
+ *(((char*)ptr)+26),*(((char*)ptr)+27), \
+ *(((char*)ptr)+28),*(((char*)ptr)+29), \
+ *(((char*)ptr)+30),*(((char*)ptr)+31));
+
+/****************** END OF DEBUG FACILITY STUFF *********************/
+
+/*
+ * Some instructions as assembly
+ */
+/*
+ * Issue SIGA (signal adapter) with function code 2 (synchronize) for
+ * the given subchannel.  mask1/mask2 select the queues to be synced.
+ * Register usage: r0 = function code, r1 = 0x10000|subchannel number,
+ * r2/r3 = masks.  Returns the instruction's condition code (0..3).
+ */
+extern __inline__ int
+do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
+{
+	int cc;
+
+#ifndef CONFIG_ARCH_S390X
+	asm volatile (
+		"lhi	0,2	\n\t"
+		"lr	1,%1	\n\t"
+		"lr	2,%2	\n\t"
+		"lr	3,%3	\n\t"
+		"siga   0	\n\t"
+		"ipm	%0	\n\t"
+		"srl	%0,28	\n\t"
+		: "=d" (cc)
+		: "d" (0x10000|irq), "d" (mask1), "d" (mask2)
+		: "cc", "0", "1", "2", "3"
+		);
+#else /* CONFIG_ARCH_S390X */
+	asm volatile (
+		"lghi	0,2	\n\t"
+		"llgfr	1,%1	\n\t"
+		"llgfr	2,%2	\n\t"
+		"llgfr	3,%3	\n\t"
+		"siga   0	\n\t"
+		"ipm	%0	\n\t"
+		"srl	%0,28	\n\t"
+		: "=d" (cc)
+		: "d" (0x10000|irq), "d" (mask1), "d" (mask2)
+		: "cc", "0", "1", "2", "3"
+		);
+#endif /* CONFIG_ARCH_S390X */
+	return cc;
+}
+
+/*
+ * Issue SIGA with function code 1 (input) for the given subchannel.
+ * 'mask' selects the input queues.  The "memory" clobber keeps the
+ * compiler from caching queue state across the instruction.
+ * Returns the condition code (0..3).
+ */
+extern __inline__ int
+do_siga_input(unsigned int irq, unsigned int mask)
+{
+	int cc;
+
+#ifndef CONFIG_ARCH_S390X
+	asm volatile (
+		"lhi	0,1	\n\t"
+		"lr	1,%1	\n\t"
+		"lr	2,%2	\n\t"
+		"siga   0	\n\t"
+		"ipm	%0	\n\t"
+		"srl	%0,28	\n\t"
+		: "=d" (cc)
+		: "d" (0x10000|irq), "d" (mask)
+		: "cc", "0", "1", "2", "memory"
+		);
+#else /* CONFIG_ARCH_S390X */
+	asm volatile (
+		"lghi	0,1	\n\t"
+		"llgfr	1,%1	\n\t"
+		"llgfr	2,%2	\n\t"
+		"siga   0	\n\t"
+		"ipm	%0	\n\t"
+		"srl	%0,28	\n\t"
+		: "=d" (cc)
+		: "d" (0x10000|irq), "d" (mask)
+		: "cc", "0", "1", "2", "memory"
+		);
+#endif /* CONFIG_ARCH_S390X */
+	
+	return cc;
+}
+
+/*
+ * Issue SIGA with function code 0 (write/output) for the given
+ * subchannel.  'mask' selects the output queues.  The busy bit
+ * reported by the instruction (taken from r0) is stored in *bb and
+ * the condition code is returned.  If SIGA itself raises an access
+ * exception, the fixup code jumps past the result extraction and the
+ * function returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION instead of a cc.
+ */
+extern __inline__ int
+do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
+{
+	int cc;
+	__u32 busy_bit;
+
+#ifndef CONFIG_ARCH_S390X
+	asm volatile (
+		"lhi	0,0	\n\t"
+		"lr	1,%2	\n\t"
+		"lr	2,%3	\n\t"
+		"siga	0	\n\t"
+		"0:"
+		"ipm	%0	\n\t"
+		"srl	%0,28	\n\t"
+		"srl	0,31	\n\t"
+		"lr	%1,0	\n\t"
+		"1:	\n\t"
+		".section .fixup,\"ax\"\n\t"
+		"2:	\n\t"
+		"lhi	%0,%4	\n\t"
+		"bras	1,3f	\n\t"
+		".long 1b	\n\t"
+		"3:	\n\t"
+		"l	1,0(1)	\n\t"
+		"br	1	\n\t"
+		".previous	\n\t"
+		".section __ex_table,\"a\"\n\t"
+		".align 4	\n\t"
+		".long	0b,2b	\n\t"
+		".previous	\n\t"
+		: "=d" (cc), "=d" (busy_bit)
+		: "d" (0x10000|irq), "d" (mask),
+		"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
+		: "cc", "0", "1", "2", "memory"
+		);
+#else /* CONFIG_ARCH_S390X */
+	asm volatile (
+		"lghi	0,0	\n\t"
+		"llgfr	1,%2	\n\t"
+		"llgfr	2,%3	\n\t"
+		"siga	0	\n\t"
+		"0:"
+		"ipm	%0	\n\t"
+		"srl	%0,28	\n\t"
+		"srl	0,31	\n\t"
+		"llgfr	%1,0	\n\t"
+		"1:	\n\t"
+		".section .fixup,\"ax\"\n\t"
+		"lghi	%0,%4	\n\t"
+		"jg	1b	\n\t"
+		".previous\n\t"
+		".section __ex_table,\"a\"\n\t"
+		".align 8	\n\t"
+		".quad	0b,1b	\n\t"
+		".previous	\n\t"
+		: "=d" (cc), "=d" (busy_bit)
+		: "d" (0x10000|irq), "d" (mask),
+		"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
+		: "cc", "0", "1", "2", "memory"
+		);
+#endif /* CONFIG_ARCH_S390X */
+	
+	(*bb) = busy_bit;
+	return cc;
+}
+
+/*
+ * Execute the millicoded instruction 0xb265 (.insn rre) with r1 = 3
+ * and return the value left in r3.  NOTE(review): presumably this
+ * clears the global summary indicator and yields a time value --
+ * confirm against the architecture documentation before relying on
+ * the returned quantity.
+ */
+extern __inline__ unsigned long
+do_clear_global_summary(void)
+{
+
+	unsigned long time;
+
+#ifndef CONFIG_ARCH_S390X
+	asm volatile (
+		"lhi	1,3	\n\t"
+		".insn	rre,0xb2650000,2,0	\n\t"
+		"lr	%0,3	\n\t"
+		: "=d" (time) : : "cc", "1", "2", "3"
+		);
+#else /* CONFIG_ARCH_S390X */
+	asm volatile (
+		"lghi	1,3	\n\t"
+		".insn	rre,0xb2650000,2,0	\n\t"
+		"lgr	%0,3	\n\t"
+		: "=d" (time) : : "cc", "1", "2", "3"
+		);
+#endif /* CONFIG_ARCH_S390X */
+	
+	return time;
+}
+
+/*
+ * QDIO device commands returned by extended Sense-ID
+ */
+#define DEFAULT_ESTABLISH_QS_CMD 0x1b
+#define DEFAULT_ESTABLISH_QS_COUNT 0x1000
+#define DEFAULT_ACTIVATE_QS_CMD 0x1f
+#define DEFAULT_ACTIVATE_QS_COUNT 0
+
+/*
+ * additional CIWs returned by extended Sense-ID
+ */
+#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
+#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
+
+#define QDIO_CHSC_RESPONSE_CODE_OK 1
+/* flags for st qdio sch data */
+#define CHSC_FLAG_QDIO_CAPABILITY 0x80
+#define CHSC_FLAG_VALIDITY 0x40
+
+#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40
+#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20
+#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10
+#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
+#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
+
+#ifdef QDIO_PERFORMANCE_STATS
+struct qdio_perf_stats {
+ unsigned int tl_runs;
+
+ unsigned int siga_outs;
+ unsigned int siga_ins;
+ unsigned int siga_syncs;
+ unsigned int pcis;
+ unsigned int thinints;
+ unsigned int fast_reqs;
+
+ __u64 start_time_outbound;
+ unsigned int outbound_cnt;
+ unsigned int outbound_time;
+ __u64 start_time_inbound;
+ unsigned int inbound_cnt;
+ unsigned int inbound_time;
+};
+#endif /* QDIO_PERFORMANCE_STATS */
+
+#define atomic_swap(a,b) xchg((int*)a.counter,b)
+
+/* unlikely as the later the better */
+#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
+#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
+ qdio_siga_sync(q,~0U,~0U)
+#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
+ qdio_siga_sync(q,~0U,0)
+
+#define NOW qdio_get_micros()
+#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW
+#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
+#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
+#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
+
+#define MY_MODULE_STRING(x) #x
+
+#ifdef CONFIG_ARCH_S390X
+#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
+#else /* CONFIG_ARCH_S390X */
+#define QDIO_GET_ADDR(x) ((__u32)(long)x)
+#endif /* CONFIG_ARCH_S390X */
+
+#ifdef CONFIG_QDIO_DEBUG
+#define set_slsb(x,y) \
+ if(q->queue_type==QDIO_TRACE_QTYPE) { \
+ if(q->is_input_q) { \
+ QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
+ } else { \
+ QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
+ } \
+ } \
+ qdio_set_slsb(x,y); \
+ if(q->queue_type==QDIO_TRACE_QTYPE) { \
+ if(q->is_input_q) { \
+ QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
+ } else { \
+ QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
+ } \
+ }
+#else /* CONFIG_QDIO_DEBUG */
+#define set_slsb(x,y) qdio_set_slsb(x,y)
+#endif /* CONFIG_QDIO_DEBUG */
+
+/*
+ * Per-queue state for one qdio input or output queue.
+ * NOTE(review): the structure is 256-byte aligned and begins with the
+ * slsb; presumably the adapter accesses this layout directly --
+ * confirm before reordering members.  The trailing 'slib' pointer is
+ * deliberately last: qdio_fill_qs does not clear it (see comment at
+ * the bottom).
+ */
+struct qdio_q {
+	volatile struct slsb slsb;
+
+	char unused[QDIO_MAX_BUFFERS_PER_Q];
+
+	__u32 * volatile dev_st_chg_ind;
+
+	int is_input_q;
+	int irq;
+	struct ccw_device *cdev;
+
+	unsigned int is_iqdio_q;
+	unsigned int is_thinint_q;
+
+	/* bit 0 means queue 0, bit 1 means queue 1, ... */
+	unsigned int mask;
+	unsigned int q_no;
+
+	qdio_handler_t (*handler);
+
+	/* points to the next buffer to be checked for having
+	 * been processed by the card (outbound)
+	 * or to the next buffer the program should check for (inbound) */
+	volatile int first_to_check;
+	/* and the last time it was: */
+	volatile int last_move_ftc;
+
+	atomic_t number_of_buffers_used;
+	atomic_t polling;
+
+	unsigned int siga_in;
+	unsigned int siga_out;
+	unsigned int siga_sync;
+	unsigned int siga_sync_done_on_thinints;
+	unsigned int siga_sync_done_on_outb_tis;
+	unsigned int hydra_gives_outbound_pcis;
+
+	/* used to save beginning position when calling dd_handlers */
+	int first_element_to_kick;
+
+	atomic_t use_count;
+	atomic_t is_in_shutdown;
+
+	void *irq_ptr;
+
+#ifdef QDIO_USE_TIMERS_FOR_POLLING
+	struct timer_list timer;
+	atomic_t timer_already_set;
+	spinlock_t timer_lock;
+#else /* QDIO_USE_TIMERS_FOR_POLLING */
+	struct tasklet_struct tasklet;
+#endif /* QDIO_USE_TIMERS_FOR_POLLING */
+
+	enum qdio_irq_states state;
+
+	/* used to store the error condition during a data transfer */
+	unsigned int qdio_error;
+	unsigned int siga_error;
+	unsigned int error_status_flags;
+
+	/* list of interesting queues */
+	volatile struct qdio_q *list_next;
+	volatile struct qdio_q *list_prev;
+
+	struct sl *sl;
+	volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q];
+
+	struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q];
+
+	unsigned long int_parm;
+
+	/*struct {
+		int in_bh_check_limit;
+		int threshold;
+	} threshold_classes[QDIO_STATS_CLASSES];*/
+
+	struct {
+		/* inbound: the time to stop polling
+		   outbound: the time to kick peer */
+		int threshold; /* the real value */
+
+		/* outbound: last time of do_QDIO
+		   inbound: last time of noticing incoming data */
+		/*__u64 last_transfer_times[QDIO_STATS_NUMBER];
+		int last_transfer_index; */
+
+		__u64 last_transfer_time;
+		__u64 busy_start;
+	} timing;
+	atomic_t busy_siga_counter;
+	unsigned int queue_type;
+
+	/* leave this member at the end. won't be cleared in qdio_fill_qs */
+	struct slib *slib; /* a page is allocated under this pointer,
+			      sl points into this page, offset PAGE_SIZE/2
+			      (after slib) */
+} __attribute__ ((aligned(256)));
+
+/*
+ * Per-subchannel qdio state shared by all of its queues; attached to
+ * the ccw device as cdev->private->qdio_data and guarded by
+ * setting_up_sema during setup/shutdown.  The final four members are
+ * deliberately last: qdio_fill_irq does not clear them (see comment
+ * below).
+ */
+struct qdio_irq {
+	__u32 * volatile dev_st_chg_ind;
+
+	unsigned long int_parm;
+	int irq;
+
+	unsigned int is_iqdio_irq;
+	unsigned int is_thinint_irq;
+	unsigned int hydra_gives_outbound_pcis;
+	unsigned int sync_done_on_outb_pcis;
+
+	enum qdio_irq_states state;
+
+	unsigned int no_input_qs;
+	unsigned int no_output_qs;
+
+	unsigned char qdioac;
+
+	struct ccw1 ccw;
+
+	struct ciw equeue;
+	struct ciw aqueue;
+
+	struct qib qib;
+	
+ 	void (*original_int_handler) (struct ccw_device *,
+ 				      unsigned long, struct irb *);
+
+	/* leave these four members together at the end. won't be cleared in qdio_fill_irq */
+	struct qdr *qdr;
+	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
+	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+	struct semaphore setting_up_sema;
+};
+#endif
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
new file mode 100644
index 000000000000..15edebbead7f
--- /dev/null
+++ b/drivers/s390/crypto/Makefile
@@ -0,0 +1,6 @@
+#
+# S/390 crypto devices
+#
+
+z90crypt-objs := z90main.o z90hardware.o
+obj-$(CONFIG_Z90CRYPT) += z90crypt.o
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
new file mode 100644
index 000000000000..bcabac7a7c46
--- /dev/null
+++ b/drivers/s390/crypto/z90common.h
@@ -0,0 +1,168 @@
+/*
+ * linux/drivers/s390/crypto/z90common.h
+ *
+ * z90crypt 1.3.2
+ *
+ * Copyright (C) 2001, 2004 IBM Corporation
+ * Author(s): Robert Burroughs (burrough@us.ibm.com)
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _Z90COMMON_H_
+#define _Z90COMMON_H_
+
+#define VERSION_Z90COMMON_H "$Revision: 1.16 $"
+
+
+#define RESPBUFFSIZE 256
+#define PCI_FUNC_KEY_DECRYPT 0x5044
+#define PCI_FUNC_KEY_ENCRYPT 0x504B
+extern int ext_bitlens;
+
+/* Device status values as tracked by the z90crypt driver. */
+enum devstat {
+	DEV_GONE,
+	DEV_ONLINE,
+	DEV_QUEUE_FULL,
+	DEV_EMPTY,
+	DEV_NO_WORK,
+	DEV_BAD_MESSAGE,
+	DEV_TSQ_EXCEPTION,
+	DEV_RSQ_EXCEPTION,
+	DEV_SEN_EXCEPTION,
+	DEV_REC_EXCEPTION
+};
+
+/* Hardware-reported status of a crypto device. */
+enum hdstat {
+	HD_NOT_THERE,
+	HD_BUSY,
+	HD_DECONFIGURED,
+	HD_CHECKSTOPPED,
+	HD_ONLINE,
+	HD_TSQ_EXCEPTION
+};
+
+#define Z90C_NO_DEVICES 1
+#define Z90C_AMBIGUOUS_DOMAIN 2
+#define Z90C_INCORRECT_DOMAIN 3
+#define ENOTINIT 4
+
+#define SEN_BUSY 7
+#define SEN_USER_ERROR 8
+#define SEN_QUEUE_FULL 11
+#define SEN_NOT_AVAIL 16
+#define SEN_PAD_ERROR 17
+#define SEN_RETRY 18
+#define SEN_RELEASED 24
+
+#define REC_EMPTY 4
+#define REC_BUSY 6
+#define REC_OPERAND_INV 8
+#define REC_OPERAND_SIZE 9
+#define REC_EVEN_MOD 10
+#define REC_NO_WORK 11
+#define REC_HARDWAR_ERR 12
+#define REC_NO_RESPONSE 13
+#define REC_RETRY_DEV 14
+#define REC_USER_GONE 15
+#define REC_BAD_MESSAGE 16
+#define REC_INVALID_PAD 17
+#define REC_USE_PCICA 18
+
+#define WRONG_DEVICE_TYPE 20
+
+#define REC_FATAL_ERROR 32
+#define SEN_FATAL_ERROR 33
+#define TSQ_FATAL_ERROR 34
+#define RSQ_FATAL_ERROR 35
+
+#define Z90CRYPT_NUM_TYPES 5
+#define PCICA 0
+#define PCICC 1
+#define PCIXCC_MCL2 2
+#define PCIXCC_MCL3 3
+#define CEX2C 4
+#define NILDEV -1
+#define ANYDEV -1
+#define PCIXCC_UNK -2
+
+enum hdevice_type {
+ PCICC_HW = 3,
+ PCICA_HW = 4,
+ PCIXCC_HW = 5,
+ OTHER_HW = 6,
+ CEX2C_HW = 7
+};
+
+/*
+ * Extended CPRB control block exchanged with the crypto coprocessor.
+ * NOTE(review): the field layout appears to be dictated by the device
+ * interface (request/reply parameter and data block lengths, MAC and
+ * domain fields) -- do not reorder or repack; confirm against the
+ * coprocessor documentation before changing.
+ */
+struct CPRBX {
+	unsigned short cprb_len;
+	unsigned char  cprb_ver_id;
+	unsigned char  pad_000[3];
+	unsigned char  func_id[2];
+	unsigned char  cprb_flags[4];
+	unsigned int   req_parml;
+	unsigned int   req_datal;
+	unsigned int   rpl_msgbl;
+	unsigned int   rpld_parml;
+	unsigned int   rpl_datal;
+	unsigned int   rpld_datal;
+	unsigned int   req_extbl;
+	unsigned char  pad_001[4];
+	unsigned int   rpld_extbl;
+	unsigned char  req_parmb[16];
+	unsigned char  req_datab[16];
+	unsigned char  rpl_parmb[16];
+	unsigned char  rpl_datab[16];
+	unsigned char  req_extb[16];
+	unsigned char  rpl_extb[16];
+	unsigned short ccp_rtcode;
+	unsigned short ccp_rscode;
+	unsigned int   mac_data_len;
+	unsigned char  logon_id[8];
+	unsigned char  mac_value[8];
+	unsigned char  mac_content_flgs;
+	unsigned char  pad_002;
+	unsigned short domain;
+	unsigned char  pad_003[12];
+	unsigned char  pad_004[36];
+};
+
+#ifndef DEV_NAME
+#define DEV_NAME "z90crypt"
+#endif
+#define PRINTK(fmt, args...) \
+ printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+#define PRINTKN(fmt, args...) \
+ printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
+#define PRINTKW(fmt, args...) \
+ printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+#define PRINTKC(fmt, args...) \
+ printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+
+#ifdef Z90CRYPT_DEBUG
+#define PDEBUG(fmt, args...) \
+ printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+#else
+#define PDEBUG(fmt, args...) do {} while (0)
+#endif
+
+#define UMIN(a,b) ((a) < (b) ? (a) : (b))
+#define IS_EVEN(x) ((x) == (2 * ((x) / 2)))
+
+
+#endif
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
new file mode 100644
index 000000000000..82a1d97001d7
--- /dev/null
+++ b/drivers/s390/crypto/z90crypt.h
@@ -0,0 +1,258 @@
+/*
+ * linux/drivers/s390/crypto/z90crypt.h
+ *
+ * z90crypt 1.3.2
+ *
+ * Copyright (C) 2001, 2004 IBM Corporation
+ * Author(s): Robert Burroughs (burrough@us.ibm.com)
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _Z90CRYPT_H_
+#define _Z90CRYPT_H_
+
+#include <linux/ioctl.h>
+
+#define VERSION_Z90CRYPT_H "$Revision: 1.11 $"
+
+#define z90crypt_VERSION 1
+#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
+#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support
+
+/**
+ * If we are not using the sparse checker, __user has no use.
+ */
+#ifdef __CHECKER__
+# define __user __attribute__((noderef, address_space(1)))
+#else
+# define __user
+#endif
+
+/**
+ * struct ica_rsa_modexpo
+ *
+ * Requirements:
+ * - outputdatalength is at least as large as inputdatalength.
+ * - All key parts are right justified in their fields, padded on
+ * the left with zeroes.
+ * - length(b_key) = inputdatalength
+ * - length(n_modulus) = inputdatalength
+ */
+struct ica_rsa_modexpo {
+ char __user * inputdata;
+ unsigned int inputdatalength;
+ char __user * outputdata;
+ unsigned int outputdatalength;
+ char __user * b_key;
+ char __user * n_modulus;
+};
+
+/**
+ * struct ica_rsa_modexpo_crt
+ *
+ * Requirements:
+ * - inputdatalength is even.
+ * - outputdatalength is at least as large as inputdatalength.
+ * - All key parts are right justified in their fields, padded on
+ * the left with zeroes.
+ * - length(bp_key) = inputdatalength/2 + 8
+ * - length(bq_key) = inputdatalength/2
+ * - length(np_key) = inputdatalength/2 + 8
+ * - length(nq_key) = inputdatalength/2
+ * - length(u_mult_inv) = inputdatalength/2 + 8
+ */
+struct ica_rsa_modexpo_crt {
+ char __user * inputdata;
+ unsigned int inputdatalength;
+ char __user * outputdata;
+ unsigned int outputdatalength;
+ char __user * bp_key;
+ char __user * bq_key;
+ char __user * np_prime;
+ char __user * nq_prime;
+ char __user * u_mult_inv;
+};
+
+#define Z90_IOCTL_MAGIC 'z' // NOTE: Need to allocate from linux folks
+
+/**
+ * Interface notes:
+ *
+ * The ioctl()s which are implemented (along with relevant details)
+ * are:
+ *
+ * ICARSAMODEXPO
+ * Perform an RSA operation using a Modulus-Exponent pair
+ * This takes an ica_rsa_modexpo struct as its arg.
+ *
+ * NOTE: please refer to the comments preceding this structure
+ * for the implementation details for the contents of the
+ * block
+ *
+ * ICARSACRT
+ * Perform an RSA operation using a Chinese-Remainder Theorem key
+ * This takes an ica_rsa_modexpo_crt struct as its arg.
+ *
+ * NOTE: please refer to the comments preceding this structure
+ * for the implementation details for the contents of the
+ * block
+ *
+ * Z90STAT_TOTALCOUNT
+ * Return an integer count of all device types together.
+ *
+ * Z90STAT_PCICACOUNT
+ * Return an integer count of all PCICAs.
+ *
+ * Z90STAT_PCICCCOUNT
+ * Return an integer count of all PCICCs.
+ *
+ * Z90STAT_PCIXCCMCL2COUNT
+ * Return an integer count of all MCL2 PCIXCCs.
+ *
+ * Z90STAT_PCIXCCMCL3COUNT
+ * Return an integer count of all MCL3 PCIXCCs.
+ *
+ * Z90STAT_CEX2CCOUNT
+ * Return an integer count of all CEX2Cs.
+ *
+ * Z90STAT_REQUESTQ_COUNT
+ * Return an integer count of the number of entries waiting to be
+ * sent to a device.
+ *
+ * Z90STAT_PENDINGQ_COUNT
+ * Return an integer count of the number of entries sent to a
+ * device awaiting the reply.
+ *
+ * Z90STAT_TOTALOPEN_COUNT
+ * Return an integer count of the number of open file handles.
+ *
+ * Z90STAT_DOMAIN_INDEX
+ * Return the integer value of the Cryptographic Domain.
+ *
+ * Z90STAT_STATUS_MASK
+ * Return a 64 element array of unsigned chars for the status of
+ * all devices.
+ * 0x01: PCICA
+ * 0x02: PCICC
+ * 0x03: PCIXCC_MCL2
+ * 0x04: PCIXCC_MCL3
+ * 0x05: CEX2C
+ * 0x0d: device is disabled via the proc filesystem
+ *
+ * Z90STAT_QDEPTH_MASK
+ * Return a 64 element array of unsigned chars for the queue
+ * depth of all devices.
+ *
+ * Z90STAT_PERDEV_REQCNT
+ * Return a 64 element array of unsigned integers for the number
+ * of successfully completed requests per device since the device
+ * was detected and made available.
+ *
+ * ICAZ90STATUS (deprecated)
+ * Return some device driver status in a ica_z90_status struct
+ * This takes an ica_z90_status struct as its arg.
+ *
+ * NOTE: this ioctl() is deprecated, and has been replaced with
+ * single ioctl()s for each type of status being requested
+ *
+ * Z90STAT_PCIXCCCOUNT (deprecated)
+ * Return an integer count of all PCIXCCs (MCL2 + MCL3).
+ * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
+ * MCL2 PCIXCCs.
+ *
+ * Z90QUIESCE (not recommended)
+ * Quiesce the driver. This is intended to stop all new
+ * requests from being processed. Its use is NOT recommended,
+ * except in circumstances where there is no other way to stop
+ * callers from accessing the driver. Its original use was to
+ * allow the driver to be "drained" of work in preparation for
+ * a system shutdown.
+ *
+ * NOTE: once issued, this ban on new work cannot be undone
+ * except by unloading and reloading the driver.
+ */
+
+/**
+ * Supported ioctl calls
+ */
+#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x05, 0)
+#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x06, 0)
+
+/* DEPRECATED status calls (bound for removal at some point) */
+#define ICAZ90STATUS _IOR(Z90_IOCTL_MAGIC, 0x10, struct ica_z90_status)
+#define Z90STAT_PCIXCCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x43, int)
+
+/* unrelated to ICA callers */
+#define Z90QUIESCE _IO(Z90_IOCTL_MAGIC, 0x11)
+
+/* New status calls */
+#define Z90STAT_TOTALCOUNT _IOR(Z90_IOCTL_MAGIC, 0x40, int)
+#define Z90STAT_PCICACOUNT _IOR(Z90_IOCTL_MAGIC, 0x41, int)
+#define Z90STAT_PCICCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x42, int)
+#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
+#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
+#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
+#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
+#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
+#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
+#define Z90STAT_DOMAIN_INDEX _IOR(Z90_IOCTL_MAGIC, 0x47, int)
+#define Z90STAT_STATUS_MASK _IOR(Z90_IOCTL_MAGIC, 0x48, char[64])
+#define Z90STAT_QDEPTH_MASK _IOR(Z90_IOCTL_MAGIC, 0x49, char[64])
+#define Z90STAT_PERDEV_REQCNT _IOR(Z90_IOCTL_MAGIC, 0x4a, int[64])
+
+/**
+ * local errno definitions
+ */
+#define ENOBUFF 129 // filp->private_data->...>work_elem_p->buffer is NULL
+#define EWORKPEND 130 // user issues ioctl while another pending
+#define ERELEASED 131 // user released while ioctl pending
+#define EQUIESCE 132 // z90crypt quiescing (no more work allowed)
+#define ETIMEOUT 133 // request timed out
+#define EUNKNOWN 134 // some unrecognized error occurred (retry may succeed)
+#define EGETBUFF 135 // Error getting buffer or hardware lacks capability
+ // (retry in software)
+
+/**
+ * DEPRECATED STRUCTURES
+ */
+
+/**
+ * This structure is DEPRECATED and the corresponding ioctl() has been
+ * replaced with individual ioctl()s for each piece of data!
+ * This structure will NOT survive past version 1.3.1, so switch to the
+ * new ioctl()s.
+ */
+#define MASK_LENGTH 64 // mask length
+struct ica_z90_status {
+ int totalcount;
+ int leedslitecount; // PCICA
+ int leeds2count; // PCICC
+ // int PCIXCCCount; is not in struct for backward compatibility
+ int requestqWaitCount;
+ int pendingqWaitCount;
+ int totalOpenCount;
+ int cryptoDomain;
+ // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
+ // 5=CEX2C
+ unsigned char status[MASK_LENGTH];
+ // qdepth: # work elements waiting for each device
+ unsigned char qdepth[MASK_LENGTH];
+};
+
+#endif /* _Z90CRYPT_H_ */
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
new file mode 100644
index 000000000000..beb6a5e0da22
--- /dev/null
+++ b/drivers/s390/crypto/z90hardware.c
@@ -0,0 +1,2243 @@
+/*
+ * linux/drivers/s390/crypto/z90hardware.c
+ *
+ * z90crypt 1.3.2
+ *
+ * Copyright (C) 2001, 2004 IBM Corporation
+ * Author(s): Robert Burroughs (burrough@us.ibm.com)
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/uaccess.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include "z90crypt.h"
+#include "z90common.h"
+
+#define VERSION_Z90HARDWARE_C "$Revision: 1.33 $"
+
+char z90hardware_version[] __initdata =
+ "z90hardware.o (" VERSION_Z90HARDWARE_C "/"
+ VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
+
+struct cca_token_hdr {
+ unsigned char token_identifier;
+ unsigned char version;
+ unsigned short token_length;
+ unsigned char reserved[4];
+};
+
+#define CCA_TKN_HDR_ID_EXT 0x1E
+
+struct cca_private_ext_ME_sec {
+ unsigned char section_identifier;
+ unsigned char version;
+ unsigned short section_length;
+ unsigned char private_key_hash[20];
+ unsigned char reserved1[4];
+ unsigned char key_format;
+ unsigned char reserved2;
+ unsigned char key_name_hash[20];
+ unsigned char key_use_flags[4];
+ unsigned char reserved3[6];
+ unsigned char reserved4[24];
+ unsigned char confounder[24];
+ unsigned char exponent[128];
+ unsigned char modulus[128];
+};
+
+#define CCA_PVT_USAGE_ALL 0x80
+
+struct cca_public_sec {
+ unsigned char section_identifier;
+ unsigned char version;
+ unsigned short section_length;
+ unsigned char reserved[2];
+ unsigned short exponent_len;
+ unsigned short modulus_bit_len;
+ unsigned short modulus_byte_len;
+ unsigned char exponent[3];
+};
+
+struct cca_private_ext_ME {
+ struct cca_token_hdr pvtMEHdr;
+ struct cca_private_ext_ME_sec pvtMESec;
+ struct cca_public_sec pubMESec;
+};
+
+struct cca_public_key {
+ struct cca_token_hdr pubHdr;
+ struct cca_public_sec pubSec;
+};
+
+struct cca_pvt_ext_CRT_sec {
+ unsigned char section_identifier;
+ unsigned char version;
+ unsigned short section_length;
+ unsigned char private_key_hash[20];
+ unsigned char reserved1[4];
+ unsigned char key_format;
+ unsigned char reserved2;
+ unsigned char key_name_hash[20];
+ unsigned char key_use_flags[4];
+ unsigned short p_len;
+ unsigned short q_len;
+ unsigned short dp_len;
+ unsigned short dq_len;
+ unsigned short u_len;
+ unsigned short mod_len;
+ unsigned char reserved3[4];
+ unsigned short pad_len;
+ unsigned char reserved4[52];
+ unsigned char confounder[8];
+};
+
+#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
+#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
+
+struct cca_private_ext_CRT {
+ struct cca_token_hdr pvtCrtHdr;
+ struct cca_pvt_ext_CRT_sec pvtCrtSec;
+ struct cca_public_sec pubCrtSec;
+};
+
+struct ap_status_word {
+ unsigned char q_stat_flags;
+ unsigned char response_code;
+ unsigned char reserved[2];
+};
+
+#define AP_Q_STATUS_EMPTY 0x80
+#define AP_Q_STATUS_REPLIES_WAITING 0x40
+#define AP_Q_STATUS_ARRAY_FULL 0x20
+
+#define AP_RESPONSE_NORMAL 0x00
+#define AP_RESPONSE_Q_NOT_AVAIL 0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
+#define AP_RESPONSE_DECONFIGURED 0x03
+#define AP_RESPONSE_CHECKSTOPPED 0x04
+#define AP_RESPONSE_BUSY 0x05
+#define AP_RESPONSE_Q_FULL 0x10
+#define AP_RESPONSE_NO_PENDING_REPLY 0x10
+#define AP_RESPONSE_INDEX_TOO_BIG 0x11
+#define AP_RESPONSE_NO_FIRST_PART 0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+
+#define AP_MAX_CDX_BITL 4
+#define AP_RQID_RESERVED_BITL 4
+#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL)
+
+struct type4_hdr {
+ unsigned char reserved1;
+ unsigned char msg_type_code;
+ unsigned short msg_len;
+ unsigned char request_code;
+ unsigned char msg_fmt;
+ unsigned short reserved2;
+};
+
+#define TYPE4_TYPE_CODE 0x04
+#define TYPE4_REQU_CODE 0x40
+
+#define TYPE4_SME_LEN 0x0188
+#define TYPE4_LME_LEN 0x0308
+#define TYPE4_SCR_LEN 0x01E0
+#define TYPE4_LCR_LEN 0x03A0
+
+#define TYPE4_SME_FMT 0x00
+#define TYPE4_LME_FMT 0x10
+#define TYPE4_SCR_FMT 0x40
+#define TYPE4_LCR_FMT 0x50
+
+struct type4_sme {
+ struct type4_hdr header;
+ unsigned char message[128];
+ unsigned char exponent[128];
+ unsigned char modulus[128];
+};
+
+struct type4_lme {
+ struct type4_hdr header;
+ unsigned char message[256];
+ unsigned char exponent[256];
+ unsigned char modulus[256];
+};
+
+struct type4_scr {
+ struct type4_hdr header;
+ unsigned char message[128];
+ unsigned char dp[72];
+ unsigned char dq[64];
+ unsigned char p[72];
+ unsigned char q[64];
+ unsigned char u[72];
+};
+
+struct type4_lcr {
+ struct type4_hdr header;
+ unsigned char message[256];
+ unsigned char dp[136];
+ unsigned char dq[128];
+ unsigned char p[136];
+ unsigned char q[128];
+ unsigned char u[136];
+};
+
+union type4_msg {
+ struct type4_sme sme;
+ struct type4_lme lme;
+ struct type4_scr scr;
+ struct type4_lcr lcr;
+};
+
+struct type84_hdr {
+ unsigned char reserved1;
+ unsigned char code;
+ unsigned short len;
+ unsigned char reserved2[4];
+};
+
+#define TYPE84_RSP_CODE 0x84
+
+struct type6_hdr {
+ unsigned char reserved1;
+ unsigned char type;
+ unsigned char reserved2[2];
+ unsigned char right[4];
+ unsigned char reserved3[2];
+ unsigned char reserved4[2];
+ unsigned char apfs[4];
+ unsigned int offset1;
+ unsigned int offset2;
+ unsigned int offset3;
+ unsigned int offset4;
+ unsigned char agent_id[16];
+ unsigned char rqid[2];
+ unsigned char reserved5[2];
+ unsigned char function_code[2];
+ unsigned char reserved6[2];
+ unsigned int ToCardLen1;
+ unsigned int ToCardLen2;
+ unsigned int ToCardLen3;
+ unsigned int ToCardLen4;
+ unsigned int FromCardLen1;
+ unsigned int FromCardLen2;
+ unsigned int FromCardLen3;
+ unsigned int FromCardLen4;
+};
+
+struct CPRB {
+ unsigned char cprb_len[2];
+ unsigned char cprb_ver_id;
+ unsigned char pad_000;
+ unsigned char srpi_rtcode[4];
+ unsigned char srpi_verb;
+ unsigned char flags;
+ unsigned char func_id[2];
+ unsigned char checkpoint_flag;
+ unsigned char resv2;
+ unsigned char req_parml[2];
+ unsigned char req_parmp[4];
+ unsigned char req_datal[4];
+ unsigned char req_datap[4];
+ unsigned char rpl_parml[2];
+ unsigned char pad_001[2];
+ unsigned char rpl_parmp[4];
+ unsigned char rpl_datal[4];
+ unsigned char rpl_datap[4];
+ unsigned char ccp_rscode[2];
+ unsigned char ccp_rtcode[2];
+ unsigned char repd_parml[2];
+ unsigned char mac_data_len[2];
+ unsigned char repd_datal[4];
+ unsigned char req_pc[2];
+ unsigned char res_origin[8];
+ unsigned char mac_value[8];
+ unsigned char logon_id[8];
+ unsigned char usage_domain[2];
+ unsigned char resv3[18];
+ unsigned char svr_namel[2];
+ unsigned char svr_name[8];
+};
+
+struct type6_msg {
+ struct type6_hdr header;
+ struct CPRB CPRB;
+};
+
+union request_msg {
+ union type4_msg t4msg;
+ struct type6_msg t6msg;
+};
+
+struct request_msg_ext {
+ int q_nr;
+ unsigned char *psmid;
+ union request_msg reqMsg;
+};
+
+struct type82_hdr {
+ unsigned char reserved1;
+ unsigned char type;
+ unsigned char reserved2[2];
+ unsigned char reply_code;
+ unsigned char reserved3[3];
+};
+
+#define TYPE82_RSP_CODE 0x82
+
+#define REPLY_ERROR_MACHINE_FAILURE 0x10
+#define REPLY_ERROR_PREEMPT_FAILURE 0x12
+#define REPLY_ERROR_CHECKPT_FAILURE 0x14
+#define REPLY_ERROR_MESSAGE_TYPE 0x20
+#define REPLY_ERROR_INVALID_COMM_CD 0x21
+#define REPLY_ERROR_INVALID_MSG_LEN 0x23
+#define REPLY_ERROR_RESERVD_FIELD 0x24
+#define REPLY_ERROR_FORMAT_FIELD 0x29
+#define REPLY_ERROR_INVALID_COMMAND 0x30
+#define REPLY_ERROR_MALFORMED_MSG 0x40
+#define REPLY_ERROR_RESERVED_FIELDO 0x50
+#define REPLY_ERROR_WORD_ALIGNMENT 0x60
+#define REPLY_ERROR_MESSAGE_LENGTH 0x80
+#define REPLY_ERROR_OPERAND_INVALID 0x82
+#define REPLY_ERROR_OPERAND_SIZE 0x84
+#define REPLY_ERROR_EVEN_MOD_IN_OPND 0x85
+#define REPLY_ERROR_RESERVED_FIELD 0x88
+#define REPLY_ERROR_TRANSPORT_FAIL 0x90
+#define REPLY_ERROR_PACKET_TRUNCATED 0xA0
+#define REPLY_ERROR_ZERO_BUFFER_LEN 0xB0
+
+struct type86_hdr {
+ unsigned char reserved1;
+ unsigned char type;
+ unsigned char format;
+ unsigned char reserved2;
+ unsigned char reply_code;
+ unsigned char reserved3[3];
+};
+
+#define TYPE86_RSP_CODE 0x86
+#define TYPE86_FMT2 0x02
+
+struct type86_fmt2_msg {
+ struct type86_hdr hdr;
+ unsigned char reserved[4];
+ unsigned char apfs[4];
+ unsigned int count1;
+ unsigned int offset1;
+ unsigned int count2;
+ unsigned int offset2;
+ unsigned int count3;
+ unsigned int offset3;
+ unsigned int count4;
+ unsigned int offset4;
+};
+
+static struct type6_hdr static_type6_hdr = {
+ 0x00,
+ 0x06,
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ 0x00000058,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
+ 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
+ {0x00,0x00},
+ {0x00,0x00},
+ {0x50,0x44},
+ {0x00,0x00},
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000
+};
+
+static struct type6_hdr static_type6_hdrX = {
+ 0x00,
+ 0x06,
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ 0x00000058,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ {0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00},
+ {0x50,0x44},
+ {0x00,0x00},
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000
+};
+
+static struct CPRB static_cprb = {
+ {0x70,0x00},
+ 0x41,
+ 0x00,
+ {0x00,0x00,0x00,0x00},
+ 0x00,
+ 0x00,
+ {0x54,0x32},
+ 0x01,
+ 0x00,
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00},
+ {0x08,0x00},
+ {0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20}
+};
+
+struct function_and_rules_block {
+ unsigned char function_code[2];
+ unsigned char ulen[2];
+ unsigned char only_rule[8];
+};
+
+static struct function_and_rules_block static_pkd_function_and_rules = {
+ {0x50,0x44},
+ {0x0A,0x00},
+ {'P','K','C','S','-','1','.','2'}
+};
+
+static struct function_and_rules_block static_pke_function_and_rules = {
+ {0x50,0x4B},
+ {0x0A,0x00},
+ {'P','K','C','S','-','1','.','2'}
+};
+
+struct T6_keyBlock_hdr {
+ unsigned char blen[2];
+ unsigned char ulen[2];
+ unsigned char flags[2];
+};
+
+static struct T6_keyBlock_hdr static_T6_keyBlock_hdr = {
+ {0x89,0x01},
+ {0x87,0x01},
+ {0x00}
+};
+
+static struct CPRBX static_cprbx = {
+ 0x00DC,
+ 0x02,
+ {0x00,0x00,0x00},
+ {0x54,0x32},
+ {0x00,0x00,0x00,0x00},
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ {0x00,0x00,0x00,0x00},
+ 0x00000000,
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ 0x0000,
+ 0x0000,
+ 0x00000000,
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ 0x00,
+ 0x00,
+ 0x0000,
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
+};
+
+static struct function_and_rules_block static_pkd_function_and_rulesX_MCL2 = {
+ {0x50,0x44},
+ {0x00,0x0A},
+ {'P','K','C','S','-','1','.','2'}
+};
+
+static struct function_and_rules_block static_pke_function_and_rulesX_MCL2 = {
+ {0x50,0x4B},
+ {0x00,0x0A},
+ {'Z','E','R','O','-','P','A','D'}
+};
+
+static struct function_and_rules_block static_pkd_function_and_rulesX = {
+ {0x50,0x44},
+ {0x00,0x0A},
+ {'Z','E','R','O','-','P','A','D'}
+};
+
+static struct function_and_rules_block static_pke_function_and_rulesX = {
+ {0x50,0x4B},
+ {0x00,0x0A},
+ {'M','R','P',' ',' ',' ',' ',' '}
+};
+
+struct T6_keyBlock_hdrX {
+ unsigned short blen;
+ unsigned short ulen;
+ unsigned char flags[2];
+};
+
+static unsigned char static_pad[256] = {
+0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
+0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
+0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
+0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
+0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
+0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
+0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
+0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
+0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
+0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
+0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
+0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
+0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
+0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
+0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
+0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
+};
+
+static struct cca_private_ext_ME static_pvt_me_key = {
+ {
+ 0x1E,
+ 0x00,
+ 0x0183,
+ {0x00,0x00,0x00,0x00}
+ },
+
+ {
+ 0x02,
+ 0x00,
+ 0x016C,
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00},
+ 0x00,
+ 0x00,
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00},
+ {0x80,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
+ },
+
+ {
+ 0x04,
+ 0x00,
+ 0x000F,
+ {0x00,0x00},
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ {0x01,0x00,0x01}
+ }
+};
+
+static struct cca_public_key static_public_key = {
+ {
+ 0x1E,
+ 0x00,
+ 0x0000,
+ {0x00,0x00,0x00,0x00}
+ },
+
+ {
+ 0x04,
+ 0x00,
+ 0x0000,
+ {0x00,0x00},
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ {0x01,0x00,0x01}
+ }
+};
+
+#define FIXED_TYPE6_ME_LEN 0x0000025F
+
+#define FIXED_TYPE6_ME_EN_LEN 0x000000F0
+
+#define FIXED_TYPE6_ME_LENX 0x000002CB
+
+#define FIXED_TYPE6_ME_EN_LENX 0x0000015C
+
+static struct cca_public_sec static_cca_pub_sec = {
+ 0x04,
+ 0x00,
+ 0x000f,
+ {0x00,0x00},
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ {0x01,0x00,0x01}
+};
+
+#define FIXED_TYPE6_CR_LEN 0x00000177
+
+#define FIXED_TYPE6_CR_LENX 0x000001E3
+
+#define MAX_RESPONSE_SIZE 0x00000710
+
+#define MAX_RESPONSEX_SIZE 0x0000077C
+
+#define RESPONSE_CPRB_SIZE 0x000006B8
+#define RESPONSE_CPRBX_SIZE 0x00000724
+
+#define CALLER_HEADER 12
+
+static unsigned char static_PKE_function_code[2] = {0x50, 0x4B};
+
+static inline int
+testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
+{
+ int ccode;
+
+ asm volatile
+#ifdef __s390x__
+ (" llgfr 0,%4 \n"
+ " slgr 1,1 \n"
+ " lgr 2,1 \n"
+ "0: .long 0xb2af0000 \n"
+ "1: ipm %0 \n"
+ " srl %0,28 \n"
+ " iihh %0,0 \n"
+ " iihl %0,0 \n"
+ " lgr %1,1 \n"
+ " lgr %3,2 \n"
+ " srl %3,24 \n"
+ " sll 2,24 \n"
+ " srl 2,24 \n"
+ " lgr %2,2 \n"
+ "2: \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ " lhi %0,%h5 \n"
+ " jg 2b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .align 8 \n"
+ " .quad 0b,3b \n"
+ " .quad 1b,3b \n"
+ ".previous"
+ :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
+ :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
+ :"cc","0","1","2","memory");
+#else
+ (" lr 0,%4 \n"
+ " slr 1,1 \n"
+ " lr 2,1 \n"
+ "0: .long 0xb2af0000 \n"
+ "1: ipm %0 \n"
+ " srl %0,28 \n"
+ " lr %1,1 \n"
+ " lr %3,2 \n"
+ " srl %3,24 \n"
+ " sll 2,24 \n"
+ " srl 2,24 \n"
+ " lr %2,2 \n"
+ "2: \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ " lhi %0,%h5 \n"
+ " bras 1,4f \n"
+ " .long 2b \n"
+ "4: \n"
+ " l 1,0(1) \n"
+ " br 1 \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .long 0b,3b \n"
+ " .long 1b,3b \n"
+ ".previous"
+ :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
+ :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
+ :"cc","0","1","2","memory");
+#endif
+ return ccode;
+}
+
+static inline int
+resetq(int q_nr, struct ap_status_word *stat_p)
+{
+ int ccode;
+
+ asm volatile
+#ifdef __s390x__
+ (" llgfr 0,%2 \n"
+ " lghi 1,1 \n"
+ " sll 1,24 \n"
+ " or 0,1 \n"
+ " slgr 1,1 \n"
+ " lgr 2,1 \n"
+ "0: .long 0xb2af0000 \n"
+ "1: ipm %0 \n"
+ " srl %0,28 \n"
+ " iihh %0,0 \n"
+ " iihl %0,0 \n"
+ " lgr %1,1 \n"
+ "2: \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ " lhi %0,%h3 \n"
+ " jg 2b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .align 8 \n"
+ " .quad 0b,3b \n"
+ " .quad 1b,3b \n"
+ ".previous"
+ :"=d" (ccode),"=d" (*stat_p)
+ :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
+ :"cc","0","1","2","memory");
+#else
+ (" lr 0,%2 \n"
+ " lhi 1,1 \n"
+ " sll 1,24 \n"
+ " or 0,1 \n"
+ " slr 1,1 \n"
+ " lr 2,1 \n"
+ "0: .long 0xb2af0000 \n"
+ "1: ipm %0 \n"
+ " srl %0,28 \n"
+ " lr %1,1 \n"
+ "2: \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ " lhi %0,%h3 \n"
+ " bras 1,4f \n"
+ " .long 2b \n"
+ "4: \n"
+ " l 1,0(1) \n"
+ " br 1 \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .long 0b,3b \n"
+ " .long 1b,3b \n"
+ ".previous"
+ :"=d" (ccode),"=d" (*stat_p)
+ :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
+ :"cc","0","1","2","memory");
+#endif
+ return ccode;
+}
+
+static inline int
+sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
+{
+ int ccode;
+
+ asm volatile
+#ifdef __s390x__
+ (" lgr 6,%3 \n"
+ " llgfr 7,%2 \n"
+ " llgt 0,0(6) \n"
+ " lghi 1,64 \n"
+ " sll 1,24 \n"
+ " or 0,1 \n"
+ " la 6,4(6) \n"
+ " llgt 2,0(6) \n"
+ " llgt 3,4(6) \n"
+ " la 6,8(6) \n"
+ " slr 1,1 \n"
+ "0: .long 0xb2ad0026 \n"
+ "1: brc 2,0b \n"
+ " ipm %0 \n"
+ " srl %0,28 \n"
+ " iihh %0,0 \n"
+ " iihl %0,0 \n"
+ " lgr %1,1 \n"
+ "2: \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ " lhi %0,%h4 \n"
+ " jg 2b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .align 8 \n"
+ " .quad 0b,3b \n"
+ " .quad 1b,3b \n"
+ ".previous"
+ :"=d" (ccode),"=d" (*stat)
+ :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
+ :"cc","0","1","2","3","6","7","memory");
+#else
+ (" lr 6,%3 \n"
+ " lr 7,%2 \n"
+ " l 0,0(6) \n"
+ " lhi 1,64 \n"
+ " sll 1,24 \n"
+ " or 0,1 \n"
+ " la 6,4(6) \n"
+ " l 2,0(6) \n"
+ " l 3,4(6) \n"
+ " la 6,8(6) \n"
+ " slr 1,1 \n"
+ "0: .long 0xb2ad0026 \n"
+ "1: brc 2,0b \n"
+ " ipm %0 \n"
+ " srl %0,28 \n"
+ " lr %1,1 \n"
+ "2: \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ " lhi %0,%h4 \n"
+ " bras 1,4f \n"
+ " .long 2b \n"
+ "4: \n"
+ " l 1,0(1) \n"
+ " br 1 \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .long 0b,3b \n"
+ " .long 1b,3b \n"
+ ".previous"
+ :"=d" (ccode),"=d" (*stat)
+ :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
+ :"cc","0","1","2","3","6","7","memory");
+#endif
+ return ccode;
+}
+
/*
 * Dequeue one reply from AP queue 'q_nr' into 'rsp' (up to 'buff_l'
 * bytes).  Issues the privileged AP instruction encoded as 0xb2ae
 * (presumably DQAP -- TODO confirm against the AP architecture docs),
 * retrying while the condition code asks for a resume (brc 2 / brc 4,
 * i.e. cc2 and cc1).  On completion the 8-byte message id returned in
 * r4/r5 is stored through 'id', the status word from r1 through 'st',
 * and the instruction's final condition code is the return value.  A
 * program exception inside the instruction is converted into
 * DEV_REC_EXCEPTION by the .fixup/__ex_table machinery.
 */
static inline int
rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
    struct ap_status_word *st)
{
	int ccode;

	asm volatile
#ifdef __s390x__
	(" llgfr 0,%2 \n"	/* r0 = queue number */
	 " lgr 3,%4 \n"	/* r3 = id buffer */
	 " lgr 6,%3 \n"	/* r6 = response buffer */
	 " llgfr 7,%5 \n"	/* r7 = response buffer length */
	 " lghi 1,128 \n"	/* r0 |= 0x80000000 (function code) */
	 " sll 1,24 \n"
	 " or 0,1 \n"
	 " slgr 1,1 \n"	/* clear r1, r2, r4, r5 */
	 " lgr 2,1 \n"
	 " lgr 4,1 \n"
	 " lgr 5,1 \n"
	 "0: .long 0xb2ae0046 \n"	/* the AP dequeue operation */
	 "1: brc 2,0b \n"	/* cc2: retry */
	 " brc 4,0b \n"	/* cc1: retry */
	 " ipm %0 \n"	/* ccode = condition code */
	 " srl %0,28 \n"
	 " iihh %0,0 \n"	/* clear high word of ccode */
	 " iihl %0,0 \n"
	 " lgr %1,1 \n"	/* *st = status word from r1 */
	 " st 4,0(3) \n"	/* store 8-byte id from r4/r5 */
	 " st 5,4(3) \n"
	 "2: \n"
	 ".section .fixup,\"ax\" \n"
	 "3: \n"
	 " lhi %0,%h6 \n"	/* exception: ccode = DEV_REC_EXCEPTION */
	 " jg 2b \n"
	 ".previous \n"
	 ".section __ex_table,\"a\" \n"
	 " .align 8 \n"
	 " .quad 0b,3b \n"
	 " .quad 1b,3b \n"
	 ".previous"
	 :"=d"(ccode),"=d"(*st)
	 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
	 :"cc","0","1","2","3","4","5","6","7","memory");
#else
	/* 31-bit variant: same sequence with 32-bit register ops; the
	 * fixup loads the address of label 2 indirectly (no jg here). */
	(" lr 0,%2 \n"
	 " lr 3,%4 \n"
	 " lr 6,%3 \n"
	 " lr 7,%5 \n"
	 " lhi 1,128 \n"
	 " sll 1,24 \n"
	 " or 0,1 \n"
	 " slr 1,1 \n"
	 " lr 2,1 \n"
	 " lr 4,1 \n"
	 " lr 5,1 \n"
	 "0: .long 0xb2ae0046 \n"
	 "1: brc 2,0b \n"
	 " brc 4,0b \n"
	 " ipm %0 \n"
	 " srl %0,28 \n"
	 " lr %1,1 \n"
	 " st 4,0(3) \n"
	 " st 5,4(3) \n"
	 "2: \n"
	 ".section .fixup,\"ax\" \n"
	 "3: \n"
	 " lhi %0,%h6 \n"
	 " bras 1,4f \n"
	 " .long 2b \n"
	 "4: \n"
	 " l 1,0(1) \n"
	 " br 1 \n"
	 ".previous \n"
	 ".section __ex_table,\"a\" \n"
	 " .align 4 \n"
	 " .long 0b,3b \n"
	 " .long 1b,3b \n"
	 ".previous"
	 :"=d"(ccode),"=d"(*st)
	 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
	 :"cc","0","1","2","3","4","5","6","7","memory");
#endif
	return ccode;
}
+
/*
 * Store the two least-significant bytes (on big-endian s390) of *i_p
 * into lechars[0..1] in little-endian order, i.e. lowest byte first.
 */
static inline void
itoLe2(int *i_p, unsigned char *lechars)
{
	const unsigned char *src = (const unsigned char *) i_p;

	lechars[0] = src[sizeof(int) - 1];
	lechars[1] = src[sizeof(int) - 2];
}
+
/*
 * Inverse of itoLe2: rebuild an int from a 2-byte little-endian value.
 * The int is zeroed first, then lechars[1]/lechars[0] are placed into
 * byte offsets 2 and 3 (the low halfword on big-endian s390).
 */
static inline void
le2toI(unsigned char *lechars, int *i_p)
{
	unsigned char *dst;

	*i_p = 0;
	dst = (unsigned char *) i_p;
	dst[2] = lechars[1];
	dst[3] = lechars[0];
}
+
+static inline int
+is_empty(unsigned char *ptr, int len)
+{
+ return !memcmp(ptr, (unsigned char *) &static_pvt_me_key+60, len);
+}
+
+enum hdstat
+query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
+{
+ int q_nr, i, t_depth, t_dev_type;
+ enum devstat ccode;
+ struct ap_status_word stat_word;
+ enum hdstat stat;
+ int break_out;
+
+ q_nr = (deviceNr << SKIP_BITL) + cdx;
+ stat = HD_BUSY;
+ ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
+ PDEBUG("ccode %d response_code %02X\n", ccode, stat_word.response_code);
+ break_out = 0;
+ for (i = 0; i < resetNr; i++) {
+ if (ccode > 3) {
+ PRINTKC("Exception testing device %d\n", i);
+ return HD_TSQ_EXCEPTION;
+ }
+ switch (ccode) {
+ case 0:
+ PDEBUG("t_dev_type %d\n", t_dev_type);
+ break_out = 1;
+ stat = HD_ONLINE;
+ *q_depth = t_depth + 1;
+ switch (t_dev_type) {
+ case OTHER_HW:
+ stat = HD_NOT_THERE;
+ *dev_type = NILDEV;
+ break;
+ case PCICA_HW:
+ *dev_type = PCICA;
+ break;
+ case PCICC_HW:
+ *dev_type = PCICC;
+ break;
+ case PCIXCC_HW:
+ *dev_type = PCIXCC_UNK;
+ break;
+ case CEX2C_HW:
+ *dev_type = CEX2C;
+ break;
+ default:
+ *dev_type = NILDEV;
+ break;
+ }
+ PDEBUG("available device %d: Q depth = %d, dev "
+ "type = %d, stat = %02X%02X%02X%02X\n",
+ deviceNr, *q_depth, *dev_type,
+ stat_word.q_stat_flags,
+ stat_word.response_code,
+ stat_word.reserved[0],
+ stat_word.reserved[1]);
+ break;
+ case 3:
+ switch (stat_word.response_code) {
+ case AP_RESPONSE_NORMAL:
+ stat = HD_ONLINE;
+ break_out = 1;
+ *q_depth = t_depth + 1;
+ *dev_type = t_dev_type;
+ PDEBUG("cc3, available device "
+ "%d: Q depth = %d, dev "
+ "type = %d, stat = "
+ "%02X%02X%02X%02X\n",
+ deviceNr, *q_depth,
+ *dev_type,
+ stat_word.q_stat_flags,
+ stat_word.response_code,
+ stat_word.reserved[0],
+ stat_word.reserved[1]);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ stat = HD_NOT_THERE;
+ break_out = 1;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ PDEBUG("device %d in reset\n",
+ deviceNr);
+ break;
+ case AP_RESPONSE_DECONFIGURED:
+ stat = HD_DECONFIGURED;
+ break_out = 1;
+ break;
+ case AP_RESPONSE_CHECKSTOPPED:
+ stat = HD_CHECKSTOPPED;
+ break_out = 1;
+ break;
+ case AP_RESPONSE_BUSY:
+ PDEBUG("device %d busy\n",
+ deviceNr);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ stat = HD_NOT_THERE;
+ break_out = 1;
+ break;
+ }
+ if (break_out)
+ break;
+
+ udelay(5);
+
+ ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
+ }
+ return stat;
+}
+
+enum devstat
+reset_device(int deviceNr, int cdx, int resetNr)
+{
+ int q_nr, ccode = 0, dummy_qdepth, dummy_devType, i;
+ struct ap_status_word stat_word;
+ enum devstat stat;
+ int break_out;
+
+ q_nr = (deviceNr << SKIP_BITL) + cdx;
+ stat = DEV_GONE;
+ ccode = resetq(q_nr, &stat_word);
+ if (ccode > 3)
+ return DEV_RSQ_EXCEPTION;
+
+ break_out = 0;
+ for (i = 0; i < resetNr; i++) {
+ switch (ccode) {
+ case 0:
+ stat = DEV_ONLINE;
+ if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
+ break_out = 1;
+ break;
+ case 3:
+ switch (stat_word.response_code) {
+ case AP_RESPONSE_NORMAL:
+ stat = DEV_ONLINE;
+ if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
+ break_out = 1;
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ stat = DEV_GONE;
+ break_out = 1;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ default:
+ break;
+ }
+ break;
+ default:
+ stat = DEV_GONE;
+ break_out = 1;
+ break;
+ }
+ if (break_out == 1)
+ break;
+ udelay(5);
+
+ ccode = testq(q_nr, &dummy_qdepth, &dummy_devType, &stat_word);
+ if (ccode > 3) {
+ stat = DEV_TSQ_EXCEPTION;
+ break;
+ }
+ }
+ PDEBUG("Number of testq's needed for reset: %d\n", i);
+
+ if (i >= resetNr) {
+ stat = DEV_GONE;
+ }
+
+ return stat;
+}
+
+#ifdef DEBUG_HYDRA_MSGS
/* Debug helper: hex-dump 'buffer', sixteen bytes per output line. */
static inline void
print_buffer(unsigned char *buffer, int bufflen)
{
	int offset;

	for (offset = 0; offset < bufflen; offset += 16) {
		unsigned char *row = buffer + offset;

		PRINTK("%04X: %02X%02X%02X%02X %02X%02X%02X%02X "
		       "%02X%02X%02X%02X %02X%02X%02X%02X\n", offset,
		       row[0], row[1], row[2], row[3],
		       row[4], row[5], row[6], row[7],
		       row[8], row[9], row[10], row[11],
		       row[12], row[13], row[14], row[15]);
	}
}
+#endif
+
+enum devstat
+send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext)
+{
+ struct ap_status_word stat_word;
+ enum devstat stat;
+ int ccode;
+
+ ((struct request_msg_ext *) msg_ext)->q_nr =
+ (dev_nr << SKIP_BITL) + cdx;
+ PDEBUG("msg_len passed to sen: %d\n", msg_len);
+ PDEBUG("q number passed to sen: %02x%02x%02x%02x\n",
+ msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]);
+ stat = DEV_GONE;
+
+#ifdef DEBUG_HYDRA_MSGS
+ PRINTK("Request header: %02X%02X%02X%02X %02X%02X%02X%02X "
+ "%02X%02X%02X%02X\n",
+ msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3],
+ msg_ext[4], msg_ext[5], msg_ext[6], msg_ext[7],
+ msg_ext[8], msg_ext[9], msg_ext[10], msg_ext[11]);
+ print_buffer(msg_ext+CALLER_HEADER, msg_len);
+#endif
+
+ ccode = sen(msg_len, msg_ext, &stat_word);
+ if (ccode > 3)
+ return DEV_SEN_EXCEPTION;
+
+ PDEBUG("nq cc: %u, st: %02x%02x%02x%02x\n",
+ ccode, stat_word.q_stat_flags, stat_word.response_code,
+ stat_word.reserved[0], stat_word.reserved[1]);
+ switch (ccode) {
+ case 0:
+ stat = DEV_ONLINE;
+ break;
+ case 1:
+ stat = DEV_GONE;
+ break;
+ case 3:
+ switch (stat_word.response_code) {
+ case AP_RESPONSE_NORMAL:
+ stat = DEV_ONLINE;
+ break;
+ case AP_RESPONSE_Q_FULL:
+ stat = DEV_QUEUE_FULL;
+ break;
+ default:
+ stat = DEV_GONE;
+ break;
+ }
+ break;
+ default:
+ stat = DEV_GONE;
+ break;
+ }
+
+ return stat;
+}
+
/*
 * Dequeue a reply from AP queue (dev_nr, cdx) into 'resp' (capacity
 * 'resplen'); the 8-byte message id is returned through 'psmid'.
 * Returns DEV_ONLINE when a reply was received, DEV_EMPTY / DEV_NO_WORK
 * when nothing is pending (queue empty vs. not yet ready),
 * DEV_BAD_MESSAGE for malformed replies, DEV_REC_EXCEPTION on a program
 * exception in the dequeue, DEV_GONE otherwise.
 */
enum devstat
receive_from_AP(int dev_nr, int cdx, int resplen, unsigned char *resp,
		unsigned char *psmid)
{
	int ccode;
	struct ap_status_word stat_word;
	enum devstat stat;

	/* Only the first 8 bytes are pre-cleared, not all of resplen --
	 * presumably so a stale header is never mistaken for a reply;
	 * verify against the callers' expectations. */
	memset(resp, 0x00, 8);

	ccode = rec((dev_nr << SKIP_BITL) + cdx, resplen, resp, psmid,
		    &stat_word);
	if (ccode > 3)
		return DEV_REC_EXCEPTION;

	PDEBUG("dq cc: %u, st: %02x%02x%02x%02x\n",
	       ccode, stat_word.q_stat_flags, stat_word.response_code,
	       stat_word.reserved[0], stat_word.reserved[1]);

	stat = DEV_GONE;
	switch (ccode) {
	case 0:
		/* cc0: a reply was dequeued successfully */
		stat = DEV_ONLINE;
#ifdef DEBUG_HYDRA_MSGS
		print_buffer(resp, resplen);
#endif
		break;
	case 3:
		/* cc3: consult the AP response code */
		switch (stat_word.response_code) {
		case AP_RESPONSE_NORMAL:
			stat = DEV_ONLINE;
			break;
		case AP_RESPONSE_NO_PENDING_REPLY:
			/* distinguish "queue drained" from "still working" */
			if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
				stat = DEV_EMPTY;
			else
				stat = DEV_NO_WORK;
			break;
		case AP_RESPONSE_INDEX_TOO_BIG:
		case AP_RESPONSE_NO_FIRST_PART:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			stat = DEV_BAD_MESSAGE;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return stat;
}
+
+static inline int
+pad_msg(unsigned char *buffer, int totalLength, int msgLength)
+{
+ int pad_len;
+
+ for (pad_len = 0; pad_len < (totalLength - msgLength); pad_len++)
+ if (buffer[pad_len] != 0x00)
+ break;
+ pad_len -= 3;
+ if (pad_len < 8)
+ return SEN_PAD_ERROR;
+
+ buffer[0] = 0x00;
+ buffer[1] = 0x02;
+
+ memcpy(buffer+2, static_pad, pad_len);
+
+ buffer[pad_len + 2] = 0x00;
+
+ return 0;
+}
+
/*
 * Return 1 when 'key' (a big-endian public exponent of 'len' bytes)
 * is one of the two common RSA exponents: 3, or 65537 (0x010001).
 */
static inline int
is_common_public_key(unsigned char *key, int len)
{
	unsigned char *sig;
	int sig_len;
	int idx = 0;

	/* Strip leading zero bytes. */
	while (idx < len && !key[idx])
		idx++;
	sig = key + idx;
	sig_len = len - idx;

	if (sig_len == 1 && sig[0] == 3)
		return 1;
	if (sig_len == 3 && sig[0] == 1 && sig[1] == 0 && sig[2] == 1)
		return 1;

	return 0;
}
+
/*
 * Build a type 4 modular-exponentiation request (PCICA format) from a
 * user-space ica_rsa_modexpo request.  The short form is used when the
 * modulus is at most 128 bytes (1024 bits), the long form otherwise;
 * modulus, exponent and input are right-aligned (zero-padded on the
 * left) inside their fixed-size message fields.  On success the message
 * length (excluding the caller header) is stored in *z90cMsg_l_p.
 * Returns SEN_RELEASED when a user copy fails and SEN_USER_ERROR when
 * any of the copied operands is all-zero.
 */
static int
ICAMEX_msg_to_type4MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
			   union type4_msg *z90cMsg_p)
{
	int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
	unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
	union type4_msg *tmp_type4_msg;

	mod_len = icaMex_p->inputdatalength;

	msg_size = ((mod_len <= 128) ? TYPE4_SME_LEN : TYPE4_LME_LEN) +
		   CALLER_HEADER;

	memset(z90cMsg_p, 0, msg_size);

	/* The real message starts after the caller header. */
	tmp_type4_msg = (union type4_msg *)
		((unsigned char *) z90cMsg_p + CALLER_HEADER);

	tmp_type4_msg->sme.header.msg_type_code = TYPE4_TYPE_CODE;
	tmp_type4_msg->sme.header.request_code = TYPE4_REQU_CODE;

	/* Pick the short or long message layout's field targets. */
	if (mod_len <= 128) {
		tmp_type4_msg->sme.header.msg_fmt = TYPE4_SME_FMT;
		tmp_type4_msg->sme.header.msg_len = TYPE4_SME_LEN;
		mod_tgt = tmp_type4_msg->sme.modulus;
		mod_tgt_len = sizeof(tmp_type4_msg->sme.modulus);
		exp_tgt = tmp_type4_msg->sme.exponent;
		exp_tgt_len = sizeof(tmp_type4_msg->sme.exponent);
		inp_tgt = tmp_type4_msg->sme.message;
		inp_tgt_len = sizeof(tmp_type4_msg->sme.message);
	} else {
		tmp_type4_msg->lme.header.msg_fmt = TYPE4_LME_FMT;
		tmp_type4_msg->lme.header.msg_len = TYPE4_LME_LEN;
		mod_tgt = tmp_type4_msg->lme.modulus;
		mod_tgt_len = sizeof(tmp_type4_msg->lme.modulus);
		exp_tgt = tmp_type4_msg->lme.exponent;
		exp_tgt_len = sizeof(tmp_type4_msg->lme.exponent);
		inp_tgt = tmp_type4_msg->lme.message;
		inp_tgt_len = sizeof(tmp_type4_msg->lme.message);
	}

	/* Right-align each operand in its field, then copy from user. */
	mod_tgt += (mod_tgt_len - mod_len);
	if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
		return SEN_RELEASED;
	if (is_empty(mod_tgt, mod_len))
		return SEN_USER_ERROR;
	exp_tgt += (exp_tgt_len - mod_len);
	if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
		return SEN_RELEASED;
	if (is_empty(exp_tgt, mod_len))
		return SEN_USER_ERROR;
	inp_tgt += (inp_tgt_len - mod_len);
	if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
		return SEN_RELEASED;
	if (is_empty(inp_tgt, mod_len))
		return SEN_USER_ERROR;

	*z90cMsg_l_p = msg_size - CALLER_HEADER;

	return 0;
}
+
/*
 * Build a type 4 CRT (Chinese Remainder Theorem) request (PCICA format)
 * from a user-space ica_rsa_modexpo_crt request.  The five CRT key
 * parts come in two sizes: short (mod_len/2) for q and dq, long
 * (mod_len/2 + 8) for p, dp and u.  Each part and the input message are
 * right-aligned inside their fixed-size fields.  Error returns are the
 * same as for the MEX variant: SEN_RELEASED on a failed user copy,
 * SEN_USER_ERROR on an all-zero operand.
 */
static int
ICACRT_msg_to_type4CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
			   int *z90cMsg_l_p, union type4_msg *z90cMsg_p)
{
	int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
	    dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len;
	unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt;
	union type4_msg *tmp_type4_msg;

	mod_len = icaMsg_p->inputdatalength;
	short_len = mod_len / 2;
	long_len = mod_len / 2 + 8;

	tmp_size = ((mod_len <= 128) ? TYPE4_SCR_LEN : TYPE4_LCR_LEN) +
		   CALLER_HEADER;

	memset(z90cMsg_p, 0, tmp_size);

	/* The real message starts after the caller header. */
	tmp_type4_msg = (union type4_msg *)
		((unsigned char *) z90cMsg_p + CALLER_HEADER);

	tmp_type4_msg->scr.header.msg_type_code = TYPE4_TYPE_CODE;
	tmp_type4_msg->scr.header.request_code = TYPE4_REQU_CODE;
	/* Pick the short or long CRT layout's field targets. */
	if (mod_len <= 128) {
		tmp_type4_msg->scr.header.msg_fmt = TYPE4_SCR_FMT;
		tmp_type4_msg->scr.header.msg_len = TYPE4_SCR_LEN;
		p_tgt = tmp_type4_msg->scr.p;
		p_tgt_len = sizeof(tmp_type4_msg->scr.p);
		q_tgt = tmp_type4_msg->scr.q;
		q_tgt_len = sizeof(tmp_type4_msg->scr.q);
		dp_tgt = tmp_type4_msg->scr.dp;
		dp_tgt_len = sizeof(tmp_type4_msg->scr.dp);
		dq_tgt = tmp_type4_msg->scr.dq;
		dq_tgt_len = sizeof(tmp_type4_msg->scr.dq);
		u_tgt = tmp_type4_msg->scr.u;
		u_tgt_len = sizeof(tmp_type4_msg->scr.u);
		inp_tgt = tmp_type4_msg->scr.message;
		inp_tgt_len = sizeof(tmp_type4_msg->scr.message);
	} else {
		tmp_type4_msg->lcr.header.msg_fmt = TYPE4_LCR_FMT;
		tmp_type4_msg->lcr.header.msg_len = TYPE4_LCR_LEN;
		p_tgt = tmp_type4_msg->lcr.p;
		p_tgt_len = sizeof(tmp_type4_msg->lcr.p);
		q_tgt = tmp_type4_msg->lcr.q;
		q_tgt_len = sizeof(tmp_type4_msg->lcr.q);
		dp_tgt = tmp_type4_msg->lcr.dp;
		dp_tgt_len = sizeof(tmp_type4_msg->lcr.dp);
		dq_tgt = tmp_type4_msg->lcr.dq;
		dq_tgt_len = sizeof(tmp_type4_msg->lcr.dq);
		u_tgt = tmp_type4_msg->lcr.u;
		u_tgt_len = sizeof(tmp_type4_msg->lcr.u);
		inp_tgt = tmp_type4_msg->lcr.message;
		inp_tgt_len = sizeof(tmp_type4_msg->lcr.message);
	}

	/* Right-align each CRT part in its field, then copy from user. */
	p_tgt += (p_tgt_len - long_len);
	if (copy_from_user(p_tgt, icaMsg_p->np_prime, long_len))
		return SEN_RELEASED;
	if (is_empty(p_tgt, long_len))
		return SEN_USER_ERROR;
	q_tgt += (q_tgt_len - short_len);
	if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
		return SEN_RELEASED;
	if (is_empty(q_tgt, short_len))
		return SEN_USER_ERROR;
	dp_tgt += (dp_tgt_len - long_len);
	if (copy_from_user(dp_tgt, icaMsg_p->bp_key, long_len))
		return SEN_RELEASED;
	if (is_empty(dp_tgt, long_len))
		return SEN_USER_ERROR;
	dq_tgt += (dq_tgt_len - short_len);
	if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
		return SEN_RELEASED;
	if (is_empty(dq_tgt, short_len))
		return SEN_USER_ERROR;
	u_tgt += (u_tgt_len - long_len);
	if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv, long_len))
		return SEN_RELEASED;
	if (is_empty(u_tgt, long_len))
		return SEN_USER_ERROR;
	inp_tgt += (inp_tgt_len - mod_len);
	if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
		return SEN_RELEASED;
	if (is_empty(inp_tgt, mod_len))
		return SEN_USER_ERROR;

	*z90cMsg_l_p = tmp_size - CALLER_HEADER;

	return 0;
}
+
/*
 * Build a type 6 modular-exponentiation *decrypt* request (PCICC
 * format) from a user-space ica_rsa_modexpo request.  The message is
 * assembled in wire order by walking 'temp' forward: type 6 header,
 * CPRB, function-and-rules block, VUD (length + input data), key block
 * header, then the private-key token whose exponent and modulus fields
 * are filled from user space (right-aligned).  'cdx' goes into the
 * CPRB's usage domain.  Requests whose private exponent turns out to be
 * a common *public* exponent are rejected with SEN_NOT_AVAIL (warned
 * about at most 20 times).  On success *z90cMsg_l_p receives the
 * message length excluding the caller header.
 */
static int
ICAMEX_msg_to_type6MEX_de_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
			      int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
{
	int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
	unsigned char *temp;
	struct type6_hdr *tp6Hdr_p;
	struct CPRB *cprb_p;
	struct cca_private_ext_ME *key_p;
	/* rate limiter for the "common public key" warning below */
	static int deprecated_msg_count = 0;

	mod_len = icaMsg_p->inputdatalength;
	tmp_size = FIXED_TYPE6_ME_LEN + mod_len;
	total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
	parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
	/* round the total up to a multiple of 4, plus the caller header */
	tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;

	memset(z90cMsg_p, 0, tmp_size);

	temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
	memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
	tp6Hdr_p = (struct type6_hdr *)temp;
	tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
	tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;

	temp += sizeof(struct type6_hdr);
	memcpy(temp, &static_cprb, sizeof(struct CPRB));
	cprb_p = (struct CPRB *) temp;
	cprb_p->usage_domain[0]= (unsigned char)cdx;
	/* CPRB lengths are stored as 2-byte little-endian values */
	itoLe2(&parmBlock_l, cprb_p->req_parml);
	itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);

	temp += sizeof(struct CPRB);
	memcpy(temp, &static_pkd_function_and_rules,
	       sizeof(struct function_and_rules_block));

	/* VUD: 2-byte length followed by the input data */
	temp += sizeof(struct function_and_rules_block);
	vud_len = 2 + icaMsg_p->inputdatalength;
	itoLe2(&vud_len, temp);

	temp += 2;
	if (copy_from_user(temp, icaMsg_p->inputdata, mod_len))
		return SEN_RELEASED;
	if (is_empty(temp, mod_len))
		return SEN_USER_ERROR;

	temp += mod_len;
	memcpy(temp, &static_T6_keyBlock_hdr, sizeof(struct T6_keyBlock_hdr));

	temp += sizeof(struct T6_keyBlock_hdr);
	memcpy(temp, &static_pvt_me_key, sizeof(struct cca_private_ext_ME));
	key_p = (struct cca_private_ext_ME *)temp;
	/* right-align the private exponent inside its token field */
	temp = key_p->pvtMESec.exponent + sizeof(key_p->pvtMESec.exponent)
	       - mod_len;
	if (copy_from_user(temp, icaMsg_p->b_key, mod_len))
		return SEN_RELEASED;
	if (is_empty(temp, mod_len))
		return SEN_USER_ERROR;

	/* A "private" exponent of 3 or 65537 is really a public key. */
	if (is_common_public_key(temp, mod_len)) {
		if (deprecated_msg_count < 20) {
			PRINTK("Common public key used for modex decrypt\n");
			deprecated_msg_count++;
			if (deprecated_msg_count == 20)
				PRINTK("No longer issuing messages about common"
				       " public key for modex decrypt.\n");
		}
		return SEN_NOT_AVAIL;
	}

	/* right-align the modulus inside its token field */
	temp = key_p->pvtMESec.modulus + sizeof(key_p->pvtMESec.modulus)
	       - mod_len;
	if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
		return SEN_RELEASED;
	if (is_empty(temp, mod_len))
		return SEN_USER_ERROR;

	key_p->pubMESec.modulus_bit_len = 8 * mod_len;

	*z90cMsg_l_p = tmp_size - CALLER_HEADER;

	return 0;
}
+
/*
 * Build a type 6 modular-exponentiation *encrypt* request (PCICC
 * format) from a user-space ica_rsa_modexpo request.  The public
 * exponent is copied into a 256-byte scratch buffer (temp_exp, freed on
 * every exit path) and stripped of leading zero bytes.  The input data
 * must carry 0x00 0x02 <nonzero pad> 0x00 framing -- this looks like
 * PKCS #1 block type 2 padding (verify against the card docs); the pad
 * is stripped before the data is placed in the VUD.  The message is
 * then assembled in wire order: header, CPRB, function-and-rules block,
 * VUD, key block with the public-key token (exponent + modulus), and
 * all derived little-endian length fields are patched in at the end.
 */
static int
ICAMEX_msg_to_type6MEX_en_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
			      int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
{
	int mod_len, vud_len, exp_len, key_len;
	int pad_len, tmp_size, total_CPRB_len, parmBlock_l, i;
	unsigned char *temp_exp, *exp_p, *temp;
	struct type6_hdr *tp6Hdr_p;
	struct CPRB *cprb_p;
	struct cca_public_key *key_p;
	struct T6_keyBlock_hdr *keyb_p;

	temp_exp = kmalloc(256, GFP_KERNEL);
	if (!temp_exp)
		return EGETBUFF;
	mod_len = icaMsg_p->inputdatalength;
	if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
		kfree(temp_exp);
		return SEN_RELEASED;
	}
	if (is_empty(temp_exp, mod_len)) {
		kfree(temp_exp);
		return SEN_USER_ERROR;
	}

	/* Strip leading zero bytes of the exponent; all-zero was
	 * already rejected above, but guard anyway. */
	exp_p = temp_exp;
	for (i = 0; i < mod_len; i++)
		if (exp_p[i])
			break;
	if (i >= mod_len) {
		kfree(temp_exp);
		return SEN_USER_ERROR;
	}

	exp_len = mod_len - i;
	exp_p += i;

	PDEBUG("exp_len after computation: %08x\n", exp_len);
	tmp_size = FIXED_TYPE6_ME_EN_LEN + 2 * mod_len + exp_len;
	total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
	parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
	/* round the total up to a multiple of 4, plus the caller header */
	tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;

	vud_len = 2 + mod_len;
	memset(z90cMsg_p, 0, tmp_size);

	temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
	memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
	tp6Hdr_p = (struct type6_hdr *)temp;
	tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
	tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
	memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
	       sizeof(static_PKE_function_code));
	temp += sizeof(struct type6_hdr);
	memcpy(temp, &static_cprb, sizeof(struct CPRB));
	cprb_p = (struct CPRB *) temp;
	cprb_p->usage_domain[0]= (unsigned char)cdx;
	itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
	temp += sizeof(struct CPRB);
	memcpy(temp, &static_pke_function_and_rules,
	       sizeof(struct function_and_rules_block));
	temp += sizeof(struct function_and_rules_block);
	/* leave 2 bytes for the VUD length, filled in after unpadding */
	temp += 2;
	if (copy_from_user(temp, icaMsg_p->inputdata, mod_len)) {
		kfree(temp_exp);
		return SEN_RELEASED;
	}
	if (is_empty(temp, mod_len)) {
		kfree(temp_exp);
		return SEN_USER_ERROR;
	}
	/* expect 0x00 0x02 framing at the front of the input */
	if ((temp[0] != 0x00) || (temp[1] != 0x02)) {
		kfree(temp_exp);
		return SEN_NOT_AVAIL;
	}
	/* find the 0x00 terminator of the pad; require at least 8 pad
	 * bytes and at least 2 data bytes */
	for (i = 2; i < mod_len; i++)
		if (temp[i] == 0x00)
			break;
	if ((i < 9) || (i > (mod_len - 2))) {
		kfree(temp_exp);
		return SEN_NOT_AVAIL;
	}
	/* strip the pad and write the real VUD length */
	pad_len = i + 1;
	vud_len = mod_len - pad_len;
	memmove(temp, temp+pad_len, vud_len);
	temp -= 2;
	vud_len += 2;
	itoLe2(&vud_len, temp);
	temp += (vud_len);
	keyb_p = (struct T6_keyBlock_hdr *)temp;
	temp += sizeof(struct T6_keyBlock_hdr);
	memcpy(temp, &static_public_key, sizeof(static_public_key));
	key_p = (struct cca_public_key *)temp;
	temp = key_p->pubSec.exponent;
	memcpy(temp, exp_p, exp_len);
	kfree(temp_exp);
	temp += exp_len;
	if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
		return SEN_RELEASED;
	if (is_empty(temp, mod_len))
		return SEN_USER_ERROR;
	key_p->pubSec.modulus_bit_len = 8 * mod_len;
	key_p->pubSec.modulus_byte_len = mod_len;
	key_p->pubSec.exponent_len = exp_len;
	key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
	/* back-patch the key block and CPRB length fields */
	key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
	key_p->pubHdr.token_length = key_len;
	key_len += 4;
	itoLe2(&key_len, keyb_p->ulen);
	key_len += 2;
	itoLe2(&key_len, keyb_p->blen);
	parmBlock_l -= pad_len;
	itoLe2(&parmBlock_l, cprb_p->req_parml);
	*z90cMsg_l_p = tmp_size - CALLER_HEADER;

	return 0;
}
+
/*
 * Build a type 6 CRT request (PCICC format) from a user-space
 * ica_rsa_modexpo_crt request.  Assembled in wire order: header, CPRB,
 * function-and-rules block, VUD (length + input), key block header,
 * CCA token header, the private extended-CRT section with its five key
 * parts (p, q, dp, dq, u -- long parts are short_len + 8 bytes), pad
 * bytes, a mod_len run of 0xFF, and finally the public section.
 * Length fields written with itoLe2 are 2-byte little-endian.
 */
static int
ICACRT_msg_to_type6CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
			   int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
{
	int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
	int long_len, pad_len, keyPartsLen, tmp_l;
	unsigned char *tgt_p, *temp;
	struct type6_hdr *tp6Hdr_p;
	struct CPRB *cprb_p;
	struct cca_token_hdr *keyHdr_p;
	struct cca_pvt_ext_CRT_sec *pvtSec_p;
	struct cca_public_sec *pubSec_p;

	mod_len = icaMsg_p->inputdatalength;
	short_len = mod_len / 2;
	long_len = 8 + short_len;
	keyPartsLen = 3 * long_len + 2 * short_len;
	/* pad the key parts out to a multiple of 8 bytes */
	pad_len = (8 - (keyPartsLen % 8)) % 8;
	keyPartsLen += pad_len + mod_len;
	tmp_size = FIXED_TYPE6_CR_LEN + keyPartsLen + mod_len;
	total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
	parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
	vud_len = 2 + mod_len;
	tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;

	memset(z90cMsg_p, 0, tmp_size);
	tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
	memcpy(tgt_p, &static_type6_hdr, sizeof(struct type6_hdr));
	tp6Hdr_p = (struct type6_hdr *)tgt_p;
	tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
	tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
	tgt_p += sizeof(struct type6_hdr);
	cprb_p = (struct CPRB *) tgt_p;
	memcpy(tgt_p, &static_cprb, sizeof(struct CPRB));
	/* take the low byte of cdx (big-endian int layout) */
	cprb_p->usage_domain[0]= *((unsigned char *)(&(cdx))+3);
	itoLe2(&parmBlock_l, cprb_p->req_parml);
	memcpy(cprb_p->rpl_parml, cprb_p->req_parml,
	       sizeof(cprb_p->req_parml));
	tgt_p += sizeof(struct CPRB);
	memcpy(tgt_p, &static_pkd_function_and_rules,
	       sizeof(struct function_and_rules_block));
	tgt_p += sizeof(struct function_and_rules_block);
	/* VUD: 2-byte length followed by the input data */
	itoLe2(&vud_len, tgt_p);
	tgt_p += 2;
	if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, mod_len))
		return SEN_USER_ERROR;
	tgt_p += mod_len;
	/* key block header: outer and inner (outer - 2) lengths */
	tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
		sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
	itoLe2(&tmp_l, tgt_p);
	temp = tgt_p + 2;
	tmp_l -= 2;
	itoLe2(&tmp_l, temp);
	tgt_p += sizeof(struct T6_keyBlock_hdr);
	keyHdr_p = (struct cca_token_hdr *)tgt_p;
	keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
	tmp_l -= 4;
	keyHdr_p->token_length = tmp_l;
	tgt_p += sizeof(struct cca_token_hdr);
	pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
	pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
	pvtSec_p->section_length =
		sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
	pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
	pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
	pvtSec_p->p_len = long_len;
	pvtSec_p->q_len = short_len;
	pvtSec_p->dp_len = long_len;
	pvtSec_p->dq_len = short_len;
	pvtSec_p->u_len = long_len;
	pvtSec_p->mod_len = mod_len;
	pvtSec_p->pad_len = pad_len;
	tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
	/* the five CRT key parts, in section order */
	if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, long_len))
		return SEN_USER_ERROR;
	tgt_p += long_len;
	if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, short_len))
		return SEN_USER_ERROR;
	tgt_p += short_len;
	if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, long_len))
		return SEN_USER_ERROR;
	tgt_p += long_len;
	if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, short_len))
		return SEN_USER_ERROR;
	tgt_p += short_len;
	if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, long_len))
		return SEN_USER_ERROR;
	tgt_p += long_len;
	tgt_p += pad_len;
	/* mod_len bytes of 0xFF precede the public section */
	memset(tgt_p, 0xFF, mod_len);
	tgt_p += mod_len;
	memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
	pubSec_p = (struct cca_public_sec *) tgt_p;
	pubSec_p->modulus_bit_len = 8 * mod_len;
	*z90cMsg_l_p = tmp_size - CALLER_HEADER;

	return 0;
}
+
/*
 * Build an extended (CPRBX) type 6 modular-exponentiation request for
 * PCIXCC / CEX2C devices.  Differs from the PCICC variant: CPRBX
 * instead of CPRB, length fields stored as native (big-endian) shorts
 * rather than via itoLe2, no unpadding of the input data, and a
 * different function-and-rules block for MCL2-level PCIXCC cards.
 * The public exponent is staged in a 256-byte scratch buffer (freed on
 * every exit path) and stripped of leading zero bytes.
 */
static int
ICAMEX_msg_to_type6MEX_msgX(struct ica_rsa_modexpo *icaMsg_p, int cdx,
			    int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
			    int dev_type)
{
	int mod_len, exp_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
	int key_len, i;
	unsigned char *temp_exp, *tgt_p, *temp, *exp_p;
	struct type6_hdr *tp6Hdr_p;
	struct CPRBX *cprbx_p;
	struct cca_public_key *key_p;
	struct T6_keyBlock_hdrX *keyb_p;

	temp_exp = kmalloc(256, GFP_KERNEL);
	if (!temp_exp)
		return EGETBUFF;
	mod_len = icaMsg_p->inputdatalength;
	if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
		kfree(temp_exp);
		return SEN_RELEASED;
	}
	if (is_empty(temp_exp, mod_len)) {
		kfree(temp_exp);
		return SEN_USER_ERROR;
	}
	/* strip leading zero bytes of the exponent */
	exp_p = temp_exp;
	for (i = 0; i < mod_len; i++)
		if (exp_p[i])
			break;
	if (i >= mod_len) {
		kfree(temp_exp);
		return SEN_USER_ERROR;
	}
	exp_len = mod_len - i;
	exp_p += i;
	PDEBUG("exp_len after computation: %08x\n", exp_len);
	tmp_size = FIXED_TYPE6_ME_EN_LENX + 2 * mod_len + exp_len;
	total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
	parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
	tmp_size = tmp_size + CALLER_HEADER;
	vud_len = 2 + mod_len;
	memset(z90cMsg_p, 0, tmp_size);
	tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
	memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
	tp6Hdr_p = (struct type6_hdr *)tgt_p;
	tp6Hdr_p->ToCardLen1 = total_CPRB_len;
	tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
	memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
	       sizeof(static_PKE_function_code));
	tgt_p += sizeof(struct type6_hdr);
	memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
	cprbx_p = (struct CPRBX *) tgt_p;
	cprbx_p->domain = (unsigned short)cdx;
	cprbx_p->rpl_msgbl = RESPONSE_CPRBX_SIZE;
	tgt_p += sizeof(struct CPRBX);
	/* MCL2-level PCIXCC cards need a different rules block */
	if (dev_type == PCIXCC_MCL2)
		memcpy(tgt_p, &static_pke_function_and_rulesX_MCL2,
		       sizeof(struct function_and_rules_block));
	else
		memcpy(tgt_p, &static_pke_function_and_rulesX,
		       sizeof(struct function_and_rules_block));
	tgt_p += sizeof(struct function_and_rules_block);

	/* copy the input first, then back up to write the VUD length */
	tgt_p += 2;
	if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) {
		kfree(temp_exp);
		return SEN_RELEASED;
	}
	if (is_empty(tgt_p, mod_len)) {
		kfree(temp_exp);
		return SEN_USER_ERROR;
	}
	tgt_p -= 2;
	*((short *)tgt_p) = (short) vud_len;
	tgt_p += vud_len;
	keyb_p = (struct T6_keyBlock_hdrX *)tgt_p;
	tgt_p += sizeof(struct T6_keyBlock_hdrX);
	memcpy(tgt_p, &static_public_key, sizeof(static_public_key));
	key_p = (struct cca_public_key *)tgt_p;
	temp = key_p->pubSec.exponent;
	memcpy(temp, exp_p, exp_len);
	kfree(temp_exp);
	temp += exp_len;
	if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
		return SEN_RELEASED;
	if (is_empty(temp, mod_len))
		return SEN_USER_ERROR;
	key_p->pubSec.modulus_bit_len = 8 * mod_len;
	key_p->pubSec.modulus_byte_len = mod_len;
	key_p->pubSec.exponent_len = exp_len;
	key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
	/* back-patch key block and CPRBX length fields */
	key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
	key_p->pubHdr.token_length = key_len;
	key_len += 4;
	keyb_p->ulen = (unsigned short)key_len;
	key_len += 2;
	keyb_p->blen = (unsigned short)key_len;
	cprbx_p->req_parml = parmBlock_l;
	*z90cMsg_l_p = tmp_size - CALLER_HEADER;

	return 0;
}
+
/*
 * Build an extended (CPRBX) type 6 CRT request for PCIXCC / CEX2C
 * devices.  Same wire layout as the PCICC CRT variant -- header, CPRBX,
 * function-and-rules block, VUD, key block, CCA token with the five CRT
 * key parts, pad, 0xFF filler, public section -- but with CPRBX, native
 * short length fields instead of itoLe2, and an MCL2-specific rules
 * block when dev_type == PCIXCC_MCL2.
 */
static int
ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
			    int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
			    int dev_type)
{
	int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
	int long_len, pad_len, keyPartsLen, tmp_l;
	unsigned char *tgt_p, *temp;
	struct type6_hdr *tp6Hdr_p;
	struct CPRBX *cprbx_p;
	struct cca_token_hdr *keyHdr_p;
	struct cca_pvt_ext_CRT_sec *pvtSec_p;
	struct cca_public_sec *pubSec_p;

	mod_len = icaMsg_p->inputdatalength;
	short_len = mod_len / 2;
	long_len = 8 + short_len;
	keyPartsLen = 3 * long_len + 2 * short_len;
	/* pad the key parts out to a multiple of 8 bytes */
	pad_len = (8 - (keyPartsLen % 8)) % 8;
	keyPartsLen += pad_len + mod_len;
	tmp_size = FIXED_TYPE6_CR_LENX + keyPartsLen + mod_len;
	total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
	parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
	vud_len = 2 + mod_len;
	tmp_size = tmp_size + CALLER_HEADER;
	memset(z90cMsg_p, 0, tmp_size);
	tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
	memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
	tp6Hdr_p = (struct type6_hdr *)tgt_p;
	tp6Hdr_p->ToCardLen1 = total_CPRB_len;
	tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
	tgt_p += sizeof(struct type6_hdr);
	cprbx_p = (struct CPRBX *) tgt_p;
	memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
	cprbx_p->domain = (unsigned short)cdx;
	cprbx_p->req_parml = parmBlock_l;
	cprbx_p->rpl_msgbl = parmBlock_l;
	tgt_p += sizeof(struct CPRBX);
	/* MCL2-level PCIXCC cards need a different rules block */
	if (dev_type == PCIXCC_MCL2)
		memcpy(tgt_p, &static_pkd_function_and_rulesX_MCL2,
		       sizeof(struct function_and_rules_block));
	else
		memcpy(tgt_p, &static_pkd_function_and_rulesX,
		       sizeof(struct function_and_rules_block));
	tgt_p += sizeof(struct function_and_rules_block);
	/* VUD: 2-byte length followed by the input data */
	*((short *)tgt_p) = (short) vud_len;
	tgt_p += 2;
	if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, mod_len))
		return SEN_USER_ERROR;
	tgt_p += mod_len;
	/* key block header: outer and inner (outer - 2) lengths */
	tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
		sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
	*((short *)tgt_p) = (short) tmp_l;
	temp = tgt_p + 2;
	tmp_l -= 2;
	*((short *)temp) = (short) tmp_l;
	tgt_p += sizeof(struct T6_keyBlock_hdr);
	keyHdr_p = (struct cca_token_hdr *)tgt_p;
	keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
	tmp_l -= 4;
	keyHdr_p->token_length = tmp_l;
	tgt_p += sizeof(struct cca_token_hdr);
	pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
	pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
	pvtSec_p->section_length =
		sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
	pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
	pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
	pvtSec_p->p_len = long_len;
	pvtSec_p->q_len = short_len;
	pvtSec_p->dp_len = long_len;
	pvtSec_p->dq_len = short_len;
	pvtSec_p->u_len = long_len;
	pvtSec_p->mod_len = mod_len;
	pvtSec_p->pad_len = pad_len;
	tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
	/* the five CRT key parts, in section order */
	if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, long_len))
		return SEN_USER_ERROR;
	tgt_p += long_len;
	if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, short_len))
		return SEN_USER_ERROR;
	tgt_p += short_len;
	if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, long_len))
		return SEN_USER_ERROR;
	tgt_p += long_len;
	if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, short_len))
		return SEN_USER_ERROR;
	tgt_p += short_len;
	if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
		return SEN_RELEASED;
	if (is_empty(tgt_p, long_len))
		return SEN_USER_ERROR;
	tgt_p += long_len;
	tgt_p += pad_len;
	/* mod_len bytes of 0xFF precede the public section */
	memset(tgt_p, 0xFF, mod_len);
	tgt_p += mod_len;
	memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
	pubSec_p = (struct cca_public_sec *) tgt_p;
	pubSec_p->modulus_bit_len = 8 * mod_len;
	*z90cMsg_l_p = tmp_size - CALLER_HEADER;

	return 0;
}
+
+/**
+ * Convert an ICA request into the device-dependent message format for
+ * the target device type.  'func' selects CRT vs. modexpo form, and
+ * 'function' selects encrypt vs. decrypt on PCICC devices.  Returns 0
+ * or the error code of the underlying conversion routine.
+ */
+int
+convert_request(unsigned char *buffer, int func, unsigned short function,
+		int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
+{
+	switch (dev_type) {
+	case PCICA:
+		if (func == ICARSACRT)
+			return ICACRT_msg_to_type4CRT_msg(
+				(struct ica_rsa_modexpo_crt *) buffer,
+				msg_l_p, (union type4_msg *) msg_p);
+		return ICAMEX_msg_to_type4MEX_msg(
+			(struct ica_rsa_modexpo *) buffer,
+			msg_l_p, (union type4_msg *) msg_p);
+	case PCICC:
+		if (func == ICARSACRT)
+			return ICACRT_msg_to_type6CRT_msg(
+				(struct ica_rsa_modexpo_crt *) buffer,
+				cdx, msg_l_p, (struct type6_msg *)msg_p);
+		if (function == PCI_FUNC_KEY_ENCRYPT)
+			return ICAMEX_msg_to_type6MEX_en_msg(
+				(struct ica_rsa_modexpo *) buffer,
+				cdx, msg_l_p, (struct type6_msg *) msg_p);
+		return ICAMEX_msg_to_type6MEX_de_msg(
+			(struct ica_rsa_modexpo *) buffer,
+			cdx, msg_l_p, (struct type6_msg *) msg_p);
+	case PCIXCC_MCL2:
+	case PCIXCC_MCL3:
+	case CEX2C:
+		if (func == ICARSACRT)
+			return ICACRT_msg_to_type6CRT_msgX(
+				(struct ica_rsa_modexpo_crt *) buffer,
+				cdx, msg_l_p, (struct type6_msg *) msg_p,
+				dev_type);
+		return ICAMEX_msg_to_type6MEX_msgX(
+			(struct ica_rsa_modexpo *) buffer,
+			cdx, msg_l_p, (struct type6_msg *) msg_p,
+			dev_type);
+	}
+
+	return 0;
+}
+
+/* One-shot flag so the fallback notice below is printed only once per
+ * module load.  Only used here, so keep it file-local (static); statics
+ * are implicitly zero-initialized. */
+static int ext_bitlens_msg_count;
+
+/**
+ * Disable extended-bitlength support on the coprocessors and tell the
+ * operator (once) that PCICAs will be used for such requests instead.
+ */
+static inline void
+unset_ext_bitlens(void)
+{
+	if (!ext_bitlens_msg_count) {
+		PRINTK("Unable to use coprocessors for extended bitlengths. "
+		       "Using PCICAs (if present) for extended bitlengths. "
+		       "This is not an error.\n");
+		ext_bitlens_msg_count++;
+	}
+	ext_bitlens = 0;
+}
+
+/**
+ * Convert a device response (type 82/84/86 message) back into the
+ * caller's ICA format.  The result is written to resp_buff and its
+ * length to *respbufflen_p.  Returns 0 on success, or a REC_* /
+ * WRONG_DEVICE_TYPE code describing the failure.
+ *
+ * Fixes over the previous version (log text only, no logic change):
+ * the "Extended bitlengths not enabled" messages were missing a space
+ * before "on", and "PCXICC" was a typo for "PCIXCC".
+ */
+int
+convert_response(unsigned char *response, unsigned char *buffer,
+		 int *respbufflen_p, unsigned char *resp_buff)
+{
+	struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
+	struct type82_hdr *t82h_p = (struct type82_hdr *) response;
+	struct type84_hdr *t84h_p = (struct type84_hdr *) response;
+	struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
+	int reply_code, service_rc, service_rs, src_l;
+	unsigned char *src_p, *tgt_p;
+	struct CPRB *cprb_p;
+	struct CPRBX *cprbx_p;
+
+	src_p = 0;
+	reply_code = 0;
+	service_rc = 0;
+	service_rs = 0;
+	src_l = 0;
+	/* All response types share the leading type byte, so the type 82
+	 * header view is used to dispatch. */
+	switch (t82h_p->type) {
+	case TYPE82_RSP_CODE:
+		reply_code = t82h_p->reply_code;
+		src_p = (unsigned char *)t82h_p;
+		PRINTK("Hardware error: Type 82 Message Header: "
+		       "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		       src_p[0], src_p[1], src_p[2], src_p[3],
+		       src_p[4], src_p[5], src_p[6], src_p[7]);
+		break;
+	case TYPE84_RSP_CODE:
+		/* PCICA reply: the result occupies the last
+		 * outputdatalength bytes of the message. */
+		src_l = icaMsg_p->outputdatalength;
+		src_p = response + (int)t84h_p->len - src_l;
+		break;
+	case TYPE86_RSP_CODE:
+		reply_code = t86m_p->hdr.reply_code;
+		if (reply_code != 0)
+			break;
+		cprb_p = (struct CPRB *)
+			(response + sizeof(struct type86_fmt2_msg));
+		cprbx_p = (struct CPRBX *) cprb_p;
+		if (cprb_p->cprb_ver_id != 0x02) {
+			/* CPRB version 1 (PCICC): rc/rs are little-endian */
+			le2toI(cprb_p->ccp_rtcode, &service_rc);
+			if (service_rc != 0) {
+				le2toI(cprb_p->ccp_rscode, &service_rs);
+				if ((service_rc == 8) && (service_rs == 66))
+					PDEBUG("Bad block format on PCICC\n");
+				else if ((service_rc == 8) && (service_rs == 770)) {
+					PDEBUG("Invalid key length on PCICC\n");
+					unset_ext_bitlens();
+					return REC_USE_PCICA;
+				}
+				else if ((service_rc == 8) && (service_rs == 783)) {
+					PDEBUG("Extended bitlengths not enabled "
+					       "on PCICC\n");
+					unset_ext_bitlens();
+					return REC_USE_PCICA;
+				}
+				else
+					PRINTK("service rc/rs: %d/%d\n",
+					       service_rc, service_rs);
+				return REC_OPERAND_INV;
+			}
+			src_p = (unsigned char *)cprb_p + sizeof(struct CPRB);
+			src_p += 4;
+			le2toI(src_p, &src_l);
+			src_l -= 2;
+			src_p += 2;
+		} else {
+			/* CPRB version 2 (PCIXCC/CEX2C): rc/rs are native */
+			service_rc = (int)cprbx_p->ccp_rtcode;
+			if (service_rc != 0) {
+				service_rs = (int) cprbx_p->ccp_rscode;
+				if ((service_rc == 8) && (service_rs == 66))
+					PDEBUG("Bad block format on PCIXCC\n");
+				else if ((service_rc == 8) && (service_rs == 770)) {
+					PDEBUG("Invalid key length on PCIXCC\n");
+					unset_ext_bitlens();
+					return REC_USE_PCICA;
+				}
+				else if ((service_rc == 8) && (service_rs == 783)) {
+					PDEBUG("Extended bitlengths not enabled "
+					       "on PCIXCC\n");
+					unset_ext_bitlens();
+					return REC_USE_PCICA;
+				}
+				else
+					PRINTK("service rc/rs: %d/%d\n",
+					       service_rc, service_rs);
+				return REC_OPERAND_INV;
+			}
+			src_p = (unsigned char *)
+				cprbx_p + sizeof(struct CPRBX);
+			src_p += 4;
+			src_l = (int)(*((short *) src_p));
+			src_l -= 2;
+			src_p += 2;
+		}
+		break;
+	default:
+		return REC_BAD_MESSAGE;
+	}
+
+	/* Map transport-level reply codes onto driver return codes. */
+	if (reply_code)
+		switch (reply_code) {
+		case REPLY_ERROR_OPERAND_INVALID:
+			return REC_OPERAND_INV;
+		case REPLY_ERROR_OPERAND_SIZE:
+			return REC_OPERAND_SIZE;
+		case REPLY_ERROR_EVEN_MOD_IN_OPND:
+			return REC_EVEN_MOD;
+		case REPLY_ERROR_MESSAGE_TYPE:
+			return WRONG_DEVICE_TYPE;
+		case REPLY_ERROR_TRANSPORT_FAIL:
+			PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n",
+				t86m_p->apfs[0], t86m_p->apfs[1],
+				t86m_p->apfs[2], t86m_p->apfs[3]);
+			return REC_HARDWAR_ERR;
+		default:
+			PRINTKW("reply code = %d\n", reply_code);
+			return REC_HARDWAR_ERR;
+		}
+
+	if (service_rc != 0)
+		return REC_OPERAND_INV;
+
+	/* Sanity-check the extracted length against the caller's buffer
+	 * and our own response buffer before copying. */
+	if ((src_l > icaMsg_p->outputdatalength) ||
+	    (src_l > RESPBUFFSIZE) ||
+	    (src_l <= 0))
+		return REC_OPERAND_SIZE;
+
+	PDEBUG("Length returned = %d\n", src_l);
+	/* Right-align the result in the caller's buffer and, for type 86
+	 * replies, reconstruct the leading padding. */
+	tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l;
+	memcpy(tgt_p, src_p, src_l);
+	if ((t82h_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) {
+		memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l);
+		if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l))
+			return REC_INVALID_PAD;
+	}
+	*respbufflen_p = icaMsg_p->outputdatalength;
+	if (*respbufflen_p == 0)
+		PRINTK("Zero *respbufflen_p\n");
+
+	return 0;
+}
+
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
new file mode 100644
index 000000000000..a98c00c02559
--- /dev/null
+++ b/drivers/s390/crypto/z90main.c
@@ -0,0 +1,3563 @@
+/*
+ * linux/drivers/s390/crypto/z90main.c
+ *
+ * z90crypt 1.3.2
+ *
+ * Copyright (C) 2001, 2004 IBM Corporation
+ * Author(s): Robert Burroughs (burrough@us.ibm.com)
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/uaccess.h> // copy_(from|to)_user
+#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/delay.h> // mdelay
+#include <linux/init.h>
+#include <linux/interrupt.h> // for tasklets
+#include <linux/ioctl32.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kobject_uevent.h>
+#include <linux/proc_fs.h>
+#include <linux/syscalls.h>
+#include <linux/version.h>
+#include "z90crypt.h"
+#include "z90common.h"
+#ifndef Z90CRYPT_USE_HOTPLUG
+#include <linux/miscdevice.h>
+#endif
+
+#define VERSION_CODE(vers, rel, seq) (((vers)<<16) | ((rel)<<8) | (seq))
+#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0) /* version < 2.4 */
+# error "This kernel is too old: not supported"
+#endif
+#if LINUX_VERSION_CODE > VERSION_CODE(2,7,0) /* version > 2.6 */
+# error "This kernel is too recent: not supported by this file"
+#endif
+
+#define VERSION_Z90MAIN_C "$Revision: 1.57 $"
+
+static char z90main_version[] __initdata =
+ "z90main.o (" VERSION_Z90MAIN_C "/"
+ VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
+
+extern char z90hardware_version[];
+
+/**
+ * Defaults that may be modified.
+ */
+
+#ifndef Z90CRYPT_USE_HOTPLUG
+/**
+ * You can specify a different minor at compile time.
+ */
+#ifndef Z90CRYPT_MINOR
+#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
+#endif
+#else
+/**
+ * You can specify a different major at compile time.
+ */
+#ifndef Z90CRYPT_MAJOR
+#define Z90CRYPT_MAJOR 0
+#endif
+#endif
+
+/**
+ * You can specify a different domain at compile time or on the insmod
+ * command line.
+ */
+#ifndef DOMAIN_INDEX
+#define DOMAIN_INDEX -1
+#endif
+
+/**
+ * This is the name under which the device is registered in /proc/modules.
+ */
+#define REG_NAME "z90crypt"
+
+/**
+ * Cleanup should run every CLEANUPTIME seconds and should clean up requests
+ * older than CLEANUPTIME seconds in the past.
+ */
+#ifndef CLEANUPTIME
+#define CLEANUPTIME 20
+#endif
+
+/**
+ * Config should run every CONFIGTIME seconds
+ */
+#ifndef CONFIGTIME
+#define CONFIGTIME 30
+#endif
+
+/**
+ * The first execution of the config task should take place
+ * immediately after initialization
+ */
+#ifndef INITIAL_CONFIGTIME
+#define INITIAL_CONFIGTIME 1
+#endif
+
+/**
+ * Reader should run every READERTIME milliseconds
+ * With the 100Hz patch for s390, z90crypt can lock the system solid while
+ * under heavy load. We'll try to avoid that.
+ */
+#ifndef READERTIME
+#if HZ > 1000
+#define READERTIME 2
+#else
+#define READERTIME 10
+#endif
+#endif
+
+/**
+ * turn long device array index into device pointer
+ */
+#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
+
+/**
+ * turn short device array index into long device array index
+ */
+#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
+
+/**
+ * turn short device array index into device pointer
+ */
+#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
+
+/**
+ * Status for a work-element
+ */
+#define STAT_DEFAULT 0x00 // request has not been processed
+
+#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
+ // else, device is determined each write
+#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
+ // before being sent to the hardware.
+#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
+// 0x20 // UNUSED state
+#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
+#define STAT_NOWORK 0x00 // bits off: no work on any queue
+#define STAT_RDWRMASK 0x30 // mask for bits 5-4
+
+/**
+ * Macros to check the status RDWRMASK
+ */
+#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
+#define SET_RDWRMASK(statbyte, newval) \
+ {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
+
+/**
+ * Audit Trail. Progress of a Work element
+ * audit[0]: Unless noted otherwise, these bits are all set by the process
+ */
+#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
+#define FP_BUFFREQ 0x40 // Low Level buffer requested
+#define FP_BUFFGOT 0x20 // Low Level buffer obtained
+#define FP_SENT 0x10 // Work element sent to a crypto device
+ // (may be set by process or by reader task)
+#define FP_PENDING 0x08 // Work element placed on pending queue
+ // (may be set by process or by reader task)
+#define FP_REQUEST 0x04 // Work element placed on request queue
+#define FP_ASLEEP 0x02 // Work element about to sleep
+#define FP_AWAKE 0x01 // Work element has been awakened
+
+/**
+ * audit[1]: These bits are set by the reader task and/or the cleanup task
+ */
+#define FP_NOTPENDING 0x80 // Work element removed from pending queue
+#define FP_AWAKENING 0x40 // Caller about to be awakened
+#define FP_TIMEDOUT 0x20 // Caller timed out
+#define FP_RESPSIZESET 0x10 // Response size copied to work element
+#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
+#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
+#define FP_REMREQUEST 0x02 // Work element removed from request queue
+#define FP_SIGNALED 0x01 // Work element was awakened by a signal
+
+/**
+ * audit[2]: unused
+ */
+
+/**
+ * state of the file handle in private_data.status
+ */
+#define STAT_OPEN 0
+#define STAT_CLOSED 1
+
+/**
+ * PID() expands to the process ID of the current process
+ */
+#define PID() (current->pid)
+
+/**
+ * Selected Constants. The number of APs and the number of devices
+ */
+#ifndef Z90CRYPT_NUM_APS
+#define Z90CRYPT_NUM_APS 64
+#endif
+#ifndef Z90CRYPT_NUM_DEVS
+#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
+#endif
+
+/**
+ * Buffer size for receiving responses. The maximum Response Size
+ * is actually the maximum request size, since in an error condition
+ * the request itself may be returned unchanged.
+ */
+#define MAX_RESPONSE_SIZE 0x0000077C
+
+/**
+ * A count and status-byte mask
+ */
+struct status {
+ int st_count; // # of enabled devices
+ int disabled_count; // # of disabled devices
+ int user_disabled_count; // # of devices disabled via proc fs
+ unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
+};
+
+/**
+ * The array of device indexes is a mechanism for fast indexing into
+ * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
+ * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
+ * z90CDeviceIndex[2] is 47.
+ */
+struct device_x {
+ int device_index[Z90CRYPT_NUM_DEVS];
+};
+
+/**
+ * All devices are arranged in a single array: 64 APs
+ */
+struct device {
+ int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
+ // PCIXCC_MCL3, CEX2C
+ enum devstat dev_stat; // current device status
+ int dev_self_x; // Index in array
+ int disabled; // Set when device is in error
+ int user_disabled; // Set when device is disabled by user
+ int dev_q_depth; // q depth
+ unsigned char * dev_resp_p; // Response buffer address
+ int dev_resp_l; // Response Buffer length
+ int dev_caller_count; // Number of callers
+ int dev_total_req_cnt; // # requests for device since load
+ struct list_head dev_caller_list; // List of callers
+};
+
+/**
+ * There's a struct status and a struct device_x for each device type.
+ */
+struct hdware_block {
+ struct status hdware_mask;
+ struct status type_mask[Z90CRYPT_NUM_TYPES];
+ struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
+ unsigned char device_type_array[Z90CRYPT_NUM_APS];
+};
+
+/**
+ * z90crypt is the topmost data structure in the hierarchy.
+ */
+struct z90crypt {
+ int max_count; // Nr of possible crypto devices
+ struct status mask;
+ int q_depth_array[Z90CRYPT_NUM_DEVS];
+ int dev_type_array[Z90CRYPT_NUM_DEVS];
+ struct device_x overall_device_x; // array device indexes
+ struct device * device_p[Z90CRYPT_NUM_DEVS];
+ int terminating;
+ int domain_established;// TRUE: domain has been found
+ int cdx; // Crypto Domain Index
+ int len; // Length of this data structure
+ struct hdware_block *hdware_info;
+};
+
+/**
+ * An array of these structures is pointed to from dev_caller
+ * The length of the array depends on the device type. For APs,
+ * there are 8.
+ *
+ * The caller buffer is allocated to the user at OPEN. At WRITE,
+ * it contains the request; at READ, the response. The function
+ * send_to_crypto_device converts the request to device-dependent
+ * form and use the caller's OPEN-allocated buffer for the response.
+ */
+struct caller {
+ int caller_buf_l; // length of original request
+ unsigned char * caller_buf_p; // Original request on WRITE
+ int caller_dev_dep_req_l; // len device dependent request
+ unsigned char * caller_dev_dep_req_p; // Device dependent form
+ unsigned char caller_id[8]; // caller-supplied message id
+ struct list_head caller_liste;
+ unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
+};
+
+/**
+ * Function prototypes from z90hardware.c
+ */
+enum hdstat query_online(int, int, int, int *, int *);
+enum devstat reset_device(int, int, int);
+enum devstat send_to_AP(int, int, int, unsigned char *);
+enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
+int convert_request(unsigned char *, int, short, int, int, int *,
+ unsigned char *);
+int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
+
+/**
+ * Low level function prototypes
+ */
+static int create_z90crypt(int *);
+static int refresh_z90crypt(int *);
+static int find_crypto_devices(struct status *);
+static int create_crypto_device(int);
+static int destroy_crypto_device(int);
+static void destroy_z90crypt(void);
+static int refresh_index_array(struct status *, struct device_x *);
+static int probe_device_type(struct device *);
+static int probe_PCIXCC_type(struct device *);
+
+/**
+ * proc fs definitions
+ */
+static struct proc_dir_entry *z90crypt_entry;
+
+/**
+ * data structures
+ */
+
+/**
+ * work_element.opener points back to this structure
+ */
+struct priv_data {
+ pid_t opener_pid;
+ unsigned char status; // 0: open 1: closed
+};
+
+/**
+ * A work element is allocated for each request
+ */
+struct work_element {
+ struct priv_data *priv_data;
+ pid_t pid;
+ int devindex; // index of device processing this w_e
+ // (If request did not specify device,
+ // -1 until placed onto a queue)
+ int devtype;
+ struct list_head liste; // used for requestq and pendingq
+ char buffer[128]; // local copy of user request
+ int buff_size; // size of the buffer for the request
+ char resp_buff[RESPBUFFSIZE];
+ int resp_buff_size;
+ char __user * resp_addr; // address of response in user space
+ unsigned int funccode; // function code of request
+ wait_queue_head_t waitq;
+ unsigned long requestsent; // time at which the request was sent
+ atomic_t alarmrung; // wake-up signal
+ unsigned char caller_id[8]; // pid + counter, for this w_e
+ unsigned char status[1]; // bits to mark status of the request
+ unsigned char audit[3]; // record of work element's progress
+ unsigned char * requestptr; // address of request buffer
+ int retcode; // return code of request
+};
+
+/**
+ * High level function prototypes
+ */
+static int z90crypt_open(struct inode *, struct file *);
+static int z90crypt_release(struct inode *, struct file *);
+static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t z90crypt_write(struct file *, const char __user *,
+ size_t, loff_t *);
+static int z90crypt_ioctl(struct inode *, struct file *,
+ unsigned int, unsigned long);
+
+static void z90crypt_reader_task(unsigned long);
+static void z90crypt_schedule_reader_task(unsigned long);
+static void z90crypt_config_task(unsigned long);
+static void z90crypt_cleanup_task(unsigned long);
+
+static int z90crypt_status(char *, char **, off_t, int, int *, void *);
+static int z90crypt_status_write(struct file *, const char __user *,
+ unsigned long, void *);
+
+/**
+ * Hotplug support
+ */
+
+#ifdef Z90CRYPT_USE_HOTPLUG
+#define Z90CRYPT_HOTPLUG_ADD 1
+#define Z90CRYPT_HOTPLUG_REMOVE 2
+
+static void z90crypt_hotplug_event(int, int, int);
+#endif
+
+/**
+ * Storage allocated at initialization and used throughout the life of
+ * this insmod
+ */
+#ifdef Z90CRYPT_USE_HOTPLUG
+static int z90crypt_major = Z90CRYPT_MAJOR;
+#endif
+
+static int domain = DOMAIN_INDEX;
+static struct z90crypt z90crypt;
+static int quiesce_z90crypt;
+static spinlock_t queuespinlock;
+static struct list_head request_list;
+static int requestq_count;
+static struct list_head pending_list;
+static int pendingq_count;
+
+static struct tasklet_struct reader_tasklet;
+static struct timer_list reader_timer;
+static struct timer_list config_timer;
+static struct timer_list cleanup_timer;
+static atomic_t total_open;
+static atomic_t z90crypt_step;
+
+static struct file_operations z90crypt_fops = {
+ .owner = THIS_MODULE,
+ .read = z90crypt_read,
+ .write = z90crypt_write,
+ .ioctl = z90crypt_ioctl,
+ .open = z90crypt_open,
+ .release = z90crypt_release
+};
+
+#ifndef Z90CRYPT_USE_HOTPLUG
+static struct miscdevice z90crypt_misc_device = {
+ .minor = Z90CRYPT_MINOR,
+ .name = DEV_NAME,
+ .fops = &z90crypt_fops,
+ .devfs_name = DEV_NAME
+};
+#endif
+
+/**
+ * Documentation values.
+ */
+/* The adjacent string literals previously concatenated to
+ * "...RossmanandJochen..."; a separating space was missing. */
+MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
+	      " and Jochen Roehrig");
+MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
+		   "Copyright 2001, 2004 IBM Corporation");
+MODULE_LICENSE("GPL");
+module_param(domain, int, 0);
+MODULE_PARM_DESC(domain, "domain index for device");
+
+#ifdef CONFIG_COMPAT
+/**
+ * ioctl32 conversion routines
+ */
+struct ica_rsa_modexpo_32 { // For 32-bit callers
+ compat_uptr_t inputdata;
+ unsigned int inputdatalength;
+ compat_uptr_t outputdata;
+ unsigned int outputdatalength;
+ compat_uptr_t b_key;
+ compat_uptr_t n_modulus;
+};
+
+/**
+ * 32-bit compat handler for ICARSAMODEXPO.  Copies the 32-bit
+ * ica_rsa_modexpo from user space, repacks it as a 64-bit struct in a
+ * compat user-space scratch area, forwards the ioctl, and copies the
+ * resulting outputdatalength back to the 32-bit caller.
+ */
+static int
+trans_modexpo32(unsigned int fd, unsigned int cmd, unsigned long arg,
+		struct file *file)
+{
+	struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
+	struct ica_rsa_modexpo_32 mex32k;
+	struct ica_rsa_modexpo __user *mex64;
+	int ret = 0;
+	unsigned int i;
+
+	if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
+		return -EFAULT;
+	/* Scratch area on the user stack for the 64-bit request. */
+	mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
+	if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
+		return -EFAULT;
+	if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
+		return -EFAULT;
+	/* Widen each compat_uptr_t to a 64-bit user pointer. */
+	if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
+	    __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
+	    __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
+	    __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
+	    __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
+	    __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
+		return -EFAULT;
+	ret = sys_ioctl(fd, cmd, (unsigned long)mex64);
+	/* On success, propagate the (possibly updated) output length. */
+	if (!ret)
+		if (__get_user(i, &mex64->outputdatalength) ||
+		    __put_user(i, &mex32u->outputdatalength))
+			ret = -EFAULT;
+	return ret;
+}
+
+struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
+ compat_uptr_t inputdata;
+ unsigned int inputdatalength;
+ compat_uptr_t outputdata;
+ unsigned int outputdatalength;
+ compat_uptr_t bp_key;
+ compat_uptr_t bq_key;
+ compat_uptr_t np_prime;
+ compat_uptr_t nq_prime;
+ compat_uptr_t u_mult_inv;
+};
+
+/**
+ * 32-bit compat handler for ICARSACRT.  Same scheme as
+ * trans_modexpo32, but for the five-operand CRT request layout.
+ */
+static int
+trans_modexpo_crt32(unsigned int fd, unsigned int cmd, unsigned long arg,
+		    struct file *file)
+{
+	struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
+	struct ica_rsa_modexpo_crt_32 crt32k;
+	struct ica_rsa_modexpo_crt __user *crt64;
+	int ret = 0;
+	unsigned int i;
+
+	if (!access_ok(VERIFY_WRITE, crt32u,
+		       sizeof(struct ica_rsa_modexpo_crt_32)))
+		return -EFAULT;
+	/* Scratch area on the user stack for the 64-bit request. */
+	crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
+	if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
+		return -EFAULT;
+	if (copy_from_user(&crt32k, crt32u,
+			   sizeof(struct ica_rsa_modexpo_crt_32)))
+		return -EFAULT;
+	/* Widen each compat_uptr_t to a 64-bit user pointer; on failure
+	 * ret blocks the sys_ioctl call below. */
+	if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
+	    __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
+	    __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
+	    __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
+	    __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
+	    __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
+	    __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
+	    __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
+	    __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
+		ret = -EFAULT;
+	if (!ret)
+		ret = sys_ioctl(fd, cmd, (unsigned long)crt64);
+	/* On success, propagate the (possibly updated) output length. */
+	if (!ret)
+		if (__get_user(i, &crt64->outputdatalength) ||
+		    __put_user(i, &crt32u->outputdatalength))
+			ret = -EFAULT;
+	return ret;
+}
+
+/* ioctls whose arguments need no 32->64-bit translation; they are
+ * registered below with a NULL handler so the kernel passes them
+ * through unchanged. */
+static int compatible_ioctls[] = {
+	ICAZ90STATUS, Z90QUIESCE, Z90STAT_TOTALCOUNT, Z90STAT_PCICACOUNT,
+	Z90STAT_PCICCCOUNT, Z90STAT_PCIXCCCOUNT, Z90STAT_PCIXCCMCL2COUNT,
+	Z90STAT_PCIXCCMCL3COUNT, Z90STAT_CEX2CCOUNT, Z90STAT_REQUESTQ_COUNT,
+	Z90STAT_PENDINGQ_COUNT, Z90STAT_TOTALOPEN_COUNT, Z90STAT_DOMAIN_INDEX,
+	Z90STAT_STATUS_MASK, Z90STAT_QDEPTH_MASK, Z90STAT_PERDEV_REQCNT,
+};
+
+/* Remove all ioctl32 conversions registered by z90_register_ioctl32s. */
+static void z90_unregister_ioctl32s(void)
+{
+	int ix;
+
+	unregister_ioctl32_conversion(ICARSAMODEXPO);
+	unregister_ioctl32_conversion(ICARSACRT);
+
+	for (ix = 0; ix < ARRAY_SIZE(compatible_ioctls); ix++)
+		unregister_ioctl32_conversion(compatible_ioctls[ix]);
+}
+
+/* Register one ioctl32 conversion, retrying once if a stale handler
+ * (e.g. from a previous load of this module) is still registered and
+ * the first attempt returns -EBUSY. */
+static int
+z90_reg_one_ioctl32(unsigned int cmd,
+		    int (*handler)(unsigned int, unsigned int,
+				   unsigned long, struct file *))
+{
+	int rc;
+
+	rc = register_ioctl32_conversion(cmd, handler);
+	if (rc == -EBUSY) {
+		unregister_ioctl32_conversion(cmd);
+		rc = register_ioctl32_conversion(cmd, handler);
+	}
+	return rc;
+}
+
+/* Register the translated and pass-through ioctl32 conversions.
+ * Returns 0 on success or the first failing registration's code. */
+static int z90_register_ioctl32s(void)
+{
+	int result, i;
+
+	result = z90_reg_one_ioctl32(ICARSAMODEXPO, trans_modexpo32);
+	if (result)
+		return result;
+	result = z90_reg_one_ioctl32(ICARSACRT, trans_modexpo_crt32);
+	if (result)
+		return result;
+
+	for (i = 0; i < ARRAY_SIZE(compatible_ioctls); i++) {
+		result = z90_reg_one_ioctl32(compatible_ioctls[i], 0);
+		if (result)
+			return result;
+	}
+	return 0;
+}
+#else // !CONFIG_COMPAT
+static inline void z90_unregister_ioctl32s(void)
+{
+}
+
+static inline int z90_register_ioctl32s(void)
+{
+ return 0;
+}
+#endif
+
+/**
+ * The module initialization code.
+ */
+/**
+ * The module initialization code.  Validates the domain parameter,
+ * registers the device node (misc device or chrdev depending on
+ * hotplug support), builds the z90crypt data structures, sets up the
+ * proc entry, and starts the cleanup/config/reader tasks.
+ */
+static int __init
+z90crypt_init_module(void)
+{
+	int result, nresult;
+	struct proc_dir_entry *entry;
+
+	PDEBUG("PID %d\n", PID());
+
+	if ((domain < -1) || (domain > 15)) {
+		PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
+		return -EINVAL;
+	}
+
+#ifndef Z90CRYPT_USE_HOTPLUG
+	/* Register as misc device with given minor (or get a dynamic one). */
+	result = misc_register(&z90crypt_misc_device);
+	if (result < 0) {
+		PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
+			z90crypt_misc_device.minor, result);
+		return result;
+	}
+#else
+	/* Register the major (or get a dynamic one). */
+	result = register_chrdev(z90crypt_major, REG_NAME, &z90crypt_fops);
+	if (result < 0) {
+		PRINTKW("register_chrdev (major %d) failed with %d.\n",
+			z90crypt_major, result);
+		return result;
+	}
+
+	if (z90crypt_major == 0)
+		z90crypt_major = result;
+#endif
+
+	PDEBUG("Registered " DEV_NAME " with result %d\n", result);
+
+	result = create_z90crypt(&domain);
+	if (result != 0) {
+		PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
+			domain, result);
+		result = -ENOMEM;
+		goto init_module_cleanup;
+	}
+
+	/* result is always 0 here: a non-zero result already jumped to
+	 * init_module_cleanup above, so the former
+	 * "else PRINTK(\"No devices at startup\")" branch was
+	 * unreachable and has been removed. */
+	PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
+		z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
+		__DATE__, __TIME__);
+	PRINTKN("%s\n", z90main_version);
+	PRINTKN("%s\n", z90hardware_version);
+	PDEBUG("create_z90crypt (domain index %d) successful.\n", domain);
+
+#ifdef Z90CRYPT_USE_HOTPLUG
+	/* generate hotplug event for device node generation */
+	z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
+#endif
+
+	/* Initialize globals. */
+	spin_lock_init(&queuespinlock);
+
+	INIT_LIST_HEAD(&pending_list);
+	pendingq_count = 0;
+
+	INIT_LIST_HEAD(&request_list);
+	requestq_count = 0;
+
+	quiesce_z90crypt = 0;
+
+	atomic_set(&total_open, 0);
+	atomic_set(&z90crypt_step, 0);
+
+	/* Set up the cleanup task. */
+	init_timer(&cleanup_timer);
+	cleanup_timer.function = z90crypt_cleanup_task;
+	cleanup_timer.data = 0;
+	cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
+	add_timer(&cleanup_timer);
+
+	/* Set up the proc file system */
+	entry = create_proc_entry("driver/z90crypt", 0644, 0);
+	if (entry) {
+		entry->nlink = 1;
+		entry->data = 0;
+		entry->read_proc = z90crypt_status;
+		entry->write_proc = z90crypt_status_write;
+	}
+	else
+		PRINTK("Couldn't create z90crypt proc entry\n");
+	z90crypt_entry = entry;
+
+	/* Set up the configuration task. */
+	init_timer(&config_timer);
+	config_timer.function = z90crypt_config_task;
+	config_timer.data = 0;
+	config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
+	add_timer(&config_timer);
+
+	/* Set up the reader task */
+	tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
+	init_timer(&reader_timer);
+	reader_timer.function = z90crypt_schedule_reader_task;
+	reader_timer.data = 0;
+	reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
+	add_timer(&reader_timer);
+
+	if ((result = z90_register_ioctl32s()))
+		goto init_module_cleanup;
+
+	return 0; // success
+
+init_module_cleanup:
+	z90_unregister_ioctl32s();
+
+#ifndef Z90CRYPT_USE_HOTPLUG
+	if ((nresult = misc_deregister(&z90crypt_misc_device)))
+		PRINTK("misc_deregister failed with %d.\n", nresult);
+	else
+		PDEBUG("misc_deregister successful.\n");
+#else
+	if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
+		PRINTK("unregister_chrdev failed with %d.\n", nresult);
+	else
+		PDEBUG("unregister_chrdev successful.\n");
+#endif
+
+	return result; // failure
+}
+
+/**
+ * The module termination code
+ */
+/**
+ * The module termination code.  Tears down in reverse order of
+ * initialization: ioctl32 conversions, proc entry, device node, then
+ * the tasks, and finally the z90crypt data structures.
+ */
+static void __exit
+z90crypt_cleanup_module(void)
+{
+	int nresult;
+
+	PDEBUG("PID %d\n", PID());
+
+	z90_unregister_ioctl32s();
+
+	remove_proc_entry("driver/z90crypt", 0);
+
+#ifndef Z90CRYPT_USE_HOTPLUG
+	if ((nresult = misc_deregister(&z90crypt_misc_device)))
+		PRINTK("misc_deregister failed with %d.\n", nresult);
+	else
+		PDEBUG("misc_deregister successful.\n");
+#else
+	z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
+
+	if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
+		PRINTK("unregister_chrdev failed with %d.\n", nresult);
+	else
+		PDEBUG("unregister_chrdev successful.\n");
+#endif
+
+	/* Remove the tasks */
+	// Stop the tasklet and timers before freeing the structures
+	// they operate on.
+	tasklet_kill(&reader_tasklet);
+	del_timer(&reader_timer);
+	del_timer(&config_timer);
+	del_timer(&cleanup_timer);
+
+	destroy_z90crypt();
+
+	PRINTKN("Unloaded.\n");
+}
+
+/**
+ * Functions running under a process id
+ *
+ * The I/O functions:
+ * z90crypt_open
+ * z90crypt_release
+ * z90crypt_read
+ * z90crypt_write
+ * z90crypt_ioctl
+ * z90crypt_status
+ * z90crypt_status_write
+ * disable_card
+ * enable_card
+ * scan_char
+ * scan_string
+ *
+ * Helper functions:
+ * z90crypt_rsa
+ * z90crypt_prepare
+ * z90crypt_send
+ * z90crypt_process_results
+ *
+ */
+/**
+ * Allocate and initialize the per-open private data for this file
+ * handle.  Fails with -EQUIESCE while the driver is quiescing and
+ * with -ENOMEM if allocation fails.
+ */
+static int
+z90crypt_open(struct inode *inode, struct file *filp)
+{
+	struct priv_data *pdata_p;
+
+	if (quiesce_z90crypt)
+		return -EQUIESCE;
+
+	pdata_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
+	if (pdata_p == 0) {
+		PRINTK("Memory allocate failed\n");
+		return -ENOMEM;
+	}
+	memset((void *)pdata_p, 0, sizeof(struct priv_data));
+
+	pdata_p->opener_pid = PID();
+	pdata_p->status = STAT_OPEN;
+	filp->private_data = pdata_p;
+
+	atomic_inc(&total_open);
+	return 0;
+}
+
+/**
+ * Release the per-open private data allocated in z90crypt_open.
+ */
+static int
+z90crypt_release(struct inode *inode, struct file *filp)
+{
+	struct priv_data *private_data_p = filp->private_data;
+
+	PDEBUG("PID %d (filp %p)\n", PID(), filp);
+
+	private_data_p->status = STAT_CLOSED;
+	// NOTE(review): the memset below immediately zeroes the whole
+	// struct, overwriting the STAT_CLOSED just stored (STAT_OPEN is
+	// 0).  The store is effectively dead unless a concurrent reader
+	// of ->status is expected — confirm intent.
+	memset(private_data_p, 0, sizeof(struct priv_data));
+	kfree(private_data_p);
+	atomic_dec(&total_open);
+
+	return 0;
+}
+
+/*
+ * there are two read functions, of which compile options will choose one
+ * without USE_GET_RANDOM_BYTES
+ * => read() always returns -EPERM;
+ * otherwise
+ * => read() uses get_random_bytes() kernel function
+ */
+#ifndef USE_GET_RANDOM_BYTES
+/**
+ * z90crypt_read will not be supported beyond z90crypt 1.3.1
+ */
+static ssize_t
+z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+ PDEBUG("filp %p (PID %d)\n", filp, PID());
+ return -EPERM;
+}
+#else // we want to use get_random_bytes
+/**
+ * read() just returns a string of random bytes. Since we have no way
+ * to generate these cryptographically, we just execute get_random_bytes
+ * for the length specified.
+ */
+#include <linux/random.h>
+/**
+ * read() fills the user buffer with up to RESPBUFFSIZE random bytes
+ * obtained from get_random_bytes().  Returns the number of bytes
+ * delivered, or a negative error code.
+ */
+static ssize_t
+z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+	unsigned char *temp_buff;
+
+	PDEBUG("filp %p (PID %d)\n", filp, PID());
+
+	if (quiesce_z90crypt)
+		return -EQUIESCE;
+	/* count is a size_t and can never be negative; the former
+	 * "count < 0" test was always false and has been removed.
+	 * (Its PRINTK also used %ld for a size_t.) */
+	if (count > RESPBUFFSIZE) {
+		PDEBUG("count[%zu] > RESPBUFFSIZE", count);
+		return -EINVAL;
+	}
+	if (count == 0)
+		return 0;
+	temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
+	if (!temp_buff) {
+		PRINTK("Memory allocate failed\n");
+		return -ENOMEM;
+	}
+	get_random_bytes(temp_buff, count);
+
+	if (copy_to_user(buf, temp_buff, count) != 0) {
+		kfree(temp_buff);
+		return -EFAULT;
+	}
+	kfree(temp_buff);
+	return count;
+}
+#endif
+
+/**
+ * Write is not allowed
+ */
+static ssize_t
+z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
+{
+ PDEBUG("filp %p (PID %d)\n", filp, PID());
+ return -EPERM;
+}
+
+/**
+ * New status functions
+ *
+ * Lockless snapshot accessors over the global z90crypt state; callers
+ * only get a point-in-time view of the counts.
+ */
+static inline int
+get_status_totalcount(void)
+{
+	return z90crypt.hdware_info->hdware_mask.st_count;
+}
+
+static inline int
+get_status_PCICAcount(void)
+{
+	return z90crypt.hdware_info->type_mask[PCICA].st_count;
+}
+
+static inline int
+get_status_PCICCcount(void)
+{
+	return z90crypt.hdware_info->type_mask[PCICC].st_count;
+}
+
+/* Aggregate used by the deprecated ioctl: MCL2 + MCL3 PCIXCC cards. */
+static inline int
+get_status_PCIXCCcount(void)
+{
+	return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
+	       z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
+}
+
+static inline int
+get_status_PCIXCCMCL2count(void)
+{
+	return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
+}
+
+static inline int
+get_status_PCIXCCMCL3count(void)
+{
+	return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
+}
+
+static inline int
+get_status_CEX2Ccount(void)
+{
+	return z90crypt.hdware_info->type_mask[CEX2C].st_count;
+}
+
+static inline int
+get_status_requestq_count(void)
+{
+	return requestq_count;
+}
+
+static inline int
+get_status_pendingq_count(void)
+{
+	return pendingq_count;
+}
+
+static inline int
+get_status_totalopen_count(void)
+{
+	return atomic_read(&total_open);
+}
+
+/* Crypto domain index (cdx) this driver instance is bound to. */
+static inline int
+get_status_domain_index(void)
+{
+	return z90crypt.cdx;
+}
+
+/*
+ * Fill 'status' with each AP slot's device type, then overwrite the
+ * slots the user disabled with 0x0d so they show as disabled.
+ * Returns 'status' for call-chaining convenience.
+ */
+static inline unsigned char *
+get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
+{
+	int i, ix;
+
+	memcpy(status, z90crypt.hdware_info->device_type_array,
+	       Z90CRYPT_NUM_APS);
+
+	/* SHRT2LONG maps the i-th installed device to its AP slot index. */
+	for (i = 0; i < get_status_totalcount(); i++) {
+		ix = SHRT2LONG(i);
+		if (LONG2DEVPTR(ix)->user_disabled)
+			status[ix] = 0x0d;
+	}
+
+	return status;
+}
+
+/*
+ * Report the outstanding caller count for every installed device;
+ * slots without a device stay zero.  Returns 'qdepth'.
+ */
+static inline unsigned char *
+get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
+{
+	int dev, slot;
+
+	memset(qdepth, 0, Z90CRYPT_NUM_APS);
+
+	for (dev = 0; dev < get_status_totalcount(); dev++) {
+		slot = SHRT2LONG(dev);
+		qdepth[slot] = LONG2DEVPTR(slot)->dev_caller_count;
+	}
+
+	return qdepth;
+}
+
+/*
+ * Report the cumulative completed-request count for every installed
+ * device; slots without a device stay zero.  Returns 'reqcnt'.
+ */
+static inline unsigned int *
+get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
+{
+	int dev, slot;
+
+	memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
+
+	for (dev = 0; dev < get_status_totalcount(); dev++) {
+		slot = SHRT2LONG(dev);
+		reqcnt[slot] = LONG2DEVPTR(slot)->dev_total_req_cnt;
+	}
+
+	return reqcnt;
+}
+
+/**
+ * Initialize a freshly allocated work element: the request area lives
+ * in the same page, directly behind the struct, and the caller id is
+ * built from the PID plus a global step counter (presumably 4 bytes
+ * each on this platform -- TODO confirm sizeof(pid_t)).
+ */
+static inline void
+init_work_element(struct work_element *we_p,
+		  struct priv_data *priv_data, pid_t pid)
+{
+	int step;
+
+	we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
+	/* Come up with a unique id for this caller. */
+	step = atomic_inc_return(&z90crypt_step);
+	memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
+	memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
+	we_p->pid = pid;
+	we_p->priv_data = priv_data;
+	we_p->status[0] = STAT_DEFAULT;
+	we_p->audit[0] = 0x00;
+	we_p->audit[1] = 0x00;
+	we_p->audit[2] = 0x00;
+	we_p->resp_buff_size = 0;
+	we_p->retcode = 0;
+	we_p->devindex = -1;	/* no device selected yet */
+	we_p->devtype = -1;	/* no device type selected yet */
+	atomic_set(&we_p->alarmrung, 0);
+	init_waitqueue_head(&we_p->waitq);
+	INIT_LIST_HEAD(&(we_p->liste));
+}
+
+/**
+ * Allocate one zeroed page as a work element, initialize it, and hand
+ * it back through *we_pp.  Returns 0 on success or -ENOMEM.
+ */
+static inline int
+allocate_work_element(struct work_element **we_pp,
+		      struct priv_data *priv_data_p, pid_t pid)
+{
+	struct work_element *wrk;
+
+	wrk = (struct work_element *) get_zeroed_page(GFP_KERNEL);
+	if (wrk == NULL)
+		return -ENOMEM;
+	init_work_element(wrk, priv_data_p, pid);
+	*we_pp = wrk;
+	return 0;
+}
+
+/**
+ * Mark a device driver-disabled (hardware failure path) and bump the
+ * per-type and global disabled counters.  Idempotent: a device already
+ * disabled (or a NULL pointer) is left alone.
+ */
+static inline void
+remove_device(struct device *device_p)
+{
+	if (!device_p || (device_p->disabled != 0))
+		return;
+	device_p->disabled = 1;
+	z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
+	z90crypt.hdware_info->hdware_mask.disabled_count++;
+}
+
+/**
+ * Bitlength limits for each card
+ *
+ * There are new MCLs which allow more bitlengths. See the table for details.
+ * The MCL must be applied and the newer bitlengths enabled for these to work.
+ *
+ * Card Type Old limit New limit
+ * PCICC 512-1024 512-2048
+ * PCIXCC_MCL2 512-2048 no change (applying this MCL == card is MCL3+)
+ * PCIXCC_MCL3 512-2048 128-2048
+ * CEX2C 512-2048 128-2048
+ *
+ * ext_bitlens (extended bitlengths) is a global, since you should not apply an
+ * MCL to just one card in a machine. We assume, at first, that all cards have
+ * these capabilities.
+ */
+/* Modulus size limits in bytes (see the bitlength table above). */
+int ext_bitlens = 1; // This is global
+#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
+#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
+#define PCICC_MIN_MOD_SIZE 64 // 512 bits
+#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
+#define MAX_MOD_SIZE 256 // 2048 bits
+
+/**
+ * Pick a device type able to handle a request whose modulus is
+ * 'bytelength' bytes.  *dev_type_p may name a specific type or ANYDEV;
+ * on success it is set to the chosen type and 0 is returned, -1 when
+ * nothing suitable is available.  For ANYDEV, the fast types (PCICA,
+ * PCIXCC_MCL3, CEX2C) are round-robined via a static counter -- the
+ * counter is unlocked, so only load balancing (not correctness) is
+ * affected by races.
+ */
+static inline int
+select_device_type(int *dev_type_p, int bytelength)
+{
+	static int count = 0;
+	int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
+	struct status *stat;
+	if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
+	    (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
+	    (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
+		return -1;
+	if (*dev_type_p != ANYDEV) {
+		/* Specific type requested: it only needs one usable card. */
+		stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
+		if (stat->st_count >
+		    (stat->disabled_count + stat->user_disabled_count))
+			return 0;
+		return -1;
+	}
+
+	/* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
+	stat = &z90crypt.hdware_info->type_mask[PCICA];
+	PCICA_avail = stat->st_count -
+			(stat->disabled_count + stat->user_disabled_count);
+	stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
+	PCIXCC_MCL3_avail = stat->st_count -
+			(stat->disabled_count + stat->user_disabled_count);
+	stat = &z90crypt.hdware_info->type_mask[CEX2C];
+	CEX2C_avail = stat->st_count -
+			(stat->disabled_count + stat->user_disabled_count);
+	if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
+		/**
+		 * bitlength is a factor, PCICA is the most capable, even with
+		 * the new MCL.
+		 */
+		if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
+		    (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
+			if (!PCICA_avail)
+				return -1;
+			else {
+				*dev_type_p = PCICA;
+				return 0;
+			}
+		}
+
+		/* Round-robin across the three fast types by availability. */
+		index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
+					CEX2C_avail);
+		if (index_to_use < PCICA_avail)
+			*dev_type_p = PCICA;
+		else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
+			*dev_type_p = PCIXCC_MCL3;
+		else
+			*dev_type_p = CEX2C;
+		count++;
+		return 0;
+	}
+
+	/* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
+	if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
+		return -1;
+	stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
+	if (stat->st_count >
+	    (stat->disabled_count + stat->user_disabled_count)) {
+		*dev_type_p = PCIXCC_MCL2;
+		return 0;
+	}
+
+	/**
+	 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
+	 * (if we don't have the MCL applied and the newer bitlengths enabled)
+	 * cannot go to a PCICC
+	 */
+	if ((bytelength < PCICC_MIN_MOD_SIZE) ||
+	    (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
+		return -1;
+	}
+	stat = &z90crypt.hdware_info->type_mask[PCICC];
+	if (stat->st_count >
+	    (stat->disabled_count + stat->user_disabled_count)) {
+		*dev_type_p = PCICC;
+		return 0;
+	}
+
+	return -1;
+}
+
+/**
+ * Try the selected device number first; if it is usable, return it.
+ * Otherwise fall back to selecting by type (which can be ANYDEV) and
+ * pick the least-loaded usable device of that type.  Returns the
+ * device index (also stored in *device_nr_p) or -1.
+ */
+static inline int
+select_device(int *dev_type_p, int *device_nr_p, int bytelength)
+{
+	int i, indx, devTp, low_count, low_indx;
+	struct device_x *index_p;
+	struct device *dev_ptr;
+
+	PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
+	if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
+		PDEBUG("trying index = %d\n", *device_nr_p);
+		dev_ptr = z90crypt.device_p[*device_nr_p];
+
+		if (dev_ptr &&
+		    (dev_ptr->dev_stat != DEV_GONE) &&
+		    (dev_ptr->disabled == 0) &&
+		    (dev_ptr->user_disabled == 0)) {
+			PDEBUG("selected by number, index = %d\n",
+			       *device_nr_p);
+			*dev_type_p = dev_ptr->dev_type;
+			return *device_nr_p;
+		}
+	}
+	*device_nr_p = -1;
+	PDEBUG("trying type = %d\n", *dev_type_p);
+	devTp = *dev_type_p;
+	if (select_device_type(&devTp, bytelength) == -1) {
+		PDEBUG("failed to select by type\n");
+		return -1;
+	}
+	PDEBUG("selected type = %d\n", devTp);
+	index_p = &z90crypt.hdware_info->type_x_addr[devTp];
+	low_count = 0x0000FFFF;
+	low_indx = -1;
+	/* Scan all devices of the chosen type for the shortest queue. */
+	for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
+		indx = index_p->device_index[i];
+		dev_ptr = z90crypt.device_p[indx];
+		if (dev_ptr &&
+		    (dev_ptr->dev_stat != DEV_GONE) &&
+		    (dev_ptr->disabled == 0) &&
+		    (dev_ptr->user_disabled == 0) &&
+		    (devTp == dev_ptr->dev_type) &&
+		    (low_count > dev_ptr->dev_caller_count)) {
+			low_count = dev_ptr->dev_caller_count;
+			low_indx = indx;
+		}
+	}
+	*device_nr_p = low_indx;
+	return low_indx;
+}
+
+/**
+ * Select a device for the work element and hand its device-dependent
+ * request to the AP via send_to_AP().  On success the caller is queued
+ * on the device's caller list.  Returns 0 or one of the SEN_* codes;
+ * a send exception marks the whole driver as terminating.
+ */
+static inline int
+send_to_crypto_device(struct work_element *we_p)
+{
+	struct caller *caller_p;
+	struct device *device_p;
+	int dev_nr;
+	int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
+
+	if (!we_p->requestptr)
+		return SEN_FATAL_ERROR;
+	caller_p = (struct caller *)we_p->requestptr;
+	dev_nr = we_p->devindex;
+	if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
+		/* Retry if hardware exists at all, else give up. */
+		if (z90crypt.hdware_info->hdware_mask.st_count != 0)
+			return SEN_RETRY;
+		else
+			return SEN_NOT_AVAIL;
+	}
+	we_p->devindex = dev_nr;
+	device_p = z90crypt.device_p[dev_nr];
+	if (!device_p)
+		return SEN_NOT_AVAIL;
+	if (device_p->dev_type != we_p->devtype)
+		return SEN_RETRY;
+	if (device_p->dev_caller_count >= device_p->dev_q_depth)
+		return SEN_QUEUE_FULL;
+	PDEBUG("device number prior to send: %d\n", dev_nr);
+	switch (send_to_AP(dev_nr, z90crypt.cdx,
+			   caller_p->caller_dev_dep_req_l,
+			   caller_p->caller_dev_dep_req_p)) {
+	case DEV_SEN_EXCEPTION:
+		PRINTKC("Exception during send to device %d\n", dev_nr);
+		z90crypt.terminating = 1;
+		return SEN_FATAL_ERROR;
+	case DEV_GONE:
+		PRINTK("Device %d not available\n", dev_nr);
+		remove_device(device_p);
+		return SEN_NOT_AVAIL;
+	case DEV_EMPTY:
+		return SEN_NOT_AVAIL;
+	case DEV_NO_WORK:
+		return SEN_FATAL_ERROR;
+	case DEV_BAD_MESSAGE:
+		return SEN_USER_ERROR;
+	case DEV_QUEUE_FULL:
+		return SEN_QUEUE_FULL;
+	default:
+	case DEV_ONLINE:
+		break;
+	}
+	list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
+	device_p->dev_caller_count++;
+	return 0;
+}
+
+/**
+ * Send puts the user's work on one of two queues:
+ *   the pending queue if the send was successful
+ *   the request queue if the send failed because device full or busy
+ * Takes queuespinlock itself; schedules the reader tasklet when work
+ * was accepted.  SEN_RETRY is surfaced as -ERESTARTSYS so the caller
+ * can retry on another device.
+ */
+static inline int
+z90crypt_send(struct work_element *we_p, const char *buf)
+{
+	int rv;
+
+	PDEBUG("PID %d\n", PID());
+
+	if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
+		PDEBUG("PID %d tried to send more work but has outstanding "
+		       "work.\n", PID());
+		return -EWORKPEND;
+	}
+	we_p->devindex = -1; // Reset device number
+	spin_lock_irq(&queuespinlock);
+	rv = send_to_crypto_device(we_p);
+	switch (rv) {
+	case 0:
+		we_p->requestsent = jiffies;
+		we_p->audit[0] |= FP_SENT;
+		list_add_tail(&we_p->liste, &pending_list);
+		++pendingq_count;
+		we_p->audit[0] |= FP_PENDING;
+		break;
+	case SEN_BUSY:
+	case SEN_QUEUE_FULL:
+		rv = 0;
+		we_p->devindex = -1; // any device will do
+		we_p->requestsent = jiffies;
+		list_add_tail(&we_p->liste, &request_list);
+		++requestq_count;
+		we_p->audit[0] |= FP_REQUEST;
+		break;
+	case SEN_RETRY:
+		rv = -ERESTARTSYS;
+		break;
+	case SEN_NOT_AVAIL:
+		PRINTK("*** No devices available.\n");
+		rv = we_p->retcode = -ENODEV;
+		we_p->status[0] |= STAT_FAILED;
+		break;
+	case REC_OPERAND_INV:
+	case REC_OPERAND_SIZE:
+	case REC_EVEN_MOD:
+	case REC_INVALID_PAD:
+		rv = we_p->retcode = -EINVAL;
+		we_p->status[0] |= STAT_FAILED;
+		break;
+	default:
+		we_p->retcode = rv;
+		we_p->status[0] |= STAT_FAILED;
+		break;
+	}
+	if (rv != -ERESTARTSYS)
+		SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
+	spin_unlock_irq(&queuespinlock);
+	if (rv == 0)
+		tasklet_schedule(&reader_tasklet);
+	return rv;
+}
+
+/**
+ * process_results copies the user's work from kernel space:
+ * the main buffer back to 'buf' and, when a response buffer exists,
+ * that buffer to the user's resp_addr.  we_p->retcode is folded into
+ * the return value, and the element is marked STAT_NOWORK at the end.
+ */
+static inline int
+z90crypt_process_results(struct work_element *we_p, char __user *buf)
+{
+	int rv;
+
+	PDEBUG("we_p %p (PID %d)\n", we_p, PID());
+
+	LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
+	SET_RDWRMASK(we_p->status[0], STAT_READPEND);
+
+	rv = 0;
+	if (!we_p->buffer) {
+		PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
+		       we_p, PID());
+		rv = -ENOBUFF;
+	}
+
+	/* copy_to_user returns uncopied byte count; any nonzero -> -EFAULT */
+	if (!rv)
+		if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
+			PDEBUG("copy_to_user failed: rv = %d\n", rv);
+			rv = -EFAULT;
+		}
+
+	if (!rv)
+		rv = we_p->retcode;
+	if (!rv)
+		if (we_p->resp_buff_size
+		    && copy_to_user(we_p->resp_addr, we_p->resp_buff,
+				    we_p->resp_buff_size))
+			rv = -EFAULT;
+
+	SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
+	return rv;
+}
+
+/* An all-zero PSMID marks an uninitialized/invalid caller id. */
+static unsigned char NULL_psmid[8] =
+{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+/**
+ * Used in device configuration functions
+ */
+#define MAX_RESET 90
+
+/**
+ * PKCS#1 block-type-1 check (00 01 FF..FF 00 ...), used only for PCICC
+ * support.  Returns 1 when the buffer opens with 00 01, at least eight
+ * bytes of FF padding, and the 00 separator before the end; else 0.
+ */
+static inline int
+is_PKCS11_padded(unsigned char *buffer, int length)
+{
+	int idx;
+
+	if (buffer[0] != 0x00 || buffer[1] != 0x01)
+		return 0;
+	idx = 2;
+	while (idx < length && buffer[idx] == 0xFF)
+		idx++;
+	if (idx < 10 || idx == length)
+		return 0;
+	return buffer[idx] == 0x00;
+}
+
+/**
+ * PKCS#1 block-type-2 check (00 02 nonzero.. 00 ...), used only for
+ * PCICC support.  Returns 1 when the buffer opens with 00 02, at least
+ * eight nonzero padding bytes, and a 00 separator before the end.
+ */
+static inline int
+is_PKCS12_padded(unsigned char *buffer, int length)
+{
+	int idx;
+
+	if (buffer[0] != 0x00 || buffer[1] != 0x02)
+		return 0;
+	idx = 2;
+	while (idx < length && buffer[idx] != 0x00)
+		idx++;
+	if (idx < 10 || idx == length)
+		return 0;
+	return buffer[idx] == 0x00;
+}
+
+/**
+ * builds struct caller and converts message from generic format to
+ * device-dependent format
+ * func is ICARSAMODEXPO or ICARSACRT
+ * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
+ * Returns 0 or a sender error code from convert_request().
+ */
+static inline int
+build_caller(struct work_element *we_p, short function)
+{
+	int rv;
+	struct caller *caller_p = (struct caller *)we_p->requestptr;
+
+	if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
+	    (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
+	    (we_p->devtype != CEX2C))
+		return SEN_NOT_AVAIL;
+
+	memcpy(caller_p->caller_id, we_p->caller_id,
+	       sizeof(caller_p->caller_id));
+	caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
+	caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
+	caller_p->caller_buf_p = we_p->buffer;
+	INIT_LIST_HEAD(&(caller_p->caller_liste));
+
+	rv = convert_request(we_p->buffer, we_p->funccode, function,
+			     z90crypt.cdx, we_p->devtype,
+			     &caller_p->caller_dev_dep_req_l,
+			     caller_p->caller_dev_dep_req_p);
+	if (rv) {
+		if (rv == SEN_NOT_AVAIL)
+			PDEBUG("request can't be processed on hdwr avail\n");
+		else
+			PRINTK("Error from convert_request: %d\n", rv);
+	}
+	else
+		/* Stamp the caller id into the device-dependent request. */
+		memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
+	return rv;
+}
+
+/**
+ * Detach a caller from its device's caller list (if it was queued) and
+ * clear its caller id.  Safe to call on a partially built caller; the
+ * next/prev check guards against an uninitialized list head.
+ */
+static inline void
+unbuild_caller(struct device *device_p, struct caller *caller_p)
+{
+	if (!caller_p)
+		return;
+	if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
+		if (!list_empty(&caller_p->caller_liste)) {
+			list_del_init(&caller_p->caller_liste);
+			device_p->dev_caller_count--;
+		}
+	memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
+}
+
+/**
+ * Validate the user's request (lengths, pointers, function code),
+ * choose a device type, copy the input data into the work element's
+ * page, and build the device-dependent request via build_caller().
+ * Returns 0 or a sender/receiver error code.
+ */
+static inline int
+get_crypto_request_buffer(struct work_element *we_p)
+{
+	struct ica_rsa_modexpo *mex_p;
+	struct ica_rsa_modexpo_crt *crt_p;
+	unsigned char *temp_buffer;
+	short function;
+	int rv;
+
+	mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
+	crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
+
+	PDEBUG("device type input = %d\n", we_p->devtype);
+
+	if (z90crypt.terminating)
+		return REC_NO_RESPONSE;
+	if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
+		PRINTK("psmid zeroes\n");
+		return SEN_FATAL_ERROR;
+	}
+	if (!we_p->buffer) {
+		PRINTK("buffer pointer NULL\n");
+		return SEN_USER_ERROR;
+	}
+	if (!we_p->requestptr) {
+		PRINTK("caller pointer NULL\n");
+		return SEN_USER_ERROR;
+	}
+
+	if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
+	    (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
+	    (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
+		PRINTK("invalid device type\n");
+		return SEN_USER_ERROR;
+	}
+
+	if ((mex_p->inputdatalength < 1) ||
+	    (mex_p->inputdatalength > MAX_MOD_SIZE)) {
+		PRINTK("inputdatalength[%d] is not valid\n",
+		       mex_p->inputdatalength);
+		return SEN_USER_ERROR;
+	}
+
+	if (mex_p->outputdatalength < mex_p->inputdatalength) {
+		PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
+		       mex_p->outputdatalength, mex_p->inputdatalength);
+		return SEN_USER_ERROR;
+	}
+
+	if (!mex_p->inputdata || !mex_p->outputdata) {
+		/* arguments reordered to match the format string */
+		PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
+		       mex_p->inputdata, mex_p->outputdata);
+		return SEN_USER_ERROR;
+	}
+
+	/**
+	 * As long as outputdatalength is big enough, we can set the
+	 * outputdatalength equal to the inputdatalength, since that is the
+	 * number of bytes we will copy in any case
+	 */
+	mex_p->outputdatalength = mex_p->inputdatalength;
+
+	rv = 0;
+	switch (we_p->funccode) {
+	case ICARSAMODEXPO:
+		if (!mex_p->b_key || !mex_p->n_modulus)
+			rv = SEN_USER_ERROR;
+		break;
+	case ICARSACRT:
+		if (!IS_EVEN(crt_p->inputdatalength)) {
+			PRINTK("inputdatalength[%d] is odd, CRT form\n",
+			       crt_p->inputdatalength);
+			rv = SEN_USER_ERROR;
+			break;
+		}
+		if (!crt_p->bp_key ||
+		    !crt_p->bq_key ||
+		    !crt_p->np_prime ||
+		    !crt_p->nq_prime ||
+		    !crt_p->u_mult_inv) {
+			PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
+			       crt_p->bp_key, crt_p->bq_key,
+			       crt_p->np_prime, crt_p->nq_prime,
+			       crt_p->u_mult_inv);
+			rv = SEN_USER_ERROR;
+		}
+		break;
+	default:
+		PRINTK("bad func = %d\n", we_p->funccode);
+		rv = SEN_USER_ERROR;
+		break;
+	}
+	if (rv != 0)
+		return rv;
+
+	if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
+		return SEN_NOT_AVAIL;
+
+	temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
+		      sizeof(struct caller);
+	if (copy_from_user(temp_buffer, mex_p->inputdata,
+			   mex_p->inputdatalength) != 0)
+		return SEN_RELEASED;
+
+	function = PCI_FUNC_KEY_ENCRYPT;
+	switch (we_p->devtype) {
+	/* PCICA does everything with a simple RSA mod-expo operation */
+	case PCICA:
+		function = PCI_FUNC_KEY_ENCRYPT;
+		break;
+	/**
+	 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
+	 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
+	 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
+	 * mod-expo operation
+	 */
+	case PCIXCC_MCL2:
+		if (we_p->funccode == ICARSAMODEXPO)
+			function = PCI_FUNC_KEY_ENCRYPT;
+		else
+			function = PCI_FUNC_KEY_DECRYPT;
+		break;
+	case PCIXCC_MCL3:
+	case CEX2C:
+		if (we_p->funccode == ICARSAMODEXPO)
+			function = PCI_FUNC_KEY_ENCRYPT;
+		else
+			function = PCI_FUNC_KEY_DECRYPT;
+		break;
+	/**
+	 * PCICC does everything as a PKCS-1.2 format request
+	 */
+	case PCICC:
+		/* PCICC cannot handle input that is PKCS#1.1 padded */
+		if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
+			return SEN_NOT_AVAIL;
+		}
+		if (we_p->funccode == ICARSAMODEXPO) {
+			if (is_PKCS12_padded(temp_buffer,
+					     mex_p->inputdatalength))
+				function = PCI_FUNC_KEY_ENCRYPT;
+			else
+				function = PCI_FUNC_KEY_DECRYPT;
+		} else
+			/* all CRT forms are decrypts */
+			function = PCI_FUNC_KEY_DECRYPT;
+		break;
+	}
+	PDEBUG("function: %04x\n", function);
+	rv = build_caller(we_p, function);
+	PDEBUG("rv from build_caller = %d\n", rv);
+	return rv;
+}
+
+/**
+ * Copy the user's ica_rsa_modexpo (or _crt) structure into the work
+ * element and build the device-dependent request.  Maps the internal
+ * sender/receiver codes to errnos for the caller.
+ */
+static inline int
+z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
+		 const char __user *buffer)
+{
+	int rv;
+
+	we_p->devindex = -1;
+	if (funccode == ICARSAMODEXPO)
+		we_p->buff_size = sizeof(struct ica_rsa_modexpo);
+	else
+		we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
+
+	if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
+		return -EFAULT;
+
+	we_p->audit[0] |= FP_COPYFROM;
+	SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
+	we_p->funccode = funccode;
+	we_p->devtype = -1;
+	we_p->audit[0] |= FP_BUFFREQ;
+	rv = get_crypto_request_buffer(we_p);
+	switch (rv) {
+	case 0:
+		we_p->audit[0] |= FP_BUFFGOT;
+		break;
+	case SEN_USER_ERROR:
+		rv = -EINVAL;
+		break;
+	case SEN_QUEUE_FULL:
+		rv = 0;	/* queue-full is retried later, not an error here */
+		break;
+	case SEN_RELEASED:
+		rv = -EFAULT;
+		break;
+	case REC_NO_RESPONSE:
+		rv = -ENODEV;
+		break;
+	case SEN_NOT_AVAIL:
+	case EGETBUFF:
+		rv = -EGETBUFF;
+		break;
+	default:
+		PRINTK("rv = %d\n", rv);
+		rv = -EGETBUFF;
+		break;
+	}
+	if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
+		SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
+	return rv;
+}
+
+/**
+ * Remove a work element from whichever queue it sits on (request or
+ * pending), adjusting the matching count, under queuespinlock.  A
+ * no-op when the element is on neither list.
+ */
+static inline void
+purge_work_element(struct work_element *we_p)
+{
+	struct list_head *lptr;
+
+	spin_lock_irq(&queuespinlock);
+	list_for_each(lptr, &request_list) {
+		if (lptr == &we_p->liste) {
+			list_del_init(lptr);
+			requestq_count--;
+			break;
+		}
+	}
+	list_for_each(lptr, &pending_list) {
+		if (lptr == &we_p->liste) {
+			list_del_init(lptr);
+			pendingq_count--;
+			break;
+		}
+	}
+	spin_unlock_irq(&queuespinlock);
+}
+
+/**
+ * Build the request and send it; then sleep until the reader tasklet
+ * rings the work element's alarm, and copy results back to user space.
+ * On failure, coprocessor -EINVAL becomes -EGETBUFF (caller falls back
+ * to software) and timeouts become retry (-ERESTARTSYS) or -ENODEV.
+ */
+static inline int
+z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
+	     unsigned int cmd, unsigned long arg)
+{
+	struct work_element *we_p;
+	int rv;
+
+	if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
+		PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
+		return rv;
+	}
+	if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
+		PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
+	if (!rv)
+		if ((rv = z90crypt_send(we_p, (const char *)arg)))
+			PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
+	if (!rv) {
+		we_p->audit[0] |= FP_ASLEEP;
+		wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
+		we_p->audit[0] |= FP_AWAKE;
+		rv = we_p->retcode;
+	}
+	if (!rv)
+		rv = z90crypt_process_results(we_p, (char __user *)arg);
+
+	if ((we_p->status[0] & STAT_FAILED)) {
+		switch (rv) {
+		/**
+		 * EINVAL *after* receive is almost always a padding error or
+		 * length error issued by a coprocessor (not an accelerator).
+		 * We convert this return value to -EGETBUFF which should
+		 * trigger a fallback to software.
+		 */
+		case -EINVAL:
+			if (we_p->devtype != PCICA)
+				rv = -EGETBUFF;
+			break;
+		case -ETIMEOUT:
+			if (z90crypt.mask.st_count > 0)
+				rv = -ERESTARTSYS; // retry with another
+			else
+				rv = -ENODEV; // no cards left
+			/* fall through to clean up request queue */
+		case -ERESTARTSYS:
+		case -ERELEASED:
+			switch (CHK_RDWRMASK(we_p->status[0])) {
+			case STAT_WRITTEN:
+				purge_work_element(we_p);
+				break;
+			case STAT_READPEND:
+			case STAT_NOWORK:
+			default:
+				break;
+			}
+			break;
+		default:
+			we_p->status[0] ^= STAT_FAILED;
+			break;
+		}
+	}
+	free_page((long)we_p);
+	return rv;
+}
+
+/**
+ * This function is a little long, but it's really just one large switch
+ * statement: RSA work is dispatched to z90crypt_rsa() with retries
+ * across available cards, and all other commands report driver status.
+ */
+static int
+z90crypt_ioctl(struct inode *inode, struct file *filp,
+	       unsigned int cmd, unsigned long arg)
+{
+	struct priv_data *private_data_p = filp->private_data;
+	unsigned char *status;
+	unsigned char *qdepth;
+	unsigned int *reqcnt;
+	struct ica_z90_status *pstat;
+	int ret, i, loopLim, tempstat;
+	static int deprecated_msg_count1 = 0;
+	static int deprecated_msg_count2 = 0;
+
+	PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
+	PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
+	       cmd,
+	       !_IOC_DIR(cmd) ? "NO"
+	       : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
+	       : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
+	       : "WR")),
+	       _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
+		PRINTK("cmd 0x%08X contains bad magic\n", cmd);
+		return -ENOTTY;
+	}
+
+	ret = 0;
+	switch (cmd) {
+	case ICARSAMODEXPO:
+	case ICARSACRT:
+		if (quiesce_z90crypt) {
+			ret = -EQUIESCE;
+			break;
+		}
+		ret = -ENODEV; // Default if no devices
+		/* retry once per usable device before giving up */
+		loopLim = z90crypt.hdware_info->hdware_mask.st_count -
+			(z90crypt.hdware_info->hdware_mask.disabled_count +
+			 z90crypt.hdware_info->hdware_mask.user_disabled_count);
+		for (i = 0; i < loopLim; i++) {
+			ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
+			if (ret != -ERESTARTSYS)
+				break;
+		}
+		if (ret == -ERESTARTSYS)
+			ret = -ENODEV;
+		break;
+
+	case Z90STAT_TOTALCOUNT:
+		tempstat = get_status_totalcount();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_PCICACOUNT:
+		tempstat = get_status_PCICAcount();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_PCICCCOUNT:
+		tempstat = get_status_PCICCcount();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_PCIXCCMCL2COUNT:
+		tempstat = get_status_PCIXCCMCL2count();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_PCIXCCMCL3COUNT:
+		tempstat = get_status_PCIXCCMCL3count();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_CEX2CCOUNT:
+		tempstat = get_status_CEX2Ccount();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_REQUESTQ_COUNT:
+		tempstat = get_status_requestq_count();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_PENDINGQ_COUNT:
+		tempstat = get_status_pendingq_count();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_TOTALOPEN_COUNT:
+		tempstat = get_status_totalopen_count();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_DOMAIN_INDEX:
+		tempstat = get_status_domain_index();
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90STAT_STATUS_MASK:
+		status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
+		if (!status) {
+			PRINTK("kmalloc for status failed!\n");
+			ret = -ENOMEM;
+			break;
+		}
+		get_status_status_mask(status);
+		if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
+		    != 0)
+			ret = -EFAULT;
+		kfree(status);
+		break;
+
+	case Z90STAT_QDEPTH_MASK:
+		qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
+		if (!qdepth) {
+			PRINTK("kmalloc for qdepth failed!\n");
+			ret = -ENOMEM;
+			break;
+		}
+		get_status_qdepth_mask(qdepth);
+		if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
+			ret = -EFAULT;
+		kfree(qdepth);
+		break;
+
+	case Z90STAT_PERDEV_REQCNT:
+		reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
+		if (!reqcnt) {
+			PRINTK("kmalloc for reqcnt failed!\n");
+			ret = -ENOMEM;
+			break;
+		}
+		get_status_perdevice_reqcnt(reqcnt);
+		if (copy_to_user((char __user *) arg, reqcnt,
+				 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
+			ret = -EFAULT;
+		kfree(reqcnt);
+		break;
+
+	/* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
+	case ICAZ90STATUS:
+		if (deprecated_msg_count1 < 20) {
+			PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
+			deprecated_msg_count1++;
+			if (deprecated_msg_count1 == 20)
+				PRINTK("No longer issuing messages related to "
+				       "deprecated call to ICAZ90STATUS.\n");
+		}
+
+		pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
+		if (!pstat) {
+			PRINTK("kmalloc for pstat failed!\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		pstat->totalcount	 = get_status_totalcount();
+		pstat->leedslitecount	 = get_status_PCICAcount();
+		pstat->leeds2count	 = get_status_PCICCcount();
+		pstat->requestqWaitCount = get_status_requestq_count();
+		pstat->pendingqWaitCount = get_status_pendingq_count();
+		pstat->totalOpenCount	 = get_status_totalopen_count();
+		pstat->cryptoDomain	 = get_status_domain_index();
+		get_status_status_mask(pstat->status);
+		get_status_qdepth_mask(pstat->qdepth);
+
+		if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
+				 sizeof(struct ica_z90_status)) != 0)
+			ret = -EFAULT;
+		kfree(pstat);
+		break;
+
+	/* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
+	case Z90STAT_PCIXCCCOUNT:
+		if (deprecated_msg_count2 < 20) {
+			PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
+			deprecated_msg_count2++;
+			if (deprecated_msg_count2 == 20)
+				PRINTK("No longer issuing messages about depre"
+				       "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
+		}
+
+		tempstat = get_status_PCIXCCcount();
+		/* __user annotation added for consistency with the other
+		 * status calls (sparse address-space checking). */
+		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+			ret = -EFAULT;
+		break;
+
+	case Z90QUIESCE:
+		if (current->euid != 0) {
+			PRINTK("QUIESCE fails: euid %d\n",
+			       current->euid);
+			ret = -EACCES;
+		} else {
+			PRINTK("QUIESCE device from PID %d\n", PID());
+			quiesce_z90crypt = 1;
+		}
+		break;
+
+	default:
+		/* user passed an invalid IOCTL number */
+		PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
+		ret = -ENOTTY;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Hex-format len bytes from addr into outaddr, trailed by a blank.
+ * Returns the number of characters written.
+ */
+static inline int
+sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
+{
+	unsigned int j;
+	int written = 0;
+
+	for (j = 0; j < len; j++)
+		written += sprintf(outaddr+written, "%01x",
+				   (unsigned int) addr[j]);
+	written += sprintf(outaddr+written, " ");
+
+	return written;
+}
+
+/*
+ * Format one row of bytes as 16-byte groups (via sprintcl), ending
+ * with a newline.  Returns the number of characters written.
+ */
+static inline int
+sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
+{
+	unsigned int done, chunk;
+	int written;
+
+	written = sprintf(outaddr, " ");
+	for (done = 0; done + 16 <= len; done += 16)
+		written += sprintcl(outaddr+written, addr+done, 16);
+
+	chunk = len % 16;
+	if (chunk)
+		written += sprintcl(outaddr+written, addr+done, chunk);
+
+	written += sprintf(outaddr+written, "\n");
+
+	return written;
+}
+
+/*
+ * Format a titled hex dump: 64-byte rows via sprintrw, plus any
+ * remainder row.  Returns the number of characters written.
+ */
+static inline int
+sprinthx(unsigned char *title, unsigned char *outaddr,
+	 unsigned char *addr, unsigned int len)
+{
+	unsigned int done, rest;
+	int written;
+
+	written = sprintf(outaddr, "\n%s\n", title);
+	for (done = 0; done + 64 <= len; done += 64)
+		written += sprintrw(outaddr+written, addr+done, 64);
+
+	rest = len % 64;
+	if (rest)
+		written += sprintrw(outaddr+written, addr+done, rest);
+
+	written += sprintf(outaddr+written, "\n");
+
+	return written;
+}
+
+/*
+ * Dump len 32-bit words under a title, eight "%08X " entries per line.
+ * Returns the number of characters written.
+ */
+static inline int
+sprinthx4(unsigned char *title, unsigned char *outaddr,
+	  unsigned int *array, unsigned int len)
+{
+	unsigned int idx;
+	int written;
+
+	written = sprintf(outaddr, "\n%s\n", title);
+	for (idx = 0; idx < len; idx++) {
+		if ((idx % 8) == 0)
+			written += sprintf(outaddr+written, " ");
+		written += sprintf(outaddr+written, "%08X ", array[idx]);
+		if ((idx % 8) == 7)
+			written += sprintf(outaddr+written, "\n");
+	}
+
+	written += sprintf(outaddr+written, "\n");
+
+	return written;
+}
+
+/**
+ * /proc read handler: render driver version, per-type device counts,
+ * queue depths and the per-device masks into resp_buff (one page; the
+ * upper part is used as scratch for the mask helpers).
+ */
+static int
+z90crypt_status(char *resp_buff, char **start, off_t offset,
+		int count, int *eof, void *data)
+{
+	unsigned char *workarea;
+	int len;
+
+	/* resp_buff is a page. Use the right half for a work area */
+	workarea = resp_buff+2000;
+	len = 0;
+	len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
+		       z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
+	len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
+		       get_status_domain_index());
+	len += sprintf(resp_buff+len, "Total device count: %d\n",
+		       get_status_totalcount());
+	len += sprintf(resp_buff+len, "PCICA count: %d\n",
+		       get_status_PCICAcount());
+	len += sprintf(resp_buff+len, "PCICC count: %d\n",
+		       get_status_PCICCcount());
+	len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
+		       get_status_PCIXCCMCL2count());
+	len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
+		       get_status_PCIXCCMCL3count());
+	len += sprintf(resp_buff+len, "CEX2C count: %d\n",
+		       get_status_CEX2Ccount());
+	len += sprintf(resp_buff+len, "requestq count: %d\n",
+		       get_status_requestq_count());
+	len += sprintf(resp_buff+len, "pendingq count: %d\n",
+		       get_status_pendingq_count());
+	len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
+		       get_status_totalopen_count());
+	len += sprinthx(
+		"Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
+		"4: PCIXCC (MCL3), 5: CEX2C",
+		resp_buff+len,
+		get_status_status_mask(workarea),
+		Z90CRYPT_NUM_APS);
+	len += sprinthx("Waiting work element counts",
+			resp_buff+len,
+			get_status_qdepth_mask(workarea),
+			Z90CRYPT_NUM_APS);
+	len += sprinthx4(
+		"Per-device successfully completed request counts",
+		resp_buff+len,
+		get_status_perdevice_reqcnt((unsigned int *)workarea),
+		Z90CRYPT_NUM_APS);
+	*eof = 1;
+	/* wipe the scratch area before handing the page back */
+	memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
+	return len;
+}
+
+/**
+ * Mark a card user-disabled (driven from the /proc interface) and bump
+ * the user-disabled counters.  A device whose type is still unknown
+ * (-1) only affects the global count.  Idempotent.
+ */
+static inline void
+disable_card(int card_index)
+{
+	struct device *devp;
+
+	devp = LONG2DEVPTR(card_index);
+	if (!devp || devp->user_disabled)
+		return;
+	devp->user_disabled = 1;
+	z90crypt.hdware_info->hdware_mask.user_disabled_count++;
+	if (devp->dev_type == -1)
+		return;
+	z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
+}
+
+/**
+ * Re-enable a card the user disabled, reversing disable_card():
+ * decrement the user-disabled counters.  Idempotent.
+ */
+static inline void
+enable_card(int card_index)
+{
+	struct device *devp;
+
+	devp = LONG2DEVPTR(card_index);
+	if (!devp || !devp->user_disabled)
+		return;
+	devp->user_disabled = 0;
+	z90crypt.hdware_info->hdware_mask.user_disabled_count--;
+	if (devp->dev_type == -1)
+		return;
+	z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
+}
+
/**
 * Search the first len bytes of bf for the character c.
 * Scanning stops early at a NUL (end of data, reported through *p_eof)
 * or at a newline.  *offs is always set to the number of bytes
 * consumed, i.e. the index of the stopping character plus one (len+1
 * when the whole buffer was scanned without a stop).
 * Returns 1 when c was found, 0 otherwise.
 */
static inline int
scan_char(unsigned char *bf, unsigned int len,
	  unsigned int *offs, unsigned int *p_eof, unsigned char c)
{
	unsigned int i = 0;
	int hit = 0;

	while (i < len) {
		unsigned char ch = bf[i];

		if (ch == c) {
			hit = 1;
			break;
		}
		if (ch == '\0') {
			*p_eof = 1;
			break;
		}
		if (ch == '\n')
			break;
		i++;
	}
	*offs = i + 1;
	return hit;
}
+
/**
 * Search the first len bytes of bf for the string s.
 * scan_char() locates candidate occurrences of the first character of
 * s; the remainder is then compared in place with strncmp().  On a
 * match, returns 1 and sets *offs to the offset just past the matched
 * string.  On failure returns 0; *p_eof is set when a NUL terminator
 * was reached before a match.
 *
 * NOTE(review): the bound check below compares len against temp_offs
 * (the offset within the current sub-scan) rather than temp_len (the
 * cumulative offset); the two only coincide on the first iteration --
 * TODO confirm this is the intended limit.
 */
static inline int
scan_string(unsigned char *bf, unsigned int len,
	    unsigned int *offs, unsigned int *p_eof, unsigned char *s)
{
	unsigned int temp_len, temp_offs, found, eof;

	temp_len = temp_offs = found = eof = 0;
	while (!eof && !found) {
		/* look for the first character of s in the unscanned tail */
		found = scan_char(bf+temp_len, len-temp_len,
				  &temp_offs, &eof, *s);

		temp_len += temp_offs;
		if (eof) {
			found = 0;
			break;
		}

		if (found) {
			/* enough bytes left for the whole string? */
			if (len >= temp_offs+strlen(s)) {
				/* temp_len is one past the candidate, so
				 * compare starting at temp_len-1 */
				found = !strncmp(bf+temp_len-1, s, strlen(s));
				if (found) {
					*offs = temp_len+strlen(s)-1;
					break;
				}
			} else {
				found = 0;
				*p_eof = 1;
				break;
			}
		}
	}
	return found;
}
+
+static int
+z90crypt_status_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ int i, j, len, offs, found, eof;
+ unsigned char *lbuf;
+ unsigned int local_count;
+
+#define LBUFSIZE 600
+ lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
+ if (!lbuf) {
+ PRINTK("kmalloc failed!\n");
+ return 0;
+ }
+
+ if (count <= 0)
+ return 0;
+
+ local_count = UMIN((unsigned int)count, LBUFSIZE-1);
+
+ if (copy_from_user(lbuf, buffer, local_count) != 0) {
+ kfree(lbuf);
+ return -EFAULT;
+ }
+
+ lbuf[local_count-1] = '\0';
+
+ len = 0;
+ eof = 0;
+ found = 0;
+ while (!eof) {
+ found = scan_string(lbuf+len, local_count-len, &offs, &eof,
+ "Online devices");
+ len += offs;
+ if (found == 1)
+ break;
+ }
+
+ if (eof) {
+ kfree(lbuf);
+ return count;
+ }
+
+ if (found)
+ found = scan_char(lbuf+len, local_count-len, &offs, &eof, '\n');
+
+ if (!found || eof) {
+ kfree(lbuf);
+ return count;
+ }
+
+ len += offs;
+ j = 0;
+ for (i = 0; i < 80; i++) {
+ switch (*(lbuf+len+i)) {
+ case '\t':
+ case ' ':
+ break;
+ case '\n':
+ default:
+ eof = 1;
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ j++;
+ break;
+ case 'd':
+ case 'D':
+ disable_card(j);
+ j++;
+ break;
+ case 'e':
+ case 'E':
+ enable_card(j);
+ j++;
+ break;
+ }
+ if (eof)
+ break;
+ }
+
+ kfree(lbuf);
+ return count;
+}
+
+/**
+ * Functions that run under a timer, with no process id
+ *
+ * The task functions:
+ * z90crypt_reader_task
+ * helper_send_work
+ * helper_handle_work_element
+ * helper_receive_rc
+ * z90crypt_config_task
+ * z90crypt_cleanup_task
+ *
+ * Helper functions:
+ * z90crypt_schedule_reader_timer
+ * z90crypt_schedule_reader_task
+ * z90crypt_schedule_config_task
+ * z90crypt_schedule_cleanup_task
+ */
/**
 * Dequeue one response from the crypto device at 'index'.
 *
 * @psmid:      receives the PSMID of the dequeued reply (preset to
 *              NULL_psmid in case nothing is dequeued)
 * @buff_len_p: receives the length of the converted response
 * @buff:       receives the converted response data
 * @dest_p_p:   on success, set to the user-space output address taken
 *              from the matching caller's original request
 *
 * Returns 0 on success or a REC_* code.  On REC_NO_RESPONSE the device
 * is removed from availability.  The matched caller entry, if any, is
 * unbuilt before returning.
 */
static inline int
receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
			   unsigned char *buff, unsigned char __user **dest_p_p)
{
	int dv, rv;
	struct device *dev_ptr;
	struct caller *caller_p;
	struct ica_rsa_modexpo *icaMsg_p;
	struct list_head *ptr, *tptr;

	/* preset "no reply" so callers see a defined PSMID on any path */
	memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));

	if (z90crypt.terminating)
		return REC_FATAL_ERROR;

	caller_p = 0;
	dev_ptr = z90crypt.device_p[index];
	rv = 0;
	do {
		if (!dev_ptr || dev_ptr->disabled) {
			rv = REC_NO_WORK; // a disabled device can't return work
			break;
		}
		/* sanity check: the device must know its own index */
		if (dev_ptr->dev_self_x != index) {
			PRINTKC("Corrupt dev ptr\n");
			z90crypt.terminating = 1;
			rv = REC_FATAL_ERROR;
			break;
		}
		if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
			/* no response buffer: treat like a hardware exception */
			dv = DEV_REC_EXCEPTION;
			PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
			       dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
		} else {
			PDEBUG("Dequeue called for device %d\n", index);
			dv = receive_from_AP(index, z90crypt.cdx,
					     dev_ptr->dev_resp_l,
					     dev_ptr->dev_resp_p, psmid);
		}
		/* map the device status to a REC_* return code */
		switch (dv) {
		case DEV_REC_EXCEPTION:
			rv = REC_FATAL_ERROR;
			z90crypt.terminating = 1;
			PRINTKC("Exception in receive from device %d\n",
				index);
			break;
		case DEV_ONLINE:
			rv = 0;
			break;
		case DEV_EMPTY:
			rv = REC_EMPTY;
			break;
		case DEV_NO_WORK:
			rv = REC_NO_WORK;
			break;
		case DEV_BAD_MESSAGE:
		case DEV_GONE:
		case REC_HARDWAR_ERR:
		default:
			rv = REC_NO_RESPONSE;
			break;
		}
		if (rv)
			break;
		if (dev_ptr->dev_caller_count <= 0) {
			/* a reply arrived but nobody is waiting for one */
			rv = REC_USER_GONE;
			break;
		}

		/* find the caller whose ID matches the dequeued PSMID and
		 * take it off the device's caller list */
		list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
			caller_p = list_entry(ptr, struct caller, caller_liste);
			if (!memcmp(caller_p->caller_id, psmid,
				    sizeof(caller_p->caller_id))) {
				if (!list_empty(&caller_p->caller_liste)) {
					list_del_init(ptr);
					dev_ptr->dev_caller_count--;
					break;
				}
			}
			caller_p = 0;
		}
		if (!caller_p) {
			PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
				"%02X%02X%02X in device list\n",
				psmid[0], psmid[1], psmid[2], psmid[3],
				psmid[4], psmid[5], psmid[6], psmid[7]);
			rv = REC_USER_GONE;
			break;
		}

		PDEBUG("caller_p after successful receive: %p\n", caller_p);
		/* convert the raw device response into the caller's format */
		rv = convert_response(dev_ptr->dev_resp_p,
				      caller_p->caller_buf_p, buff_len_p, buff);
		switch (rv) {
		case REC_USE_PCICA:
			break;
		case REC_OPERAND_INV:
		case REC_OPERAND_SIZE:
		case REC_EVEN_MOD:
		case REC_INVALID_PAD:
			PDEBUG("device %d: 'user error' %d\n", index, rv);
			break;
		case WRONG_DEVICE_TYPE:
		case REC_HARDWAR_ERR:
		case REC_BAD_MESSAGE:
			PRINTKW("device %d: hardware error %d\n", index, rv);
			rv = REC_NO_RESPONSE;
			break;
		default:
			PDEBUG("device %d: rv = %d\n", index, rv);
			break;
		}
	} while (0);

	switch (rv) {
	case 0:
		/* hand the user-space destination back to the caller */
		PDEBUG("Successful receive from device %d\n", index);
		icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
		*dest_p_p = icaMsg_p->outputdata;
		if (*buff_len_p == 0)
			PRINTK("Zero *buff_len_p\n");
		break;
	case REC_NO_RESPONSE:
		PRINTKW("Removing device %d from availability\n", index);
		remove_device(dev_ptr);
		break;
	}

	if (caller_p)
		unbuild_caller(dev_ptr, caller_p);

	return rv;
}
+
/**
 * Pop the oldest element from the request queue and try to send it to
 * the device at (short) index.  On a successful send the element moves
 * to the pending queue; on any failure -- wrong device type for the
 * request, or a send error -- the element is completed with an error
 * (-EINVAL for operand problems, otherwise -ERESTARTSYS when other
 * devices remain or -ENODEV when this was the only one) and its waiter
 * is woken.  Runs with queuespinlock held (see z90crypt_reader_task).
 */
static inline void
helper_send_work(int index)
{
	struct work_element *rq_p;
	int rv;

	if (list_empty(&request_list))
		return;
	/* element leaves the request queue no matter what happens next */
	requestq_count--;
	rq_p = list_entry(request_list.next, struct work_element, liste);
	list_del_init(&rq_p->liste);
	rq_p->audit[1] |= FP_REMREQUEST;
	if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
		rq_p->devindex = SHRT2LONG(index);
		rv = send_to_crypto_device(rq_p);
		if (rv == 0) {
			/* sent: track it on the pending queue */
			rq_p->requestsent = jiffies;
			rq_p->audit[0] |= FP_SENT;
			list_add_tail(&rq_p->liste, &pending_list);
			++pendingq_count;
			rq_p->audit[0] |= FP_PENDING;
		} else {
			switch (rv) {
			case REC_OPERAND_INV:
			case REC_OPERAND_SIZE:
			case REC_EVEN_MOD:
			case REC_INVALID_PAD:
				rq_p->retcode = -EINVAL;
				break;
			case SEN_NOT_AVAIL:
			case SEN_RETRY:
			case REC_NO_RESPONSE:
			default:
				/* with other devices available, let the
				 * caller retry; otherwise give up */
				if (z90crypt.mask.st_count > 1)
					rq_p->retcode =
						-ERESTARTSYS;
				else
					rq_p->retcode = -ENODEV;
				break;
			}
			rq_p->status[0] |= STAT_FAILED;
			rq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&rq_p->alarmrung, 1);
			wake_up(&rq_p->waitq);
		}
	} else {
		/* device type mismatch: fail the element immediately */
		if (z90crypt.mask.st_count > 1)
			rq_p->retcode = -ERESTARTSYS;
		else
			rq_p->retcode = -ENODEV;
		rq_p->status[0] |= STAT_FAILED;
		rq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&rq_p->alarmrung, 1);
		wake_up(&rq_p->waitq);
	}
}
+
/**
 * Complete the pending work element whose caller_id matches the
 * dequeued PSMID: remove it from the pending queue, copy the response
 * into it (rc == 0) or record an errno-style failure, then wake its
 * waiter -- unless the element was already failed with -ERELEASED
 * (the requester is gone and must not be woken).
 * A PSMID with no matching pending element is logged and dropped.
 * Runs with queuespinlock held (see z90crypt_reader_task).
 */
static inline void
helper_handle_work_element(int index, unsigned char psmid[8], int rc,
			   int buff_len, unsigned char *buff,
			   unsigned char __user *resp_addr)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;

	pq_p = 0;
	/* locate and unlink the matching element on the pending queue */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
			list_del_init(lptr);
			pendingq_count--;
			pq_p->audit[1] |= FP_NOTPENDING;
			break;
		}
		pq_p = 0;
	}

	if (!pq_p) {
		PRINTK("device %d has work but no caller exists on pending Q\n",
		       SHRT2LONG(index));
		return;
	}

	switch (rc) {
	case 0:
		/* success: stash response data and destination address */
		pq_p->resp_buff_size = buff_len;
		pq_p->audit[1] |= FP_RESPSIZESET;
		if (buff_len) {
			pq_p->resp_addr = resp_addr;
			pq_p->audit[1] |= FP_RESPADDRCOPIED;
			memcpy(pq_p->resp_buff, buff, buff_len);
			pq_p->audit[1] |= FP_RESPBUFFCOPIED;
		}
		break;
	case REC_OPERAND_INV:
	case REC_OPERAND_SIZE:
	case REC_EVEN_MOD:
	case REC_INVALID_PAD:
		PDEBUG("-EINVAL after application error %d\n", rc);
		pq_p->retcode = -EINVAL;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_USE_PCICA:
		/* request should be retried on a PCICA device */
		pq_p->retcode = -ERESTARTSYS;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_NO_RESPONSE:
	default:
		if (z90crypt.mask.st_count > 1)
			pq_p->retcode = -ERESTARTSYS;
		else
			pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		break;
	}
	/* skip the wake-up only for an already-released element
	 * (STAT_FAILED with retcode -ERELEASED) */
	if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}
}
+
+/**
+ * return TRUE if the work element should be removed from the queue
+ */
+static inline int
+helper_receive_rc(int index, int *rc_p)
+{
+ switch (*rc_p) {
+ case 0:
+ case REC_OPERAND_INV:
+ case REC_OPERAND_SIZE:
+ case REC_EVEN_MOD:
+ case REC_INVALID_PAD:
+ case REC_USE_PCICA:
+ break;
+
+ case REC_BUSY:
+ case REC_NO_WORK:
+ case REC_EMPTY:
+ case REC_RETRY_DEV:
+ case REC_FATAL_ERROR:
+ return 0;
+
+ case REC_NO_RESPONSE:
+ break;
+
+ default:
+ PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
+ *rc_p, SHRT2LONG(index));
+ *rc_p = REC_NO_RESPONSE;
+ break;
+ }
+ return 1;
+}
+
+static inline void
+z90crypt_schedule_reader_timer(void)
+{
+ if (timer_pending(&reader_timer))
+ return;
+ if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
+ PRINTK("Timer pending while modifying reader timer\n");
+}
+
/**
 * Tasklet body: drain responses from all online devices in round-robin
 * order, completing the matching pending work elements and feeding each
 * device fresh work from the request queue after a dequeue that was not
 * REC_NO_RESPONSE.  Reschedules the reader timer if work remains queued
 * after the drain.
 *
 * NOTE: 'buff' is static and therefore shared; this is safe only
 * because the tasklet is single-threaded and the buffer is used under
 * queuespinlock -- presumably intentional, verify before reuse.
 */
static void
z90crypt_reader_task(unsigned long ptr)
{
	int workavail, index, rc, buff_len;
	unsigned char psmid[8];
	unsigned char __user *resp_addr;
	static unsigned char buff[1024];

	/**
	 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
	 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
	 * loop, there is no work remaining on the queues.
	 */
	resp_addr = 0;
	workavail = 2;
	buff_len = 0;
	while (workavail) {
		workavail--;
		rc = 0;
		spin_lock_irq(&queuespinlock);
		memset(buff, 0x00, sizeof(buff));

		/* Dequeue once from each device in round robin. */
		for (index = 0; index < z90crypt.mask.st_count; index++) {
			PDEBUG("About to receive.\n");
			rc = receive_from_crypto_device(SHRT2LONG(index),
							psmid,
							&buff_len,
							buff,
							&resp_addr);
			PDEBUG("Dequeued: rc = %d.\n", rc);

			if (helper_receive_rc(index, &rc)) {
				if (rc != REC_NO_RESPONSE) {
					/* device answered: give it more
					 * work and keep the loop alive */
					helper_send_work(index);
					workavail = 2;
				}

				helper_handle_work_element(index, psmid, rc,
							   buff_len, buff,
							   resp_addr);
			}

			if (rc == REC_FATAL_ERROR)
				PRINTKW("REC_FATAL_ERROR from device %d!\n",
					SHRT2LONG(index));
		}
		spin_unlock_irq(&queuespinlock);
	}

	/* anything still queued? poll again via the reader timer */
	if (pendingq_count + requestq_count)
		z90crypt_schedule_reader_timer();
}
+
+static inline void
+z90crypt_schedule_config_task(unsigned int expiration)
+{
+ if (timer_pending(&config_timer))
+ return;
+ if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
+ PRINTK("Timer pending while modifying config timer\n");
+}
+
+static void
+z90crypt_config_task(unsigned long ptr)
+{
+ int rc;
+
+ PDEBUG("jiffies %ld\n", jiffies);
+
+ if ((rc = refresh_z90crypt(&z90crypt.cdx)))
+ PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
+ /* If return was fatal, don't bother reconfiguring */
+ if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
+ z90crypt_schedule_config_task(CONFIGTIME);
+}
+
+static inline void
+z90crypt_schedule_cleanup_task(void)
+{
+ if (timer_pending(&cleanup_timer))
+ return;
+ if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
+ PRINTK("Timer pending while modifying cleanup timer\n");
+}
+
/**
 * Fail every element on both the pending and request queues with
 * -ENODEV and wake all waiters.  Used when no devices remain (see
 * z90crypt_cleanup_task).  Runs with queuespinlock held.
 */
static inline void
helper_drain_queues(void)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;

	/* drain the pending queue: these elements were already sent, so
	 * their caller structures must be unbuilt as well */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *)pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}

	/* drain the request queue: never sent, nothing to unbuild */
	list_for_each_safe(lptr, tptr, &request_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		list_del_init(lptr);
		requestq_count--;
		pq_p->audit[1] |= FP_REMREQUEST;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}
}
+
/**
 * Fail with -ETIMEOUT every element that has been waiting longer than
 * CLEANUPTIME seconds.  The pending queue is always purged; the
 * request queue is purged only when nothing is pending (otherwise its
 * elements still have a chance to be sent).  Both queues are in strict
 * chronological order, so each scan stops at the first element that is
 * young enough.  Runs with queuespinlock held.
 */
static inline void
helper_timeout_requests(void)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;
	long timelimit;

	/* anything sent before this jiffies value has timed out */
	timelimit = jiffies - (CLEANUPTIME * HZ);
	/* The list is in strict chronological order */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (pq_p->requestsent >= timelimit)
			break;
		PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
			((struct caller *)pq_p->requestptr)->caller_id[0],
			((struct caller *)pq_p->requestptr)->caller_id[1],
			((struct caller *)pq_p->requestptr)->caller_id[2],
			((struct caller *)pq_p->requestptr)->caller_id[3],
			((struct caller *)pq_p->requestptr)->caller_id[4],
			((struct caller *)pq_p->requestptr)->caller_id[5],
			((struct caller *)pq_p->requestptr)->caller_id[6],
			((struct caller *)pq_p->requestptr)->caller_id[7]);
		pq_p->retcode = -ETIMEOUT;
		pq_p->status[0] |= STAT_FAILED;
		/* get this off any caller queue it may be on */
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *) pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_TIMEDOUT;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}

	/**
	 * If pending count is zero, items left on the request queue may
	 * never be processed.
	 */
	if (pendingq_count <= 0) {
		list_for_each_safe(lptr, tptr, &request_list) {
			pq_p = list_entry(lptr, struct work_element, liste);
			if (pq_p->requestsent >= timelimit)
				break;
			PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
				((struct caller *)pq_p->requestptr)->caller_id[0],
				((struct caller *)pq_p->requestptr)->caller_id[1],
				((struct caller *)pq_p->requestptr)->caller_id[2],
				((struct caller *)pq_p->requestptr)->caller_id[3],
				((struct caller *)pq_p->requestptr)->caller_id[4],
				((struct caller *)pq_p->requestptr)->caller_id[5],
				((struct caller *)pq_p->requestptr)->caller_id[6],
				((struct caller *)pq_p->requestptr)->caller_id[7]);
			pq_p->retcode = -ETIMEOUT;
			pq_p->status[0] |= STAT_FAILED;
			list_del_init(lptr);
			requestq_count--;
			pq_p->audit[1] |= FP_TIMEDOUT;
			pq_p->audit[1] |= FP_REMREQUEST;
			pq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&pq_p->alarmrung, 1);
			wake_up(&pq_p->waitq);
		}
	}
}
+
+static void
+z90crypt_cleanup_task(unsigned long ptr)
+{
+ PDEBUG("jiffies %ld\n", jiffies);
+ spin_lock_irq(&queuespinlock);
+ if (z90crypt.mask.st_count <= 0) // no devices!
+ helper_drain_queues();
+ else
+ helper_timeout_requests();
+ spin_unlock_irq(&queuespinlock);
+ z90crypt_schedule_cleanup_task();
+}
+
/**
 * Timer body for reader_timer: defer the actual queue draining to the
 * reader tasklet (z90crypt_reader_task).
 */
static void
z90crypt_schedule_reader_task(unsigned long ptr)
{
	tasklet_schedule(&reader_tasklet);
}
+
+/**
+ * Lowlevel Functions:
+ *
+ * create_z90crypt: creates and initializes basic data structures
+ * refresh_z90crypt: re-initializes basic data structures
+ * find_crypto_devices: returns a count and mask of hardware status
+ * create_crypto_device: builds the descriptor for a device
+ * destroy_crypto_device: unallocates the descriptor for a device
+ * destroy_z90crypt: drains all work, unallocates structs
+ */
+
+/**
+ * build the z90crypt root structure using the given domain index
+ */
+static int
+create_z90crypt(int *cdx_p)
+{
+ struct hdware_block *hdware_blk_p;
+
+ memset(&z90crypt, 0x00, sizeof(struct z90crypt));
+ z90crypt.domain_established = 0;
+ z90crypt.len = sizeof(struct z90crypt);
+ z90crypt.max_count = Z90CRYPT_NUM_DEVS;
+ z90crypt.cdx = *cdx_p;
+
+ hdware_blk_p = (struct hdware_block *)
+ kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
+ if (!hdware_blk_p) {
+ PDEBUG("kmalloc for hardware block failed\n");
+ return ENOMEM;
+ }
+ memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
+ z90crypt.hdware_info = hdware_blk_p;
+
+ return 0;
+}
+
/**
 * Probe every AP index against all 16 possible crypto domains, looking
 * for the domain *cdx_p.  For each card, the domains that answer
 * HD_ONLINE are collected into cdx_array (unused slots preset to -1);
 * when *cdx_p itself answers, *correct_cdx_found is set and scanning
 * stops.  Scanning also stops at the first card that found any domain,
 * or on a TSQ exception (which sets z90crypt.terminating).
 * Returns the number of domains collected from the last card probed.
 */
static inline int
helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
{
	enum hdstat hd_stat;
	int q_depth, dev_type;
	int indx, chkdom, numdomains;

	q_depth = dev_type = numdomains = 0;
	/* preset all 16 slots to -1 (assignment is the loop body) */
	for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
	for (indx = 0; indx < z90crypt.max_count; indx++) {
		hd_stat = HD_NOT_THERE;
		numdomains = 0;	/* count is per card, reset each pass */
		for (chkdom = 0; chkdom <= 15; chkdom++) {
			hd_stat = query_online(indx, chkdom, MAX_RESET,
					       &q_depth, &dev_type);
			if (hd_stat == HD_TSQ_EXCEPTION) {
				z90crypt.terminating = 1;
				PRINTKC("exception taken!\n");
				break;
			}
			if (hd_stat == HD_ONLINE) {
				cdx_array[numdomains++] = chkdom;
				if (*cdx_p == chkdom) {
					*correct_cdx_found = 1;
					break;
				}
			}
		}
		if ((*correct_cdx_found == 1) || (numdomains != 0))
			break;
		if (z90crypt.terminating)
			break;
	}
	return numdomains;
}
+
/**
 * Determine the crypto domain index to use.  Scans the hardware; if
 * the requested domain *cdx_p is found, returns 0.  If exactly one
 * domain exists and none was requested (*cdx_p == -1), adopts it.
 * Otherwise returns Z90C_NO_DEVICES, Z90C_INCORRECT_DOMAIN,
 * Z90C_AMBIGUOUS_DOMAIN, or TSQ_FATAL_ERROR as appropriate.
 *
 * NOTE(review): cdx_array_text[53] holds exactly 16 distinct domain
 * numbers 0-15 ("10, 11, ..., 15, 0, ..." = 52 chars + NUL); any
 * duplicate entries from the scan would overflow -- TODO confirm
 * helper_scan_devices can never report duplicates.
 */
static inline int
probe_crypto_domain(int *cdx_p)
{
	int cdx_array[16];
	char cdx_array_text[53], temp[5];
	int correct_cdx_found, numdomains;

	correct_cdx_found = 0;
	numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);

	if (z90crypt.terminating)
		return TSQ_FATAL_ERROR;

	if (correct_cdx_found)
		return 0;

	if (numdomains == 0) {
		PRINTKW("Unable to find crypto domain: No devices found\n");
		return Z90C_NO_DEVICES;
	}

	if (numdomains == 1) {
		/* exactly one domain: adopt it if none was requested */
		if (*cdx_p == -1) {
			*cdx_p = cdx_array[0];
			return 0;
		}
		PRINTKW("incorrect domain: specified = %d, found = %d\n",
			*cdx_p, cdx_array[0]);
		return Z90C_INCORRECT_DOMAIN;
	}

	/* several domains found: format the list for the warning */
	numdomains--;
	sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
	while (numdomains) {
		numdomains--;
		sprintf(temp, ", %d", cdx_array[numdomains]);
		strcat(cdx_array_text, temp);
	}

	PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
		*cdx_p, cdx_array_text);
	return Z90C_AMBIGUOUS_DOMAIN;
}
+
+static int
+refresh_z90crypt(int *cdx_p)
+{
+ int i, j, indx, rv;
+ static struct status local_mask;
+ struct device *devPtr;
+ unsigned char oldStat, newStat;
+ int return_unchanged;
+
+ if (z90crypt.len != sizeof(z90crypt))
+ return ENOTINIT;
+ if (z90crypt.terminating)
+ return TSQ_FATAL_ERROR;
+ rv = 0;
+ if (!z90crypt.hdware_info->hdware_mask.st_count &&
+ !z90crypt.domain_established) {
+ rv = probe_crypto_domain(cdx_p);
+ if (z90crypt.terminating)
+ return TSQ_FATAL_ERROR;
+ if (rv == Z90C_NO_DEVICES)
+ return 0; // try later
+ if (rv)
+ return rv;
+ z90crypt.cdx = *cdx_p;
+ z90crypt.domain_established = 1;
+ }
+ rv = find_crypto_devices(&local_mask);
+ if (rv) {
+ PRINTK("find crypto devices returned %d\n", rv);
+ return rv;
+ }
+ if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
+ sizeof(struct status))) {
+ return_unchanged = 1;
+ for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
+ /**
+ * Check for disabled cards. If any device is marked
+ * disabled, destroy it.
+ */
+ for (j = 0;
+ j < z90crypt.hdware_info->type_mask[i].st_count;
+ j++) {
+ indx = z90crypt.hdware_info->type_x_addr[i].
+ device_index[j];
+ devPtr = z90crypt.device_p[indx];
+ if (devPtr && devPtr->disabled) {
+ local_mask.st_mask[indx] = HD_NOT_THERE;
+ return_unchanged = 0;
+ }
+ }
+ }
+ if (return_unchanged == 1)
+ return 0;
+ }
+
+ spin_lock_irq(&queuespinlock);
+ for (i = 0; i < z90crypt.max_count; i++) {
+ oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
+ newStat = local_mask.st_mask[i];
+ if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
+ destroy_crypto_device(i);
+ else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
+ rv = create_crypto_device(i);
+ if (rv >= REC_FATAL_ERROR)
+ return rv;
+ if (rv != 0) {
+ local_mask.st_mask[i] = HD_NOT_THERE;
+ local_mask.st_count--;
+ }
+ }
+ }
+ memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
+ sizeof(local_mask.st_mask));
+ z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
+ z90crypt.hdware_info->hdware_mask.disabled_count =
+ local_mask.disabled_count;
+ refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
+ for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
+ refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
+ &(z90crypt.hdware_info->type_x_addr[i]));
+ spin_unlock_irq(&queuespinlock);
+
+ return rv;
+}
+
+static int
+find_crypto_devices(struct status *deviceMask)
+{
+ int i, q_depth, dev_type;
+ enum hdstat hd_stat;
+
+ deviceMask->st_count = 0;
+ deviceMask->disabled_count = 0;
+ deviceMask->user_disabled_count = 0;
+
+ for (i = 0; i < z90crypt.max_count; i++) {
+ hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
+ &dev_type);
+ if (hd_stat == HD_TSQ_EXCEPTION) {
+ z90crypt.terminating = 1;
+ PRINTKC("Exception during probe for crypto devices\n");
+ return TSQ_FATAL_ERROR;
+ }
+ deviceMask->st_mask[i] = hd_stat;
+ if (hd_stat == HD_ONLINE) {
+ PDEBUG("Got an online crypto!: %d\n", i);
+ PDEBUG("Got a queue depth of %d\n", q_depth);
+ PDEBUG("Got a device type of %d\n", dev_type);
+ if (q_depth <= 0)
+ return TSQ_FATAL_ERROR;
+ deviceMask->st_count++;
+ z90crypt.q_depth_array[i] = q_depth;
+ z90crypt.dev_type_array[i] = dev_type;
+ }
+ }
+
+ return 0;
+}
+
/**
 * Rebuild index_array->device_index[] with the indices of all entries
 * in status_str marked DEV_ONLINE, in ascending order, stopping once
 * st_count entries have been found.  Returns the number of entries
 * written.
 *
 * NOTE(review): the do/while reads st_mask[++i] before testing
 * i < Z90CRYPT_NUM_DEVS, so if fewer than st_count entries are online
 * it can read st_mask[Z90CRYPT_NUM_DEVS] -- TODO confirm the array has
 * a guard slot or st_count is always consistent with the mask.
 */
static int
refresh_index_array(struct status *status_str, struct device_x *index_array)
{
	int i, count;
	enum devstat stat;

	i = -1;
	count = 0;
	do {
		stat = status_str->st_mask[++i];
		if (stat == DEV_ONLINE)
			index_array->device_index[count++] = i;
	} while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));

	return count;
}
+
/**
 * Allocate (if needed), reset, and register the device at 'index'.
 * A new struct device gets a response buffer and an empty caller list;
 * the device is then reset and, when online, its type is determined
 * (probing when the cached type is NILDEV or PCIXCC_UNK) and the
 * numeric type code is recorded for the proc interface.  Finally the
 * device is installed in z90crypt.device_p[] and the online masks are
 * updated.
 * Returns 0 on success, ENOMEM (positive, file convention),
 * RSQ_FATAL_ERROR on a reset exception, or a probe error code.
 */
static int
create_crypto_device(int index)
{
	int rv, devstat, total_size;
	struct device *dev_ptr;
	struct status *type_str_p;
	int deviceType;

	dev_ptr = z90crypt.device_p[index];
	if (!dev_ptr) {
		/* trailing per-queue-slot int array follows the struct */
		total_size = sizeof(struct device) +
			     z90crypt.q_depth_array[index] * sizeof(int);

		dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
		if (!dev_ptr) {
			PRINTK("kmalloc device %d failed\n", index);
			return ENOMEM;
		}
		memset(dev_ptr, 0, total_size);
		dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
		if (!dev_ptr->dev_resp_p) {
			kfree(dev_ptr);
			PRINTK("kmalloc device %d rec buffer failed\n", index);
			return ENOMEM;
		}
		dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
		INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
	}

	devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
	if (devstat == DEV_RSQ_EXCEPTION) {
		PRINTK("exception during reset device %d\n", index);
		kfree(dev_ptr->dev_resp_p);
		kfree(dev_ptr);
		return RSQ_FATAL_ERROR;
	}
	if (devstat == DEV_ONLINE) {
		dev_ptr->dev_self_x = index;
		dev_ptr->dev_type = z90crypt.dev_type_array[index];
		/* type unknown: probe with a test message */
		if (dev_ptr->dev_type == NILDEV) {
			rv = probe_device_type(dev_ptr);
			if (rv) {
				PRINTK("rv = %d from probe_device_type %d\n",
				       rv, index);
				kfree(dev_ptr->dev_resp_p);
				kfree(dev_ptr);
				return rv;
			}
		}
		/* PCIXCC needs a second probe to tell MCL2 from MCL3 */
		if (dev_ptr->dev_type == PCIXCC_UNK) {
			rv = probe_PCIXCC_type(dev_ptr);
			if (rv) {
				PRINTK("rv = %d from probe_PCIXCC_type %d\n",
				       rv, index);
				kfree(dev_ptr->dev_resp_p);
				kfree(dev_ptr);
				return rv;
			}
		}
		/* numeric codes 1-5 are what the proc status read shows */
		deviceType = dev_ptr->dev_type;
		z90crypt.dev_type_array[index] = deviceType;
		if (deviceType == PCICA)
			z90crypt.hdware_info->device_type_array[index] = 1;
		else if (deviceType == PCICC)
			z90crypt.hdware_info->device_type_array[index] = 2;
		else if (deviceType == PCIXCC_MCL2)
			z90crypt.hdware_info->device_type_array[index] = 3;
		else if (deviceType == PCIXCC_MCL3)
			z90crypt.hdware_info->device_type_array[index] = 4;
		else if (deviceType == CEX2C)
			z90crypt.hdware_info->device_type_array[index] = 5;
		else
			z90crypt.hdware_info->device_type_array[index] = -1;
	}

	/**
	 * 'q_depth' returned by the hardware is one less than
	 * the actual depth
	 *
	 * NOTE(review): no +1 adjustment is applied here -- confirm the
	 * consumers of dev_q_depth account for that.
	 */
	dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
	dev_ptr->dev_type = z90crypt.dev_type_array[index];
	dev_ptr->dev_stat = devstat;
	dev_ptr->disabled = 0;
	z90crypt.device_p[index] = dev_ptr;

	if (devstat == DEV_ONLINE) {
		/* flip the overall and per-type masks to online */
		if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
			z90crypt.mask.st_mask[index] = DEV_ONLINE;
			z90crypt.mask.st_count++;
		}
		deviceType = dev_ptr->dev_type;
		type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
		if (type_str_p->st_mask[index] != DEV_ONLINE) {
			type_str_p->st_mask[index] = DEV_ONLINE;
			type_str_p->st_count++;
		}
	}

	return 0;
}
+
+static int
+destroy_crypto_device(int index)
+{
+ struct device *dev_ptr;
+ int t, disabledFlag;
+
+ dev_ptr = z90crypt.device_p[index];
+
+ /* remember device type; get rid of device struct */
+ if (dev_ptr) {
+ disabledFlag = dev_ptr->disabled;
+ t = dev_ptr->dev_type;
+ if (dev_ptr->dev_resp_p)
+ kfree(dev_ptr->dev_resp_p);
+ kfree(dev_ptr);
+ } else {
+ disabledFlag = 0;
+ t = -1;
+ }
+ z90crypt.device_p[index] = 0;
+
+ /* if the type is valid, remove the device from the type_mask */
+ if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
+ z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
+ z90crypt.hdware_info->type_mask[t].st_count--;
+ if (disabledFlag == 1)
+ z90crypt.hdware_info->type_mask[t].disabled_count--;
+ }
+ if (z90crypt.mask.st_mask[index] != DEV_GONE) {
+ z90crypt.mask.st_mask[index] = DEV_GONE;
+ z90crypt.mask.st_count--;
+ }
+ z90crypt.hdware_info->device_type_array[index] = 0;
+
+ return 0;
+}
+
+static void
+destroy_z90crypt(void)
+{
+ int i;
+ for (i = 0; i < z90crypt.max_count; i++)
+ if (z90crypt.device_p[i])
+ destroy_crypto_device(i);
+ if (z90crypt.hdware_info)
+ kfree((void *)z90crypt.hdware_info);
+ memset((void *)&z90crypt, 0, sizeof(z90crypt));
+}
+
/*
 * Canned test request sent by probe_device_type() to identify a card:
 * a reply starting 0x00 0x86 marks the device as PCICC, anything else
 * as PCICA.  Presumably an ICSF/PKCS-formatted message (the ASCII
 * "CCA-APPL", "ICSF", "PKCS-1.2" strings are embedded) -- do not edit
 * the bytes.
 */
static unsigned char static_testmsg[384] = {
0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
};
+
/**
 * Determine a device's type by sending it the canned static_testmsg
 * and inspecting the reply: a response beginning 0x00 0x86 marks the
 * device as PCICC, anything else as PCICA.  The dequeue is retried up
 * to 6 times at 300 ms intervals while the device reports no work.
 * Returns 0 on success (devPtr->dev_type set) or a SEN_*/REC_* error;
 * on error the card is not marked online.
 */
static int
probe_device_type(struct device *devPtr)
{
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[sizeof(static_testmsg)];

	index = devPtr->dev_self_x;
	rv = 0;
	do {
		memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
		length = sizeof(static_testmsg) - 24;
		/* the -24 allows for the header */
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned by send during probe: %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			}
			PDEBUG("return value from send_to_AP: %d\n", rv);
			/* map the device send status to a SEN_* code */
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
			}
		}

		if (rv)
			break;

		/* poll for the reply: up to 6 tries, 300 ms apart */
		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			}
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			}
			/* only REC_NO_WORK loops for another try */
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		}
		if (rv)
			break;
		/* classify by the first two bytes of the reply */
		rv = (devPtr->dev_resp_p[0] == 0x00) &&
		     (devPtr->dev_resp_p[1] == 0x86);
		if (rv)
			devPtr->dev_type = PCICC;
		else
			devPtr->dev_type = PCICA;
		rv = 0;
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
}
+
/*
 * Canned test request sent by probe_PCIXCC_type() to distinguish
 * PCIXCC MCL levels (the probe function follows below).  Presumably a
 * CPRBX-formatted message -- do not edit the bytes.
 */
static unsigned char MCL3_testmsg[] = {
0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
};
+
+/*
+ * probe_PCIXCC_type
+ *
+ * Sends the canned MCL3 test message to the AP queue at
+ * devPtr->dev_self_x, polls up to 6 times (300 ms apart) for the reply,
+ * and classifies the card by the CPRBX located 48 bytes into the
+ * response buffer: return code 8 / reason code 33 means PCIXCC_MCL2,
+ * anything else means PCIXCC_MCL3.
+ *
+ * Returns 0 on success (devPtr->dev_type set) or a SEN_xxx / REC_xxx
+ * error code; in the error case the card is not marked online.
+ */
+static int
+probe_PCIXCC_type(struct device *devPtr)
+{
+	int rv, dv, i, index, length;
+	unsigned char psmid[8];
+	static unsigned char loc_testmsg[548];
+	struct CPRBX *cprbx_p;
+
+	index = devPtr->dev_self_x;
+	rv = 0;
+	do {
+		/* work on a copy so the template stays pristine */
+		memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
+		length = sizeof(MCL3_testmsg) - 0x0C;
+		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
+		if (dv) {
+			PDEBUG("dv returned = %d\n", dv);
+			if (dv == DEV_SEN_EXCEPTION) {
+				rv = SEN_FATAL_ERROR;
+				PRINTKC("exception in send to AP %d\n", index);
+				break;
+			}
+			/* NOTE(review): prints rv, which is still 0 here;
+			 * presumably dv was intended */
+			PDEBUG("return value from send_to_AP: %d\n", rv);
+			/* map the device-level send status to a SEN_ code */
+			switch (dv) {
+			case DEV_GONE:
+				PDEBUG("dev %d not available\n", index);
+				rv = SEN_NOT_AVAIL;
+				break;
+			case DEV_ONLINE:
+				rv = 0;
+				break;
+			case DEV_EMPTY:
+				rv = SEN_NOT_AVAIL;
+				break;
+			case DEV_NO_WORK:
+				rv = SEN_FATAL_ERROR;
+				break;
+			case DEV_BAD_MESSAGE:
+				rv = SEN_USER_ERROR;
+				break;
+			case DEV_QUEUE_FULL:
+				rv = SEN_QUEUE_FULL;
+				break;
+			default:
+				PRINTK("unknown dv=%d for dev %d\n", dv, index);
+				rv = SEN_NOT_AVAIL;
+				break;
+			}
+		}
+
+		if (rv)
+			break;
+
+		/* poll for the reply; only REC_NO_WORK retries, any other
+		 * nonzero status (including REC_EMPTY) aborts the loop */
+		for (i = 0; i < 6; i++) {
+			mdelay(300);
+			dv = receive_from_AP(index, z90crypt.cdx,
+					     devPtr->dev_resp_l,
+					     devPtr->dev_resp_p, psmid);
+			PDEBUG("dv returned by DQ = %d\n", dv);
+			if (dv == DEV_REC_EXCEPTION) {
+				rv = REC_FATAL_ERROR;
+				PRINTKC("exception in dequeue %d\n",
+					index);
+				break;
+			}
+			switch (dv) {
+			case DEV_ONLINE:
+				rv = 0;
+				break;
+			case DEV_EMPTY:
+				rv = REC_EMPTY;
+				break;
+			case DEV_NO_WORK:
+				rv = REC_NO_WORK;
+				break;
+			case DEV_BAD_MESSAGE:
+			case DEV_GONE:
+			default:
+				rv = REC_NO_RESPONSE;
+				break;
+			}
+			if ((rv != 0) && (rv != REC_NO_WORK))
+				break;
+			if (rv == 0)
+				break;
+		}
+		if (rv)
+			break;
+		/* the CPRBX response header sits 48 bytes into the buffer */
+		cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
+		if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
+			devPtr->dev_type = PCIXCC_MCL2;
+			PDEBUG("device %d is MCL2\n", index);
+		} else {
+			devPtr->dev_type = PCIXCC_MCL3;
+			PDEBUG("device %d is MCL3\n", index);
+		}
+	} while (0);
+	/* In a general error case, the card is not marked online */
+	return rv;
+}
+
+#ifdef Z90CRYPT_USE_HOTPLUG
+/*
+ * z90crypt_hotplug_event
+ *
+ * Notifies userspace of a z90crypt device add/remove by invoking the
+ * hotplug helper (hotplug_path) with MAJOR/MINOR/ACTION in the
+ * environment.  action must be Z90CRYPT_HOTPLUG_ADD or
+ * Z90CRYPT_HOTPLUG_REMOVE; anything else BUGs.  No-op when the kernel
+ * is built without CONFIG_HOTPLUG.
+ */
+static void
+z90crypt_hotplug_event(int dev_major, int dev_minor, int action)
+{
+#ifdef CONFIG_HOTPLUG
+	char *argv[3];
+	char *envp[6];
+	char major[20];
+	char minor[20];
+
+	sprintf(major, "MAJOR=%d", dev_major);
+	sprintf(minor, "MINOR=%d", dev_minor);
+
+	argv[0] = hotplug_path;
+	argv[1] = "z90crypt";
+	argv[2] = 0;
+
+	envp[0] = "HOME=/";
+	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+
+	switch (action) {
+	case Z90CRYPT_HOTPLUG_ADD:
+		envp[2] = "ACTION=add";
+		break;
+	case Z90CRYPT_HOTPLUG_REMOVE:
+		envp[2] = "ACTION=remove";
+		break;
+	default:
+		BUG();
+		break;
+	}
+	envp[3] = major;
+	envp[4] = minor;
+	envp[5] = 0;
+
+	/* fire-and-forget: final argument 0 means don't wait for exit */
+	call_usermodehelper(argv[0], argv, envp, 0);
+#endif
+}
+#endif
+
+module_init(z90crypt_init_module);
+module_exit(z90crypt_cleanup_module);
diff --git a/drivers/s390/ebcdic.c b/drivers/s390/ebcdic.c
new file mode 100644
index 000000000000..99c98da15473
--- /dev/null
+++ b/drivers/s390/ebcdic.c
@@ -0,0 +1,246 @@
+/*
+ * arch/s390/kernel/ebcdic.c
+ * ECBDIC -> ASCII, ASCII -> ECBDIC conversion tables.
+ *
+ * S390 version
+ * Copyright (C) 1998 IBM Corporation
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <asm/types.h>
+
+/*
+ * ASCII -> EBCDIC
+ */
+/* Full 256-entry ASCII -> EBCDIC translation table, indexed by the
+ * ASCII code point. */
+__u8 _ascebc[256] =
+{
+ /*00 NL SH SX EX ET NQ AK BL */
+ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
+ /*08 BS HT LF VT FF CR SO SI */
+ 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /*10 DL D1 D2 D3 D4 NK SN EB */
+ 0x10, 0x11, 0x12, 0x13, 0x3C, 0x15, 0x32, 0x26,
+ /*18 CN EM SB EC FS GS RS US */
+ 0x18, 0x19, 0x3F, 0x27, 0x1C, 0x1D, 0x1E, 0x1F,
+ /*20 SP ! " # $ % & ' */
+ 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
+ /*28 ( ) * + , - . / */
+ 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
+ /*30 0 1 2 3 4 5 6 7 */
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ /*38 8 9 : ; < = > ? */
+ 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
+ /*40 @ A B C D E F G */
+ 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ /*48 H I J K L M N O */
+ 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
+ /*50 P Q R S T U V W */
+ 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
+ /*58 X Y Z [ \ ] ^ _ */
+ 0xE7, 0xE8, 0xE9, 0xAD, 0xE0, 0xBD, 0x5F, 0x6D,
+ /*60 ` a b c d e f g */
+ 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ /*68 h i j k l m n o */
+ 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
+ /*70 p q r s t u v w */
+ 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
+ /*78 x y z { | } ~ DL */
+ 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
+ /* 0x80-0xFE: no 7-bit ASCII meaning, mapped to 0x3F; 0xFF -> 0xFF */
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0xFF
+};
+
+/*
+ * EBCDIC -> ASCII
+ */
+/* Full 256-entry EBCDIC -> ASCII translation table, indexed by the
+ * EBCDIC code point.  Entries marked ---- (no sensible mapping) and
+ * unassigned controls translate to 0x07. */
+__u8 _ebcasc[256] =
+{
+ /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
+ 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
+ /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
+ 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
+ -ENP ->LF */
+ 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
+ /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
+ -IUS */
+ 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
+ -INP */
+ 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
+ /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
+ -SW */
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
+ /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
+ 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
+ /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
+ 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
+ /* 0x40 SP RSP ä ---- */
+ 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
+ /* 0x48 . < ( + | */
+ 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
+ /* 0x50 & ---- */
+ 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
+ /* 0x58 ß ! $ * ) ; */
+ 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
+ /* 0x60 - / ---- Ä ---- ---- ---- */
+ 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
+ /* 0x68 ---- , % _ > ? */
+ 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
+ /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
+ 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x78 * ` : # @ ' = " */
+ 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
+ /* 0x80 * a b c d e f g */
+ 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ /* 0x88 h i ---- ---- ---- */
+ 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
+ /* 0x90 ° j k l m n o p */
+ 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
+ /* 0x98 q r ---- ---- */
+ 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
+ /* 0xA0 ~ s t u v w x */
+ 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ /* 0xA8 y z ---- ---- ---- ---- */
+ 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
+ /* 0xB0 ^ ---- § ---- */
+ 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
+ /* 0xB8 ---- [ ] ---- ---- ---- ---- */
+ 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
+ /* 0xC0 { A B C D E F G */
+ 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ /* 0xC8 H I ---- ö ---- */
+ 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
+ /* 0xD0 } J K L M N O P */
+ 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
+ /* 0xD8 Q R ---- ü */
+ 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
+ /* 0xE0 \ S T U V W X */
+ 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ /* 0xE8 Y Z ---- Ö ---- ---- ---- */
+ 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
+ /* 0xF0 0 1 2 3 4 5 6 7 */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
+ 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
+};
+
+/*
+ * EBCDIC (capitals) -> ASCII (small case)
+ */
+/* Variant of _ebcasc that folds EBCDIC capital letters (0xC1-0xC9,
+ * 0xD1-0xD9, 0xE2-0xE9) to ASCII lower-case, and likewise maps the
+ * umlauted capitals to their lower-case forms. */
+__u8 _ebcasc_reduce_case[256] =
+{
+ /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
+ 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
+
+ /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
+ 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+
+ /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
+ -ENP ->LF */
+ 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
+
+ /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
+ -IUS */
+ 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+
+ /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
+ -INP */
+ 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
+
+ /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
+ -SW */
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
+
+ /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
+ 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
+
+ /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
+ 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
+
+ /* 0x40 SP RSP ä ---- */
+ 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
+
+ /* 0x48 . < ( + | */
+ 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
+
+ /* 0x50 & ---- */
+ 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
+
+ /* 0x58 ß ! $ * ) ; */
+ 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
+
+ /* 0x60 - / ---- Ä ---- ---- ---- */
+ 0x2D, 0x2F, 0x07, 0x84, 0x07, 0x07, 0x07, 0x8F,
+
+ /* 0x68 ---- , % _ > ? */
+ 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
+
+ /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
+ 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+
+ /* 0x78 * ` : # @ ' = " */
+ 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
+
+ /* 0x80 * a b c d e f g */
+ 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+
+ /* 0x88 h i ---- ---- ---- */
+ 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
+
+ /* 0x90 ° j k l m n o p */
+ 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
+
+ /* 0x98 q r ---- ---- */
+ 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
+
+ /* 0xA0 ~ s t u v w x */
+ 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+
+ /* 0xA8 y z ---- ---- ---- ---- */
+ 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
+
+ /* 0xB0 ^ ---- § ---- */
+ 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
+
+ /* 0xB8 ---- [ ] ---- ---- ---- ---- */
+ 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
+
+ /* 0xC0 { A B C D E F G (folded to lower case) */
+ 0x7B, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+
+ /* 0xC8 H I ---- ö ---- */
+ 0x68, 0x69, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
+
+ /* 0xD0 } J K L M N O P (folded to lower case) */
+ 0x7D, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
+
+ /* 0xD8 Q R ---- ü */
+ 0x71, 0x72, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
+
+ /* 0xE0 \ S T U V W X (folded to lower case) */
+ 0x5C, 0xF6, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+
+ /* 0xE8 Y Z ---- Ö ---- ---- ---- */
+ 0x79, 0x7A, 0xFD, 0x07, 0x94, 0x07, 0x07, 0x07,
+
+ /* 0xF0 0 1 2 3 4 5 6 7 */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+
+ /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
+ 0x38, 0x39, 0x07, 0x07, 0x81, 0x07, 0x07, 0x07
+};
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
new file mode 100644
index 000000000000..a7efc394515e
--- /dev/null
+++ b/drivers/s390/net/Kconfig
@@ -0,0 +1,108 @@
+menu "S/390 network device drivers"
+ depends on NETDEVICES && ARCH_S390
+
+config LCS
+ tristate "Lan Channel Station Interface"
+ depends on NETDEVICES && (NET_ETHERNET || TR || FDDI)
+ help
+ Select this option if you want to use LCS networking on IBM S/390
+ or zSeries. This device driver supports Token Ring (IEEE 802.5),
+ FDDI (IEEE 802.7) and Ethernet.
+ This option is also available as a module which will be
+ called lcs.ko. If you do not know what it is, it's safe to say "Y".
+
+config CTC
+ tristate "CTC device support"
+ depends on NETDEVICES
+ help
+ Select this option if you want to use channel-to-channel networking
+ on IBM S/390 or zSeries. This device driver supports real CTC
+ coupling using ESCON. It also supports virtual CTCs when running
+ under VM. It will use the channel device configuration if this is
+ available. This option is also available as a module which will be
+ called ctc.ko. If you do not know what it is, it's safe to say "Y".
+
+config IUCV
+ tristate "IUCV support (VM only)"
+ help
+ Select this option if you want to use inter-user communication
+ under VM or VIF. If unsure, say "Y" to enable a fast communication
+ link between VM guests. At boot time the user ID of the guest needs
+ to be passed to the kernel. Note that both kernels need to be
+ compiled with this option and both need to be booted with the user ID
+ of the other VM guest.
+
+config NETIUCV
+ tristate "IUCV network device support (VM only)"
+ depends on IUCV && NETDEVICES
+ help
+ Select this option if you want to use inter-user communication
+ vehicle networking under VM or VIF. It enables a fast communication
+ link between VM guests. Using ifconfig a point-to-point connection
+	  can be established to the Linux for zSeries and S/390 system
+ running on the other VM guest. This option is also available
+ as a module which will be called netiucv.ko. If unsure, say "Y".
+
+config SMSGIUCV
+ tristate "IUCV special message support (VM only)"
+ depends on IUCV
+ help
+ Select this option if you want to be able to receive SMSG messages
+ from other VM guest systems.
+
+config CLAW
+ tristate "CLAW device support"
+ depends on NETDEVICES
+ help
+ This driver supports channel attached CLAW devices.
+ CLAW is Common Link Access for Workstation. Common devices
+ that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
+	  To compile as a module, choose M here: the module will be called
+	  claw.ko. To compile it into the kernel, choose Y.
+
+config QETH
+ tristate "Gigabit Ethernet device support"
+ depends on NETDEVICES && IP_MULTICAST && QDIO
+ help
+ This driver supports the IBM S/390 and zSeries OSA Express adapters
+ in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
+ interfaces in QDIO and HIPER mode.
+
+ For details please refer to the documentation provided by IBM at
+ <http://www10.software.ibm.com/developerworks/opensource/linux390>
+
+ To compile this driver as a module, choose M here: the
+ module will be called qeth.ko.
+
+
+comment "Gigabit Ethernet default settings"
+ depends on QETH
+
+config QETH_IPV6
+ bool "IPv6 support for gigabit ethernet"
+ depends on (QETH = IPV6) || (QETH && IPV6 = 'y')
+ help
+ If CONFIG_QETH is switched on, this option will include IPv6
+ support in the qeth device driver.
+
+config QETH_VLAN
+ bool "VLAN support for gigabit ethernet"
+ depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y')
+ help
+ If CONFIG_QETH is switched on, this option will include IEEE
+ 802.1q VLAN support in the qeth device driver.
+
+config QETH_PERF_STATS
+ bool "Performance statistics in /proc"
+ depends on QETH
+ help
+ When switched on, this option will add a file in the proc-fs
+ (/proc/qeth_perf_stats) containing performance statistics. It
+ may slightly impact performance, so this is only recommended for
+ internal tuning of the device driver.
+
+config CCWGROUP
+ tristate
+ default (LCS || CTC || QETH)
+
+endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 000000000000..7cabb80a2e41
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,14 @@
+#
+# S/390 network devices
+#
+
+# the ctc module is linked from three objects
+ctc-objs := ctcmain.o ctctty.o ctcdbug.o
+
+obj-$(CONFIG_IUCV) += iucv.o
+obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
+obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
+obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
+obj-$(CONFIG_LCS) += lcs.o cu3088.o
+# qeth core objects; the /proc interface is built only with CONFIG_PROC_FS
+qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o
+qeth-$(CONFIG_PROC_FS) += qeth_proc.o
+obj-$(CONFIG_QETH) += qeth.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
new file mode 100644
index 000000000000..06804d39a9c6
--- /dev/null
+++ b/drivers/s390/net/claw.c
@@ -0,0 +1,4447 @@
+/*
+ * drivers/s390/net/claw.c
+ * ESCON CLAW network driver
+ *
+ * $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $
+ *
+ *  Linux for zSeries version
+ * Copyright (C) 2002,2005 IBM Corporation
+ * Author(s) Original code written by:
+ * Kazuo Iimura (iimura@jp.ibm.com)
+ * Rewritten by
+ * Andy Richter (richtera@us.ibm.com)
+ * Marc Price (mwprice@us.ibm.com)
+ *
+ * sysfs parms:
+ * group x.x.rrrr,x.x.wwww
+ * read_buffer nnnnnnn
+ * write_buffer nnnnnn
+ * host_name aaaaaaaa
+ * adapter_name aaaaaaaa
+ * api_type aaaaaaaa
+ *
+ * eg.
+ * group 0.0.0200 0.0.0201
+ * read_buffer 25
+ * write_buffer 20
+ * host_name LINUX390
+ * adapter_name RS6K
+ * api_type TCPIP
+ *
+ * where
+ *
+ * The device id is decided by the order entries
+ * are added to the group the first is claw0 the second claw1
+ * up to CLAW_MAX_DEV
+ *
+ * rrrr - the first of 2 consecutive device addresses used for the
+ * CLAW protocol.
+ * The specified address is always used as the input (Read)
+ * channel and the next address is used as the output channel.
+ *
+ * wwww - the second of 2 consecutive device addresses used for
+ * the CLAW protocol.
+ * The specified address is always used as the output
+ * channel and the previous address is used as the input channel.
+ *
+ * read_buffer - specifies number of input buffers to allocate.
+ * write_buffer - specifies number of output buffers to allocate.
+ * host_name - host name
+ * adaptor_name - adaptor name
+ * api_type - API type TCPIP or API will be sent and expected
+ * as ws_name
+ *
+ * Note the following requirements:
+ * 1) host_name must match the configured adapter_name on the remote side
+ * 2) adaptor_name must match the configured host name on the remote side
+ *
+ * Change History
+ * 1.00 Initial release shipped
+ * 1.10 Changes for Buffer allocation
+ * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
+ * 1.25 Added Packing support
+ */
+#include <asm/bitops.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/io.h>
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include "cu3088.h"
+#include "claw.h"
+
+MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
+MODULE_DESCRIPTION("Linux for zSeries CLAW Driver\n" \
+ "Copyright 2000,2005 IBM Corporation\n");
+MODULE_LICENSE("GPL");
+
+/* Debugging is based on DEBUGMSG, IOTRACE, or FUNCTRACE options:
+ DEBUGMSG - Enables output of various debug messages in the code
+ IOTRACE - Enables output of CCW and other IO related traces
+ FUNCTRACE - Enables output of function entry/exit trace
+ Define any combination of above options to enable tracing
+
+ CLAW also uses the s390dbf file system see claw_trace and claw_setup
+*/
+
+/* following enables tracing */
+//#define DEBUGMSG
+//#define IOTRACE
+//#define FUNCTRACE
+
+#ifdef DEBUGMSG
+#define DEBUG
+#endif
+
+#ifdef IOTRACE
+#define DEBUG
+#endif
+
+#ifdef FUNCTRACE
+#define DEBUG
+#endif
+
+	/* scratch buffer for debug/trace formatting.
+	 * NOTE(review): file-scope but oddly indented and not static --
+	 * consider making it static to keep it out of the global namespace */
+	char debug_buffer[255];
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *claw_dbf_setup;
+static debug_info_t *claw_dbf_trace;
+
+/**
+ * CLAW Debug Facility functions
+ */
+/* Tear down the s390dbf debug areas; safe when either area was never
+ * registered (NULL). */
+static void
+claw_unregister_debug_facility(void)
+{
+	if (claw_dbf_setup)
+		debug_unregister(claw_dbf_setup);
+	if (claw_dbf_trace)
+		debug_unregister(claw_dbf_trace);
+}
+
+/*
+ * Register the "claw_setup" and "claw_trace" s390dbf areas with a
+ * hex/ascii view at level 2.  On partial failure both areas are torn
+ * down again and -ENOMEM is returned; returns 0 on success.
+ */
+static int
+claw_register_debug_facility(void)
+{
+	claw_dbf_setup = debug_register("claw_setup", 1, 1, 8);
+	claw_dbf_trace = debug_register("claw_trace", 1, 2, 8);
+	if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
+		printk(KERN_WARNING "Not enough memory for debug facility.\n");
+		claw_unregister_debug_facility();
+		return -ENOMEM;
+	}
+	debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(claw_dbf_setup, 2);
+	debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(claw_dbf_trace, 2);
+	return 0;
+}
+
+/* Mark the device busy.
+ * NOTE(review): plain assignment here, while the companion helpers use
+ * atomic bit operations on the same tbusy word -- confirm this mix is
+ * intentional. */
+static inline void
+claw_set_busy(struct net_device *dev)
+{
+	((struct claw_privbk *) dev->priv)->tbusy=1;
+	eieio();
+}
+
+/* Clear busy bit 0 and restart the transmit queue. */
+static inline void
+claw_clear_busy(struct net_device *dev)
+{
+	clear_bit(0, &(((struct claw_privbk *) dev->priv)->tbusy));
+	netif_wake_queue(dev);
+	eieio();
+}
+
+/* Return the current busy state (eieio barrier before the read). */
+static inline int
+claw_check_busy(struct net_device *dev)
+{
+	eieio();
+	return ((struct claw_privbk *) dev->priv)->tbusy;
+}
+
+/* Stop the transmit queue and atomically set busy bit nr. */
+static inline void
+claw_setbit_busy(int nr,struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	set_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy));
+}
+
+/* Atomically clear busy bit nr and restart the transmit queue. */
+static inline void
+claw_clearbit_busy(int nr,struct net_device *dev)
+{
+	clear_bit(nr,(void *)&(((struct claw_privbk *)dev->priv)->tbusy));
+	netif_wake_queue(dev);
+}
+
+/* Stop the transmit queue, then atomically test-and-set busy bit nr;
+ * returns the previous bit value. */
+static inline int
+claw_test_and_setbit_busy(int nr,struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return test_and_set_bit(nr,
+		(void *)&(((struct claw_privbk *) dev->priv)->tbusy));
+}
+
+
+/* Functions for the DEV methods */
+
+static int claw_probe(struct ccwgroup_device *cgdev);
+static void claw_remove_device(struct ccwgroup_device *cgdev);
+static void claw_purge_skb_queue(struct sk_buff_head *q);
+static int claw_new_device(struct ccwgroup_device *cgdev);
+static int claw_shutdown_device(struct ccwgroup_device *cgdev);
+static int claw_tx(struct sk_buff *skb, struct net_device *dev);
+static int claw_change_mtu( struct net_device *dev, int new_mtu);
+static int claw_open(struct net_device *dev);
+static void claw_irq_handler(struct ccw_device *cdev,
+ unsigned long intparm, struct irb *irb);
+static void claw_irq_tasklet ( unsigned long data );
+static int claw_release(struct net_device *dev);
+static void claw_write_retry ( struct chbk * p_ch );
+static void claw_write_next ( struct chbk * p_ch );
+static void claw_timer ( struct chbk * p_ch );
+
+/* Functions */
+static int add_claw_reads(struct net_device *dev,
+ struct ccwbk* p_first, struct ccwbk* p_last);
+static void inline ccw_check_return_code (struct ccw_device *cdev,
+ int return_code);
+static void inline ccw_check_unit_check (struct chbk * p_ch,
+ unsigned char sense );
+static int find_link(struct net_device *dev, char *host_name, char *ws_name );
+static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
+static int init_ccw_bk(struct net_device *dev);
+static void probe_error( struct ccwgroup_device *cgdev);
+static struct net_device_stats *claw_stats(struct net_device *dev);
+static int inline pages_to_order_of_mag(int num_of_pages);
+static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
+#ifdef DEBUG
+static void dumpit (char *buf, int len);
+#endif
+/* sysfs Functions */
+static ssize_t claw_hname_show(struct device *dev, char *buf);
+static ssize_t claw_hname_write(struct device *dev,
+ const char *buf, size_t count);
+static ssize_t claw_adname_show(struct device *dev, char *buf);
+static ssize_t claw_adname_write(struct device *dev,
+ const char *buf, size_t count);
+static ssize_t claw_apname_show(struct device *dev, char *buf);
+static ssize_t claw_apname_write(struct device *dev,
+ const char *buf, size_t count);
+static ssize_t claw_wbuff_show(struct device *dev, char *buf);
+static ssize_t claw_wbuff_write(struct device *dev,
+ const char *buf, size_t count);
+static ssize_t claw_rbuff_show(struct device *dev, char *buf);
+static ssize_t claw_rbuff_write(struct device *dev,
+ const char *buf, size_t count);
+static int claw_add_files(struct device *dev);
+static void claw_remove_files(struct device *dev);
+
+/* Functions for System Validate */
+static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
+static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
+ __u8 correlator, __u8 rc , char *local_name, char *remote_name);
+static int claw_snd_conn_req(struct net_device *dev, __u8 link);
+static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
+static int claw_snd_sys_validate_rsp(struct net_device *dev,
+ struct clawctl * p_ctl, __u32 return_code);
+static int claw_strt_conn_req(struct net_device *dev );
+static void claw_strt_read ( struct net_device *dev, int lock );
+static void claw_strt_out_IO( struct net_device *dev );
+static void claw_free_wrt_buf( struct net_device *dev );
+
+/* Functions for unpack reads */
+static void unpack_read (struct net_device *dev );
+
+/* ccwgroup table */
+
+/* ccwgroup driver: binds pairs of CCW devices (max_slaves = 2, the
+ * read and write subchannels) to the claw probe/remove/online/offline
+ * callbacks defined in this file. */
+static struct ccwgroup_driver claw_group_driver = {
+        .owner		= THIS_MODULE,
+        .name		= "claw",
+        .max_slaves	= 2,
+        .driver_id	= 0xC3D3C1E6,
+        .probe		= claw_probe,
+        .remove		= claw_remove_device,
+        .set_online	= claw_new_device,
+        .set_offline	= claw_shutdown_device,
+};
+
+/*
+*
+* Key functions
+*/
+
+/*----------------------------------------------------------------*
+ * claw_probe *
+ * this function is called for each CLAW device. *
+ *----------------------------------------------------------------*/
+/*
+ * claw_probe
+ *
+ * Called once per grouped CLAW device pair.  Allocates and zeroes the
+ * private control block, the MTC envelope and the environment block,
+ * fills in defaults (5 read/write buffers of CLAW_FRAME_SIZE), creates
+ * the sysfs attribute files and wires both subchannels to
+ * claw_irq_handler.  Returns 0 or a negative errno.
+ *
+ * NOTE(review): on the later error paths privptr (and its
+ * sub-allocations) appear to leak -- cgdev->dev.driver_data is only set
+ * at the very end, so probe_error(cgdev) cannot reach them; verify
+ * against probe_error's implementation.
+ */
+static int
+claw_probe(struct ccwgroup_device *cgdev)
+{
+	int rc;
+	struct claw_privbk *privptr=NULL;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s Enter\n",__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"probe");
+	/* hold a reference to the group device for the driver's lifetime */
+	if (!get_device(&cgdev->dev))
+		return -ENODEV;
+#ifdef DEBUGMSG
+	printk(KERN_INFO "claw: variable cgdev =\n");
+	dumpit((char *)cgdev, sizeof(struct ccwgroup_device));
+#endif
+	privptr = kmalloc(sizeof(struct claw_privbk), GFP_KERNEL);
+	if (privptr == NULL) {
+		probe_error(cgdev);
+		put_device(&cgdev->dev);
+		printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
+			cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
+		CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
+		return -ENOMEM;
+	}
+	memset(privptr,0x00,sizeof(struct claw_privbk));
+	privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
+	privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL);
+        if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
+		probe_error(cgdev);
+		put_device(&cgdev->dev);
+		printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
+			cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
+		CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
+		return -ENOMEM;
+	}
+	memset(privptr->p_mtc_envelope, 0x00, MAX_ENVELOPE_SIZE);
+	memset(privptr->p_env, 0x00, sizeof(struct claw_env));
+	/* names default to "not defined" until set via sysfs */
+	memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
+	memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
+	memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
+	privptr->p_env->packing = 0;
+	privptr->p_env->write_buffers = 5;
+	privptr->p_env->read_buffers = 5;
+	privptr->p_env->read_size = CLAW_FRAME_SIZE;
+	privptr->p_env->write_size = CLAW_FRAME_SIZE;
+	rc = claw_add_files(&cgdev->dev);
+	if (rc) {
+		probe_error(cgdev);
+		put_device(&cgdev->dev);
+		printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
+			cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
+		CLAW_DBF_TEXT_(2,setup,"probex%d",rc);
+		return rc;
+	}
+	printk(KERN_INFO "claw: sysfs files added for %s\n",cgdev->cdev[0]->dev.bus_id);
+	privptr->p_env->p_priv = privptr;
+        cgdev->cdev[0]->handler = claw_irq_handler;
+	cgdev->cdev[1]->handler = claw_irq_handler;
+	cgdev->dev.driver_data = privptr;
+#ifdef FUNCTRACE
+        printk(KERN_INFO "claw:%s exit on line %d, "
+		"rc = 0\n",__FUNCTION__,__LINE__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"prbext 0");
+
+	return 0;
+}  /* end of claw_probe       */
+
+/*-------------------------------------------------------------------*
+ * claw_tx *
+ *-------------------------------------------------------------------*/
+
+/*
+ * claw_tx
+ *
+ * net_device hard_start_xmit entry point.  Rejects a NULL skb (counts
+ * it as tx_dropped, returns -EIO), otherwise hands the skb to
+ * claw_hw_tx() on link 1 while holding the write channel's ccwdev lock.
+ * Returns the claw_hw_tx() result.
+ */
+static int
+claw_tx(struct sk_buff *skb, struct net_device *dev)
+{
+        int             rc;
+        struct claw_privbk *privptr=dev->priv;
+	unsigned long saveflags;
+        struct chbk *p_ch;
+
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s enter\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(4,trace,"claw_tx");
+        p_ch=&privptr->channel[WRITE];
+        if (skb == NULL) {
+                printk(KERN_WARNING "%s: null pointer passed as sk_buffer\n",
+			dev->name);
+                privptr->stats.tx_dropped++;
+#ifdef FUNCTRACE
+                printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n",
+			dev->name,__FUNCTION__, __LINE__);
+#endif
+		CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO);
+                return -EIO;
+        }
+
+#ifdef IOTRACE
+        printk(KERN_INFO "%s: variable sk_buff=\n",dev->name);
+        dumpit((char *) skb, sizeof(struct sk_buff));
+        printk(KERN_INFO "%s: variable dev=\n",dev->name);
+        dumpit((char *) dev, sizeof(struct net_device));
+#endif
+	/* serialize against the channel's interrupt handler */
+        spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
+        rc=claw_hw_tx( skb, dev, 1 );
+        spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n",
+		dev->name, __FUNCTION__, __LINE__, rc);
+#endif
+	CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc);
+        return rc;
+}   /*  end of claw_tx  */
+
+/*------------------------------------------------------------------*
+ * pack the collect queue into an skb and return it *
+ * If not packing just return the top skb from the queue *
+ *------------------------------------------------------------------*/
+
+/*
+ * claw_pack_skb
+ *
+ * Drains the write channel's collect queue into a single new skb.
+ * If packing is disabled (p_env->packing != DO_PACKED) the first queued
+ * skb is returned as-is.  Otherwise consecutive skbs are copied into a
+ * freshly allocated write_size buffer (8 bytes reserved, presumably for
+ * the pack header -- cb[1] = 'P' tags the skb as packed) until the next
+ * skb would not fit; that skb is re-queued at the head.  Returns the
+ * packed skb, or NULL if the queue was empty or allocation failed.
+ */
+static struct sk_buff *
+claw_pack_skb(struct claw_privbk *privptr)
+{
+	struct sk_buff *new_skb,*held_skb;
+	struct chbk *p_ch = &privptr->channel[WRITE];
+	struct claw_env  *p_env = privptr->p_env;
+	int	pkt_cnt,pk_ind,so_far;
+
+	new_skb = NULL;		/* assume no dice */
+	pkt_cnt = 0;
+	CLAW_DBF_TEXT(4,trace,"PackSKBe");
+	if (skb_queue_len(&p_ch->collect_queue) > 0) {
+	/* some data */
+		held_skb = skb_dequeue(&p_ch->collect_queue);
+		if (p_env->packing != DO_PACKED)
+			return held_skb;
+		if (held_skb)
+			atomic_dec(&held_skb->users);
+		else
+			return NULL;
+		/* get a new SKB we will pack at least one */
+		new_skb = dev_alloc_skb(p_env->write_size);
+		if (new_skb == NULL) {
+			/* allocation failed: put the dequeued skb back */
+			atomic_inc(&held_skb->users);
+			skb_queue_head(&p_ch->collect_queue,held_skb);
+			return NULL;
+		}
+		/* we have packed packet and a place to put it */
+		pk_ind = 1;
+		so_far = 0;
+		new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
+		while ((pk_ind) && (held_skb != NULL)) {
+			if (held_skb->len+so_far <= p_env->write_size-8) {
+				memcpy(skb_put(new_skb,held_skb->len),
+					held_skb->data,held_skb->len);
+				privptr->stats.tx_packets++;
+				so_far += held_skb->len;
+				pkt_cnt++;
+				dev_kfree_skb_irq(held_skb);
+				held_skb = skb_dequeue(&p_ch->collect_queue);
+				if (held_skb)
+					atomic_dec(&held_skb->users);
+			} else {
+				/* doesn't fit: stop packing, re-queue it */
+				pk_ind = 0;
+				atomic_inc(&held_skb->users);
+				skb_queue_head(&p_ch->collect_queue,held_skb);
+			}
+		}
+#ifdef IOTRACE
+		printk(KERN_INFO "%s: %s() Packed %d len %d\n",
+			p_env->ndev->name,
+			__FUNCTION__,pkt_cnt,new_skb->len);
+#endif
+	}
+	CLAW_DBF_TEXT(4,trace,"PackSKBx");
+	return new_skb;
+}
+
+/*-------------------------------------------------------------------*
+ *   claw_change_mtu                                                 *
+ *                                                                   *
+ *   Validate and apply a new MTU for the interface.  Legal values   *
+ *   run from 60 bytes up to the configured write buffer size;       *
+ *   anything outside that range is rejected with -EINVAL.           *
+ *-------------------------------------------------------------------*/
+
+static int
+claw_change_mtu(struct net_device *dev, int new_mtu)
+{
+    struct claw_privbk *privptr = dev->priv;
+    int max_mtu;
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
+#endif
+#ifdef DEBUGMSG
+    printk(KERN_INFO "variable dev =\n");
+    dumpit((char *) dev, sizeof(struct net_device));
+    printk(KERN_INFO "variable new_mtu = %d\n", new_mtu);
+#endif
+    CLAW_DBF_TEXT(4,trace,"setmtu");
+    max_mtu = privptr->p_env->write_size;
+    /* reject anything below 60 bytes or above the write buffer */
+    if (new_mtu < 60 || new_mtu > max_mtu) {
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n",
+            dev->name,
+            __FUNCTION__, __LINE__);
+#endif
+        return -EINVAL;
+    }
+    dev->mtu = new_mtu;
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name,
+        __FUNCTION__, __LINE__);
+#endif
+    return 0;
+}    /* end of claw_change_mtu */
+
+
+/*-------------------------------------------------------------------*
+ *   claw_open                                                       *
+ *                                                                   *
+ *   Bring the interface up.  Allocates the CCW buffer chains on     *
+ *   first open, selects packed vs. raw framing from the configured  *
+ *   api_type, then issues a halt on both channels and sleeps (with  *
+ *   a 15 second timer per channel) until the interrupt handler or   *
+ *   the timer wakes us.  If either channel reports a device status  *
+ *   beyond channel-end/device-end, or a timer fired, all buffers    *
+ *   are released again and -EIO is returned.                        *
+ *   Returns 0, -ENODEV, -ENOMEM or -EIO.                            *
+ *-------------------------------------------------------------------*/
+static int
+claw_open(struct net_device *dev)
+{
+
+    int rc;
+    int i;
+    unsigned long saveflags=0;
+    unsigned long parm;
+    struct claw_privbk *privptr;
+    DECLARE_WAITQUEUE(wait, current);
+    struct timer_list timer;
+    struct ccwbk *p_buf;
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
+#endif
+    CLAW_DBF_TEXT(4,trace,"open");
+    /* Logical || (the original used bitwise |, which does not
+     * short-circuit and would dereference dev->name even when
+     * dev itself is NULL).                                      */
+    if (!dev || (dev->name[0] == 0x00)) {
+        CLAW_DBF_TEXT(2,trace,"BadDev");
+        printk(KERN_WARNING "claw: Bad device at open failing \n");
+        return -ENODEV;
+    }
+    privptr = (struct claw_privbk *)dev->priv;
+    /* allocate and initialize CCW blocks */
+    if (privptr->buffs_alloc == 0) {
+        rc=init_ccw_bk(dev);
+        if (rc) {
+            printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n",
+                dev->name,
+                __FUNCTION__, __LINE__);
+            CLAW_DBF_TEXT(2,trace,"openmem");
+            return -ENOMEM;
+        }
+    }
+    privptr->system_validate_comp=0;
+    privptr->release_pend=0;
+    /* packed API partners negotiate; everyone else gets raw frames */
+    if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
+        privptr->p_env->read_size=DEF_PACK_BUFSIZE;
+        privptr->p_env->write_size=DEF_PACK_BUFSIZE;
+        privptr->p_env->packing=PACKING_ASK;
+    } else {
+        privptr->p_env->packing=0;
+        privptr->p_env->read_size=CLAW_FRAME_SIZE;
+        privptr->p_env->write_size=CLAW_FRAME_SIZE;
+    }
+    claw_set_busy(dev);
+    tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
+        (unsigned long) &privptr->channel[READ]);
+    /* halt both channels; claw_irq_handler or the timer wakes us */
+    for ( i = 0; i < 2; i++) {
+        CLAW_DBF_TEXT_(2,trace,"opn_ch%d",i);
+        init_waitqueue_head(&privptr->channel[i].wait);
+        /* skb_queue_head_init(&p_ch->io_queue); */
+        if (i == WRITE)
+            skb_queue_head_init(
+                &privptr->channel[WRITE].collect_queue);
+        privptr->channel[i].flag_a = 0;
+        privptr->channel[i].IO_active = 0;
+        privptr->channel[i].flag &= ~CLAW_TIMER;
+        /* 15 second guard timer in case no interrupt arrives */
+        init_timer(&timer);
+        timer.function = (void *)claw_timer;
+        timer.data = (unsigned long)(&privptr->channel[i]);
+        timer.expires = jiffies + 15*HZ;
+        add_timer(&timer);
+        spin_lock_irqsave(get_ccwdev_lock(
+            privptr->channel[i].cdev), saveflags);
+        parm = (unsigned long) &privptr->channel[i];
+        privptr->channel[i].claw_state = CLAW_START_HALT_IO;
+        rc = 0;
+        add_wait_queue(&privptr->channel[i].wait, &wait);
+        rc = ccw_device_halt(
+            (struct ccw_device *)privptr->channel[i].cdev,parm);
+        set_current_state(TASK_INTERRUPTIBLE);
+        spin_unlock_irqrestore(
+            get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+        schedule();
+        set_current_state(TASK_RUNNING);
+        remove_wait_queue(&privptr->channel[i].wait, &wait);
+        if(rc != 0)
+            ccw_check_return_code(privptr->channel[i].cdev, rc);
+        if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
+            del_timer(&timer);
+    }
+    /* any dstat bits beyond channel-end/device-end, or a timeout
+     * (CLAW_TIMER flag) on either channel, means the remote side
+     * never answered: tear everything back down                   */
+    if ((((privptr->channel[READ].last_dstat |
+        privptr->channel[WRITE].last_dstat) &
+        ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
+        (((privptr->channel[READ].flag |
+        privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s: channel problems during open - read:"
+            " %02x -  write: %02x\n",
+            dev->name,
+            privptr->channel[READ].last_dstat,
+            privptr->channel[WRITE].last_dstat);
+#endif
+        printk(KERN_INFO "%s: remote side is not ready\n", dev->name);
+        CLAW_DBF_TEXT(2,trace,"notrdy");
+
+        for ( i = 0; i < 2; i++) {
+            spin_lock_irqsave(
+                get_ccwdev_lock(privptr->channel[i].cdev),
+                saveflags);
+            parm = (unsigned long) &privptr->channel[i];
+            privptr->channel[i].claw_state = CLAW_STOP;
+            /* pass the ccw_device pointer itself; the original
+             * took &...cdev (a struct ccw_device **) and hid the
+             * type error behind a cast — cf. the halt calls above
+             * and in claw_release                                 */
+            rc = ccw_device_halt(
+                privptr->channel[i].cdev, parm);
+            spin_unlock_irqrestore(
+                get_ccwdev_lock(privptr->channel[i].cdev),
+                saveflags);
+            if (rc != 0) {
+                ccw_check_return_code(
+                    privptr->channel[i].cdev, rc);
+            }
+        }
+        /* release the CCW chain and the data buffers again;
+         * buffers >= PAGE_SIZE live on the active chains, smaller
+         * ones in a single contiguous allocation                  */
+        free_pages((unsigned long)privptr->p_buff_ccw,
+            (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+        if (privptr->p_env->read_size < PAGE_SIZE) {
+            free_pages((unsigned long)privptr->p_buff_read,
+                (int)pages_to_order_of_mag(
+                    privptr->p_buff_read_num));
+        }
+        else {
+            p_buf=privptr->p_read_active_first;
+            while (p_buf!=NULL) {
+                free_pages((unsigned long)p_buf->p_buffer,
+                    (int)pages_to_order_of_mag(
+                        privptr->p_buff_pages_perread ));
+                p_buf=p_buf->next;
+            }
+        }
+        if (privptr->p_env->write_size < PAGE_SIZE ) {
+            free_pages((unsigned long)privptr->p_buff_write,
+                (int)pages_to_order_of_mag(
+                    privptr->p_buff_write_num));
+        }
+        else {
+            p_buf=privptr->p_write_active_first;
+            while (p_buf!=NULL) {
+                free_pages((unsigned long)p_buf->p_buffer,
+                    (int)pages_to_order_of_mag(
+                        privptr->p_buff_pages_perwrite ));
+                p_buf=p_buf->next;
+            }
+        }
+        privptr->buffs_alloc = 0;
+        privptr->channel[READ].flag= 0x00;
+        privptr->channel[WRITE].flag = 0x00;
+        privptr->p_buff_ccw=NULL;
+        privptr->p_buff_read=NULL;
+        privptr->p_buff_write=NULL;
+        claw_clear_busy(dev);
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n",
+            dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(2,trace,"open EIO");
+        return -EIO;
+    }
+
+    /* Send SystemValidate command */
+
+    claw_clear_busy(dev);
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n",
+        dev->name,__FUNCTION__,__LINE__);
+#endif
+    CLAW_DBF_TEXT(4,trace,"openok");
+    return 0;
+}    /* end of claw_open */
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*       claw_irq_handler                                             *
+*                                                                    *
+*       Primary interrupt handler for both CLAW channels.  Maps the  *
+*       interrupting ccw_device to its read or write channel block,  *
+*       copies the IRB, reports subchannel/unit checks, and then     *
+*       drives the per-channel state machine (CLAW_STOP /            *
+*       CLAW_START_HALT_IO / CLAW_START_READ / CLAW_START_WRITE).    *
+*--------------------------------------------------------------------*/
+static void
+claw_irq_handler(struct ccw_device *cdev,
+    unsigned long intparm, struct irb *irb)
+{
+    struct chbk *p_ch = NULL;
+    struct claw_privbk *privptr = NULL;
+    struct net_device *dev = NULL;
+    struct claw_env *p_env;
+    struct chbk *p_ch_r=NULL;
+
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s enter \n",__FUNCTION__);
+#endif
+    CLAW_DBF_TEXT(4,trace,"clawirq");
+    /* Bypass all 'unsolicited interrupts' */
+    if (!cdev->dev.driver_data) {
+        printk(KERN_WARNING "claw: unsolicited interrupt for device:"
+            "%s received c-%02x d-%02x\n",
+            cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat);
+#ifdef FUNCTRACE
+        printk(KERN_INFO "claw: %s() "
+            "exit on line %d\n",__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(2,trace,"badirq");
+        return;
+    }
+    privptr = (struct claw_privbk *)cdev->dev.driver_data;
+
+    /* Try to extract channel from driver data. */
+    if (privptr->channel[READ].cdev == cdev)
+        p_ch = &privptr->channel[READ];
+    else if (privptr->channel[WRITE].cdev == cdev)
+        p_ch = &privptr->channel[WRITE];
+    else {
+        printk(KERN_WARNING "claw: Can't determine channel for "
+            "interrupt, device %s\n", cdev->dev.bus_id);
+        CLAW_DBF_TEXT(2,trace,"badchan");
+        return;
+    }
+    CLAW_DBF_TEXT_(4,trace,"IRQCH=%d",p_ch->flag);
+
+    dev = (struct net_device *) (p_ch->ndev);
+    p_env=privptr->p_env;
+
+#ifdef IOTRACE
+    printk(KERN_INFO "%s: interrupt for device: %04x "
+        "received c-%02x d-%02x state-%02x\n",
+        dev->name, p_ch->devno, irb->scsw.cstat,
+        irb->scsw.dstat, p_ch->claw_state);
+#endif
+
+    /* Copy interruption response block. */
+    memcpy(p_ch->irb, irb, sizeof(struct irb));
+
+    /* Check for good subchannel return code, otherwise error message */
+    /* (PCI-only cstat is not treated as an error here)               */
+    if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) {
+        printk(KERN_INFO "%s: subchannel check for device: %04x -"
+            " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
+            dev->name, p_ch->devno,
+            irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa);
+#ifdef IOTRACE
+        dumpit((char *)irb,sizeof(struct irb));
+        dumpit((char *)(unsigned long)irb->scsw.cpa,
+            sizeof(struct ccw1));
+#endif
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d\n",
+            dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(2,trace,"chanchk");
+        /* return; */
+    }
+
+    /* Check the reason-code of a unit check */
+    if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+        ccw_check_unit_check(p_ch, irb->ecw[0]);
+    }
+
+    /* State machine to bring the connection up, down and to restart */
+    p_ch->last_dstat = irb->scsw.dstat;
+
+    switch (p_ch->claw_state) {
+    case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name);
+#endif
+        /* ignore anything that is not a final/pending status */
+        if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
+        (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
+        (p_ch->irb->scsw.stctl ==
+        (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+#ifdef FUNCTRACE
+            printk(KERN_INFO "%s:%s Exit on line %d\n",
+                dev->name,__FUNCTION__,__LINE__);
+#endif
+            return;
+        }
+        wake_up(&p_ch->wait);   /* wake up claw_release */
+
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s: CLAW_STOP exit\n", dev->name);
+#endif
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d\n",
+            dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(4,trace,"stop");
+        return;
+
+    case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n",
+            dev->name);
+#endif
+        if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
+        (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
+        (p_ch->irb->scsw.stctl ==
+        (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+#ifdef FUNCTRACE
+            printk(KERN_INFO "%s:%s Exit on line %d\n",
+                dev->name,__FUNCTION__,__LINE__);
+#endif
+            CLAW_DBF_TEXT(4,trace,"haltio");
+            return;
+        }
+        /* advance each channel to its running state; the write
+         * channel kicks off the SYSTEM_VALIDATE handshake         */
+        if (p_ch->flag == CLAW_READ) {
+            p_ch->claw_state = CLAW_START_READ;
+            wake_up(&p_ch->wait); /* wake claw_open (READ)*/
+        }
+        else
+           if (p_ch->flag == CLAW_WRITE) {
+            p_ch->claw_state = CLAW_START_WRITE;
+            /*      send SYSTEM_VALIDATE                    */
+            claw_strt_read(dev, LOCK_NO);
+            claw_send_control(dev,
+                SYSTEM_VALIDATE_REQUEST,
+                0, 0, 0,
+                p_env->host_name,
+                p_env->adapter_name );
+        } else {
+            printk(KERN_WARNING "claw: unsolicited "
+                "interrupt for device:"
+                "%s received c-%02x d-%02x\n",
+                cdev->dev.bus_id,
+                irb->scsw.cstat,
+                irb->scsw.dstat);
+            return;
+        }
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO exit\n",
+            dev->name);
+#endif
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d\n",
+            dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(4,trace,"haltio");
+        return;
+    case CLAW_START_READ:
+        CLAW_DBF_TEXT(4,trace,"ReadIRQ");
+        if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+            clear_bit(0, (void *)&p_ch->IO_active);
+            /* sense 0x41/0x40/0x00 — remote-side failure;
+             * count it and tell the operator to restart     */
+            if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
+                (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
+                (p_ch->irb->ecw[0]) == 0)
+            {
+                privptr->stats.rx_errors++;
+                printk(KERN_INFO "%s: Restart is "
+                    "required after remote "
+                    "side recovers \n",
+                    dev->name);
+            }
+#ifdef FUNCTRACE
+            printk(KERN_INFO "%s:%s Exit on line %d\n",
+                dev->name,__FUNCTION__,__LINE__);
+#endif
+            CLAW_DBF_TEXT(4,trace,"notrdy");
+            return;
+        }
+        /* PCI with no device status: schedule the read bottom
+         * half (unless it is already marked active)            */
+        if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) &&
+            (p_ch->irb->scsw.dstat==0)) {
+            if (test_and_set_bit(CLAW_BH_ACTIVE,
+                (void *)&p_ch->flag_a) == 0) {
+                tasklet_schedule(&p_ch->tasklet);
+            }
+            else {
+                CLAW_DBF_TEXT(4,trace,"PCINoBH");
+            }
+#ifdef FUNCTRACE
+            printk(KERN_INFO "%s:%s Exit on line %d\n",
+                dev->name,__FUNCTION__,__LINE__);
+#endif
+            CLAW_DBF_TEXT(4,trace,"PCI_read");
+            return;
+        }
+        if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
+         (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
+         (p_ch->irb->scsw.stctl ==
+         (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+#ifdef FUNCTRACE
+            printk(KERN_INFO "%s:%s Exit on line %d\n",
+                dev->name,__FUNCTION__,__LINE__);
+#endif
+            CLAW_DBF_TEXT(4,trace,"SPend_rd");
+            return;
+        }
+        /* read completed: clear busy state and run bottom half */
+        clear_bit(0, (void *)&p_ch->IO_active);
+        claw_clearbit_busy(TB_RETRY,dev);
+        if (test_and_set_bit(CLAW_BH_ACTIVE,
+            (void *)&p_ch->flag_a) == 0) {
+            tasklet_schedule(&p_ch->tasklet);
+        }
+        else {
+            CLAW_DBF_TEXT(4,trace,"RdBHAct");
+        }
+
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s: process CLAW_START_READ exit\n",
+            dev->name);
+#endif
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d\n",
+            dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(4,trace,"RdIRQXit");
+        return;
+    case CLAW_START_WRITE:
+        if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+            printk(KERN_INFO "%s: Unit Check Occured in "
+                "write channel\n",dev->name);
+            clear_bit(0, (void *)&p_ch->IO_active);
+            /* sense 0x80: resetting event — retry the write
+             * via a 10 second timer                          */
+            if (p_ch->irb->ecw[0] & 0x80 ) {
+                printk(KERN_INFO "%s: Resetting Event "
+                    "occurred:\n",dev->name);
+                init_timer(&p_ch->timer);
+                p_ch->timer.function =
+                    (void *)claw_write_retry;
+                p_ch->timer.data = (unsigned long)p_ch;
+                p_ch->timer.expires = jiffies + 10*HZ;
+                add_timer(&p_ch->timer);
+                printk(KERN_INFO "%s: write connection "
+                    "restarting\n",dev->name);
+            }
+#ifdef FUNCTRACE
+            printk(KERN_INFO "%s:%s Exit on line %d\n",
+                dev->name,__FUNCTION__,__LINE__);
+#endif
+            CLAW_DBF_TEXT(4,trace,"rstrtwrt");
+            return;
+        }
+        if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) {
+            clear_bit(0, (void *)&p_ch->IO_active);
+            printk(KERN_INFO "%s: Unit Exception "
+                "Occured in write channel\n",
+                dev->name);
+        }
+        if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
+        (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
+        (p_ch->irb->scsw.stctl ==
+        (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+#ifdef FUNCTRACE
+            printk(KERN_INFO "%s:%s Exit on line %d\n",
+                dev->name,__FUNCTION__,__LINE__);
+#endif
+            CLAW_DBF_TEXT(4,trace,"writeUE");
+            return;
+        }
+        /* write completed: push queued work and poke the read
+         * bottom half so inbound data is also drained          */
+        clear_bit(0, (void *)&p_ch->IO_active);
+        if (claw_test_and_setbit_busy(TB_TX,dev)==0) {
+            claw_write_next(p_ch);
+            claw_clearbit_busy(TB_TX,dev);
+            claw_clear_busy(dev);
+        }
+        p_ch_r=(struct chbk *)&privptr->channel[READ];
+        if (test_and_set_bit(CLAW_BH_ACTIVE,
+            (void *)&p_ch_r->flag_a) == 0) {
+            tasklet_schedule(&p_ch_r->tasklet);
+        }
+
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s: process CLAW_START_WRITE exit\n",
+             dev->name);
+#endif
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d\n",
+            dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(4,trace,"StWtExit");
+        return;
+    default:
+        printk(KERN_WARNING "%s: wrong selection code - irq "
+            "state=%d\n",dev->name,p_ch->claw_state);
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d\n",
+            dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(2,trace,"badIRQ");
+        return;
+    }
+
+}       /* end of claw_irq_handler */
+
+
+/*-------------------------------------------------------------------*
+*       claw_irq_tasklet                                             *
+*                                                                    *
+*       Bottom half for a channel: drains inbound data with          *
+*       unpack_read() and then clears CLAW_BH_ACTIVE so that the     *
+*       interrupt handler may schedule the tasklet again.            *
+*--------------------------------------------------------------------*/
+static void
+claw_irq_tasklet ( unsigned long data )
+{
+    struct chbk *ch = (struct chbk *) data;
+    struct net_device *ndev = (struct net_device *) ch->ndev;
+    struct claw_privbk *privptr;
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Enter \n",ndev->name,__FUNCTION__);
+#endif
+#ifdef DEBUGMSG
+    printk(KERN_INFO "%s: variable p_ch =\n",ndev->name);
+    dumpit((char *) ch, sizeof(struct chbk));
+#endif
+    CLAW_DBF_TEXT(4,trace,"IRQtask");
+
+    privptr = (struct claw_privbk *) ndev->priv;
+
+#ifdef DEBUGMSG
+    printk(KERN_INFO "%s: bh routine - state-%02x\n" ,
+        ndev->name, ch->claw_state);
+#endif
+
+    unpack_read(ndev);
+    clear_bit(CLAW_BH_ACTIVE, (void *)&ch->flag_a);
+    CLAW_DBF_TEXT(4,trace,"TskletXt");
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d\n",
+        ndev->name,__FUNCTION__,__LINE__);
+#endif
+}       /* end of claw_irq_tasklet */
+
+/*-------------------------------------------------------------------*
+*       claw_release                                                 *
+*                                                                    *
+*       Bring the interface down: halt both channels (waiting for    *
+*       the CLAW_STOP wake-up from claw_irq_handler), free the       *
+*       pending pack skb and all CCW/data buffers, then reset the    *
+*       read chain headers and move pending writes back onto the     *
+*       free chain.  Always returns 0.                               *
+*--------------------------------------------------------------------*/
+static int
+claw_release(struct net_device *dev)
+{
+    int rc;
+    int i;
+    unsigned long saveflags;
+    unsigned long parm;
+    struct claw_privbk *privptr;
+    DECLARE_WAITQUEUE(wait, current);
+    struct ccwbk* p_this_ccw;
+    struct ccwbk* p_buf;
+
+    if (!dev)
+        return 0;
+    privptr = (struct claw_privbk *) dev->priv;
+    if (!privptr)
+        return 0;
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
+#endif
+    CLAW_DBF_TEXT(4,trace,"release");
+#ifdef DEBUGMSG
+    printk(KERN_INFO "%s: variable dev =\n",dev->name);
+    dumpit((char *) dev, sizeof(struct net_device));
+    printk(KERN_INFO "Priv Buffalloc %d\n",privptr->buffs_alloc);
+    printk(KERN_INFO "Priv p_buff_ccw = %p\n",&privptr->p_buff_ccw);
+#endif
+    privptr->release_pend=1;
+    claw_setbit_busy(TB_STOP,dev);
+    /* halt WRITE first, then READ; sleep until the interrupt
+     * handler (CLAW_STOP case) wakes us for each channel        */
+    for ( i = 1; i >=0 ; i--) {
+        spin_lock_irqsave(
+            get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+     /*   del_timer(&privptr->channel[READ].timer);  */
+         privptr->channel[i].claw_state = CLAW_STOP;
+         privptr->channel[i].IO_active = 0;
+         parm = (unsigned long) &privptr->channel[i];
+        if (i == WRITE)
+            claw_purge_skb_queue(
+                &privptr->channel[WRITE].collect_queue);
+        rc = ccw_device_halt (privptr->channel[i].cdev, parm);
+        if (privptr->system_validate_comp==0x00)  /* never opened? */
+            init_waitqueue_head(&privptr->channel[i].wait);
+        add_wait_queue(&privptr->channel[i].wait, &wait);
+        set_current_state(TASK_INTERRUPTIBLE);
+        spin_unlock_irqrestore(
+            get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+        schedule();
+        set_current_state(TASK_RUNNING);
+        remove_wait_queue(&privptr->channel[i].wait, &wait);
+        if (rc != 0) {
+            ccw_check_return_code(privptr->channel[i].cdev, rc);
+        }
+    }
+    /* drop a partially packed outbound skb, if any */
+    if (privptr->pk_skb != NULL) {
+        dev_kfree_skb(privptr->pk_skb);
+        privptr->pk_skb = NULL;
+    }
+    if(privptr->buffs_alloc != 1) {
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d\n",
+        dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(4,trace,"none2fre");
+        return 0;
+    }
+    CLAW_DBF_TEXT(4,trace,"freebufs");
+    if (privptr->p_buff_ccw != NULL) {
+        free_pages((unsigned long)privptr->p_buff_ccw,
+            (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+    }
+    CLAW_DBF_TEXT(4,trace,"freeread");
+    /* small read buffers are one contiguous allocation; large
+     * ones hang one-per-ccwbk off the active read chain         */
+    if (privptr->p_env->read_size < PAGE_SIZE) {
+        if (privptr->p_buff_read != NULL) {
+            free_pages((unsigned long)privptr->p_buff_read,
+                (int)pages_to_order_of_mag(privptr->p_buff_read_num));
+        }
+    }
+    else {
+        p_buf=privptr->p_read_active_first;
+        while (p_buf!=NULL) {
+            free_pages((unsigned long)p_buf->p_buffer,
+                (int)pages_to_order_of_mag(
+                privptr->p_buff_pages_perread ));
+            p_buf=p_buf->next;
+        }
+    }
+    CLAW_DBF_TEXT(4,trace,"freewrit");
+    if (privptr->p_env->write_size < PAGE_SIZE ) {
+        free_pages((unsigned long)privptr->p_buff_write,
+            (int)pages_to_order_of_mag(privptr->p_buff_write_num));
+    }
+    else {
+        p_buf=privptr->p_write_active_first;
+        while (p_buf!=NULL) {
+            free_pages((unsigned long)p_buf->p_buffer,
+                (int)pages_to_order_of_mag(
+                privptr->p_buff_pages_perwrite ));
+            p_buf=p_buf->next;
+        }
+    }
+    CLAW_DBF_TEXT(4,trace,"clearptr");
+    privptr->buffs_alloc = 0;
+    privptr->p_buff_ccw=NULL;
+    privptr->p_buff_read=NULL;
+    privptr->p_buff_write=NULL;
+    privptr->system_validate_comp=0;
+    privptr->release_pend=0;
+    /* Remove any writes that were pending and reset all reads */
+    p_this_ccw=privptr->p_read_active_first;
+    while (p_this_ccw!=NULL) {
+        p_this_ccw->header.length=0xffff;
+        p_this_ccw->header.opcode=0xff;
+        p_this_ccw->header.flag=0x00;
+        p_this_ccw=p_this_ccw->next;
+    }
+
+    /* move every active write block back onto the free chain */
+    while (privptr->p_write_active_first!=NULL) {
+        p_this_ccw=privptr->p_write_active_first;
+        p_this_ccw->header.flag=CLAW_PENDING;
+        privptr->p_write_active_first=p_this_ccw->next;
+        p_this_ccw->next=privptr->p_write_free_chain;
+        privptr->p_write_free_chain=p_this_ccw;
+        ++privptr->write_free_count;
+    }
+    privptr->p_write_active_last=NULL;
+    privptr->mtc_logical_link = -1;
+    privptr->mtc_skipping = 1;
+    privptr->mtc_offset=0;
+
+    /* warn if the last device status shows more than the normal
+     * channel-end/device-end bits                                */
+    if (((privptr->channel[READ].last_dstat |
+        privptr->channel[WRITE].last_dstat) &
+        ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
+        printk(KERN_WARNING "%s: channel problems during close - "
+            "read: %02x -  write: %02x\n",
+            dev->name,
+            privptr->channel[READ].last_dstat,
+            privptr->channel[WRITE].last_dstat);
+        CLAW_DBF_TEXT(2,trace,"badclose");
+    }
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d\n",
+        dev->name,__FUNCTION__,__LINE__);
+#endif
+    CLAW_DBF_TEXT(4,trace,"rlsexit");
+    return 0;
+}      /* end of claw_release     */
+
+
+
+/*-------------------------------------------------------------------*
+*       claw_write_retry                                             *
+*                                                                    *
+*       Timer callback armed by claw_irq_handler after a resetting   *
+*       event on the write channel; restarts output I/O unless the   *
+*       channel has been stopped in the meantime.                    *
+*--------------------------------------------------------------------*/
+
+static void
+claw_write_retry ( struct chbk *p_ch )
+{
+    struct net_device *dev = p_ch->ndev;
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+    printk(KERN_INFO "claw: variable p_ch =\n");
+    dumpit((char *) p_ch, sizeof(struct chbk));
+#endif
+    CLAW_DBF_TEXT(4,trace,"w_retry");
+    if (p_ch->claw_state != CLAW_STOP) {
+#ifdef DEBUGMSG
+        printk( KERN_INFO "%s:%s state-%02x\n" ,
+            dev->name,
+            __FUNCTION__,
+            p_ch->claw_state);
+#endif
+        claw_strt_out_IO( dev );
+        CLAW_DBF_TEXT(4,trace,"rtry_xit");
+    }
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d\n",
+        dev->name,__FUNCTION__,__LINE__);
+#endif
+}      /* end of claw_write_retry */
+
+
+/*-------------------------------------------------------------------*
+*       claw_write_next                                              *
+*                                                                    *
+*       Reclaim finished write buffers; while free write blocks and  *
+*       collected skbs remain, pack and transmit them; finally       *
+*       restart output I/O if writes are still active.               *
+*--------------------------------------------------------------------*/
+
+static void
+claw_write_next ( struct chbk * p_ch )
+{
+    struct net_device *dev;
+    struct claw_privbk *privptr = NULL;
+    struct sk_buff *pk_skb;
+    int rc;
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__FUNCTION__);
+    printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
+    dumpit((char *) p_ch, sizeof(struct chbk));
+#endif
+    CLAW_DBF_TEXT(4,trace,"claw_wrt");
+    if (p_ch->claw_state == CLAW_STOP)
+        return;
+    dev = (struct net_device *) p_ch->ndev;
+    privptr = (struct claw_privbk *) dev->priv;
+    claw_free_wrt_buf( dev );
+    if (privptr->write_free_count > 0 &&
+        skb_queue_len(&p_ch->collect_queue) > 0) {
+        /* flush collected skbs while free write blocks remain */
+        for (pk_skb = claw_pack_skb(privptr); pk_skb != NULL; ) {
+            rc = claw_hw_tx( pk_skb, dev, 1);
+            pk_skb = (privptr->write_free_count > 0) ?
+                claw_pack_skb(privptr) : NULL;
+        }
+    }
+    if (privptr->p_write_active_first != NULL)
+        claw_strt_out_IO(dev);
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d\n",
+        dev->name,__FUNCTION__,__LINE__);
+#endif
+}      /* end of claw_write_next */
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*       claw_timer                                                   *
+*                                                                    *
+*       Guard-timer callback used by claw_open: marks the channel    *
+*       as timed out (CLAW_TIMER) and wakes the sleeping opener.     *
+*--------------------------------------------------------------------*/
+
+static void
+claw_timer ( struct chbk * p_ch )
+{
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__FUNCTION__);
+    printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
+    dumpit((char *) p_ch, sizeof(struct chbk));
+#endif
+    CLAW_DBF_TEXT(4,trace,"timer");
+    /* flag the timeout, then release whoever waits on this channel */
+    p_ch->flag |= CLAW_TIMER;
+    wake_up(&p_ch->wait);
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d\n",
+        p_ch->ndev->name,__FUNCTION__,__LINE__);
+#endif
+}      /* end of claw_timer */
+
+
+/*
+*
+* functions
+*/
+
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*     pages_to_order_of_mag                                          *
+*                                                                    *
+*     Takes a number of pages from 1 to 512 and returns              *
+*     ceil(log2(num_of_pages)), the base-2 order that                *
+*     __get_free_pages() expects; its upper order limit is 9.        *
+*--------------------------------------------------------------------*/
+
+static inline int
+pages_to_order_of_mag(int num_of_pages)
+{
+    int order_of_mag;
+    int nump;
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s Enter pages = %d \n",__FUNCTION__,num_of_pages);
+#endif
+    CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages);
+    if (num_of_pages == 1)   /* magnitude of 0 = 1 page */
+        return 0;
+    /* 512 pages = 2Meg on 4k page systems; cap at order 9 */
+    if (num_of_pages >= 512)
+        return 9;
+    /* two or more pages: order is ceil(log2(num_of_pages));
+     * num_of_pages < 512 here, so the result is at most 9     */
+    order_of_mag = 1;
+    for (nump = 2; nump < num_of_pages; nump *= 2)
+        order_of_mag++;
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s Exit on line %d, order = %d\n",
+        __FUNCTION__,__LINE__, order_of_mag);
+#endif
+    CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag);
+    return order_of_mag;
+}
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*     add_claw_reads                                                 *
+*                                                                    *
+*     Append the CCW segment p_first..p_last to the active read      *
+*     channel program.  Two alternating ending-CCW sets (read1 /     *
+*     read2) are used so the set currently referenced by the         *
+*     channel is never modified while it may be fetched; the new     *
+*     segment is spliced in by rewriting the inactive set's NOP      *
+*     into a TIC to the new chain.  Returns 0.                       *
+*--------------------------------------------------------------------*/
+static int
+add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
+    struct ccwbk* p_last)
+{
+    struct claw_privbk *privptr;
+    struct ccw1 temp_ccw;
+    struct endccw * p_end;
+#ifdef IOTRACE
+    struct ccwbk* p_buf;
+#endif
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+#endif
+#ifdef DEBUGMSG
+    printk(KERN_INFO "dev\n");
+    dumpit((char *) dev, sizeof(struct net_device));
+    printk(KERN_INFO "p_first\n");
+    dumpit((char *) p_first, sizeof(struct ccwbk));
+    printk(KERN_INFO "p_last\n");
+    dumpit((char *) p_last, sizeof(struct ccwbk));
+#endif
+    CLAW_DBF_TEXT(4,trace,"addreads");
+    privptr = dev->priv;
+    p_end = privptr->p_end_ccw;
+
+    /* first CCW and last CCW contains a new set of read channel programs
+     *       to apend the running channel programs
+     */
+    if ( p_first==NULL) {
+#ifdef FUNCTRACE
+        printk(KERN_INFO "%s:%s Exit on line %d\n",
+            dev->name,__FUNCTION__,__LINE__);
+#endif
+        CLAW_DBF_TEXT(4,trace,"addexit");
+        return 0;
+    }
+
+    /* set up ending CCW sequence for this segment: point the new
+     * last block's TIC CCWs at whichever ending set is inactive    */
+    if (p_end->read1) {
+        p_end->read1=0x00;    /* second ending CCW is now active */
+        /*      reset ending CCWs and setup TIC CCWs              */
+        p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+        p_end->read2_nop2.flags  = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+        p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
+        p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
+        p_end->read2_nop2.cda=0;
+        p_end->read2_nop2.count=1;
+    }
+    else {
+        p_end->read1=0x01;  /* first ending CCW is now active */
+        /*      reset ending CCWs and setup TIC CCWs          */
+        p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+        p_end->read1_nop2.flags  = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+        p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
+        p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
+        p_end->read1_nop2.cda=0;
+        p_end->read1_nop2.count=1;
+    }
+
+    if ( privptr-> p_read_active_first ==NULL ) {
+        /* no active read program: this segment becomes the chain */
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s:%s p_read_active_frist == NULL \n",
+            dev->name,__FUNCTION__);
+        printk(KERN_INFO "%s:%s Read active first/last changed \n",
+            dev->name,__FUNCTION__);
+#endif
+        privptr-> p_read_active_first= p_first;  /*    set new first */
+        privptr-> p_read_active_last = p_last;   /*    set new last  */
+    }
+    else {
+
+#ifdef DEBUGMSG
+        printk(KERN_INFO "%s:%s Read in progress \n",
+        dev->name,__FUNCTION__);
+#endif
+        /*      set up TIC ccw  */
+        temp_ccw.cda= (__u32)__pa(&p_first->read);
+        temp_ccw.count=0;
+        temp_ccw.flags=0;
+        temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
+
+
+        if (p_end->read1) {
+
+       /* first set of CCW's is chained to the new read              */
+       /* chain, so the second set is chained to the active chain.   */
+       /* Therefore modify the second set to point to the new        */
+       /* read chain set up TIC CCWs                                 */
+       /* make sure we update the CCW so channel doesn't fetch it    */
+       /* when it's only half done                                   */
+            memcpy( &p_end->read2_nop2, &temp_ccw ,
+                sizeof(struct ccw1));
+            privptr->p_read_active_last->r_TIC_1.cda=
+                (__u32)__pa(&p_first->read);
+            privptr->p_read_active_last->r_TIC_2.cda=
+                (__u32)__pa(&p_first->read);
+        }
+        else {
+            /* make sure we update the CCW so channel doesn't   */
+            /* fetch it when it is only half done               */
+            memcpy( &p_end->read1_nop2, &temp_ccw ,
+                sizeof(struct ccw1));
+            privptr->p_read_active_last->r_TIC_1.cda=
+                (__u32)__pa(&p_first->read);
+            privptr->p_read_active_last->r_TIC_2.cda=
+                (__u32)__pa(&p_first->read);
+        }
+        /*      chain in new set of blocks                          */
+        privptr->p_read_active_last->next = p_first;
+        privptr->p_read_active_last=p_last;
+    } /* end of if ( privptr-> p_read_active_first ==NULL) */
+#ifdef IOTRACE
+    printk(KERN_INFO "%s:%s  dump p_last CCW BK \n",dev->name,__FUNCTION__);
+    dumpit((char *)p_last, sizeof(struct ccwbk));
+    printk(KERN_INFO "%s:%s  dump p_end CCW BK \n",dev->name,__FUNCTION__);
+    dumpit((char *)p_end, sizeof(struct endccw));
+
+    printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__FUNCTION__);
+    dumpit((char *)p_first, sizeof(struct ccwbk));
+    printk(KERN_INFO "%s:%s Dump Active CCW chain \n",
+        dev->name,__FUNCTION__);
+    p_buf=privptr->p_read_active_first;
+    while (p_buf!=NULL) {
+        dumpit((char *)p_buf, sizeof(struct ccwbk));
+        p_buf=p_buf->next;
+    }
+#endif
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s:%s Exit on line %d\n",
+        dev->name,__FUNCTION__,__LINE__);
+#endif
+    CLAW_DBF_TEXT(4,trace,"addexit");
+    return 0;
+}    /* end of add_claw_reads   */
+
+/*-------------------------------------------------------------------*
+ *   ccw_check_return_code                                           *
+ *                                                                   *
+ *   Translate a non-zero return code from a channel I/O request     *
+ *   (e.g. ccw_device_halt) into an operator message.  Does nothing  *
+ *   for return_code == 0.                                           *
+ *-------------------------------------------------------------------*/
+
+static void inline
+ccw_check_return_code(struct ccw_device *cdev, int return_code)
+{
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s: %s() > enter  \n",
+        cdev->dev.bus_id,__FUNCTION__);
+#endif
+    CLAW_DBF_TEXT(4,trace,"ccwret");
+#ifdef DEBUGMSG
+    printk(KERN_INFO "variable cdev =\n");
+    dumpit((char *) cdev, sizeof(struct ccw_device));
+    printk(KERN_INFO "variable return_code = %d\n",return_code);
+#endif
+    if (return_code == -EBUSY)
+        printk(KERN_INFO "%s: Busy !\n",
+            cdev->dev.bus_id);
+    else if (return_code == -ENODEV)
+        printk(KERN_EMERG "%s: Missing device called "
+            "for IO ENODEV\n", cdev->dev.bus_id);
+    else if (return_code == -EIO)
+        printk(KERN_EMERG "%s: Status pending... EIO \n",
+            cdev->dev.bus_id);
+    else if (return_code == -EINVAL)
+        printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n",
+            cdev->dev.bus_id);
+    else if (return_code != 0)
+        printk(KERN_EMERG "%s: Unknown error in "
+            "Do_IO %d\n",cdev->dev.bus_id, return_code);
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s: %s() > exit on line %d\n",
+        cdev->dev.bus_id,__FUNCTION__,__LINE__);
+#endif
+    CLAW_DBF_TEXT(4,trace,"ccwret");
+}    /* end of ccw_check_return_code */
+
+/*-------------------------------------------------------------------*
+*       ccw_check_unit_check                                         *
+*                                                                    *
+*       Decode the first sense byte of a unit check into an          *
+*       operator message (disconnect/reset, data-streaming or        *
+*       parity problems — presumably remote-side conditions;         *
+*       confirm against the CLAW sense-byte definitions).            *
+*--------------------------------------------------------------------*/
+
+static void inline
+ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
+{
+    struct net_device *dev = p_ch->ndev;
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__);
+#endif
+#ifdef DEBUGMSG
+    printk(KERN_INFO "%s: variable dev =\n",dev->name);
+    dumpit((char *)dev, sizeof(struct net_device));
+    printk(KERN_INFO "%s: variable sense =\n",dev->name);
+    dumpit((char *)&sense, 2);
+#endif
+    CLAW_DBF_TEXT(4,trace,"unitchek");
+
+    printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n",
+        dev->name, sense);
+
+    if (sense & 0x40) {
+        if (sense & 0x01) {
+            printk(KERN_WARNING "%s: Interface disconnect or "
+                "Selective reset "
+                "occurred (remote side)\n", dev->name);
+        }
+        else {
+            printk(KERN_WARNING "%s: System reset occured"
+                " (remote side)\n", dev->name);
+        }
+    }
+    else if (sense & 0x20) {
+        if (sense & 0x04) {
+            printk(KERN_WARNING "%s: Data-streaming "
+                "timeout)\n", dev->name);
+        }
+        else {
+            printk(KERN_WARNING "%s: Data-transfer parity"
+                " error\n", dev->name);
+        }
+    }
+    else if (sense & 0x10) {
+        /* NOTE(review): this branch is only reached when bit 0x20
+         * is clear (the previous "else if" already caught it), so
+         * the (sense & 0x20) test below can never be true and the
+         * "Hardware malfunction" message is unreachable — likely a
+         * different bit was intended; confirm against the CLAW
+         * sense-byte specification before changing behavior.       */
+        if (sense & 0x20) {
+            printk(KERN_WARNING "%s: Hardware malfunction "
+                "(remote side)\n", dev->name);
+        }
+        else {
+            printk(KERN_WARNING "%s: read-data parity error "
+                "(remote side)\n", dev->name);
+        }
+    }
+
+#ifdef FUNCTRACE
+    printk(KERN_INFO "%s: %s() exit on line %d\n",
+        dev->name,__FUNCTION__,__LINE__);
+#endif
+}    /* end of ccw_check_unit_check */
+
+
+
+/*-------------------------------------------------------------------*
+* Dump buffer format *
+* *
+*--------------------------------------------------------------------*/
+#ifdef DEBUG
+/*
+ * dumpit - hex/ASCII dump of a memory region to the kernel log.
+ * @buf: start of the region to dump
+ * @len: number of bytes to dump
+ *
+ * Prints 16 bytes per line as "address (+offset) : hex [ascii]".
+ * Runs of lines whose hex content is identical to the previous line
+ * are folded into a single "Duplicate as above" marker.  Only built
+ * when DEBUG is defined.
+ */
+static void
+dumpit(char* buf, int len)
+{
+
+ __u32 ct, sw, rm, dup;
+ char *ptr, *rptr;
+ char tbuf[82], tdup[82];
+/* NOTE(review): "#if (CONFIG_ARCH_S390X)" relies on the macro expanding
+ * to a nonzero value; "#ifdef" would be the conventional presence test. */
+#if (CONFIG_ARCH_S390X)
+ char addr[22];
+#else
+ char addr[12];
+#endif
+ char boff[12];
+ char bhex[82], duphex[82];
+ char basc[40];
+
+ sw = 0;
+ rptr =ptr=buf;
+ rm = 16;
+ duphex[0] = 0x00;
+ dup = 0;
+ for ( ct=0; ct < len; ct++, ptr++, rptr++ ) {
+ /* sw counts bytes within the current 16-byte output line */
+ if (sw == 0) {
+#if (CONFIG_ARCH_S390X)
+ sprintf(addr, "%16.16lX",(unsigned long)rptr);
+#else
+ sprintf(addr, "%8.8X",(__u32)rptr);
+#endif
+ sprintf(boff, "%4.4X", (__u32)ct);
+ bhex[0] = '\0';
+ basc[0] = '\0';
+ }
+ /* extra spacing every 4 bytes, wider gap at the 8-byte midpoint */
+ if ((sw == 4) || (sw == 12)) {
+ strcat(bhex, " ");
+ }
+ if (sw == 8) {
+ strcat(bhex, " ");
+ }
+#if (CONFIG_ARCH_S390X)
+ sprintf(tbuf,"%2.2lX", (unsigned long)*ptr);
+#else
+ sprintf(tbuf,"%2.2X", (__u32)*ptr);
+#endif
+ tbuf[2] = '\0';
+ strcat(bhex, tbuf);
+ /* printable bytes go into the ASCII column, others become '.' */
+ if ((0!=isprint(*ptr)) && (*ptr >= 0x20)) {
+ basc[sw] = *ptr;
+ }
+ else {
+ basc[sw] = '.';
+ }
+ basc[sw+1] = '\0';
+ sw++;
+ rm--;
+ if (sw==16) {
+ /* line complete: print it unless it duplicates the last */
+ if ((strcmp(duphex, bhex)) !=0) {
+ if (dup !=0) {
+ sprintf(tdup,"Duplicate as above to"
+ " %s", addr);
+ printk( KERN_INFO " "
+ " --- %s ---\n",tdup);
+ }
+ printk( KERN_INFO " %s (+%s) : %s [%s]\n",
+ addr, boff, bhex, basc);
+ dup = 0;
+ strcpy(duphex, bhex);
+ }
+ else {
+ dup++;
+ }
+ sw = 0;
+ rm = 16;
+ }
+ } /* endfor */
+
+ /* flush a partial final line, padding the remaining columns */
+ if (sw != 0) {
+ for ( ; rm > 0; rm--, sw++ ) {
+ if ((sw==4) || (sw==12)) strcat(bhex, " ");
+ if (sw==8) strcat(bhex, " ");
+ strcat(bhex, " ");
+ strcat(basc, " ");
+ }
+ if (dup !=0) {
+ sprintf(tdup,"Duplicate as above to %s", addr);
+ printk( KERN_INFO " --- %s ---\n",
+ tdup);
+ }
+ printk( KERN_INFO " %s (+%s) : %s [%s]\n",
+ addr, boff, bhex, basc);
+ }
+ else {
+ /* len was a multiple of 16: emit any pending duplicate marker */
+ if (dup >=1) {
+ sprintf(tdup,"Duplicate as above to %s", addr);
+ printk( KERN_INFO " --- %s ---\n",
+ tdup);
+ }
+ if (dup !=0) {
+ printk( KERN_INFO " %s (+%s) : %s [%s]\n",
+ addr, boff, bhex, basc);
+ }
+ }
+ return;
+
+} /* end of dumpit */
+#endif
+
+/*-------------------------------------------------------------------*
+* find_link *
+*--------------------------------------------------------------------*/
+/*
+ * find_link - validate the peer's host/workstation application names.
+ * @dev:       net_device the control command arrived on
+ * @host_name: 8-byte host application name from the peer
+ * @ws_name:   8-byte workstation application name from the peer
+ *
+ * Checks the names against what this interface expects for its
+ * configured packing mode (packed IP, API, or the configured api_type).
+ * Returns 0 when they match, EINVAL (positive, callers test non-zero)
+ * when they do not.
+ */
+static int
+find_link(struct net_device *dev, char *host_name, char *ws_name )
+{
+ struct claw_privbk *privptr;
+ struct claw_env *p_env;
+ int rc=0;
+
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__);
+#endif
+ CLAW_DBF_TEXT(2,setup,"findlink");
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: variable dev = \n",dev->name);
+ dumpit((char *) dev, sizeof(struct net_device));
+ printk(KERN_INFO "%s: variable host_name = %s\n",dev->name, host_name);
+ printk(KERN_INFO "%s: variable ws_name = %s\n",dev->name, ws_name);
+#endif
+ privptr=dev->priv;
+ p_env=privptr->p_env;
+ switch (p_env->packing)
+ {
+ case PACKING_ASK:
+ if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
+ (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
+ rc = EINVAL;
+ break;
+ case DO_PACKED:
+ case PACK_SEND:
+ if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
+ (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
+ rc = EINVAL;
+ break;
+ default:
+ if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
+ (memcmp(p_env->api_type , ws_name, 8)!=0))
+ rc = EINVAL;
+ break;
+ }
+
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s:%s Exit on line %d\n",
+ dev->name,__FUNCTION__,__LINE__);
+#endif
+ /* BUGFIX: the comparison result was previously discarded by an
+ * unconditional "return 0;", making every check above dead code.
+ * Return the computed rc instead. */
+ return rc;
+} /* end of find_link */
+
+/*-------------------------------------------------------------------*
+ * claw_hw_tx *
+ * *
+ * *
+ *-------------------------------------------------------------------*/
+
+/*
+ * claw_hw_tx - queue an outbound skb on the CLAW WRITE channel.
+ * @skb:    packet to transmit (freed here once copied)
+ * @dev:    owning net_device
+ * @linkid: CLAW logical link number (0 follows the busy/lock path)
+ *
+ * Optionally prepends a packing header, copies the data into free
+ * write CCW buffers, chains the new write CCWs onto the running
+ * channel program (via TIC CCWs through the ending-CCW block) and
+ * restarts output I/O.  When no buffers are free, or the TX busy bit
+ * is already set, the skb is parked on the channel's collect_queue.
+ * Returns 0, or -EBUSY when the TX busy bit was already held.
+ */
+static int
+claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
+{
+ int rc=0;
+ struct claw_privbk *privptr;
+ struct ccwbk *p_this_ccw;
+ struct ccwbk *p_first_ccw;
+ struct ccwbk *p_last_ccw;
+ __u32 numBuffers;
+ signed long len_of_data;
+ unsigned long bytesInThisBuffer;
+ unsigned char *pDataAddress;
+ struct endccw *pEnd;
+ struct ccw1 tempCCW;
+ struct chbk *p_ch;
+ struct claw_env *p_env;
+ int lock;
+ struct clawph *pk_head;
+ struct chbk *ch;
+#ifdef IOTRACE
+ struct ccwbk *p_buf;
+#endif
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__);
+#endif
+ CLAW_DBF_TEXT(4,trace,"hw_tx");
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: variable dev skb =\n",dev->name);
+ dumpit((char *) skb, sizeof(struct sk_buff));
+ printk(KERN_INFO "%s: variable dev =\n",dev->name);
+ dumpit((char *) dev, sizeof(struct net_device));
+ printk(KERN_INFO "%s: variable linkid = %ld\n",dev->name,linkid);
+#endif
+ privptr = (struct claw_privbk *) (dev->priv);
+ p_ch=(struct chbk *)&privptr->channel[WRITE];
+ p_env =privptr->p_env;
+#ifdef IOTRACE
+ printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__FUNCTION__);
+ dumpit((char *)skb ,sizeof(struct sk_buff));
+#endif
+ claw_free_wrt_buf(dev); /* Clean up free chain if posible */
+ /* scan the write queue to free any completed write packets */
+ p_first_ccw=NULL;
+ p_last_ccw=NULL;
+ /* Packing mode: prepend a clawph header once per skb; cb[1] == 'P'
+ * marks an skb whose header has already been added. */
+ if ((p_env->packing >= PACK_SEND) &&
+ (skb->cb[1] != 'P')) {
+ skb_push(skb,sizeof(struct clawph));
+ pk_head=(struct clawph *)skb->data;
+ pk_head->len=skb->len-sizeof(struct clawph);
+ /* NOTE(review): len is rounded up to a multiple of 4 first,
+ * so the skb_pad/skb_put below compute 4-(len%4) == 4 —
+ * the pad amounts look inconsistent; confirm intent. */
+ if (pk_head->len%4) {
+ pk_head->len+= 4-(pk_head->len%4);
+ skb_pad(skb,4-(pk_head->len%4));
+ skb_put(skb,4-(pk_head->len%4));
+ }
+ if (p_env->packing == DO_PACKED)
+ pk_head->link_num = linkid;
+ else
+ pk_head->link_num = 0;
+ pk_head->flag = 0x00;
+ skb_pad(skb,4);
+ skb->cb[1] = 'P';
+ }
+ if (linkid == 0) {
+ /* link 0: honour the device busy flag; if still out of write
+ * buffers after a restart, defer the skb to collect_queue */
+ if (claw_check_busy(dev)) {
+ if (privptr->write_free_count!=0) {
+ claw_clear_busy(dev);
+ }
+ else {
+ claw_strt_out_IO(dev );
+ claw_free_wrt_buf( dev );
+ if (privptr->write_free_count==0) {
+#ifdef IOTRACE
+ printk(KERN_INFO "%s: "
+ "(claw_check_busy) no free write "
+ "buffers\n", dev->name);
+#endif
+ ch = &privptr->channel[WRITE];
+ atomic_inc(&skb->users);
+ skb_queue_tail(&ch->collect_queue, skb);
+ goto Done;
+ }
+ else {
+ claw_clear_busy(dev);
+ }
+ }
+ }
+ /* tx lock */
+ if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: busy (claw_test_and_setbit_"
+ "busy)\n", dev->name);
+#endif
+ ch = &privptr->channel[WRITE];
+ atomic_inc(&skb->users);
+ skb_queue_tail(&ch->collect_queue, skb);
+ claw_strt_out_IO(dev );
+ rc=-EBUSY;
+ goto Done2;
+ }
+ }
+ /* See how many write buffers are required to hold this data */
+ numBuffers= ( skb->len + privptr->p_env->write_size - 1) /
+ ( privptr->p_env->write_size);
+
+ /* If that number of buffers isn't available, give up for now */
+ if (privptr->write_free_count < numBuffers ||
+ privptr->p_write_free_chain == NULL ) {
+
+ claw_setbit_busy(TB_NOBUFFER,dev);
+
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: busy (claw_setbit_busy"
+ "(TB_NOBUFFER))\n", dev->name);
+ printk(KERN_INFO " free_count: %d, numBuffers : %d\n",
+ (int)privptr->write_free_count,(int) numBuffers );
+#endif
+ ch = &privptr->channel[WRITE];
+ atomic_inc(&skb->users);
+ skb_queue_tail(&ch->collect_queue, skb);
+ CLAW_DBF_TEXT(2,trace,"clawbusy");
+ goto Done2;
+ }
+ pDataAddress=skb->data;
+ len_of_data=skb->len;
+
+ /* Copy the data into write buffers, one CCW block per buffer */
+ while (len_of_data > 0) {
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() length-of-data is %ld \n",
+ dev->name ,__FUNCTION__,len_of_data);
+ dumpit((char *)pDataAddress ,64);
+#endif
+ p_this_ccw=privptr->p_write_free_chain; /* get a block */
+ if (p_this_ccw == NULL) { /* lost the race */
+ ch = &privptr->channel[WRITE];
+ atomic_inc(&skb->users);
+ skb_queue_tail(&ch->collect_queue, skb);
+ goto Done2;
+ }
+ privptr->p_write_free_chain=p_this_ccw->next;
+ p_this_ccw->next=NULL;
+ --privptr->write_free_count; /* -1 */
+ /* NOTE(review): the copy length is the full remaining data,
+ * not capped at write_size, even though numBuffers above
+ * anticipates splitting across buffers — confirm callers
+ * never hand over more than one buffer's worth. */
+ bytesInThisBuffer=len_of_data;
+ memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
+ len_of_data-=bytesInThisBuffer;
+ pDataAddress+=(unsigned long)bytesInThisBuffer;
+ /* setup write CCW */
+ p_this_ccw->write.cmd_code = (linkid * 8) +1;
+ if (len_of_data>0) {
+ p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
+ }
+ p_this_ccw->write.count=bytesInThisBuffer;
+ /* now add to end of this chain */
+ if (p_first_ccw==NULL) {
+ p_first_ccw=p_this_ccw;
+ }
+ if (p_last_ccw!=NULL) {
+ p_last_ccw->next=p_this_ccw;
+ /* set up TIC ccws */
+ p_last_ccw->w_TIC_1.cda=
+ (__u32)__pa(&p_this_ccw->write);
+ }
+ p_last_ccw=p_this_ccw; /* save new last block */
+#ifdef IOTRACE
+ printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n",
+ dev->name,__FUNCTION__,bytesInThisBuffer);
+ dumpit((char *)p_this_ccw, sizeof(struct ccwbk));
+ dumpit((char *)p_this_ccw->p_buffer, 64);
+#endif
+ }
+
+ /* FirstCCW and LastCCW now contain a new set of write channel
+ * programs to append to the running channel program
+ */
+
+ if (p_first_ccw!=NULL) {
+ /* setup ending ccw sequence for this segment */
+ pEnd=privptr->p_end_ccw;
+ /* Two alternating ending-CCW pairs (write1/write2): the new
+ * chain terminates in whichever pair is not currently active */
+ if (pEnd->write1) {
+ pEnd->write1=0x00; /* second end ccw is now active */
+ /* set up Tic CCWs */
+ p_last_ccw->w_TIC_1.cda=
+ (__u32)__pa(&pEnd->write2_nop1);
+ pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ pEnd->write2_nop2.flags =
+ CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ pEnd->write2_nop2.cda=0;
+ pEnd->write2_nop2.count=1;
+ }
+ else { /* end of if (pEnd->write1)*/
+ pEnd->write1=0x01; /* first end ccw is now active */
+ /* set up Tic CCWs */
+ p_last_ccw->w_TIC_1.cda=
+ (__u32)__pa(&pEnd->write1_nop1);
+ pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ pEnd->write1_nop2.flags =
+ CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ pEnd->write1_nop2.cda=0;
+ pEnd->write1_nop2.count=1;
+ } /* end if if (pEnd->write1) */
+
+
+ if (privptr->p_write_active_first==NULL ) {
+ privptr->p_write_active_first=p_first_ccw;
+ privptr->p_write_active_last=p_last_ccw;
+ }
+ else {
+
+ /* set up Tic CCWs */
+
+ tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
+ tempCCW.count=0;
+ tempCCW.flags=0;
+ tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
+
+ if (pEnd->write1) {
+
+ /*
+ * first set of ending CCW's is chained to the new write
+ * chain, so the second set is chained to the active chain
+ * Therefore modify the second set to point the new write chain.
+ * make sure we update the CCW atomically
+ * so channel does not fetch it when it's only half done
+ */
+ memcpy( &pEnd->write2_nop2, &tempCCW ,
+ sizeof(struct ccw1));
+ privptr->p_write_active_last->w_TIC_1.cda=
+ (__u32)__pa(&p_first_ccw->write);
+ }
+ else {
+
+ /*make sure we update the CCW atomically
+ *so channel does not fetch it when it's only half done
+ */
+ memcpy(&pEnd->write1_nop2, &tempCCW ,
+ sizeof(struct ccw1));
+ privptr->p_write_active_last->w_TIC_1.cda=
+ (__u32)__pa(&p_first_ccw->write);
+
+ } /* end if if (pEnd->write1) */
+
+ privptr->p_write_active_last->next=p_first_ccw;
+ privptr->p_write_active_last=p_last_ccw;
+ }
+
+ } /* endif (p_first_ccw!=NULL) */
+
+
+#ifdef IOTRACE
+ printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n",
+ dev->name,__FUNCTION__);
+ p_buf=privptr->p_write_active_first;
+ while (p_buf!=NULL) {
+ dumpit((char *)p_buf, sizeof(struct ccwbk));
+ p_buf=p_buf->next;
+ }
+ p_buf=(struct ccwbk*)privptr->p_end_ccw;
+ dumpit((char *)p_buf, sizeof(struct endccw));
+#endif
+ dev_kfree_skb(skb);
+ /* NOTE(review): 'lock' is computed here but never used afterwards
+ * in this function — left as-is. */
+ if (linkid==0) {
+ lock=LOCK_NO;
+ }
+ else {
+ lock=LOCK_YES;
+ }
+ claw_strt_out_IO(dev );
+ /* if write free count is zero , set NOBUFFER */
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() > free_count is %d\n",
+ dev->name,__FUNCTION__,
+ (int) privptr->write_free_count );
+#endif
+ if (privptr->write_free_count==0) {
+ claw_setbit_busy(TB_NOBUFFER,dev);
+ }
+Done2:
+ claw_clearbit_busy(TB_TX,dev);
+Done:
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n",
+ dev->name,__FUNCTION__,__LINE__, rc);
+#endif
+ return(rc);
+} /* end of claw_hw_tx */
+
+/*-------------------------------------------------------------------*
+* *
+* init_ccw_bk *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * init_ccw_bk - allocate and initialize all CCW blocks and buffers
+ * for one CLAW interface.
+ * @dev: net_device being initialized
+ *
+ * Builds, in order: the ending-CCW block, a free chain of CCW blocks
+ * (one per read buffer, one per write buffer, plus one for the
+ * ClawSignalBlock), the write-buffer free chain, and the circular
+ * read-buffer chain (handed to add_claw_reads at the end).
+ * Returns 0 on success or -ENOMEM, freeing earlier allocations on
+ * each failure path.
+ */
+static int
+init_ccw_bk(struct net_device *dev)
+{
+
+ __u32 ccw_blocks_required;
+ __u32 ccw_blocks_perpage;
+ __u32 ccw_pages_required;
+ __u32 claw_reads_perpage=1;
+ __u32 claw_read_pages;
+ __u32 claw_writes_perpage=1;
+ __u32 claw_write_pages;
+ void *p_buff=NULL;
+ struct ccwbk*p_free_chain;
+ struct ccwbk*p_buf;
+ struct ccwbk*p_last_CCWB;
+ struct ccwbk*p_first_CCWB;
+ struct endccw *p_endccw=NULL;
+ addr_t real_address;
+ struct claw_privbk *privptr=dev->priv;
+ struct clawh *pClawH=NULL;
+ addr_t real_TIC_address;
+ int i,j;
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s() enter  \n",dev->name,__FUNCTION__);
+#endif
+ CLAW_DBF_TEXT(4,trace,"init_ccw");
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: variable dev =\n",dev->name);
+ dumpit((char *) dev, sizeof(struct net_device));
+#endif
+
+ /* initialize statistics field */
+ privptr->active_link_ID=0;
+ /* initialize ccwbk pointers */
+ privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
+ privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
+ privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
+ privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
+ privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
+ privptr->p_end_ccw=NULL; /* pointer to ending ccw */
+ privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
+ privptr->buffs_alloc = 0;
+ memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
+ memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
+ /* initialize free write ccwbk counter */
+ privptr->write_free_count=0; /* number of free bufs on write chain */
+ p_last_CCWB = NULL;
+ p_first_CCWB= NULL;
+ /*
+ * We need 1 CCW block for each read buffer, 1 for each
+ * write buffer, plus 1 for ClawSignalBlock
+ */
+ ccw_blocks_required =
+ privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() "
+ "ccw_blocks_required=%d\n",
+ dev->name,__FUNCTION__,
+ ccw_blocks_required);
+ printk(KERN_INFO "%s: %s() "
+ "PAGE_SIZE=0x%x\n",
+ dev->name,__FUNCTION__,
+ (unsigned int)PAGE_SIZE);
+ printk(KERN_INFO "%s: %s() > "
+ "PAGE_MASK=0x%x\n",
+ dev->name,__FUNCTION__,
+ (unsigned int)PAGE_MASK);
+#endif
+ /*
+ * compute number of CCW blocks that will fit in a page
+ */
+ ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
+ ccw_pages_required=
+ (ccw_blocks_required+ccw_blocks_perpage -1) /
+ ccw_blocks_perpage;
+
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
+ dev->name,__FUNCTION__,
+ ccw_blocks_perpage);
+ printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n",
+ dev->name,__FUNCTION__,
+ ccw_pages_required);
+#endif
+ /*
+ * read and write sizes are set by 2 constants in claw.h
+ * 4k and 32k. Unpacked values other than 4k are not going to
+ * provide good performance. With packing buffers support 32k
+ * buffers are used.
+ */
+ if (privptr->p_env->read_size < PAGE_SIZE) {
+ claw_reads_perpage= PAGE_SIZE / privptr->p_env->read_size;
+ claw_read_pages= (privptr->p_env->read_buffers +
+ claw_reads_perpage -1) / claw_reads_perpage;
+ }
+ else { /* > or equal */
+ privptr->p_buff_pages_perread=
+ (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ claw_read_pages=
+ privptr->p_env->read_buffers * privptr->p_buff_pages_perread;
+ }
+ if (privptr->p_env->write_size < PAGE_SIZE) {
+ claw_writes_perpage=
+ PAGE_SIZE / privptr->p_env->write_size;
+ claw_write_pages=
+ (privptr->p_env->write_buffers + claw_writes_perpage -1) /
+ claw_writes_perpage;
+
+ }
+ else { /* > or equal */
+ /* NOTE(review): pages-per-WRITE-buffer is derived from
+ * read_size, not write_size — looks like a copy/paste slip;
+ * it is only harmless if the two sizes are equal. Confirm. */
+ privptr->p_buff_pages_perwrite=
+ (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ claw_write_pages=
+ privptr->p_env->write_buffers * privptr->p_buff_pages_perwrite;
+ }
+#ifdef DEBUGMSG
+ if (privptr->p_env->read_size < PAGE_SIZE) {
+ printk(KERN_INFO "%s: %s() reads_perpage=%d\n",
+ dev->name,__FUNCTION__,
+ claw_reads_perpage);
+ }
+ else {
+ printk(KERN_INFO "%s: %s() pages_perread=%d\n",
+ dev->name,__FUNCTION__,
+ privptr->p_buff_pages_perread);
+ }
+ printk(KERN_INFO "%s: %s() read_pages=%d\n",
+ dev->name,__FUNCTION__,
+ claw_read_pages);
+ if (privptr->p_env->write_size < PAGE_SIZE) {
+ printk(KERN_INFO "%s: %s() writes_perpage=%d\n",
+ dev->name,__FUNCTION__,
+ claw_writes_perpage);
+ }
+ else {
+ printk(KERN_INFO "%s: %s() pages_perwrite=%d\n",
+ dev->name,__FUNCTION__,
+ privptr->p_buff_pages_perwrite);
+ }
+ printk(KERN_INFO "%s: %s() write_pages=%d\n",
+ dev->name,__FUNCTION__,
+ claw_write_pages);
+#endif
+
+
+ /*
+ * allocate ccw_pages_required
+ */
+ if (privptr->p_buff_ccw==NULL) {
+ privptr->p_buff_ccw=
+ (void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(ccw_pages_required ));
+ if (privptr->p_buff_ccw==NULL) {
+ printk(KERN_INFO "%s: %s() "
+ "__get_free_pages for CCWs failed : "
+ "pages is %d\n",
+ dev->name,__FUNCTION__,
+ ccw_pages_required );
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s() > "
+ "exit on line %d, rc = ENOMEM\n",
+ dev->name,__FUNCTION__,
+ __LINE__);
+#endif
+ return -ENOMEM;
+ }
+ privptr->p_buff_ccw_num=ccw_pages_required;
+ }
+ memset(privptr->p_buff_ccw, 0x00,
+ privptr->p_buff_ccw_num * PAGE_SIZE);
+
+ /*
+ * obtain ending ccw block address
+ *
+ */
+ privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
+ real_address = (__u32)__pa(privptr->p_end_ccw);
+ /* Initialize ending CCW block */
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n",
+ dev->name,__FUNCTION__);
+#endif
+
+ p_endccw=privptr->p_end_ccw;
+ p_endccw->real=real_address;
+ p_endccw->write1=0x00;
+ p_endccw->read1=0x00;
+
+ /* write1_nop1 */
+ p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+ p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_endccw->write1_nop1.count = 1;
+ p_endccw->write1_nop1.cda = 0;
+
+ /* write1_nop2 */
+ p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_endccw->write1_nop2.count = 1;
+ p_endccw->write1_nop2.cda = 0;
+
+ /* write2_nop1 */
+ p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+ p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_endccw->write2_nop1.count = 1;
+ p_endccw->write2_nop1.cda = 0;
+
+ /* write2_nop2 */
+ p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_endccw->write2_nop2.count = 1;
+ p_endccw->write2_nop2.cda = 0;
+
+ /* read1_nop1 */
+ p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+ p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_endccw->read1_nop1.count = 1;
+ p_endccw->read1_nop1.cda = 0;
+
+ /* read1_nop2 */
+ p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_endccw->read1_nop2.count = 1;
+ p_endccw->read1_nop2.cda = 0;
+
+ /* read2_nop1 */
+ p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+ p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_endccw->read2_nop1.count = 1;
+ p_endccw->read2_nop1.cda = 0;
+
+ /* read2_nop2 */
+ p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_endccw->read2_nop2.count = 1;
+ p_endccw->read2_nop2.cda = 0;
+
+#ifdef IOTRACE
+ printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n",
+ dev->name,__FUNCTION__);
+ dumpit((char *)p_endccw, sizeof(struct endccw));
+#endif
+
+ /*
+ * Build a chain of CCWs
+ *
+ */
+
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n",
+ dev->name,__FUNCTION__);
+#endif
+ p_buff=privptr->p_buff_ccw;
+
+ p_free_chain=NULL;
+ /* Walk the CCW pages and push every block onto the free chain */
+ for (i=0 ; i < ccw_pages_required; i++ ) {
+ real_address = (__u32)__pa(p_buff);
+ p_buf=p_buff;
+ for (j=0 ; j < ccw_blocks_perpage ; j++) {
+ p_buf->next = p_free_chain;
+ p_free_chain = p_buf;
+ p_buf->real=(__u32)__pa(p_buf);
+ ++p_buf;
+ }
+ p_buff+=PAGE_SIZE;
+ }
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() "
+ "End build a chain of CCW buffer \n",
+ dev->name,__FUNCTION__);
+ p_buf=p_free_chain;
+ while (p_buf!=NULL) {
+ dumpit((char *)p_buf, sizeof(struct ccwbk));
+ p_buf=p_buf->next;
+ }
+#endif
+
+ /*
+ * Initialize ClawSignalBlock
+ *
+ */
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() "
+ "Begin initialize ClawSignalBlock \n",
+ dev->name,__FUNCTION__);
+#endif
+ if (privptr->p_claw_signal_blk==NULL) {
+ privptr->p_claw_signal_blk=p_free_chain;
+ p_free_chain=p_free_chain->next;
+ pClawH=(struct clawh *)privptr->p_claw_signal_blk;
+ pClawH->length=0xffff;
+ pClawH->opcode=0xff;
+ pClawH->flag=CLAW_BUSY;
+ }
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() >  End initialize "
+ "ClawSignalBlock\n",
+ dev->name,__FUNCTION__);
+ dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk));
+#endif
+
+ /*
+ * allocate write_pages_required and add to free chain
+ */
+ if (privptr->p_buff_write==NULL) {
+ if (privptr->p_env->write_size < PAGE_SIZE) {
+ privptr->p_buff_write=
+ (void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(claw_write_pages ));
+ if (privptr->p_buff_write==NULL) {
+ printk(KERN_INFO "%s: %s() __get_free_pages for write"
+ " bufs failed : get is for %d pages\n",
+ dev->name,__FUNCTION__,claw_write_pages );
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+ privptr->p_buff_ccw=NULL;
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s() > exit on line %d,"
+ "rc = ENOMEM\n",
+ dev->name,__FUNCTION__,__LINE__);
+#endif
+ return -ENOMEM;
+ }
+ /*
+ * Build CLAW write free chain
+ *
+ */
+
+ /* NOTE(review): clears ccw_pages_required pages, but this
+ * area was allocated with claw_write_pages pages — confirm
+ * the two counts can never differ here. */
+ memset(privptr->p_buff_write, 0x00,
+ ccw_pages_required * PAGE_SIZE);
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() Begin build claw write free "
+ "chain \n",dev->name,__FUNCTION__);
+#endif
+ privptr->p_write_free_chain=NULL;
+
+ p_buff=privptr->p_buff_write;
+
+ for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
+ p_buf = p_free_chain; /* get a CCW */
+ p_free_chain = p_buf->next;
+ p_buf->next =privptr->p_write_free_chain;
+ privptr->p_write_free_chain = p_buf;
+ p_buf-> p_buffer = (struct clawbuf *)p_buff;
+ p_buf-> write.cda = (__u32)__pa(p_buff);
+ p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+ p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> w_read_FF.count = 1;
+ p_buf-> w_read_FF.cda =
+ (__u32)__pa(&p_buf-> header.flag);
+ p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+ p_buf-> w_TIC_1.flags = 0;
+ p_buf-> w_TIC_1.count = 0;
+
+ /* NOTE(review): unlike the read path below, there is
+ * no else branch advancing p_buff across a page
+ * boundary — confirm write buffers always fit. */
+ if (((unsigned long)p_buff+privptr->p_env->write_size) >=
+ ((unsigned long)(p_buff+2*
+ (privptr->p_env->write_size) -1) & PAGE_MASK)) {
+ p_buff= p_buff+privptr->p_env->write_size;
+ }
+ }
+ }
+ else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
+ {
+ privptr->p_write_free_chain=NULL;
+ for (i = 0; i< privptr->p_env->write_buffers ; i++) {
+ p_buff=(void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite) );
+#ifdef IOTRACE
+ printk(KERN_INFO "%s:%s __get_free_pages "
+ "for writes buf: get for %d pages\n",
+ dev->name,__FUNCTION__,
+ privptr->p_buff_pages_perwrite);
+#endif
+ if (p_buff==NULL) {
+ printk(KERN_INFO "%s:%s __get_free_pages"
+ "for writes buf failed : get is for %d pages\n",
+ dev->name,
+ __FUNCTION__,
+ privptr->p_buff_pages_perwrite );
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_ccw_num));
+ privptr->p_buff_ccw=NULL;
+ p_buf=privptr->p_buff_write;
+ while (p_buf!=NULL) {
+ free_pages((unsigned long)
+ p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite));
+ p_buf=p_buf->next;
+ }
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n",
+ dev->name,
+ __FUNCTION__,
+ __LINE__);
+#endif
+ return -ENOMEM;
+ } /* Error on get_pages */
+ memset(p_buff, 0x00, privptr->p_env->write_size );
+ p_buf = p_free_chain;
+ p_free_chain = p_buf->next;
+ p_buf->next = privptr->p_write_free_chain;
+ privptr->p_write_free_chain = p_buf;
+ privptr->p_buff_write = p_buf;
+ p_buf->p_buffer=(struct clawbuf *)p_buff;
+ p_buf-> write.cda = (__u32)__pa(p_buff);
+ p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+ p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> w_read_FF.count = 1;
+ p_buf-> w_read_FF.cda =
+ (__u32)__pa(&p_buf-> header.flag);
+ p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+ p_buf-> w_TIC_1.flags = 0;
+ p_buf-> w_TIC_1.count = 0;
+ } /* for all write_buffers */
+
+ } /* else buffers are PAGE_SIZE or bigger */
+
+ }
+ privptr->p_buff_write_num=claw_write_pages;
+ privptr->write_free_count=privptr->p_env->write_buffers;
+
+
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s:%s End build claw write free chain \n",
+ dev->name,__FUNCTION__);
+ p_buf=privptr->p_write_free_chain;
+ while (p_buf!=NULL) {
+ dumpit((char *)p_buf, sizeof(struct ccwbk));
+ p_buf=p_buf->next;
+ }
+#endif
+ /*
+ * allocate read_pages_required and chain to free chain
+ */
+ if (privptr->p_buff_read==NULL) {
+ if (privptr->p_env->read_size < PAGE_SIZE) {
+ privptr->p_buff_read=
+ (void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(claw_read_pages) );
+ if (privptr->p_buff_read==NULL) {
+ printk(KERN_INFO "%s: %s() "
+ "__get_free_pages for read buf failed : "
+ "get is for %d pages\n",
+ dev->name,__FUNCTION__,claw_read_pages );
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_ccw_num));
+ /* free the write pages size is < page size */
+ free_pages((unsigned long)privptr->p_buff_write,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_write_num));
+ privptr->p_buff_ccw=NULL;
+ privptr->p_buff_write=NULL;
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s() > exit on line %d, rc ="
+ " ENOMEM\n",dev->name,__FUNCTION__,__LINE__);
+#endif
+ return -ENOMEM;
+ }
+ memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
+ privptr->p_buff_read_num=claw_read_pages;
+ /*
+ * Build CLAW read free chain
+ *
+ */
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
+ dev->name,__FUNCTION__);
+#endif
+ p_buff=privptr->p_buff_read;
+ for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
+ p_buf = p_free_chain;
+ p_free_chain = p_buf->next;
+
+ /* chain is built back-to-front: the first block taken
+ * becomes the tail (p_last_CCWB), later blocks TIC to
+ * the previous head */
+ if (p_last_CCWB==NULL) {
+ p_buf->next=NULL;
+ real_TIC_address=0;
+ p_last_CCWB=p_buf;
+ }
+ else {
+ p_buf->next=p_first_CCWB;
+ real_TIC_address=
+ (__u32)__pa(&p_first_CCWB -> read );
+ }
+
+ p_first_CCWB=p_buf;
+
+ p_buf->p_buffer=(struct clawbuf *)p_buff;
+ /* initialize read command */
+ p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
+ p_buf-> read.cda = (__u32)__pa(p_buff);
+ p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> read.count = privptr->p_env->read_size;
+
+ /* initialize read_h command */
+ p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
+ p_buf-> read_h.cda =
+ (__u32)__pa(&(p_buf->header));
+ p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> read_h.count = sizeof(struct clawh);
+
+ /* initialize Signal command */
+ p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
+ p_buf-> signal.cda =
+ (__u32)__pa(&(pClawH->flag));
+ p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> signal.count = 1;
+
+ /* initialize r_TIC_1 command */
+ p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+ p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
+ p_buf-> r_TIC_1.flags = 0;
+ p_buf-> r_TIC_1.count = 0;
+
+ /* initialize r_read_FF command */
+ p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+ p_buf-> r_read_FF.cda =
+ (__u32)__pa(&(pClawH->flag));
+ p_buf-> r_read_FF.flags =
+ CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
+ p_buf-> r_read_FF.count = 1;
+
+ /* initialize r_TIC_2 */
+ memcpy(&p_buf->r_TIC_2,
+ &p_buf->r_TIC_1, sizeof(struct ccw1));
+
+ /* initialize Header */
+ p_buf->header.length=0xffff;
+ p_buf->header.opcode=0xff;
+ p_buf->header.flag=CLAW_PENDING;
+
+ if (((unsigned long)p_buff+privptr->p_env->read_size) >=
+ ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1)
+ & PAGE_MASK) ) {
+ p_buff= p_buff+privptr->p_env->read_size;
+ }
+ else {
+ p_buff=
+ (void *)((unsigned long)
+ (p_buff+2*(privptr->p_env->read_size) -1)
+ & PAGE_MASK) ;
+ }
+ } /* for read_buffers */
+ } /* read_size < PAGE_SIZE */
+ else { /* read Size >= PAGE_SIZE */
+
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
+ dev->name,__FUNCTION__);
+#endif
+ for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
+ p_buff = (void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) );
+ if (p_buff==NULL) {
+ printk(KERN_INFO "%s: %s() __get_free_pages for read "
+ "buf failed : get is for %d pages\n",
+ dev->name,__FUNCTION__,
+ privptr->p_buff_pages_perread );
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+ /* free the write pages */
+ p_buf=privptr->p_buff_write;
+ while (p_buf!=NULL) {
+ free_pages((unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite ));
+ p_buf=p_buf->next;
+ }
+ /* free any read pages already alloc */
+ p_buf=privptr->p_buff_read;
+ while (p_buf!=NULL) {
+ free_pages((unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perread ));
+ p_buf=p_buf->next;
+ }
+ privptr->p_buff_ccw=NULL;
+ privptr->p_buff_write=NULL;
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n",
+ dev->name,__FUNCTION__,
+ __LINE__);
+#endif
+ return -ENOMEM;
+ }
+ memset(p_buff, 0x00, privptr->p_env->read_size);
+ p_buf = p_free_chain;
+ privptr->p_buff_read = p_buf;
+ p_free_chain = p_buf->next;
+
+ if (p_last_CCWB==NULL) {
+ p_buf->next=NULL;
+ real_TIC_address=0;
+ p_last_CCWB=p_buf;
+ }
+ else {
+ p_buf->next=p_first_CCWB;
+ real_TIC_address=
+ (addr_t)__pa(
+ &p_first_CCWB -> read );
+ }
+
+ p_first_CCWB=p_buf;
+ /* save buff address */
+ p_buf->p_buffer=(struct clawbuf *)p_buff;
+ /* initialize read command */
+ p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
+ p_buf-> read.cda = (__u32)__pa(p_buff);
+ p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> read.count = privptr->p_env->read_size;
+
+ /* initialize read_h command */
+ p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
+ p_buf-> read_h.cda =
+ (__u32)__pa(&(p_buf->header));
+ p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> read_h.count = sizeof(struct clawh);
+
+ /* initialize Signal command */
+ p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
+ p_buf-> signal.cda =
+ (__u32)__pa(&(pClawH->flag));
+ p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> signal.count = 1;
+
+ /* initialize r_TIC_1 command */
+ p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+ p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
+ p_buf-> r_TIC_1.flags = 0;
+ p_buf-> r_TIC_1.count = 0;
+
+ /* initialize r_read_FF command */
+ p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+ p_buf-> r_read_FF.cda =
+ (__u32)__pa(&(pClawH->flag));
+ p_buf-> r_read_FF.flags =
+ CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
+ p_buf-> r_read_FF.count = 1;
+
+ /* initialize r_TIC_2 */
+ memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
+ sizeof(struct ccw1));
+
+ /* initialize Header */
+ p_buf->header.length=0xffff;
+ p_buf->header.opcode=0xff;
+ p_buf->header.flag=CLAW_PENDING;
+
+ } /* For read_buffers */
+ } /* read_size >= PAGE_SIZE */
+ } /* pBuffread = NULL */
+#ifdef DEBUGMSG
+ printk(KERN_INFO "%s: %s() > End build claw read free chain \n",
+ dev->name,__FUNCTION__);
+ p_buf=p_first_CCWB;
+ while (p_buf!=NULL) {
+ dumpit((char *)p_buf, sizeof(struct ccwbk));
+ p_buf=p_buf->next;
+ }
+
+#endif
+ add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
+ privptr->buffs_alloc = 1;
+#ifdef FUNCTRACE
+ printk(KERN_INFO "%s: %s() exit on line %d\n",
+ dev->name,__FUNCTION__,__LINE__);
+#endif
+ return 0;
+} /* end of init_ccw_bk */
+
+/*-------------------------------------------------------------------*
+* *
+* probe_error *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * probe_error - release private resources after a failed device probe.
+ *
+ * Frees the CLAW environment block, the MTC reassembly envelope and
+ * finally the private structure hung off @cgdev, if present.
+ * kfree(NULL) is a no-op, so the individual members need no NULL
+ * guards; NULLing the members before freeing the containing struct
+ * would be a dead store and is omitted.
+ */
+static void
+probe_error( struct ccwgroup_device *cgdev)
+{
+	struct claw_privbk *privptr;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s enter \n",__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(4,trace,"proberr");
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s variable cgdev =\n",__FUNCTION__);
+	dumpit((char *) cgdev, sizeof(struct ccwgroup_device));
+#endif
+	privptr = (struct claw_privbk *) cgdev->dev.driver_data;
+	if (privptr != NULL) {
+		kfree(privptr->p_env);
+		kfree(privptr->p_mtc_envelope);
+		kfree(privptr);
+	}
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s > exit on line %d\n",
+		 __FUNCTION__,__LINE__);
+#endif
+
+	return;
+}    /*    probe_error    */
+
+
+
+/*-------------------------------------------------------------------*
+* claw_process_control *
+* *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * Parse an inbound CLAW control packet (traffic on logical link 0) and
+ * advance the system-validate / connect handshake.  Replies go out via
+ * claw_send_control() and the claw_snd_*() helpers.  Returns 0, except
+ * for a CONNECTION_RESPONSE carrying a nonzero rc, which returns 1.
+ */
+static int
+claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
+{
+
+	struct clawbuf *p_buf;
+	struct clawctl  ctlbk;		/* stack copy of the control record */
+	struct clawctl *p_ctlbk;
+	char    temp_host_name[8];	/* 8 chars, NOT NUL-terminated */
+	char    temp_ws_name[8];	/* 8 chars, NOT NUL-terminated */
+	struct claw_privbk *privptr;
+	struct claw_env *p_env;
+	struct sysval *p_sysval;
+	struct conncmd *p_connect=NULL;
+	int rc;
+	struct chbk *p_ch = NULL;
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s: %s() > enter \n",
+		dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"clw_cntl");
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s: variable dev =\n",dev->name);
+	dumpit((char *) dev, sizeof(struct net_device));
+	printk(KERN_INFO "%s: variable p_ccw =\n",dev->name);
+	dumpit((char *) p_ccw, sizeof(struct ccwbk *));
+#endif
+	udelay(1000);  /* Wait a ms for the control packets to
+			*catch up to each other */
+	privptr=dev->priv;
+	p_env=privptr->p_env;
+	memcpy( &temp_host_name, p_env->host_name, 8);
+	memcpy( &temp_ws_name, p_env->adapter_name , 8);
+	printk(KERN_INFO "%s: CLAW device %.8s: "
+		"Received Control Packet\n",
+		dev->name, temp_ws_name);
+	/* device is shutting down: drop the control packet */
+	if (privptr->release_pend==1) {
+#ifdef FUNCTRACE
+		printk(KERN_INFO "%s: %s() > "
+			"exit on line %d, rc=0\n",
+			dev->name,__FUNCTION__,__LINE__);
+#endif
+		return 0;
+	}
+	p_buf=p_ccw->p_buffer;
+	p_ctlbk=&ctlbk;
+	if (p_env->packing == DO_PACKED) { /* packing in progress?*/
+		/* skip the 4-byte pack header in front of the record */
+		memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
+	} else {
+		memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
+	}
+#ifdef IOTRACE
+	printk(KERN_INFO "%s: dump claw control data inbound\n",dev->name);
+	dumpit((char *)p_ctlbk, sizeof(struct clawctl));
+#endif
+	/* dispatch on the control command code */
+	switch (p_ctlbk->command)
+	{
+	case SYSTEM_VALIDATE_REQUEST:
+		if (p_ctlbk->version!=CLAW_VERSION_ID) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_WRONG_VERSION );
+			printk("%s: %d is wrong version id. "
+				"Expected %d\n",
+				dev->name, p_ctlbk->version,
+				CLAW_VERSION_ID);
+		}
+		p_sysval=(struct sysval *)&(p_ctlbk->data);
+		printk( "%s: Recv Sys Validate Request: "
+			"Vers=%d,link_id=%d,Corr=%d,WS name=%."
+			"8s,Host name=%.8s\n",
+			dev->name, p_ctlbk->version,
+			p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			p_sysval->WS_name,
+			p_sysval->host_name);
+		/* NOTE(review): the name fields are 8 bytes without a
+		 * terminating NUL; the plain %s formats in the mismatch
+		 * messages below may read past the field -- %.8s would
+		 * be safer.  Confirm the struct layouts. */
+		if (0!=memcmp(temp_host_name,p_sysval->host_name,8)) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_NAME_MISMATCH );
+			CLAW_DBF_TEXT(2,setup,"HSTBAD");
+			CLAW_DBF_TEXT_(2,setup,"%s",p_sysval->host_name);
+			CLAW_DBF_TEXT_(2,setup,"%s",temp_host_name);
+			printk(KERN_INFO "%s: Host name mismatch\n",
+				dev->name);
+			printk(KERN_INFO "%s: Received :%s: "
+				"expected :%s: \n",
+				dev->name,
+				p_sysval->host_name,
+				temp_host_name);
+		}
+		if (0!=memcmp(temp_ws_name,p_sysval->WS_name,8)) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_NAME_MISMATCH );
+			CLAW_DBF_TEXT(2,setup,"WSNBAD");
+			CLAW_DBF_TEXT_(2,setup,"%s",p_sysval->WS_name);
+			CLAW_DBF_TEXT_(2,setup,"%s",temp_ws_name);
+			printk(KERN_INFO "%s: WS name mismatch\n",
+				dev->name);
+			printk(KERN_INFO "%s: Received :%s: "
+				"expected :%s: \n",
+				dev->name,
+				p_sysval->WS_name,
+				temp_ws_name);
+		}
+		/* frame-size checks only apply when not packing */
+		if (( p_sysval->write_frame_size < p_env->write_size) &&
+		   ( p_env->packing == 0)) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_HOST_RCV_TOO_SMALL );
+			printk(KERN_INFO "%s: host write size is too "
+				"small\n", dev->name);
+			CLAW_DBF_TEXT(2,setup,"wrtszbad");
+		}
+		if (( p_sysval->read_frame_size < p_env->read_size) &&
+		   ( p_env->packing == 0)) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_HOST_RCV_TOO_SMALL );
+			printk(KERN_INFO "%s: host read size is too "
+				"small\n", dev->name);
+			CLAW_DBF_TEXT(2,setup,"rdsizbad");
+		}
+		claw_snd_sys_validate_rsp(dev, p_ctlbk, 0 );
+		printk("%s: CLAW device %.8s: System validate"
+			" completed.\n",dev->name, temp_ws_name);
+		printk("%s: sys Validate Rsize:%d Wsize:%d\n",dev->name,
+			p_sysval->read_frame_size,p_sysval->write_frame_size);
+		privptr->system_validate_comp=1;
+		/* partner asked for the packed application? switch mode */
+		if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
+			p_env->packing = PACKING_ASK;
+		}
+		claw_strt_conn_req(dev);
+		break;
+
+	case SYSTEM_VALIDATE_RESPONSE:
+		p_sysval=(struct sysval *)&(p_ctlbk->data);
+		printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d,"
+			"WS name=%.8s,Host name=%.8s\n",
+			dev->name,
+			p_ctlbk->version,
+			p_ctlbk->correlator,
+			p_ctlbk->rc,
+			p_sysval->WS_name,
+			p_sysval->host_name);
+		switch (p_ctlbk->rc)
+		{
+		case 0:
+			printk(KERN_INFO "%s: CLAW device "
+				"%.8s: System validate "
+				"completed.\n",
+				dev->name, temp_ws_name);
+			/* only issue a connect request once */
+			if (privptr->system_validate_comp == 0)
+				claw_strt_conn_req(dev);
+			privptr->system_validate_comp=1;
+			break;
+		case CLAW_RC_NAME_MISMATCH:
+			printk(KERN_INFO "%s: Sys Validate "
+				"Resp : Host, WS name is "
+				"mismatch\n",
+				dev->name);
+			break;
+		case CLAW_RC_WRONG_VERSION:
+			printk(KERN_INFO "%s: Sys Validate "
+				"Resp : Wrong version\n",
+				dev->name);
+			break;
+		case CLAW_RC_HOST_RCV_TOO_SMALL:
+			printk(KERN_INFO "%s: Sys Validate "
+				"Resp : bad frame size\n",
+				dev->name);
+			break;
+		default:
+			printk(KERN_INFO "%s: Sys Validate "
+				"error code=%d \n",
+				dev->name, p_ctlbk->rc );
+			break;
+		}
+		break;
+
+	case CONNECTION_REQUEST:
+		p_connect=(struct conncmd *)&(p_ctlbk->data);
+		printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d,"
+			"Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
+			dev->name,
+			p_ctlbk->version,
+			p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			p_connect->host_name,
+			p_connect->WS_name);
+		/* reject duplicate links, bad link ids and unknown
+		 * application names with a DISCONNECT; note the code
+		 * still falls through to send CONNECTION_CONFIRM below */
+		if (privptr->active_link_ID!=0 ) {
+			claw_snd_disc(dev, p_ctlbk);
+			printk(KERN_INFO "%s: Conn Req error : "
+				"already logical link is active \n",
+				dev->name);
+		}
+		if (p_ctlbk->linkid!=1 ) {
+			claw_snd_disc(dev, p_ctlbk);
+			printk(KERN_INFO "%s: Conn Req error : "
+				"req logical link id is not 1\n",
+				dev->name);
+		}
+		rc=find_link(dev,
+			p_connect->host_name, p_connect->WS_name);
+		if (rc!=0) {
+			claw_snd_disc(dev, p_ctlbk);
+			printk(KERN_INFO "%s: Conn Req error : "
+				"req appl name does not match\n",
+				dev->name);
+		}
+		claw_send_control(dev,
+			CONNECTION_CONFIRM, p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			0, p_connect->host_name,
+			p_connect->WS_name);
+		if (p_env->packing == PACKING_ASK) {
+			printk("%s: Now Pack ask\n",dev->name);
+			p_env->packing = PACK_SEND;
+			claw_snd_conn_req(dev,0);
+		}
+		printk(KERN_INFO "%s: CLAW device %.8s: Connection "
+			"completed link_id=%d.\n",
+			dev->name, temp_ws_name,
+			p_ctlbk->linkid);
+		privptr->active_link_ID=p_ctlbk->linkid;
+		p_ch=&privptr->channel[WRITE];
+		wake_up(&p_ch->wait);	/* wake up claw_open ( WRITE) */
+		break;
+	case CONNECTION_RESPONSE:
+		p_connect=(struct conncmd *)&(p_ctlbk->data);
+		printk(KERN_INFO "%s: Revc Conn Resp: Vers=%d,link_id=%d,"
+			"Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
+			dev->name,
+			p_ctlbk->version,
+			p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			p_ctlbk->rc,
+			p_connect->host_name,
+			p_connect->WS_name);
+
+		if (p_ctlbk->rc !=0 ) {
+			printk(KERN_INFO "%s: Conn Resp error: rc=%d \n",
+				dev->name, p_ctlbk->rc);
+			return 1;
+		}
+		rc=find_link(dev,
+			p_connect->host_name, p_connect->WS_name);
+		if (rc!=0) {
+			claw_snd_disc(dev, p_ctlbk);
+			printk(KERN_INFO "%s: Conn Resp error: "
+				"req appl name does not match\n",
+				dev->name);
+		}
+		/* should be until CONNECTION_CONFIRM */
+		/* negated link id marks the connection as "pending
+		 * confirm"; CONNECTION_CONFIRM below re-negates it */
+		privptr->active_link_ID = - (p_ctlbk->linkid);
+		break;
+	case CONNECTION_CONFIRM:
+		p_connect=(struct conncmd *)&(p_ctlbk->data);
+		printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
+			"Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
+			dev->name,
+			p_ctlbk->version,
+			p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			p_connect->host_name,
+			p_connect->WS_name);
+		if (p_ctlbk->linkid== -(privptr->active_link_ID)) {
+			privptr->active_link_ID=p_ctlbk->linkid;
+			if (p_env->packing > PACKING_ASK) {
+				printk(KERN_INFO "%s: Confirmed Now packing\n",dev->name);
+				p_env->packing = DO_PACKED;
+			}
+			p_ch=&privptr->channel[WRITE];
+			wake_up(&p_ch->wait);
+		}
+		else {
+			printk(KERN_INFO "%s: Conn confirm: "
+				"unexpected linkid=%d \n",
+				dev->name, p_ctlbk->linkid);
+			claw_snd_disc(dev, p_ctlbk);
+		}
+		break;
+	case DISCONNECT:
+		printk(KERN_INFO "%s: Disconnect: "
+			"Vers=%d,link_id=%d,Corr=%d\n",
+			dev->name, p_ctlbk->version,
+			p_ctlbk->linkid, p_ctlbk->correlator);
+		/* link 2 going down during packed negotiation means the
+		 * pack handshake is done; fall back to link 1 packed */
+		if ((p_ctlbk->linkid == 2) &&
+		    (p_env->packing == PACK_SEND)) {
+			privptr->active_link_ID = 1;
+			p_env->packing = DO_PACKED;
+		}
+		else
+			privptr->active_link_ID=0;
+		break;
+	case CLAW_ERROR:
+		printk(KERN_INFO "%s: CLAW ERROR detected\n",
+			dev->name);
+		break;
+	default:
+		printk(KERN_INFO "%s: Unexpected command code=%d \n",
+			dev->name,  p_ctlbk->command);
+		break;
+	}
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n",
+		dev->name,__FUNCTION__,__LINE__);
+#endif
+
+	return 0;
+}   /*    end of claw_process_control    */
+
+
+/*-------------------------------------------------------------------*
+* claw_send_control *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * Build a CLAW control record of the given @type in privptr->ctl_bk,
+ * copy it into a freshly allocated skb and queue it for transmission
+ * via claw_hw_tx().  @local_name/@remote_name are 8-byte name fields.
+ * Returns 0 on success, -ENOMEM if no skb could be allocated.
+ */
+static int
+claw_send_control(struct net_device *dev, __u8 type, __u8 link,
+	 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
+{
+	struct claw_privbk 		*privptr;
+	struct clawctl                  *p_ctl;
+	struct sysval                   *p_sysval;
+	struct conncmd                  *p_connect;
+	struct sk_buff 			*skb;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"sndcntl");
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s: Sending Control Packet \n",dev->name);
+	printk(KERN_INFO "%s: variable type = 0x%X, link = "
+		"%d, correlator = %d, rc = %d\n",
+		dev->name,type, link, correlator, rc);
+	printk(KERN_INFO "%s: variable local_name = %s, "
+		"remote_name = %s\n",dev->name, local_name, remote_name);
+#endif
+	privptr=dev->priv;
+	p_ctl=(struct clawctl *)&privptr->ctl_bk;
+
+	p_ctl->command=type;
+	p_ctl->version=CLAW_VERSION_ID;
+	p_ctl->linkid=link;
+	p_ctl->correlator=correlator;
+	p_ctl->rc=rc;
+
+	/* both views overlay the same data area of the record */
+	p_sysval=(struct sysval *)&p_ctl->data;
+	p_connect=(struct conncmd *)&p_ctl->data;
+
+	switch (p_ctl->command) {
+	case SYSTEM_VALIDATE_REQUEST:
+	case SYSTEM_VALIDATE_RESPONSE:
+		memcpy(&p_sysval->host_name, local_name, 8);
+		memcpy(&p_sysval->WS_name, remote_name, 8);
+		if (privptr->p_env->packing > 0) {
+			p_sysval->read_frame_size=DEF_PACK_BUFSIZE;
+			p_sysval->write_frame_size=DEF_PACK_BUFSIZE;
+		} else {
+			/* how big is the biggest group of packets */
+			p_sysval->read_frame_size=privptr->p_env->read_size;
+			p_sysval->write_frame_size=privptr->p_env->write_size;
+		}
+		memset(&p_sysval->reserved, 0x00, 4);
+		break;
+	case CONNECTION_REQUEST:
+	case CONNECTION_RESPONSE:
+	case CONNECTION_CONFIRM:
+	case DISCONNECT:
+		/* NOTE(review): names are written through p_sysval even
+		 * for connection commands; presumably the host_name/
+		 * WS_name offsets coincide in both overlays -- confirm
+		 * against the struct definitions. */
+		memcpy(&p_sysval->host_name, local_name, 8);
+		memcpy(&p_sysval->WS_name, remote_name, 8);
+		if (privptr->p_env->packing > 0) {
+			/* How big is the biggest packet */
+			p_connect->reserved1[0]=CLAW_FRAME_SIZE;
+			p_connect->reserved1[1]=CLAW_FRAME_SIZE;
+		} else {
+			memset(&p_connect->reserved1, 0x00, 4);
+			memset(&p_connect->reserved2, 0x00, 4);
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* write Control Record to the device */
+
+
+	skb = dev_alloc_skb(sizeof(struct clawctl));
+	if (!skb) {
+		printk(	"%s:%s low on mem, returning...\n",
+			dev->name,__FUNCTION__);
+#ifdef DEBUG
+		printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n",
+			dev->name,__FUNCTION__);
+#endif
+		return -ENOMEM;
+	}
+	memcpy(skb_put(skb, sizeof(struct clawctl)),
+		p_ctl, sizeof(struct clawctl));
+#ifdef IOTRACE
+	printk(KERN_INFO "%s: outbnd claw cntl data \n",dev->name);
+	dumpit((char *)p_ctl,sizeof(struct clawctl));
+#endif
+	/* third arg selects the packed path once packing has been
+	 * negotiated; skb ownership passes to claw_hw_tx -- presumably
+	 * it frees or queues the buffer (not freed here) */
+	if (privptr->p_env->packing >= PACK_SEND)
+		claw_hw_tx(skb, dev, 1);
+	else
+		claw_hw_tx(skb, dev, 0);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d\n",
+		dev->name,__FUNCTION__,__LINE__);
+#endif
+
+	return 0;
+}  /*   end of claw_send_control  */
+
+/*-------------------------------------------------------------------*
+* claw_snd_conn_req *
+* *
+*--------------------------------------------------------------------*/
+/*
+ * Send a CONNECTION_REQUEST on @link.  Only valid once system
+ * validation has completed; the application name used depends on the
+ * negotiated packing mode.  Returns 1 if validation is outstanding,
+ * otherwise the claw_send_control() result.
+ */
+static int
+claw_snd_conn_req(struct net_device *dev, __u8 link)
+{
+	struct claw_privbk *privptr = dev->priv;
+	struct clawctl *ctl = (struct clawctl *)&privptr->ctl_bk;
+	int ret = 1;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"snd_conn");
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s: variable link = %X, dev =\n",dev->name, link);
+	dumpit((char *) dev, sizeof(struct net_device));
+#endif
+	ctl->linkid = link;
+	/* may not connect before the validate handshake finished */
+	if (privptr->system_validate_comp == 0x00) {
+#ifdef FUNCTRACE
+		printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n",
+			dev->name,__FUNCTION__,__LINE__);
+#endif
+		return ret;
+	}
+	if (privptr->p_env->packing == PACKING_ASK)
+		ret = claw_send_control(dev, CONNECTION_REQUEST, 0, 0, 0,
+			WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
+	if (privptr->p_env->packing == PACK_SEND)
+		ret = claw_send_control(dev, CONNECTION_REQUEST, 0, 0, 0,
+			WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
+	if (privptr->p_env->packing == 0)
+		ret = claw_send_control(dev, CONNECTION_REQUEST, 0, 0, 0,
+			HOST_APPL_NAME, privptr->p_env->api_type);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
+		dev->name,__FUNCTION__,__LINE__, ret);
+#endif
+	return ret;
+
+}  /*  end of claw_snd_conn_req  */
+
+
+/*-------------------------------------------------------------------*
+* claw_snd_disc *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * Send a DISCONNECT echoing the link id, correlator and names of the
+ * control record @p_ctl we are rejecting.  Returns the result of
+ * claw_send_control().
+ */
+static int
+claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
+{
+	struct conncmd *p_conn = (struct conncmd *)&p_ctl->data;
+	int ret;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"snd_dsc");
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s: variable dev =\n",dev->name);
+	dumpit((char *) dev, sizeof(struct net_device));
+	printk(KERN_INFO "%s: variable p_ctl",dev->name);
+	dumpit((char *) p_ctl, sizeof(struct clawctl));
+#endif
+	ret = claw_send_control(dev, DISCONNECT, p_ctl->linkid,
+		p_ctl->correlator, 0,
+		p_conn->host_name, p_conn->WS_name);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
+		dev->name,__FUNCTION__, __LINE__, ret);
+#endif
+	return ret;
+}     /*   end of claw_snd_disc    */
+
+
+/*-------------------------------------------------------------------*
+* claw_snd_sys_validate_rsp *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * Answer a SYSTEM_VALIDATE request with our host and adapter names and
+ * the given @return_code.  Returns the claw_send_control() result.
+ */
+static int
+claw_snd_sys_validate_rsp(struct net_device *dev,
+	struct clawctl *p_ctl, __u32 return_code)
+{
+	struct claw_privbk *privptr = dev->priv;
+	struct claw_env *env;
+	int ret;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",
+		dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"chkresp");
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s: variable return_code = %d, dev =\n",
+		dev->name, return_code);
+	dumpit((char *) dev, sizeof(struct net_device));
+	printk(KERN_INFO "%s: variable p_ctl =\n",dev->name);
+	dumpit((char *) p_ctl, sizeof(struct clawctl));
+#endif
+	env = privptr->p_env;
+	ret = claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
+		p_ctl->linkid,
+		p_ctl->correlator,
+		return_code,
+		env->host_name,
+		env->adapter_name);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
+		dev->name,__FUNCTION__,__LINE__, ret);
+#endif
+	return ret;
+}  /*    end of claw_snd_sys_validate_rsp    */
+
+/*-------------------------------------------------------------------*
+* claw_strt_conn_req *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * Kick off the connect handshake on logical link 1.  Thin wrapper
+ * around claw_snd_conn_req(); returns its result.
+ */
+static int
+claw_strt_conn_req(struct net_device *dev )
+{
+	int ret;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"conn_req");
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s: variable dev =\n",dev->name);
+	dumpit((char *) dev, sizeof(struct net_device));
+#endif
+	ret = claw_snd_conn_req(dev, 1);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
+		dev->name,__FUNCTION__,__LINE__, ret);
+#endif
+	return ret;
+}    /*   end of claw_strt_conn_req   */
+
+
+
+/*-------------------------------------------------------------------*
+ * claw_stats *
+ *-------------------------------------------------------------------*/
+
+/*
+ * Return the interface statistics kept in the private block.
+ */
+static struct
+net_device_stats *claw_stats(struct net_device *dev)
+{
+	struct claw_privbk *priv;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(4,trace,"stats");
+	priv = dev->priv;
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d\n",
+		dev->name,__FUNCTION__,__LINE__);
+#endif
+	return &priv->stats;
+}   /*  end of claw_stats   */
+
+
+/*-------------------------------------------------------------------*
+* unpack_read *
+* *
+*--------------------------------------------------------------------*/
+/*
+ * Walk the chain of completed read buffers, unpack their contents
+ * (one frame per buffer, or several packed sub-frames per buffer when
+ * packing is active), reassemble multi-frame "More-To-Come" messages
+ * in the mtc envelope, and hand each finished packet to netif_rx().
+ * Drained CCW blocks are recycled via add_claw_reads() and the read
+ * channel is restarted.
+ */
+static void
+unpack_read(struct net_device *dev )
+{
+	struct sk_buff *skb;
+	struct claw_privbk *privptr;
+	struct claw_env    *p_env;
+	struct ccwbk 	*p_this_ccw;
+	struct ccwbk 	*p_first_ccw;	/* head of chain to recycle */
+	struct ccwbk 	*p_last_ccw;	/* tail of chain to recycle */
+	struct clawph 	*p_packh;
+	void		*p_packd;
+	struct clawctl 	*p_ctlrec=NULL;
+
+	__u32	len_of_data;
+	__u32	pack_off;	/* offset of next sub-frame in buffer */
+	__u8	link_num;
+	__u8 	mtc_this_frm=0;	/* More-To-Come flag of current frame */
+	__u32	bytes_to_mov;
+	struct chbk *p_ch = NULL;
+	int	i=0;		/* frames processed (for tracing) */
+	int	p=0;		/* packed sub-frames in current buffer */
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s enter \n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(4,trace,"unpkread");
+	p_first_ccw=NULL;
+	p_last_ccw=NULL;
+	p_packh=NULL;
+	p_packd=NULL;
+	privptr=dev->priv;
+	p_env = privptr->p_env;
+	p_this_ccw=privptr->p_read_active_first;
+	i=0;
+	/* a buffer whose header flag is still CLAW_PENDING has not
+	 * completed yet -- stop there */
+	while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
+#ifdef IOTRACE
+		printk(KERN_INFO "%s p_this_ccw \n",dev->name);
+		dumpit((char*)p_this_ccw, sizeof(struct ccwbk));
+		printk(KERN_INFO "%s Inbound p_this_ccw->p_buffer(64)"
+			" pk=%d \n",dev->name,p_env->packing);
+		dumpit((char *)p_this_ccw->p_buffer, 64 );
+#endif
+		pack_off = 0;
+		p = 0;
+		p_this_ccw->header.flag=CLAW_PENDING;
+		privptr->p_read_active_first=p_this_ccw->next;
+		p_this_ccw->next=NULL;
+		p_packh = (struct clawph *)p_this_ccw->p_buffer;
+		if ((p_env->packing == PACK_SEND) &&
+		    (p_packh->len == 32) &&
+		    (p_packh->link_num == 0)) {   /* is it a packed ctl rec? */
+			p_packh++;  /* peek past pack header */
+			p_ctlrec = (struct clawctl *)p_packh;
+			p_packh--;  /* un peek */
+			if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
+		            (p_ctlrec->command == CONNECTION_CONFIRM))
+				p_env->packing = DO_PACKED;
+		}
+		if (p_env->packing == DO_PACKED)
+			link_num=p_packh->link_num;
+		else
+			link_num=p_this_ccw->header.opcode / 8;
+		if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
+#ifdef DEBUGMSG
+			printk(KERN_INFO "%s: %s > More_to_come is ON\n",
+			dev->name,__FUNCTION__);
+#endif
+			mtc_this_frm=1;
+			/* MTC frames are expected to be full-sized */
+			if (p_this_ccw->header.length!=
+				privptr->p_env->read_size ) {
+				printk(KERN_INFO " %s: Invalid frame detected "
+					"length is %02x\n" ,
+					dev->name, p_this_ccw->header.length);
+			}
+		}
+
+		if (privptr->mtc_skipping) {
+			/*
+			 * We're in the mode of skipping past a
+			 * multi-frame message
+			 * that we can't process for some reason or other.
+			 * The first frame without the More-To-Come flag is
+			 * the last frame of the skipped message.
+			 */
+			/*  in case of More-To-Come not set in this frame */
+			if (mtc_this_frm==0) {
+				privptr->mtc_skipping=0; /* Ok, the end */
+				privptr->mtc_logical_link=-1;
+			}
+#ifdef DEBUGMSG
+			printk(KERN_INFO "%s:%s goto next "
+				"frame from MoretoComeSkip \n",
+				dev->name,__FUNCTION__);
+#endif
+			goto NextFrame;
+		}
+
+		/* link 0 carries control traffic, not data */
+		if (link_num==0) {
+			claw_process_control(dev, p_this_ccw);
+#ifdef DEBUGMSG
+			printk(KERN_INFO "%s:%s goto next "
+				"frame from claw_process_control \n",
+				dev->name,__FUNCTION__);
+#endif
+			CLAW_DBF_TEXT(4,trace,"UnpkCntl");
+			goto NextFrame;
+		}
+unpack_next:
+		/* when packing, iterate the sub-frames inside the buffer */
+		if (p_env->packing == DO_PACKED) {
+			if (pack_off > p_env->read_size)
+				goto NextFrame;
+			p_packd = p_this_ccw->p_buffer+pack_off;
+			p_packh = (struct clawph *) p_packd;
+			if ((p_packh->len == 0) || /* all done with this frame? */
+			    (p_packh->flag != 0))
+				goto NextFrame;
+			bytes_to_mov = p_packh->len;
+			pack_off += bytes_to_mov+sizeof(struct clawph);
+			p++;
+		} else {
+			bytes_to_mov=p_this_ccw->header.length;
+		}
+		if (privptr->mtc_logical_link<0) {
+#ifdef DEBUGMSG
+		printk(KERN_INFO "%s: %s mtc_logical_link < 0  \n",
+			dev->name,__FUNCTION__);
+#endif
+
+		/*
+		*  if More-To-Come is set in this frame then we don't know
+		*  length of entire message, and hence have to allocate
+		*  large buffer   */
+
+		/*      We are starting a new envelope  */
+		privptr->mtc_offset=0;
+			privptr->mtc_logical_link=link_num;
+		}
+
+		/* guard the fixed-size reassembly envelope */
+		if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
+			/*      error     */
+#ifdef DEBUGMSG
+			printk(KERN_INFO "%s: %s > goto next "
+				"frame from MoretoComeSkip \n",
+				dev->name,
+				__FUNCTION__);
+			printk(KERN_INFO "      bytes_to_mov %d > (MAX_ENVELOPE_"
+				"SIZE-privptr->mtc_offset %d)\n",
+				bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset));
+#endif
+			privptr->stats.rx_frame_errors++;
+			goto NextFrame;
+		}
+		if (p_env->packing == DO_PACKED) {
+			memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
+				p_packd+sizeof(struct clawph), bytes_to_mov);
+
+		} else	{
+			memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
+				p_this_ccw->p_buffer, bytes_to_mov);
+		}
+#ifdef DEBUGMSG
+		printk(KERN_INFO "%s: %s() received data \n",
+			dev->name,__FUNCTION__);
+		if (p_env->packing == DO_PACKED)
+			dumpit((char *)p_packd+sizeof(struct clawph),32);
+		else
+			dumpit((char *)p_this_ccw->p_buffer, 32);
+		printk(KERN_INFO "%s: %s() bytelength %d \n",
+			dev->name,__FUNCTION__,bytes_to_mov);
+#endif
+		/* no More-To-Come: the message is complete, deliver it */
+		if (mtc_this_frm==0) {
+			len_of_data=privptr->mtc_offset+bytes_to_mov;
+			skb=dev_alloc_skb(len_of_data);
+			if (skb) {
+				memcpy(skb_put(skb,len_of_data),
+					privptr->p_mtc_envelope,
+					len_of_data);
+				skb->mac.raw=skb->data;
+				skb->dev=dev;
+				skb->protocol=htons(ETH_P_IP);
+				skb->ip_summed=CHECKSUM_UNNECESSARY;
+				privptr->stats.rx_packets++;
+				privptr->stats.rx_bytes+=len_of_data;
+				netif_rx(skb);
+#ifdef DEBUGMSG
+				printk(KERN_INFO "%s: %s() netif_"
+					"rx(skb) completed \n",
+					dev->name,__FUNCTION__);
+#endif
+			}
+			else {
+				privptr->stats.rx_dropped++;
+				printk(KERN_WARNING "%s: %s() low on memory\n",
+				dev->name,__FUNCTION__);
+			}
+			privptr->mtc_offset=0;
+			privptr->mtc_logical_link=-1;
+		}
+		else {
+			/* partial message: keep accumulating */
+			privptr->mtc_offset+=bytes_to_mov;
+		}
+		if (p_env->packing == DO_PACKED)
+			goto unpack_next;
+NextFrame:
+		/*
+		 * Remove ThisCCWblock from active read queue, and add it
+		 * to queue of free blocks to be reused.
+		 */
+		i++;
+		p_this_ccw->header.length=0xffff;
+		p_this_ccw->header.opcode=0xff;
+		/*
+		*       add this one to the free queue for later reuse
+		*/
+		if (p_first_ccw==NULL) {
+			p_first_ccw = p_this_ccw;
+		}
+		else {
+			p_last_ccw->next = p_this_ccw;
+		}
+		p_last_ccw = p_this_ccw;
+		/*
+		*       chain to next block on active read queue
+		*/
+		p_this_ccw = privptr->p_read_active_first;
+		CLAW_DBF_TEXT_(4,trace,"rxpkt %d",p);
+	} /* end of while */
+
+	/*      check validity                  */
+
+#ifdef IOTRACE
+	printk(KERN_INFO "%s:%s processed frame is %d \n",
+		dev->name,__FUNCTION__,i);
+	printk(KERN_INFO "%s:%s F:%lx L:%lx\n",
+		dev->name,
+		__FUNCTION__,
+		(unsigned long)p_first_ccw,
+		(unsigned long)p_last_ccw);
+#endif
+	CLAW_DBF_TEXT_(4,trace,"rxfrm %d",i);
+	add_claw_reads(dev, p_first_ccw, p_last_ccw);
+	p_ch=&privptr->channel[READ];
+	claw_strt_read(dev, LOCK_YES);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s: %s exit on line %d\n",
+		dev->name, __FUNCTION__, __LINE__);
+#endif
+	return;
+}     /*  end of unpack_read   */
+
+/*-------------------------------------------------------------------*
+* claw_strt_read *
+* *
+*--------------------------------------------------------------------*/
+/*
+ * Start (or re-arm) the READ channel program.  The signal block flag
+ * is set to CLAW_IDLE, or CLAW_BUSY while read/write work is still
+ * outstanding; if no I/O is currently active on the channel a new CCW
+ * program is issued at the head of the active read queue.
+ * @lock == LOCK_YES takes the ccw device lock around the start.
+ */
+static void
+claw_strt_read (struct net_device *dev, int lock )
+{
+	int        rc = 0;
+	__u32      parm;
+	unsigned long  saveflags = 0;
+	struct claw_privbk *privptr=dev->priv;
+	struct ccwbk*p_ccwbk;
+	struct chbk *p_ch;
+	struct clawh *p_clawh;
+	p_ch=&privptr->channel[READ];
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter  \n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock);
+	dumpit((char *) dev, sizeof(struct net_device));
+#endif
+	CLAW_DBF_TEXT(4,trace,"StRdNter");
+	p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
+	p_clawh->flag=CLAW_IDLE;    /* 0x00 */
+
+	/* tell the partner we are busy while either queue still has
+	 * a completed (non-pending) element outstanding */
+	if ((privptr->p_write_active_first!=NULL &&
+	     privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
+	    (privptr->p_read_active_first!=NULL &&
+	     privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
+		p_clawh->flag=CLAW_BUSY;    /* 0xff */
+	}
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s:%s state-%02x\n" ,
+		dev->name,__FUNCTION__, p_ch->claw_state);
+#endif
+	if (lock==LOCK_YES) {
+		spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
+	}
+	/* only one channel program at a time: IO_active is the gate */
+	if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
+#ifdef DEBUGMSG
+		printk(KERN_INFO "%s: HOT READ started in %s\n" ,
+			dev->name,__FUNCTION__);
+		p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
+		dumpit((char *)&p_clawh->flag , 1);
+#endif
+		CLAW_DBF_TEXT(4,trace,"HotRead");
+		p_ccwbk=privptr->p_read_active_first;
+		parm = (unsigned long) p_ch;
+		rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
+				       0xff, 0);
+		if (rc != 0) {
+			ccw_check_return_code(p_ch->cdev, rc);
+		}
+	}
+	else {
+#ifdef DEBUGMSG
+		printk(KERN_INFO "%s: No READ started by %s() In progress\n" ,
+			dev->name,__FUNCTION__);
+#endif
+		CLAW_DBF_TEXT(2,trace,"ReadAct");
+	}
+
+	if (lock==LOCK_YES) {
+		spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
+	}
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d\n",
+		dev->name,__FUNCTION__,__LINE__);
+#endif
+	CLAW_DBF_TEXT(4,trace,"StRdExit");
+	return;
+}       /*    end of claw_strt_read    */
+
+/*-------------------------------------------------------------------*
+* claw_strt_out_IO *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * claw_strt_out_IO - start channel I/O for the active write chain.
+ *
+ * If the WRITE channel is idle (IO_active bit was clear) a CCW program
+ * is started at the head of the active write queue.  No-op when @dev
+ * is NULL, the channel is stopped, or the queue is empty.
+ */
+static void
+claw_strt_out_IO( struct net_device *dev )
+{
+	int             	rc = 0;
+	unsigned long   	parm;
+	struct claw_privbk 	*privptr;
+	struct chbk     	*p_ch;
+	struct ccwbk   		*p_first_ccw;
+
+	/* validate dev before ANY dereference: the previous code read
+	 * dev->name in the FUNCTRACE printk before this check */
+	if (!dev) {
+		return;
+	}
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+#endif
+	privptr=(struct claw_privbk *)dev->priv;
+	p_ch=&privptr->channel[WRITE];
+
+#ifdef DEBUGMSG
+	printk(KERN_INFO "%s:%s state-%02x\n" ,
+		dev->name,__FUNCTION__,p_ch->claw_state);
+#endif
+	CLAW_DBF_TEXT(4,trace,"strt_io");
+	p_first_ccw=privptr->p_write_active_first;
+
+	if (p_ch->claw_state == CLAW_STOP)
+		return;
+	if (p_first_ccw == NULL) {	/* nothing queued to write */
+#ifdef FUNCTRACE
+		printk(KERN_INFO "%s:%s Exit on line %d\n",
+			dev->name,__FUNCTION__,__LINE__);
+#endif
+		return;
+	}
+	/* only start a program when none is active on the channel */
+	if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
+		parm = (unsigned long) p_ch;
+#ifdef DEBUGMSG
+		printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__FUNCTION__);
+		dumpit((char *)p_first_ccw, sizeof(struct ccwbk));
+#endif
+		CLAW_DBF_TEXT(2,trace,"StWrtIO");
+		rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm,
+				       0xff, 0);
+		if (rc != 0) {
+			ccw_check_return_code(p_ch->cdev, rc);
+		}
+	}
+	dev->trans_start = jiffies;
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d\n",
+		dev->name,__FUNCTION__,__LINE__);
+#endif
+
+	return;
+}       /*    end of claw_strt_out_IO    */
+
+/*-------------------------------------------------------------------*
+* Free write buffers *
+* *
+*--------------------------------------------------------------------*/
+
+/*
+ * Scan the active write queue and return every CCW whose transfer has
+ * completed (header.flag != CLAW_PENDING) to the write free chain,
+ * crediting tx statistics.  Once at least one buffer is free again the
+ * NOBUFFER busy state is cleared so transmission can resume.
+ */
+static void
+claw_free_wrt_buf( struct net_device *dev )
+{
+
+	struct claw_privbk *privptr=(struct claw_privbk *)dev->priv;
+	struct ccwbk*p_first_ccw;
+	struct ccwbk*p_last_ccw;
+	struct ccwbk*p_this_ccw;
+	struct ccwbk*p_next_ccw;
+#ifdef IOTRACE
+	struct ccwbk*p_buf;
+#endif
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+	printk(KERN_INFO "%s: free count = %d  variable dev =\n",
+		dev->name,privptr->write_free_count);
+#endif
+	CLAW_DBF_TEXT(4,trace,"freewrtb");
+	/*  scan the write queue to free any completed write packets   */
+	p_first_ccw=NULL;
+	p_last_ccw=NULL;
+#ifdef IOTRACE
+	printk(KERN_INFO "%s:  Dump current CCW chain \n",dev->name );
+	p_buf=privptr->p_write_active_first;
+	while (p_buf!=NULL) {
+		dumpit((char *)p_buf, sizeof(struct ccwbk));
+		p_buf=p_buf->next;
+	}
+	if (p_buf==NULL) {
+		printk(KERN_INFO "%s: privptr->p_write_"
+			"active_first==NULL\n",dev->name );
+	}
+	p_buf=(struct ccwbk*)privptr->p_end_ccw;
+	dumpit((char *)p_buf, sizeof(struct endccw));
+#endif
+	p_this_ccw=privptr->p_write_active_first;
+	while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
+	{
+		p_next_ccw = p_this_ccw->next;
+		/* a CCW may only be freed when the one after it has
+		 * also completed, or it is the tail of the queue */
+		if (((p_next_ccw!=NULL) &&
+		     (p_next_ccw->header.flag!=CLAW_PENDING)) ||
+		    ((p_this_ccw == privptr->p_write_active_last) &&
+		     (p_this_ccw->header.flag!=CLAW_PENDING))) {
+			/* The next CCW is OK or this is  */
+			/* the last CCW...free it   @A1A  */
+			privptr->p_write_active_first=p_this_ccw->next;
+			p_this_ccw->header.flag=CLAW_PENDING;
+			p_this_ccw->next=privptr->p_write_free_chain;
+			privptr->p_write_free_chain=p_this_ccw;
+			++privptr->write_free_count;
+			privptr->stats.tx_bytes+= p_this_ccw->write.count;
+			p_this_ccw=privptr->p_write_active_first;
+			privptr->stats.tx_packets++;
+		}
+		else {
+			break;
+		}
+	}
+	if (privptr->write_free_count!=0) {
+		claw_clearbit_busy(TB_NOBUFFER,dev);
+	}
+	/*   whole chain removed?   */
+	if (privptr->p_write_active_first==NULL) {
+		privptr->p_write_active_last=NULL;
+#ifdef DEBUGMSG
+		printk(KERN_INFO "%s:%s p_write_"
+			"active_first==NULL\n",dev->name,__FUNCTION__);
+#endif
+	}
+#ifdef IOTRACE
+	printk(KERN_INFO "%s: Dump arranged CCW chain \n",dev->name );
+	p_buf=privptr->p_write_active_first;
+	while (p_buf!=NULL) {
+		dumpit((char *)p_buf, sizeof(struct ccwbk));
+		p_buf=p_buf->next;
+	}
+	if (p_buf==NULL) {
+		printk(KERN_INFO "%s: privptr->p_write_active_"
+			"first==NULL\n",dev->name );
+	}
+	p_buf=(struct ccwbk*)privptr->p_end_ccw;
+	dumpit((char *)p_buf, sizeof(struct endccw));
+#endif
+
+	CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n",
+		dev->name,__FUNCTION__, __LINE__,privptr->write_free_count);
+#endif
+	return;
+}
+
+/*-------------------------------------------------------------------*
+* claw free netdevice *
+* *
+*--------------------------------------------------------------------*/
+/*
+ * claw_free_netdevice - tear down a CLAW net_device.
+ * @dev:      device to release; NULL is tolerated (no-op)
+ * @free_dev: nonzero to also free_netdev() the structure (module build)
+ */
+static void
+claw_free_netdevice(struct net_device * dev, int free_dev)
+{
+	struct claw_privbk *privptr;
+
+	/* NULL check must precede any dev-> access: the previous code
+	 * dereferenced dev->name in the FUNCTRACE printk first */
+	if (!dev)
+		return;
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"free_dev");
+	CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
+	privptr = dev->priv;
+	if (dev->flags & IFF_RUNNING)
+		claw_release(dev);
+	if (privptr) {
+		privptr->channel[READ].ndev = NULL;  /* say it's free */
+	}
+	dev->priv=NULL;
+#ifdef MODULE
+	if (free_dev) {
+		free_netdev(dev);
+	}
+#endif
+	CLAW_DBF_TEXT(2,setup,"feee_ok");
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
+#endif
+}
+
+/**
+ * Claw init netdevice
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static void
+claw_init_netdevice(struct net_device * dev)
+{
+	/* validate dev before it is dereferenced; the old code logged
+	 * dev->name via CLAW_DBF_TEXT_ before performing this check */
+	if (!dev) {
+		printk(KERN_WARNING "claw:%s BAD Device exit line %d\n",
+		__FUNCTION__,__LINE__);
+		CLAW_DBF_TEXT(2,setup,"baddev");
+		return;
+	}
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"init_dev");
+	CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
+	dev->mtu = CLAW_DEFAULT_MTU_SIZE;
+	dev->hard_start_xmit = claw_tx;
+	dev->open = claw_open;
+	dev->stop = claw_release;
+	dev->get_stats = claw_stats;
+	dev->change_mtu = claw_change_mtu;
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+	dev->type = ARPHRD_SLIP;
+	dev->tx_queue_len = 1300;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	SET_MODULE_OWNER(dev);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"initok");
+	return;
+}
+
+/**
+ * Init a new channel in the privptr->channel[i].
+ *
+ * @param cdev The ccw_device to be added.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int
+add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
+{
+	struct chbk *p_ch;
+
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id);
+	privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
+	p_ch = &privptr->channel[i];
+	p_ch->cdev = cdev;
+	snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id);
+	/* device number is the hex part of the bus id after "0.0." */
+	sscanf(cdev->dev.bus_id+4,"%x",&p_ch->devno);
+	/* irb buffer is freed again in claw_remove_device() */
+	if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
+		printk(KERN_WARNING "%s Out of memory in %s for irb\n",
+			p_ch->id,__FUNCTION__);
+#ifdef FUNCTRACE
+		printk(KERN_INFO "%s:%s Exit on line %d\n",
+			p_ch->id,__FUNCTION__,__LINE__);
+#endif
+		return -ENOMEM;
+	}
+	memset(p_ch->irb, 0, sizeof (struct irb));
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d\n",
+		cdev->dev.bus_id,__FUNCTION__,__LINE__);
+#endif
+	return 0;
+}
+
+
+/**
+ *
+ * Setup an interface.
+ *
+ * @param cgdev Device to be setup.
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+static int
+claw_new_device(struct ccwgroup_device *cgdev)
+{
+	struct claw_privbk *privptr;
+	struct claw_env *p_env;
+	struct net_device *dev;
+	int ret;
+
+	pr_debug("%s() called\n", __FUNCTION__);
+	printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id);
+	CLAW_DBF_TEXT(2,setup,"new_dev");
+	privptr = cgdev->dev.driver_data;
+	/* mirror the group's driver_data into both channel devices */
+	cgdev->cdev[READ]->dev.driver_data = privptr;
+	cgdev->cdev[WRITE]->dev.driver_data = privptr;
+	if (!privptr)
+		return -ENODEV;
+	p_env = privptr->p_env;
+	/* device numbers are the hex part of the bus ids after "0.0." */
+	sscanf(cgdev->cdev[READ]->dev.bus_id+4,"%x",
+		&p_env->devno[READ]);
+        sscanf(cgdev->cdev[WRITE]->dev.bus_id+4,"%x",
+		&p_env->devno[WRITE]);
+	ret = add_channel(cgdev->cdev[0],0,privptr);
+	if (ret == 0)
+		ret = add_channel(cgdev->cdev[1],1,privptr);
+	if (ret != 0) {
+		printk(KERN_WARNING
+		 	"add channel failed "
+			"with ret = %d\n", ret);
+		goto out;
+	}
+	ret = ccw_device_set_online(cgdev->cdev[READ]);
+	if (ret != 0) {
+		printk(KERN_WARNING
+		 "claw: ccw_device_set_online %s READ failed "
+			"with ret = %d\n",cgdev->cdev[READ]->dev.bus_id,ret);
+		goto out;
+	}
+	ret = ccw_device_set_online(cgdev->cdev[WRITE]);
+	if (ret != 0) {
+		printk(KERN_WARNING
+		 "claw: ccw_device_set_online %s WRITE failed "
+			"with ret = %d\n",cgdev->cdev[WRITE]->dev.bus_id, ret);
+		goto out;
+	}
+	/* sizeof_priv is 0: dev->priv is pointed at the separately
+	 * allocated claw_privbk below rather than embedded storage */
+	dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
+	if (!dev) {
+		printk(KERN_WARNING "%s:alloc_netdev failed\n",__FUNCTION__);
+		goto out;
+	}
+	dev->priv = privptr;
+	cgdev->dev.driver_data = privptr;
+        cgdev->cdev[READ]->dev.driver_data = privptr;
+        cgdev->cdev[WRITE]->dev.driver_data = privptr;
+	/* sysfs magic */
+        SET_NETDEV_DEV(dev, &cgdev->dev);
+	if (register_netdev(dev) != 0) {
+		/* NOTE(review): claw_free_netdevice() only calls
+		 * free_netdev() when built as a module; in the built-in
+		 * case this path appears to leak dev — confirm */
+		claw_free_netdevice(dev, 1);
+		CLAW_DBF_TEXT(2,trace,"regfail");
+		goto out;
+	}
+	dev->flags &=~IFF_RUNNING;
+	if (privptr->buffs_alloc == 0) {
+	        ret=init_ccw_bk(dev);
+		if (ret !=0) {
+			printk(KERN_WARNING
+			 "claw: init_ccw_bk failed with ret=%d\n", ret);
+			unregister_netdev(dev);
+			claw_free_netdevice(dev,1);
+			CLAW_DBF_TEXT(2,trace,"ccwmem");
+			goto out;
+		}
+	}
+	privptr->channel[READ].ndev = dev;
+	privptr->channel[WRITE].ndev = dev;
+	privptr->p_env->ndev = dev;
+
+	printk(KERN_INFO "%s:readsize=%d  writesize=%d "
+		"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
+                dev->name, p_env->read_size,
+		p_env->write_size, p_env->read_buffers,
+                p_env->write_buffers, p_env->devno[READ],
+		p_env->devno[WRITE]);
+        printk(KERN_INFO "%s:host_name:%.8s, adapter_name "
+		":%.8s api_type: %.8s\n",
+                dev->name, p_env->host_name,
+		p_env->adapter_name , p_env->api_type);
+	return 0;
+out:
+	/* error path: take both channel devices offline again */
+	ccw_device_set_offline(cgdev->cdev[1]);
+	ccw_device_set_offline(cgdev->cdev[0]);
+
+	return -ENODEV;
+}
+
+/* Drop every skb still queued on q, releasing the extra reference
+ * taken when the skb was queued (IRQ-safe free). */
+static void
+claw_purge_skb_queue(struct sk_buff_head *q)
+{
+        struct sk_buff *skb;
+
+	CLAW_DBF_TEXT(4,trace,"purgque");
+
+        while ((skb = skb_dequeue(q))) {
+                atomic_dec(&skb->users);
+                dev_kfree_skb_irq(skb);
+        }
+}
+
+/**
+ * Shutdown an interface.
+ *
+ * @param cgdev Device to be shut down.
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+static int
+claw_shutdown_device(struct ccwgroup_device *cgdev)
+{
+	struct claw_privbk *priv;
+	struct net_device *ndev;
+
+	pr_debug("%s() called\n", __FUNCTION__);
+	CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
+	priv = cgdev->dev.driver_data;
+	if (!priv)
+		return -ENODEV;
+	ndev = priv->channel[READ].ndev;
+	if (ndev) {
+		/* Close the device */
+		printk(KERN_INFO
+			"%s: shutting down \n",ndev->name);
+		if (ndev->flags & IFF_RUNNING)
+			claw_release(ndev); /* result intentionally ignored */
+		ndev->flags &=~IFF_RUNNING;
+		unregister_netdev(ndev);
+		ndev->priv = NULL;  /* cgdev data, not ndev's to free */
+		claw_free_netdevice(ndev, 1);
+		priv->channel[READ].ndev = NULL;
+		priv->channel[WRITE].ndev = NULL;
+		priv->p_env->ndev = NULL;
+	}
+	ccw_device_set_offline(cgdev->cdev[1]);
+	ccw_device_set_offline(cgdev->cdev[0]);
+	return 0;
+}
+
+/* Tear down a CLAW group device: shut it down if online, remove its
+ * sysfs files and free all private state allocated at probe time. */
+static void
+claw_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct claw_privbk *priv;
+
+	pr_debug("%s() called\n", __FUNCTION__);
+	CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
+	priv = cgdev->dev.driver_data;
+	if (!priv) {
+		printk(KERN_WARNING "claw: %s() no Priv exiting\n",__FUNCTION__);
+		return;
+	}
+	printk(KERN_INFO "claw: %s() called %s will be removed.\n",
+			__FUNCTION__,cgdev->cdev[0]->dev.bus_id);
+	if (cgdev->state == CCWGROUP_ONLINE)
+		claw_shutdown_device(cgdev);
+	claw_remove_files(&cgdev->dev);
+	/* kfree(NULL) is a no-op, so no NULL guards are needed */
+	kfree(priv->p_mtc_envelope);
+	priv->p_mtc_envelope=NULL;
+	kfree(priv->p_env);
+	priv->p_env=NULL;
+	kfree(priv->channel[0].irb);
+	priv->channel[0].irb=NULL;
+	kfree(priv->channel[1].irb);
+	priv->channel[1].irb=NULL;
+	kfree(priv);
+	cgdev->dev.driver_data=NULL;
+	cgdev->cdev[READ]->dev.driver_data = NULL;
+	cgdev->cdev[WRITE]->dev.driver_data = NULL;
+	put_device(&cgdev->dev);
+}
+
+
+/*
+ * sysfs attributes
+ */
+/* sysfs 'host_name' show: print the configured host system name */
+static ssize_t
+claw_hname_show(struct device *dev, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%s\n",p_env->host_name);
+}
+
+/* sysfs 'host_name' store: blank-pad to MAX_NAME_LEN and replace the
+ * trailing sysfs newline with a blank. */
+static ssize_t
+claw_hname_write(struct device *dev, const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	/* reject empty writes: count==0 would index host_name[-1] below */
+	if ((count < 1) || (count > MAX_NAME_LEN+1))
+		return -EINVAL;
+	memset(p_env->host_name, 0x20, MAX_NAME_LEN);
+	strncpy(p_env->host_name,buf, count);
+	p_env->host_name[count-1] = 0x20;  /* clear extra 0x0a */
+	p_env->host_name[MAX_NAME_LEN] = 0x00;
+	CLAW_DBF_TEXT(2,setup,"HstnSet");
+        CLAW_DBF_TEXT_(2,setup,"%s",p_env->host_name);
+
+	return count;
+}
+
+static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
+
+/* sysfs 'adapter_name' show: print the configured adapter name */
+static ssize_t
+claw_adname_show(struct device *dev, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%s\n",p_env->adapter_name);
+}
+
+/* sysfs 'adapter_name' store: blank-pad to MAX_NAME_LEN and replace
+ * the trailing sysfs newline with a blank. */
+static ssize_t
+claw_adname_write(struct device *dev, const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	/* reject empty writes: count==0 would index adapter_name[-1] below */
+	if ((count < 1) || (count > MAX_NAME_LEN+1))
+		return -EINVAL;
+	memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
+	strncpy(p_env->adapter_name,buf, count);
+	p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
+	p_env->adapter_name[MAX_NAME_LEN] = 0x00;
+	CLAW_DBF_TEXT(2,setup,"AdnSet");
+	CLAW_DBF_TEXT_(2,setup,"%s",p_env->adapter_name);
+
+	return count;
+}
+
+static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
+
+/* sysfs 'api_type' show: print the configured API type
+ * (TCPIP, API or PACKED) */
+static ssize_t
+claw_apname_show(struct device *dev, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%s\n",
+		       p_env->api_type);
+}
+
+/* sysfs 'api_type' store: set the API type; choosing the PACKED
+ * application name switches buffer sizes and enables packing. */
+static ssize_t
+claw_apname_write(struct device *dev, const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	/* reject empty writes: count==0 would index api_type[-1] below */
+	if ((count < 1) || (count > MAX_NAME_LEN+1))
+		return -EINVAL;
+	memset(p_env->api_type, 0x20, MAX_NAME_LEN);
+	strncpy(p_env->api_type,buf, count);
+	p_env->api_type[count-1] = 0x20;  /* we get a loose 0x0a */
+	p_env->api_type[MAX_NAME_LEN] = 0x00;
+	if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
+		p_env->read_size=DEF_PACK_BUFSIZE;
+		p_env->write_size=DEF_PACK_BUFSIZE;
+		p_env->packing=PACKING_ASK;
+		CLAW_DBF_TEXT(2,setup,"PACKING");
+	}
+	else {
+		p_env->packing=0;
+		p_env->read_size=CLAW_FRAME_SIZE;
+		p_env->write_size=CLAW_FRAME_SIZE;
+		CLAW_DBF_TEXT(2,setup,"ApiSet");
+	}
+	CLAW_DBF_TEXT_(2,setup,"%s",p_env->api_type);
+	return count;
+}
+
+static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
+
+/* sysfs 'write_buffer' show: print the configured write buffer count */
+static ssize_t
+claw_wbuff_show(struct device *dev, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%d\n", p_env->write_buffers);
+}
+
+/* sysfs 'write_buffer' store: set the number of write buffers
+ * (2..64 when packing, 2..512 otherwise). */
+static ssize_t
+claw_wbuff_write(struct device *dev, const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+	int nnn,max;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	/* fail on non-numeric input instead of comparing an
+	 * uninitialized value */
+	if (sscanf(buf, "%i", &nnn) != 1)
+		return -EINVAL;
+	if (p_env->packing) {
+		max = 64;
+	}
+	else {
+		max = 512;
+	}
+	if ((nnn > max ) || (nnn < 2))
+		return -EINVAL;
+	p_env->write_buffers = nnn;
+	CLAW_DBF_TEXT(2,setup,"Wbufset");
+	CLAW_DBF_TEXT_(2,setup,"WB=%d",p_env->write_buffers);
+	return count;
+}
+
+static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
+
+/* sysfs 'read_buffer' show: print the configured read buffer count */
+static ssize_t
+claw_rbuff_show(struct device *dev, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%d\n", p_env->read_buffers);
+}
+
+/* sysfs 'read_buffer' store: set the number of read buffers
+ * (2..64 when packing, 2..512 otherwise). */
+static ssize_t
+claw_rbuff_write(struct device *dev, const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *p_env;
+	int nnn,max;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	/* fail on non-numeric input instead of comparing an
+	 * uninitialized value */
+	if (sscanf(buf, "%i", &nnn) != 1)
+		return -EINVAL;
+	if (p_env->packing) {
+		max = 64;
+	}
+	else {
+		max = 512;
+	}
+	if ((nnn > max ) || (nnn < 2))
+		return -EINVAL;
+	p_env->read_buffers = nnn;
+	CLAW_DBF_TEXT(2,setup,"Rbufset");
+	CLAW_DBF_TEXT_(2,setup,"RB=%d",p_env->read_buffers);
+	return count;
+}
+
+static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
+
+static struct attribute *claw_attr[] = {
+ &dev_attr_read_buffer.attr,
+ &dev_attr_write_buffer.attr,
+ &dev_attr_adapter_name.attr,
+ &dev_attr_api_type.attr,
+ &dev_attr_host_name.attr,
+ NULL,
+};
+
+static struct attribute_group claw_attr_group = {
+ .attrs = claw_attr,
+};
+
+/* Create the CLAW sysfs attribute group; returns 0 or negative errno. */
+static int
+claw_add_files(struct device *dev)
+{
+	pr_debug("%s() called\n", __FUNCTION__);
+	CLAW_DBF_TEXT(2,setup,"add_file");
+	return sysfs_create_group(&dev->kobj, &claw_attr_group);
+}
+
+/* Remove the CLAW sysfs attribute group created by claw_add_files(). */
+static void
+claw_remove_files(struct device *dev)
+{
+	pr_debug("%s() called\n", __FUNCTION__);
+	CLAW_DBF_TEXT(2,setup,"rem_file");
+	sysfs_remove_group(&dev->kobj, &claw_attr_group);
+}
+
+/*--------------------------------------------------------------------*
+* claw_init and cleanup *
+*---------------------------------------------------------------------*/
+
+/* Module exit: unregister the cu3088 discipline and the debug views. */
+static void __exit
+claw_cleanup(void)
+{
+	unregister_cu3088_discipline(&claw_group_driver);
+	claw_unregister_debug_facility();
+	printk(KERN_INFO "claw: Driver unloaded\n");
+
+}
+
+/**
+ * Initialize module.
+ * This is called just after the module is loaded.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int __init
+claw_init(void)
+{
+	int ret = 0;
+	printk(KERN_INFO "claw: starting driver "
+#ifdef MODULE
+		"module "
+#else
+		"compiled into kernel "
+#endif
+		" $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $ \n");
+
+
+#ifdef FUNCTRACE
+        printk(KERN_INFO "claw: %s() enter \n",__FUNCTION__);
+#endif
+	ret = claw_register_debug_facility();
+	if (ret) {
+		printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
+			__FUNCTION__,ret);
+		return ret;
+	}
+	CLAW_DBF_TEXT(2,setup,"init_mod");
+	ret = register_cu3088_discipline(&claw_group_driver);
+	if (ret) {
+		/* roll back the debug facility on failure; message
+		 * prefix fixed from "claw;" to "claw:" */
+		claw_unregister_debug_facility();
+		printk(KERN_WARNING "claw: %s() cu3088 register failed %d\n",
+			__FUNCTION__,ret);
+	}
+#ifdef FUNCTRACE
+        printk(KERN_INFO "claw: %s() exit \n",__FUNCTION__);
+#endif
+	return ret;
+}
+
+module_init(claw_init);
+module_exit(claw_cleanup);
+
+
+
+/*--------------------------------------------------------------------*
+* End of File *
+*---------------------------------------------------------------------*/
+
+
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
new file mode 100644
index 000000000000..3df71970f601
--- /dev/null
+++ b/drivers/s390/net/claw.h
@@ -0,0 +1,335 @@
+/*******************************************************
+* Define constants *
+* *
+********************************************************/
+#define VERSION_CLAW_H "$Revision: 1.6 $"
+/*-----------------------------------------------------*
+* CCW command codes for CLAW protocol *
+*------------------------------------------------------*/
+
+#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */
+#define CCW_CLAW_CMD_READ 0x02 /* read */
+#define CCW_CLAW_CMD_NOP 0x03 /* NOP */
+#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */
+#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */
+#define CCW_CLAW_CMD_TIC 0x08 /* TIC */
+#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */
+#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */
+#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */
+
+
+/*-----------------------------------------------------*
+* CLAW Unique constants *
+*------------------------------------------------------*/
+
+#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */
+#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */
+#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */
+#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */
+#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */
+
+/*-----------------------------------------------------*
+*     CLAW control command code                        *
+*------------------------------------------------------*/
+
+#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */
+#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */
+#define CONNECTION_REQUEST 0x21 /* Connection request */
+#define CONNECTION_RESPONSE 0x22 /* Connection response */
+#define CONNECTION_CONFIRM 0x23 /* Connection confirm */
+#define DISCONNECT 0x24 /* Disconnect */
+#define CLAW_ERROR 0x41 /* CLAW error message */
+#define CLAW_VERSION_ID 2 /* CLAW version ID */
+
+/*-----------------------------------------------------*
+*     CLAW adapter sense bytes                         *
+*------------------------------------------------------*/
+
+#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */
+
+/*-----------------------------------------------------*
+* CLAW control command return codes *
+*------------------------------------------------------*/
+
+#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */
+#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */
+#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */
+ /* less than Linux on zSeries*/
+ /* transmit size */
+
+/*-----------------------------------------------------*
+* CLAW Constants application name *
+*------------------------------------------------------*/
+
+#define HOST_APPL_NAME "TCPIP "
+#define WS_APPL_NAME_IP_LINK "TCPIP "
+#define WS_APPL_NAME_IP_NAME "IP "
+#define WS_APPL_NAME_API_LINK "API "
+#define WS_APPL_NAME_PACKED "PACKED "
+#define WS_NAME_NOT_DEF "NOT_DEF "
+#define PACKING_ASK 1
+#define PACK_SEND 2
+#define DO_PACKED 3
+
+#define MAX_ENVELOPE_SIZE 65536
+#define CLAW_DEFAULT_MTU_SIZE 4096
+#define DEF_PACK_BUFSIZE 32768
+#define READ 0
+#define WRITE 1
+
+#define TB_TX 0 /* sk buffer handling in process */
+#define TB_STOP 1 /* network device stop in process */
+#define TB_RETRY 2 /* retry in process */
+#define TB_NOBUFFER 3 /* no buffer on free queue */
+#define CLAW_MAX_LINK_ID 1
+#define CLAW_MAX_DEV 256 /* max claw devices */
+#define MAX_NAME_LEN 8 /* host name, adapter name length */
+#define CLAW_FRAME_SIZE 4096
+#define CLAW_ID_SIZE BUS_ID_SIZE+3
+
+/* state machine codes used in claw_irq_handler */
+
+#define CLAW_STOP 0
+#define CLAW_START_HALT_IO 1
+#define CLAW_START_SENSEID 2
+#define CLAW_START_READ 3
+#define CLAW_START_WRITE 4
+
+/*-----------------------------------------------------*
+* Lock flag *
+*------------------------------------------------------*/
+#define LOCK_YES 0
+#define LOCK_NO 1
+
+/*-----------------------------------------------------*
+* DBF Debug macros *
+*------------------------------------------------------*/
+/* Log a fixed text event to the claw_dbf_<name> debug area. */
+#define CLAW_DBF_TEXT(level, name, text) \
+	do { \
+		debug_text_event(claw_dbf_##name, level, text); \
+	} while (0)
+
+/* Log a raw hex buffer to the claw_dbf_<name> debug area. */
+#define CLAW_DBF_HEX(level,name,addr,len) \
+do { \
+	debug_event(claw_dbf_##name,level,(void*)(addr),len); \
+} while (0)
+
+/* printf-style variant.
+ * NOTE(review): formats into the shared 'debug_buffer' with no
+ * visible serialization — concurrent callers may interleave; confirm
+ * callers hold a lock or run in a single context. */
+#define CLAW_DBF_TEXT_(level,name,text...) \
+do { \
+	sprintf(debug_buffer, text); \
+	debug_text_event(claw_dbf_##name,level, debug_buffer);\
+} while (0)
+
+/*******************************************************
+* Define Control Blocks *
+* *
+********************************************************/
+
+/*------------------------------------------------------*/
+/* CLAW header */
+/*------------------------------------------------------*/
+
+/* 4-byte CLAW block header (2+1+1 bytes, no padding) */
+struct clawh {
+        __u16  length;     /* length of data read by preceding read CCW */
+        __u8   opcode;     /* equivalent read CCW */
+        __u8   flag;       /* flag of FF to indicate read was completed */
+};
+
+/*------------------------------------------------------*/
+/* CLAW Packing header 4 bytes */
+/*------------------------------------------------------*/
+/* 4-byte packing header (2+1+1 bytes, no padding) */
+struct clawph {
+       __u16 len;  	/* Length of Packed Data Area   */
+       __u8  flag;  	/* Reserved not used            */
+       __u8  link_num;	/* Link ID                      */
+};
+
+/*------------------------------------------------------*/
+/* CLAW Ending struct ccwbk */
+/*------------------------------------------------------*/
+struct endccw {
+	__u32   real;    	/* real address of this block */
+       __u8    write1;  	/* write 1 is active */
+        __u8    read1;   	/* read 1 is active  */
+        __u16   reserved; 	/* reserved for future use */
+	/* NOTE(review): the nop CCW pairs below presumably terminate the
+	 * two alternating write and read CCW programs — confirm against
+	 * the CCW chain setup code */
+        struct ccw1    write1_nop1;
+        struct ccw1    write1_nop2;
+        struct ccw1    write2_nop1;
+        struct ccw1    write2_nop2;
+        struct ccw1    read1_nop1;
+        struct ccw1    read1_nop2;
+        struct ccw1    read2_nop1;
+        struct ccw1    read2_nop2;
+};
+
+/*------------------------------------------------------*/
+/* CLAW struct ccwbk */
+/*------------------------------------------------------*/
+/* per-buffer CCW bookkeeping block, chained via 'next' */
+struct ccwbk {
+        void   *next;        /* pointer to next ccw block */
+        __u32   real;        /* real address of this ccw */
+        void    *p_buffer;   /* virtual address of data */
+        struct  clawh     header;  /* claw header */
+        struct  ccw1    write;   /* write CCW */
+        struct  ccw1    w_read_FF; /* read FF */
+        struct  ccw1    w_TIC_1;        /* TIC */
+        struct  ccw1    read;         /* read CCW  */
+        struct  ccw1    read_h;        /* read header */
+        struct  ccw1    signal;       /* signal SMOD  */
+        struct  ccw1    r_TIC_1;        /* TIC1 */
+        struct  ccw1    r_read_FF;      /* read FF  */
+        struct  ccw1    r_TIC_2;        /* TIC2 */
+};
+
+/*------------------------------------------------------*/
+/* CLAW control block */
+/*------------------------------------------------------*/
+/* 32-byte CLAW control message (8 header bytes + 24 data bytes) */
+struct clawctl {
+        __u8    command;      /* control command */
+        __u8    version;      /* CLAW protocol version */
+        __u8    linkid;       /* link ID */
+        __u8    correlator;   /* correlator */
+        __u8    rc;           /* return code */
+        __u8    reserved1;    /* reserved */
+        __u8    reserved2;    /* reserved */
+        __u8    reserved3;    /* reserved */
+        __u8    data[24];     /* command specific fields */
+};
+
+/*------------------------------------------------------*/
+/* Data for SYSTEMVALIDATE command */
+/*------------------------------------------------------*/
+/* 24-byte payload of SYSTEM_VALIDATE request/response
+ * (fits clawctl.data) */
+struct sysval  {
+        char    WS_name[8];        /* Workstation System name  */
+        char    host_name[8];      /* Host system name */
+        __u16   read_frame_size;   /* read frame size */
+        __u16   write_frame_size;  /* write frame size */
+        __u8    reserved[4];       /* reserved */
+};
+
+/*------------------------------------------------------*/
+/* Data for Connect command */
+/*------------------------------------------------------*/
+/* 24-byte payload of the connection commands (fits clawctl.data) */
+struct conncmd  {
+        char     WS_name[8];       /* Workstation application name  */
+        char     host_name[8];     /* Host application name      */
+        __u16    reserved1[2];     /* reserved */
+        __u8     reserved2[4];     /* reserved */
+};
+
+/*------------------------------------------------------*/
+/* Data for CLAW error */
+/*------------------------------------------------------*/
+/* 24-byte payload of a CLAW_ERROR message; all fields unused */
+struct clawwerror  {
+        char      reserved1[8];   /* reserved */
+        char      reserved2[8];   /* reserved */
+        char      reserved3[8];   /* reserved */
+};
+
+/*------------------------------------------------------*/
+/* Data buffer for CLAW */
+/*------------------------------------------------------*/
+/* one maximum-size data envelope */
+struct clawbuf  {
+       char      buffer[MAX_ENVELOPE_SIZE];   /* data buffer */
+};
+
+/*------------------------------------------------------*/
+/* Channel control block for read and write channel */
+/*------------------------------------------------------*/
+
+struct chbk {
+        unsigned int devno;       /* device number from the bus id */
+        int irq;
+        char id[CLAW_ID_SIZE];    /* "cl-<bus id>", see add_channel() */
+       __u32 IO_active;
+        __u8 claw_state;          /* CLAW_STOP..CLAW_START_WRITE codes */
+        struct irb *irb;          /* allocated in add_channel() */
+       	struct ccw_device *cdev;  /* pointer to the channel device */
+	struct net_device *ndev;
+        wait_queue_head_t wait;
+        struct tasklet_struct    tasklet;
+        struct timer_list timer;
+        unsigned long flag_a;    /* atomic flags */
+#define CLAW_BH_ACTIVE 0
+        unsigned long flag_b;    /* atomic flags */
+#define CLAW_WRITE_ACTIVE 0
+        __u8 last_dstat;         /* presumably the last device status
+                                  * byte seen by the irq handler —
+                                  * TODO confirm */
+        __u8 flag;               /* CLAW_READ/CLAW_WRITE role and
+                                  * CLAW_TIMER bits, see below */
+	struct sk_buff_head collect_queue;
+	spinlock_t collect_lock;
+#define CLAW_WRITE      0x02      /* - Set if this is a write channel */
+#define CLAW_READ	0x01      /* - Set if this is a read channel  */
+#define CLAW_TIMER      0x80      /* - Set if timer made the wake_up  */
+};
+
+/*--------------------------------------------------------------*
+* CLAW environment block *
+*---------------------------------------------------------------*/
+
+struct claw_env {
+        unsigned int devno[2];  /* device number */
+        char host_name[9];      /* Host name */
+        char adapter_name [9];  /* adapter name */
+        char api_type[9];       /* TCPIP, API or PACKED */
+        void *p_priv;           /* privptr */
+        __u16  read_buffers;    /* read buffer number */
+        __u16  write_buffers;   /* write buffer number */
+        __u16  read_size;       /* read buffer size */
+        __u16  write_size;      /* write buffer size */
+        __u16  dev_id;          /* device ident */
+        __u8  packing;          /* are we packing? (0, PACKING_ASK,
+                                 * PACK_SEND or DO_PACKED) */
+        volatile __u8  queme_switch;  /* gate for imed packing  */
+        volatile unsigned long pk_delay; /* Delay for adaptive packing */
+        __u8  in_use;           /* device active flag */
+        struct net_device *ndev;    	/* backward ptr to the net dev*/
+};
+
+/*--------------------------------------------------------------*
+* CLAW main control block *
+*---------------------------------------------------------------*/
+
+struct claw_privbk {
+        /* CCW/read/write buffer areas and their page bookkeeping —
+         * presumably filled in by init_ccw_bk(); TODO confirm */
+        void *p_buff_ccw;
+        __u32      p_buff_ccw_num;
+        void  *p_buff_read;
+        __u32      p_buff_read_num;
+        __u32      p_buff_pages_perread;
+        void  *p_buff_write;
+        __u32      p_buff_write_num;
+        __u32      p_buff_pages_perwrite;
+        long       active_link_ID;           /* Active logical link ID */
+        struct ccwbk *p_write_free_chain;     /* pointer to free ccw chain*/
+        struct ccwbk *p_write_active_first;   /* ptr to the first write ccw*/
+        struct ccwbk *p_write_active_last;    /* ptr to the last write ccw*/
+        struct ccwbk *p_read_active_first;    /* ptr to the first read ccw*/
+        struct ccwbk *p_read_active_last;     /* ptr to the last read ccw */
+        struct endccw *p_end_ccw;              /*ptr to ending ccw*/
+        struct ccwbk *p_claw_signal_blk;       /* ptr to signal block */
+        __u32      write_free_count;  /* number of free bufs for write */
+	struct     net_device_stats  stats; 	/* 	device status    */
+        struct chbk channel[2];             /* Channel control blocks */
+        /* more-to-come (MTC) reassembly state, cf. MORE_to_COME_FLAG */
+        __u8 mtc_skipping;
+        int mtc_offset;
+        int mtc_logical_link;
+        void *p_mtc_envelope;
+	struct	   sk_buff	*pk_skb;	/* packing buffer    */
+	int	   pk_cnt;                      /* packed skb count */
+        struct clawctl ctl_bk;          /* last control message */
+        struct claw_env *p_env;         /* configuration, see claw_env */
+        __u8 system_validate_comp;      /* handshake completed flag */
+        __u8 release_pend;
+        __u8  checksum_received_ip_pkts;
+	__u8  buffs_alloc;              /* set once init_ccw_bk() ran */
+        struct endccw  end_ccw;
+        unsigned long  tbusy;           /* TB_* busy bits, see defines */
+
+};
+
+
+/************************************************************/
+/* define global constants */
+/************************************************************/
+
+#define CCWBK_SIZE sizeof(struct ccwbk)
+
+
diff --git a/drivers/s390/net/ctcdbug.c b/drivers/s390/net/ctcdbug.c
new file mode 100644
index 000000000000..2c86bfa11b2f
--- /dev/null
+++ b/drivers/s390/net/ctcdbug.c
@@ -0,0 +1,83 @@
+/*
+ *
+ * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.4 $)
+ *
+ * CTC / ESCON network driver - s390 dbf exploit.
+ *
+ * Copyright 2000,2003 IBM Corporation
+ *
+ * Author(s): Original Code written by
+ * Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ * $Revision: 1.4 $ $Date: 2004/08/04 10:11:59 $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ctcdbug.h"
+
+/**
+ * Debug Facility Stuff
+ */
+debug_info_t *ctc_dbf_setup = NULL;
+debug_info_t *ctc_dbf_data = NULL;
+debug_info_t *ctc_dbf_trace = NULL;
+
+DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
+
+/* Unregister whichever of the three CTC debug areas were registered;
+ * safe to call with partially initialized state. */
+void
+ctc_unregister_dbf_views(void)
+{
+	if (ctc_dbf_setup)
+		debug_unregister(ctc_dbf_setup);
+	if (ctc_dbf_data)
+		debug_unregister(ctc_dbf_data);
+	if (ctc_dbf_trace)
+		debug_unregister(ctc_dbf_trace);
+}
+/* Register the setup/data/trace debug areas with a hex/ascii view.
+ * Returns 0 on success or -ENOMEM after rolling back all areas. */
+int
+ctc_register_dbf_views(void)
+{
+	ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
+					CTC_DBF_SETUP_INDEX,
+					CTC_DBF_SETUP_NR_AREAS,
+					CTC_DBF_SETUP_LEN);
+	ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME,
+				       CTC_DBF_DATA_INDEX,
+				       CTC_DBF_DATA_NR_AREAS,
+				       CTC_DBF_DATA_LEN);
+	ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
+					CTC_DBF_TRACE_INDEX,
+					CTC_DBF_TRACE_NR_AREAS,
+					CTC_DBF_TRACE_LEN);
+
+	if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) ||
+	    (ctc_dbf_trace == NULL)) {
+		/* all-or-nothing: drop the ones that did register */
+		ctc_unregister_dbf_views();
+		return -ENOMEM;
+	}
+	debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL);
+
+	debug_register_view(ctc_dbf_data, &debug_hex_ascii_view);
+	debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL);
+
+	debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL);
+
+	return 0;
+}
+
+
diff --git a/drivers/s390/net/ctcdbug.h b/drivers/s390/net/ctcdbug.h
new file mode 100644
index 000000000000..ef8883951720
--- /dev/null
+++ b/drivers/s390/net/ctcdbug.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.4 $)
+ *
+ * CTC / ESCON network driver - s390 dbf exploit.
+ *
+ * Copyright 2000,2003 IBM Corporation
+ *
+ * Author(s): Original Code written by
+ * Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ * $Revision: 1.4 $ $Date: 2004/10/15 09:26:58 $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include <asm/debug.h>
+/**
+ * Debug Facility stuff
+ */
+#define CTC_DBF_SETUP_NAME "ctc_setup"
+#define CTC_DBF_SETUP_LEN 16
+#define CTC_DBF_SETUP_INDEX 3
+#define CTC_DBF_SETUP_NR_AREAS 1
+#define CTC_DBF_SETUP_LEVEL 3
+
+#define CTC_DBF_DATA_NAME "ctc_data"
+#define CTC_DBF_DATA_LEN 128
+#define CTC_DBF_DATA_INDEX 3
+#define CTC_DBF_DATA_NR_AREAS 1
+#define CTC_DBF_DATA_LEVEL 2
+
+#define CTC_DBF_TRACE_NAME "ctc_trace"
+#define CTC_DBF_TRACE_LEN 16
+#define CTC_DBF_TRACE_INDEX 2
+#define CTC_DBF_TRACE_NR_AREAS 2
+#define CTC_DBF_TRACE_LEVEL 3
+
+#define DBF_TEXT(name,level,text) \
+ do { \
+ debug_text_event(ctc_dbf_##name,level,text); \
+ } while (0)
+
+#define DBF_HEX(name,level,addr,len) \
+ do { \
+ debug_event(ctc_dbf_##name,level,(void*)(addr),len); \
+ } while (0)
+
+DECLARE_PER_CPU(char[256], ctc_dbf_txt_buf);
+extern debug_info_t *ctc_dbf_setup;
+extern debug_info_t *ctc_dbf_data;
+extern debug_info_t *ctc_dbf_trace;
+
+
+#define DBF_TEXT_(name,level,text...) \
+ do { \
+ char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \
+ sprintf(ctc_dbf_txt_buf, text); \
+ debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \
+ put_cpu_var(ctc_dbf_txt_buf); \
+ } while (0)
+
+/* printf-style trace event.  NOTE: the 'name' argument is ignored;
+ * events always go to ctc_dbf_trace.  The original expansion called
+ * debug_sprintf_event() twice, logging every event twice. */
+#define DBF_SPRINTF(name,level,text...) \
+	do { \
+		debug_sprintf_event(ctc_dbf_trace, level, text ); \
+	} while (0)
+
+
+int ctc_register_dbf_views(void);
+
+void ctc_unregister_dbf_views(void);
+
+/**
+ * some more debug stuff
+ */
+
+#define HEXDUMP16(importance,header,ptr) \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
+ *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
+ *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
+ *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
+ *(((char*)ptr)+12),*(((char*)ptr)+13), \
+ *(((char*)ptr)+14),*(((char*)ptr)+15)); \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)+16),*(((char*)ptr)+17), \
+ *(((char*)ptr)+18),*(((char*)ptr)+19), \
+ *(((char*)ptr)+20),*(((char*)ptr)+21), \
+ *(((char*)ptr)+22),*(((char*)ptr)+23), \
+ *(((char*)ptr)+24),*(((char*)ptr)+25), \
+ *(((char*)ptr)+26),*(((char*)ptr)+27), \
+ *(((char*)ptr)+28),*(((char*)ptr)+29), \
+ *(((char*)ptr)+30),*(((char*)ptr)+31));
+
+/* Print len bytes from buf as hex to the console, 16 per line. */
+static inline void
+hex_dump(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		if (i && !(i % 16))
+			printk("\n");
+		printk("%02x ", *(buf + i));
+	}
+	printk("\n");
+}
+
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
new file mode 100644
index 000000000000..7266bf5ea659
--- /dev/null
+++ b/drivers/s390/net/ctcmain.c
@@ -0,0 +1,3304 @@
+/*
+ * $Id: ctcmain.c,v 1.72 2005/03/17 10:51:52 ptiedem Exp $
+ *
+ * CTC / ESCON network driver
+ *
+ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *              Peter Tiedemann (ptiedem@de.ibm.com)
+ * Driver Model stuff by : Cornelia Huck <cohuck@de.ibm.com>
+ *
+ * Documentation used:
+ * - Principles of Operation (IBM doc#: SA22-7201-06)
+ * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
+ * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
+ * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
+ * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
+ *
+ * and the source of the original CTC driver by:
+ * Dieter Wellerdiek (wel@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ * Jochen Röhrig (roehrig@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.72 $
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <asm/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "ctctty.h"
+#include "fsm.h"
+#include "cu3088.h"
+#include "ctcdbug.h"
+
+MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
+MODULE_LICENSE("GPL");
+
+/**
+ * CCW commands, used in this driver.
+ */
+#define CCW_CMD_WRITE 0x01
+#define CCW_CMD_READ 0x02
+#define CCW_CMD_SET_EXTENDED 0xc3
+#define CCW_CMD_PREPARE 0xe3
+
+#define CTC_PROTO_S390 0
+#define CTC_PROTO_LINUX 1
+#define CTC_PROTO_LINUX_TTY 2
+#define CTC_PROTO_OS390 3
+#define CTC_PROTO_MAX 3
+
+#define CTC_BUFSIZE_LIMIT 65535
+#define CTC_BUFSIZE_DEFAULT 32768
+
+#define CTC_TIMEOUT_5SEC 5000
+
+#define CTC_INITIAL_BLOCKLEN 2
+
+#define READ 0
+#define WRITE 1
+
+#define CTC_ID_SIZE BUS_ID_SIZE+3
+
+
+/* Per-channel transmit statistics and timing information. */
+struct ctc_profile {
+ unsigned long maxmulti; /* largest chained-TX block built (collect_len + 2) */
+ unsigned long maxcqueue; /* longest observed collect_queue length */
+ unsigned long doios_single; /* presumably: # of single-skb I/O starts -- confirm in TX path */
+ unsigned long doios_multi; /* # of chained (multi-skb) I/O starts */
+ unsigned long txlen; /* presumably: total bytes transmitted -- confirm in TX path */
+ unsigned long tx_time; /* longest observed TX completion time, microseconds */
+ struct timespec send_stamp; /* timestamp of the last I/O start */
+};
+
+/**
+ * Definition of one channel
+ */
+struct channel {
+
+ /**
+ * Pointer to next channel in list.
+ */
+ struct channel *next;
+ /* Textual channel id; sized CTC_ID_SIZE (BUS_ID_SIZE + 3). */
+ char id[CTC_ID_SIZE];
+ /* The ccw device this channel drives. */
+ struct ccw_device *cdev;
+
+ /**
+ * Type of this channel.
+ * CTC/A or Escon for valid channels.
+ */
+ enum channel_types type;
+
+ /**
+ * Misc. flags. See CHANNEL_FLAGS_... below
+ */
+ __u32 flags;
+
+ /**
+ * The protocol of this channel
+ */
+ __u16 protocol;
+
+ /**
+ * I/O and irq related stuff
+ */
+ struct ccw1 *ccw;
+ struct irb *irb;
+
+ /**
+ * RX/TX buffer size
+ */
+ int max_bufsize;
+
+ /**
+ * Transmit/Receive buffer.
+ */
+ struct sk_buff *trans_skb;
+
+ /**
+ * Universal I/O queue.
+ */
+ struct sk_buff_head io_queue;
+
+ /**
+ * TX queue for collecting skb's during busy.
+ */
+ struct sk_buff_head collect_queue;
+
+ /**
+ * Amount of data in collect_queue.
+ */
+ int collect_len;
+
+ /**
+ * spinlock for collect_queue and collect_len
+ */
+ spinlock_t collect_lock;
+
+ /**
+ * Timer for detecting unresponsive
+ * I/O operations.
+ */
+ fsm_timer timer;
+
+ /**
+ * Retry counter for misc. operations.
+ */
+ int retry;
+
+ /**
+ * The finite state machine of this channel
+ */
+ fsm_instance *fsm;
+
+ /**
+ * The corresponding net_device this channel
+ * belongs to.
+ */
+ struct net_device *netdev;
+
+ /* TX statistics (see struct ctc_profile). */
+ struct ctc_profile prof;
+
+ /* Base pointer of trans_skb's buffer, used to reset the skb
+ * between transfers. */
+ unsigned char *trans_skb_data;
+
+ /* LOG_FLAG_* bits; suppress repeated warnings until the next
+ * successful receive clears them. */
+ __u16 logflags;
+};
+
+#define CHANNEL_FLAGS_READ 0
+#define CHANNEL_FLAGS_WRITE 1
+#define CHANNEL_FLAGS_INUSE 2
+#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
+#define CHANNEL_FLAGS_FAILED 8
+#define CHANNEL_FLAGS_WAITIRQ 16
+#define CHANNEL_FLAGS_RWMASK 1
+#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
+
+#define LOG_FLAG_ILLEGALPKT 1
+#define LOG_FLAG_ILLEGALSIZE 2
+#define LOG_FLAG_OVERRUN 4
+#define LOG_FLAG_NOMEM 8
+
+#define CTC_LOGLEVEL_INFO 1
+#define CTC_LOGLEVEL_NOTICE 2
+#define CTC_LOGLEVEL_WARN 4
+#define CTC_LOGLEVEL_EMERG 8
+#define CTC_LOGLEVEL_ERR 16
+#define CTC_LOGLEVEL_DEBUG 32
+#define CTC_LOGLEVEL_CRIT 64
+
+#define CTC_LOGLEVEL_DEFAULT \
+(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
+
+#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
+
+static int loglevel = CTC_LOGLEVEL_DEFAULT;
+
+#define ctc_pr_debug(fmt, arg...) \
+do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
+
+#define ctc_pr_info(fmt, arg...) \
+do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
+
+#define ctc_pr_notice(fmt, arg...) \
+do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
+
+#define ctc_pr_warn(fmt, arg...) \
+do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
+
+#define ctc_pr_emerg(fmt, arg...) \
+do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
+
+#define ctc_pr_err(fmt, arg...) \
+do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
+
+#define ctc_pr_crit(fmt, arg...) \
+do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
+
+/**
+ * Linked list of all detected channels.
+ */
+static struct channel *channels = NULL;
+
+struct ctc_priv {
+ /* Interface statistics reported to the network stack. */
+ struct net_device_stats stats;
+ /* Bit 0 is the TX-busy flag (see ctc_clear_busy/ctc_test_and_set_busy). */
+ unsigned long tbusy;
+ /**
+ * The finite state machine of this interface.
+ */
+ fsm_instance *fsm;
+ /**
+ * The protocol of this device
+ */
+ __u16 protocol;
+ /**
+ * Timer for restarting after I/O Errors
+ */
+ fsm_timer restart_timer;
+
+ /* Configured buffer size -- presumably mirrors channel max_bufsize;
+ * confirm in device setup code. */
+ int buffer_size;
+
+ /* The two channels -- presumably indexed by READ/WRITE; confirm
+ * in the channel-assignment code. */
+ struct channel *channel[2];
+};
+
+/**
+ * Definition of our link level header.
+ */
+struct ll_header {
+ __u16 length; /* packet length including this header */
+ __u16 type; /* packet type (ETH_P_*); converted with ntohs() on receive */
+ __u16 unused; /* padding */
+};
+#define LL_HEADER_LENGTH (sizeof(struct ll_header))
+
+/**
+ * Compatibility macros for busy handling
+ * of network devices.
+ */
+/**
+ * Clear bit 0 of the device's tbusy flag and, unless the interface
+ * runs the tty protocol, wake the network transmit queue.
+ */
+static __inline__ void
+ctc_clear_busy(struct net_device * dev)
+{
+ clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
+ if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
+ netif_wake_queue(dev);
+}
+
+/**
+ * Stop the transmit queue (unless running the tty protocol) and
+ * atomically test-and-set the busy bit.
+ *
+ * @return nonzero if the device was already busy.
+ */
+static __inline__ int
+ctc_test_and_set_busy(struct net_device * dev)
+{
+ if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
+ netif_stop_queue(dev);
+ return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
+}
+
+/**
+ * Print Banner.
+ */
+static void
+print_banner(void)
+{
+ static int printed = 0; /* print the banner only once */
+ char vbuf[] = "$Revision: 1.72 $";
+ char *version = vbuf;
+
+ if (printed)
+ return;
+ /* Extract the version number from the CVS $Revision$ keyword. */
+ if ((version = strchr(version, ':'))) {
+ char *p = strchr(version + 1, '$');
+ if (p)
+ *p = '\0';
+ } else
+ version = " ??? ";
+ printk(KERN_INFO "CTC driver Version%s"
+#ifdef DEBUG
+ " (DEBUG-VERSION, " __DATE__ __TIME__ ")"
+#endif
+ " initialized\n", version);
+ printed = 1;
+}
+
+/**
+ * Return type of a detected device.
+ */
+static enum channel_types
+get_channel_type(struct ccw_device_id *id)
+{
+ enum channel_types type = (enum channel_types) id->driver_info;
+
+ /* FICON channels are handled exactly like ESCON channels. */
+ if (type == channel_type_ficon)
+ type = channel_type_escon;
+
+ return type;
+}
+
+/**
+ * States of the interface statemachine.
+ */
+enum dev_states {
+ DEV_STATE_STOPPED,
+ DEV_STATE_STARTWAIT_RXTX,
+ DEV_STATE_STARTWAIT_RX,
+ DEV_STATE_STARTWAIT_TX,
+ DEV_STATE_STOPWAIT_RXTX,
+ DEV_STATE_STOPWAIT_RX,
+ DEV_STATE_STOPWAIT_TX,
+ DEV_STATE_RUNNING,
+ /**
+ * MUST be always the last element!!
+ */
+ NR_DEV_STATES
+};
+
+static const char *dev_state_names[] = {
+ "Stopped",
+ "StartWait RXTX",
+ "StartWait RX",
+ "StartWait TX",
+ "StopWait RXTX",
+ "StopWait RX",
+ "StopWait TX",
+ "Running",
+};
+
+/**
+ * Events of the interface statemachine.
+ */
+enum dev_events {
+ DEV_EVENT_START,
+ DEV_EVENT_STOP,
+ DEV_EVENT_RXUP,
+ DEV_EVENT_TXUP,
+ DEV_EVENT_RXDOWN,
+ DEV_EVENT_TXDOWN,
+ DEV_EVENT_RESTART,
+ /**
+ * MUST be always the last element!!
+ */
+ NR_DEV_EVENTS
+};
+
+static const char *dev_event_names[] = {
+ "Start",
+ "Stop",
+ "RX up",
+ "TX up",
+ "RX down",
+ "TX down",
+ "Restart",
+};
+
+/**
+ * Events of the channel statemachine
+ */
+enum ch_events {
+ /**
+ * Events, representing return code of
+ * I/O operations (ccw_device_start, ccw_device_halt et al.)
+ */
+ CH_EVENT_IO_SUCCESS,
+ CH_EVENT_IO_EBUSY,
+ CH_EVENT_IO_ENODEV,
+ CH_EVENT_IO_EIO,
+ CH_EVENT_IO_UNKNOWN,
+
+ CH_EVENT_ATTNBUSY,
+ CH_EVENT_ATTN,
+ CH_EVENT_BUSY,
+
+ /**
+ * Events, representing unit-check
+ */
+ CH_EVENT_UC_RCRESET,
+ CH_EVENT_UC_RSRESET,
+ CH_EVENT_UC_TXTIMEOUT,
+ CH_EVENT_UC_TXPARITY,
+ CH_EVENT_UC_HWFAIL,
+ CH_EVENT_UC_RXPARITY,
+ CH_EVENT_UC_ZERO,
+ CH_EVENT_UC_UNKNOWN,
+
+ /**
+ * Events, representing subchannel-check
+ */
+ CH_EVENT_SC_UNKNOWN,
+
+ /**
+ * Events, representing machine checks
+ */
+ CH_EVENT_MC_FAIL,
+ CH_EVENT_MC_GOOD,
+
+ /**
+ * Event, representing normal IRQ
+ */
+ CH_EVENT_IRQ,
+ CH_EVENT_FINSTAT,
+
+ /**
+ * Event, representing timer expiry.
+ */
+ CH_EVENT_TIMER,
+
+ /**
+ * Events, representing commands from upper levels.
+ */
+ CH_EVENT_START,
+ CH_EVENT_STOP,
+
+ /**
+ * MUST be always the last element!!
+ */
+ NR_CH_EVENTS,
+};
+
+static const char *ch_event_names[] = {
+ "ccw_device success",
+ "ccw_device busy",
+ "ccw_device enodev",
+ "ccw_device ioerr",
+ "ccw_device unknown",
+
+ "Status ATTN & BUSY",
+ "Status ATTN",
+ "Status BUSY",
+
+ "Unit check remote reset",
+ "Unit check remote system reset",
+ "Unit check TX timeout",
+ "Unit check TX parity",
+ "Unit check Hardware failure",
+ "Unit check RX parity",
+ "Unit check ZERO",
+ "Unit check Unknown",
+
+ "SubChannel check Unknown",
+
+ "Machine check failure",
+ "Machine check operational",
+
+ "IRQ normal",
+ "IRQ final",
+
+ "Timer",
+
+ "Start",
+ "Stop",
+};
+
+/**
+ * States of the channel statemachine.
+ */
+enum ch_states {
+ /**
+ * Channel not assigned to any device,
+ * initial state, direction invalid
+ */
+ CH_STATE_IDLE,
+
+ /**
+ * Channel assigned but not operating
+ */
+ CH_STATE_STOPPED,
+ CH_STATE_STARTWAIT,
+ CH_STATE_STARTRETRY,
+ CH_STATE_SETUPWAIT,
+ CH_STATE_RXINIT,
+ CH_STATE_TXINIT,
+ CH_STATE_RX,
+ CH_STATE_TX,
+ CH_STATE_RXIDLE,
+ CH_STATE_TXIDLE,
+ CH_STATE_RXERR,
+ CH_STATE_TXERR,
+ CH_STATE_TERM,
+ CH_STATE_DTERM,
+ CH_STATE_NOTOP,
+
+ /**
+ * MUST be always the last element!!
+ */
+ NR_CH_STATES,
+};
+
+static const char *ch_state_names[] = {
+ "Idle",
+ "Stopped",
+ "StartWait",
+ "StartRetry",
+ "SetupWait",
+ "RX init",
+ "TX init",
+ "RX",
+ "TX",
+ "RX idle",
+ "TX idle",
+ "RX error",
+ "TX error",
+ "Terminating",
+ "Restarting",
+ "Not operational",
+};
+
+#ifdef DEBUG
+/**
+ * Dump header and first 16 bytes of an sk_buff for debugging purposes.
+ * Only active when loglevel includes CTC_LOGLEVEL_DEBUG.
+ *
+ * @param skb The sk_buff to dump.
+ * @param offset Offset relative to skb-data, where to start the dump.
+ *               May be negative (callers pass -6 to step back over the
+ *               2-byte block length and the ll_header).
+ */
+static void
+ctc_dump_skb(struct sk_buff *skb, int offset)
+{
+ unsigned char *p = skb->data;
+ __u16 bl;
+ struct ll_header *header;
+ int i;
+
+ if (!(loglevel & CTC_LOGLEVEL_DEBUG))
+ return;
+ p += offset;
+ bl = *((__u16 *) p);
+ p += 2;
+ header = (struct ll_header *) p;
+ p -= 2;
+
+ printk(KERN_DEBUG "dump:\n");
+ printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
+
+ printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
+ header->length);
+ printk(KERN_DEBUG "h->type=%04x\n", header->type);
+ printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
+ if (bl > 16)
+ bl = 16;
+ printk(KERN_DEBUG "data: ");
+ for (i = 0; i < bl; i++)
+ printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
+ printk("\n");
+}
+#else
+/* No-op stub when DEBUG is not defined. */
+static inline void
+ctc_dump_skb(struct sk_buff *skb, int offset)
+{
+}
+#endif
+
+/**
+ * Unpack a just received skb and hand it over to
+ * upper layers.
+ *
+ * @param ch The channel where this skb has been received.
+ * @param pskb The received skb.
+ */
+static __inline__ void
+ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
+{
+ struct net_device *dev = ch->netdev;
+ struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
+ /* Remaining payload of the block: ch_action_rx stored block_len - 2
+ * in the first two bytes before calling us. */
+ __u16 len = *((__u16 *) pskb->data);
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
+ /* Widen the data area so the first ll_header lies inside it,
+ * then strip the 2-byte block length. */
+ skb_put(pskb, 2 + LL_HEADER_LENGTH);
+ skb_pull(pskb, 2);
+ pskb->dev = dev;
+ pskb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* Walk the packets packed into this block. */
+ while (len > 0) {
+ struct sk_buff *skb;
+ struct ll_header *header = (struct ll_header *) pskb->data;
+
+ skb_pull(pskb, LL_HEADER_LENGTH);
+ if ((ch->protocol == CTC_PROTO_S390) &&
+ (header->type != ETH_P_IP)) {
+
+#ifndef DEBUG
+ if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
+#endif
+ /**
+ * Check packet type only if we stick strictly
+ * to S/390's protocol of OS390. This only
+ * supports IP. Otherwise allow any packet
+ * type.
+ */
+ ctc_pr_warn(
+ "%s Illegal packet type 0x%04x received, dropping\n",
+ dev->name, header->type);
+ ch->logflags |= LOG_FLAG_ILLEGALPKT;
+#ifndef DEBUG
+ }
+#endif
+#ifdef DEBUG
+ ctc_dump_skb(pskb, -6);
+#endif
+ privptr->stats.rx_dropped++;
+ privptr->stats.rx_frame_errors++;
+ return;
+ }
+ pskb->protocol = ntohs(header->type);
+ if (header->length <= LL_HEADER_LENGTH) {
+#ifndef DEBUG
+ if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
+#endif
+ ctc_pr_warn(
+ "%s Illegal packet size %d "
+ "received (MTU=%d blocklen=%d), "
+ "dropping\n", dev->name, header->length,
+ dev->mtu, len);
+ ch->logflags |= LOG_FLAG_ILLEGALSIZE;
+#ifndef DEBUG
+ }
+#endif
+#ifdef DEBUG
+ ctc_dump_skb(pskb, -6);
+#endif
+ privptr->stats.rx_dropped++;
+ privptr->stats.rx_length_errors++;
+ return;
+ }
+ header->length -= LL_HEADER_LENGTH;
+ len -= LL_HEADER_LENGTH;
+ if ((header->length > skb_tailroom(pskb)) ||
+ (header->length > len)) {
+#ifndef DEBUG
+ if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
+#endif
+ ctc_pr_warn(
+ "%s Illegal packet size %d "
+ "(beyond the end of received data), "
+ "dropping\n", dev->name, header->length);
+ ch->logflags |= LOG_FLAG_OVERRUN;
+#ifndef DEBUG
+ }
+#endif
+#ifdef DEBUG
+ ctc_dump_skb(pskb, -6);
+#endif
+ privptr->stats.rx_dropped++;
+ privptr->stats.rx_length_errors++;
+ return;
+ }
+ skb_put(pskb, header->length);
+ pskb->mac.raw = pskb->data;
+ len -= header->length;
+ /* Copy the packet into a fresh skb and hand it upstream. */
+ skb = dev_alloc_skb(pskb->len);
+ if (!skb) {
+#ifndef DEBUG
+ if (!(ch->logflags & LOG_FLAG_NOMEM)) {
+#endif
+ ctc_pr_warn(
+ "%s Out of memory in ctc_unpack_skb\n",
+ dev->name);
+ ch->logflags |= LOG_FLAG_NOMEM;
+#ifndef DEBUG
+ }
+#endif
+ privptr->stats.rx_dropped++;
+ return;
+ }
+ memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
+ skb->mac.raw = skb->data;
+ skb->dev = pskb->dev;
+ skb->protocol = pskb->protocol;
+ /* Fix: mark checksum state on the skb handed upstream, not on
+ * the source buffer (pskb was already marked above). */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (ch->protocol == CTC_PROTO_LINUX_TTY)
+ ctc_tty_netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+ /**
+ * Successful rx; reset logflags
+ */
+ ch->logflags = 0;
+ dev->last_rx = jiffies;
+ privptr->stats.rx_packets++;
+ privptr->stats.rx_bytes += skb->len;
+ if (len > 0) {
+ skb_pull(pskb, header->length);
+ if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
+#ifndef DEBUG
+ if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
+#endif
+ ctc_pr_warn(
+ "%s Overrun in ctc_unpack_skb\n",
+ dev->name);
+ ch->logflags |= LOG_FLAG_OVERRUN;
+#ifndef DEBUG
+ }
+#endif
+ return;
+ }
+ skb_put(pskb, LL_HEADER_LENGTH);
+ }
+ }
+}
+
+/**
+ * Check return code of a preceeding ccw_device call, halt_IO etc...
+ *
+ * @param ch The channel, the error belongs to.
+ * @param return_code The error code to inspect.
+ */
+/* Note: specifier order fixed from 'static void inline' to the
+ * conventional 'static inline void' (identical semantics; consistent
+ * with the other inline helpers in this file). */
+static inline void
+ccw_check_return_code(struct channel *ch, int return_code, char *msg)
+{
+ DBF_TEXT(trace, 5, __FUNCTION__);
+ switch (return_code) {
+ case 0:
+ fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
+ break;
+ case -EBUSY:
+ ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
+ fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
+ break;
+ case -ENODEV:
+ ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
+ ch->id, msg);
+ fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
+ break;
+ case -EIO:
+ ctc_pr_emerg("%s (%s): Status pending... \n",
+ ch->id, msg);
+ fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
+ break;
+ default:
+ /* Any other return code maps to the generic unknown event. */
+ ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
+ ch->id, msg, return_code);
+ fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
+ }
+}
+
+/**
+ * Check sense of a unit check.
+ *
+ * @param ch The channel, the sense code belongs to.
+ * @param sense The sense code to inspect.
+ */
+/* Note: specifier order fixed from 'static void inline' to the
+ * conventional 'static inline void'; also removed a stray ')' from
+ * the "Data-streaming timeout" log message. */
+static inline void
+ccw_unit_check(struct channel *ch, unsigned char sense)
+{
+ DBF_TEXT(trace, 5, __FUNCTION__);
+ if (sense & SNS0_INTERVENTION_REQ) {
+ if (sense & 0x01) {
+ if (ch->protocol != CTC_PROTO_LINUX_TTY)
+ ctc_pr_debug("%s: Interface disc. or Sel. reset "
+ "(remote)\n", ch->id);
+ fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
+ } else {
+ ctc_pr_debug("%s: System reset (remote)\n", ch->id);
+ fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
+ }
+ } else if (sense & SNS0_EQUIPMENT_CHECK) {
+ if (sense & SNS0_BUS_OUT_CHECK) {
+ ctc_pr_warn("%s: Hardware malfunction (remote)\n",
+ ch->id);
+ fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
+ } else {
+ ctc_pr_warn("%s: Read-data parity error (remote)\n",
+ ch->id);
+ fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
+ }
+ } else if (sense & SNS0_BUS_OUT_CHECK) {
+ if (sense & 0x04) {
+ ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
+ fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
+ } else {
+ ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
+ fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
+ }
+ } else if (sense & SNS0_CMD_REJECT) {
+ /* Command reject is only logged; no FSM event is raised. */
+ ctc_pr_warn("%s: Command reject\n", ch->id);
+ } else if (sense == 0) {
+ ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
+ fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
+ } else {
+ ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
+ ch->id, sense);
+ fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
+ }
+}
+
+/**
+ * Drop all skbs from 'q', releasing the extra reference the driver
+ * holds on each queued skb (users was presumably incremented when it
+ * was queued -- confirm in the TX path) before freeing it.
+ */
+static void
+ctc_purge_skb_queue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ DBF_TEXT(trace, 5, __FUNCTION__);
+
+ while ((skb = skb_dequeue(q))) {
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ }
+}
+
+/**
+ * Make sure ch->trans_skb is allocated and matches ch->max_bufsize.
+ * (Re)allocates the DMA-capable buffer and maps it into ccw[1] via
+ * set_normalized_cda() when it is missing or after a bufsize change.
+ *
+ * @param ch   The channel to check.
+ * @param warn If nonzero, log allocation failures.
+ *
+ * @return 0 on success, -ENOMEM on allocation/mapping failure.
+ */
+static __inline__ int
+ctc_checkalloc_buffer(struct channel *ch, int warn)
+{
+ DBF_TEXT(trace, 5, __FUNCTION__);
+ if ((ch->trans_skb == NULL) ||
+ (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
+ if (ch->trans_skb != NULL)
+ dev_kfree_skb(ch->trans_skb);
+ clear_normalized_cda(&ch->ccw[1]);
+ ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
+ GFP_ATOMIC | GFP_DMA);
+ if (ch->trans_skb == NULL) {
+ if (warn)
+ ctc_pr_warn(
+ "%s: Couldn't alloc %s trans_skb\n",
+ ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == READ) ?
+ "RX" : "TX");
+ return -ENOMEM;
+ }
+ ch->ccw[1].count = ch->max_bufsize;
+ if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
+ dev_kfree_skb(ch->trans_skb);
+ ch->trans_skb = NULL;
+ if (warn)
+ ctc_pr_warn(
+ "%s: set_normalized_cda for %s "
+ "trans_skb failed, dropping packets\n",
+ ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == READ) ?
+ "RX" : "TX");
+ return -ENOMEM;
+ }
+ ch->ccw[1].count = 0;
+ ch->trans_skb_data = ch->trans_skb->data;
+ ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ }
+ return 0;
+}
+
+/**
+ * Dummy NOP action for statemachines
+ */
+static void
+fsm_action_nop(fsm_instance * fi, int event, void *arg)
+{
+ /* intentionally empty */
+}
+
+/**
+ * Actions for channel - statemachines.
+ *****************************************************************************/
+
+/**
+ * Normal data has been send. Free the corresponding
+ * skb (it's in io_queue), reset dev->tbusy and
+ * revert to idle state.
+ *
+ * @param fi An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_txdone(fsm_instance * fi, int event, void *arg)
+{
+ struct channel *ch = (struct channel *) arg;
+ struct net_device *dev = ch->netdev;
+ struct ctc_priv *privptr = dev->priv;
+ struct sk_buff *skb;
+ int first = 1;
+ int i;
+ unsigned long duration;
+ struct timespec done_stamp = xtime;
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
+
+ /* Elapsed TX time in microseconds since the I/O was started. */
+ duration =
+ (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
+ (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+ if (duration > ch->prof.tx_time)
+ ch->prof.tx_time = duration;
+
+ if (ch->irb->scsw.count != 0)
+ ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
+ dev->name, ch->irb->scsw.count);
+ fsm_deltimer(&ch->timer);
+ /* Account and free all skbs that were part of the finished write. */
+ while ((skb = skb_dequeue(&ch->io_queue))) {
+ privptr->stats.tx_packets++;
+ privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+ if (first) {
+ /* The 2-byte block length is counted once per block. */
+ privptr->stats.tx_bytes += 2;
+ first = 0;
+ }
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ }
+ spin_lock(&ch->collect_lock);
+ clear_normalized_cda(&ch->ccw[4]);
+ if (ch->collect_len > 0) {
+ /* More data was queued while we were busy: build one chained
+ * block from the collect queue and start the next write now. */
+ int rc;
+
+ if (ctc_checkalloc_buffer(ch, 1)) {
+ spin_unlock(&ch->collect_lock);
+ return;
+ }
+ ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
+ ch->trans_skb->len = 0;
+ if (ch->prof.maxmulti < (ch->collect_len + 2))
+ ch->prof.maxmulti = ch->collect_len + 2;
+ if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
+ ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
+ *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
+ i = 0;
+ while ((skb = skb_dequeue(&ch->collect_queue))) {
+ memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
+ skb->len);
+ privptr->stats.tx_packets++;
+ privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ i++;
+ }
+ ch->collect_len = 0;
+ spin_unlock(&ch->collect_lock);
+ ch->ccw[1].count = ch->trans_skb->len;
+ fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+ ch->prof.send_stamp = xtime;
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long) ch, 0xff, 0);
+ ch->prof.doios_multi++;
+ if (rc != 0) {
+ privptr->stats.tx_dropped += i;
+ privptr->stats.tx_errors += i;
+ fsm_deltimer(&ch->timer);
+ ccw_check_return_code(ch, rc, "chained TX");
+ }
+ } else {
+ spin_unlock(&ch->collect_lock);
+ fsm_newstate(fi, CH_STATE_TXIDLE);
+ }
+ ctc_clear_busy(dev);
+}
+
+/**
+ * Initial data is sent.
+ * Notify device statemachine that we are up and
+ * running.
+ *
+ * @param fi An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_txidle(fsm_instance * fi, int event, void *arg)
+{
+ struct channel *ch = (struct channel *) arg;
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
+ fsm_deltimer(&ch->timer);
+ fsm_newstate(fi, CH_STATE_TXIDLE);
+ /* Tell the interface statemachine the TX channel is up. */
+ fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
+ ch->netdev);
+}
+
+/**
+ * Got normal data, check for sanity, queue it up, allocate new buffer
+ * trigger bottom half, and initiate next read.
+ *
+ * @param fi An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_rx(fsm_instance * fi, int event, void *arg)
+{
+ struct channel *ch = (struct channel *) arg;
+ struct net_device *dev = ch->netdev;
+ struct ctc_priv *privptr = dev->priv;
+ /* Bytes actually received = buffer size minus residual count. */
+ int len = ch->max_bufsize - ch->irb->scsw.count;
+ struct sk_buff *skb = ch->trans_skb;
+ __u16 block_len = *((__u16 *) skb->data);
+ int check_len;
+ int rc;
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
+ fsm_deltimer(&ch->timer);
+ if (len < 8) {
+ ctc_pr_debug("%s: got packet with length %d < 8\n",
+ dev->name, len);
+ privptr->stats.rx_dropped++;
+ privptr->stats.rx_length_errors++;
+ goto again;
+ }
+ if (len > ch->max_bufsize) {
+ ctc_pr_debug("%s: got packet with length %d > %d\n",
+ dev->name, len, ch->max_bufsize);
+ privptr->stats.rx_dropped++;
+ privptr->stats.rx_length_errors++;
+ goto again;
+ }
+
+ /**
+ * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
+ */
+ switch (ch->protocol) {
+ case CTC_PROTO_S390:
+ case CTC_PROTO_OS390:
+ check_len = block_len + 2;
+ break;
+ default:
+ check_len = block_len;
+ break;
+ }
+ if ((len < block_len) || (len > check_len)) {
+ ctc_pr_debug("%s: got block length %d != rx length %d\n",
+ dev->name, block_len, len);
+#ifdef DEBUG
+ ctc_dump_skb(skb, 0);
+#endif
+ *((__u16 *) skb->data) = len;
+ privptr->stats.rx_dropped++;
+ privptr->stats.rx_length_errors++;
+ goto again;
+ }
+ /* Strip the 2-byte block length before unpacking the payload. */
+ block_len -= 2;
+ if (block_len > 0) {
+ *((__u16 *) skb->data) = block_len;
+ ctc_unpack_skb(ch, skb);
+ }
+ again:
+ /* Reset trans_skb to its buffer base and start the next read. */
+ skb->data = skb->tail = ch->trans_skb_data;
+ skb->len = 0;
+ if (ctc_checkalloc_buffer(ch, 1))
+ return;
+ ch->ccw[1].count = ch->max_bufsize;
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
+ if (rc != 0)
+ ccw_check_return_code(ch, rc, "normal RX");
+}
+
+static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
+
+/**
+ * Initialize connection by sending a __u16 of value 0.
+ *
+ * @param fi An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_firstio(fsm_instance * fi, int event, void *arg)
+{
+ struct channel *ch = (struct channel *) arg;
+ int rc;
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
+
+ if (fsm_getstate(fi) == CH_STATE_TXIDLE)
+ ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
+ fsm_deltimer(&ch->timer);
+ if (ctc_checkalloc_buffer(ch, 1))
+ return;
+ if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
+ (ch->protocol == CTC_PROTO_OS390)) {
+ /* OS/390 resp. z/OS */
+ if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
+ fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
+ CH_EVENT_TIMER, ch);
+ ch_action_rxidle(fi, event, arg);
+ } else {
+ struct net_device *dev = ch->netdev;
+ fsm_newstate(fi, CH_STATE_TXIDLE);
+ fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+ DEV_EVENT_TXUP, dev);
+ }
+ return;
+ }
+
+ /**
+ * Don't setup a timer for receiving the initial RX frame
+ * if in compatibility mode, since VM TCP delays the initial
+ * frame until it has some data to send.
+ */
+ if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
+ (ch->protocol != CTC_PROTO_S390))
+ fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+
+ *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
+ ch->ccw[1].count = 2; /* Transfer only length */
+
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+ ? CH_STATE_RXINIT : CH_STATE_TXINIT);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
+ if (rc != 0) {
+ fsm_deltimer(&ch->timer);
+ fsm_newstate(fi, CH_STATE_SETUPWAIT);
+ ccw_check_return_code(ch, rc, "init IO");
+ }
+ /**
+ * If in compatibility mode since we don't setup a timer, we
+ * also signal RX channel up immediately. This enables us
+ * to send packets early which in turn usually triggers some
+ * reply from VM TCP which brings up the RX channel to its
+ * final state.
+ */
+ if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
+ (ch->protocol == CTC_PROTO_S390)) {
+ struct net_device *dev = ch->netdev;
+ fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
+ dev);
+ }
+}
+
+/**
+ * Got initial data, check it. If OK,
+ * notify device statemachine that we are up and
+ * running.
+ *
+ * @param fi An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_rxidle(fsm_instance * fi, int event, void *arg)
+{
+ struct channel *ch = (struct channel *) arg;
+ struct net_device *dev = ch->netdev;
+ __u16 buflen;
+ int rc;
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
+ fsm_deltimer(&ch->timer);
+ buflen = *((__u16 *) ch->trans_skb->data);
+#ifdef DEBUG
+ ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
+#endif
+ if (buflen >= CTC_INITIAL_BLOCKLEN) {
+ /* Valid initial block: arm the first real read. */
+ if (ctc_checkalloc_buffer(ch, 1))
+ return;
+ ch->ccw[1].count = ch->max_bufsize;
+ fsm_newstate(fi, CH_STATE_RXIDLE);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long) ch, 0xff, 0);
+ if (rc != 0) {
+ fsm_newstate(fi, CH_STATE_RXINIT);
+ ccw_check_return_code(ch, rc, "initial RX");
+ } else
+ fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+ DEV_EVENT_RXUP, dev);
+ } else {
+ /* Bad initial block: retry the handshake. */
+ ctc_pr_debug("%s: Initial RX count %d not %d\n",
+ dev->name, buflen, CTC_INITIAL_BLOCKLEN);
+ ch_action_firstio(fi, event, arg);
+ }
+}
+
+/**
+ * Set channel into extended mode.
+ *
+ * @param fi An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_setmode(fsm_instance * fi, int event, void *arg)
+{
+ struct channel *ch = (struct channel *) arg;
+ int rc;
+ unsigned long saveflags;
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
+ fsm_deltimer(&ch->timer);
+ fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+ fsm_newstate(fi, CH_STATE_SETUPWAIT);
+ saveflags = 0; /* avoids compiler warning with
+ spin_unlock_irqrestore */
+ if (event == CH_EVENT_TIMER) // only for timer not yet locked
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ /* ccw[6]: presumably the prepared SET_EXTENDED channel program --
+ * confirm in the channel setup code. */
+ rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
+ if (event == CH_EVENT_TIMER)
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+ if (rc != 0) {
+ fsm_deltimer(&ch->timer);
+ fsm_newstate(fi, CH_STATE_STARTWAIT);
+ ccw_check_return_code(ch, rc, "set Mode");
+ } else
+ ch->retry = 0;
+}
+
+/**
+ * Setup channel.
+ *
+ * @param fi An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_start(fsm_instance * fi, int event, void *arg)
+{
+ struct channel *ch = (struct channel *) arg;
+ unsigned long saveflags;
+ int rc;
+ struct net_device *dev;
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
+ if (ch == NULL) {
+ ctc_pr_warn("ch_action_start ch=NULL\n");
+ return;
+ }
+ if (ch->netdev == NULL) {
+ ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
+ return;
+ }
+ dev = ch->netdev;
+
+#ifdef DEBUG
+ ctc_pr_debug("%s: %s channel start\n", dev->name,
+ (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+#endif
+
+ /* Discard any stale transfer buffer from a previous run. */
+ if (ch->trans_skb != NULL) {
+ clear_normalized_cda(&ch->ccw[1]);
+ dev_kfree_skb(ch->trans_skb);
+ ch->trans_skb = NULL;
+ }
+ /* ccw[1] carries the actual data transfer (READ or WRITE). */
+ if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ ch->ccw[1].cmd_code = CCW_CMD_READ;
+ ch->ccw[1].flags = CCW_FLAG_SLI;
+ ch->ccw[1].count = 0;
+ } else {
+ ch->ccw[1].cmd_code = CCW_CMD_WRITE;
+ ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[1].count = 0;
+ }
+ if (ctc_checkalloc_buffer(ch, 0)) {
+ ctc_pr_notice(
+ "%s: Could not allocate %s trans_skb, delaying "
+ "allocation until first transfer\n",
+ dev->name,
+ (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ }
+
+ /* Channel program: ccw[0] prepare, ccw[1] read/write, ccw[2] noop;
+ * ccw[3..5] are a copy used for the chained-TX variant. */
+ ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
+ ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[0].count = 0;
+ ch->ccw[0].cda = 0;
+ ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
+ ch->ccw[2].flags = CCW_FLAG_SLI;
+ ch->ccw[2].count = 0;
+ ch->ccw[2].cda = 0;
+ memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
+ ch->ccw[4].cda = 0;
+ ch->ccw[4].flags &= ~CCW_FLAG_IDA;
+
+ fsm_newstate(fi, CH_STATE_STARTWAIT);
+ fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+ if (rc != 0) {
+ if (rc != -EBUSY)
+ fsm_deltimer(&ch->timer);
+ ccw_check_return_code(ch, rc, "initial HaltIO");
+ }
+#ifdef DEBUG
+ ctc_pr_debug("ctc: %s(): leaving\n", __func__);
+#endif
+}
+
+/**
+ * Shutdown a channel by issuing HaltIO, moving to CH_STATE_TERM and
+ * arming a timeout. On failure the previous state is restored.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_haltio(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	unsigned long saveflags;
+	int rc;
+	int oldstate;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+	saveflags = 0;	/* avoids comp warning with
+			   spin_unlock_irqrestore */
+	/* All callers except the STOP path already hold the ccwdev lock. */
+	if (event == CH_EVENT_STOP)	// only for STOP not yet locked
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	oldstate = fsm_getstate(fi);
+	fsm_newstate(fi, CH_STATE_TERM);
+	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
+	if (event == CH_EVENT_STOP)
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		/* -EBUSY: halt is in flight, state change stands. */
+		if (rc != -EBUSY) {
+			fsm_deltimer(&ch->timer);
+			fsm_newstate(fi, oldstate);
+		}
+		ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
+	}
+}
+
+/**
+ * A channel has successfully been halted.
+ * Release the transfer buffer, purge the queues and notify the
+ * interface statemachine with RXDOWN/TXDOWN.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_stopped(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	fsm_newstate(fi, CH_STATE_STOPPED);
+	if (ch->trans_skb != NULL) {
+		clear_normalized_cda(&ch->ccw[1]);
+		dev_kfree_skb(ch->trans_skb);
+		ch->trans_skb = NULL;
+	}
+	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+		skb_queue_purge(&ch->io_queue);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_RXDOWN, dev);
+	} else {
+		/* TX side: drop both in-flight and collected packets. */
+		ctc_purge_skb_queue(&ch->io_queue);
+		spin_lock(&ch->collect_lock);
+		ctc_purge_skb_queue(&ch->collect_queue);
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * A stop command from device statemachine arrived and we are in
+ * not operational mode. Set state to stopped. No hardware action
+ * is needed here since the channel is not operational.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened (unused).
+ * @param arg   Generic pointer, casted from channel * upon call (unused).
+ */
+static void
+ch_action_stop(fsm_instance * fi, int event, void *arg)
+{
+	fsm_newstate(fi, CH_STATE_STOPPED);
+}
+
+/**
+ * A machine check for no path, not operational status or gone device has
+ * happened.
+ * Move to CH_STATE_NOTOP, purge the queues and notify the interface
+ * statemachine. Same cleanup as ch_action_stopped, but without freeing
+ * the transfer buffer (the channel may come back via MC_GOOD).
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_fail(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	fsm_newstate(fi, CH_STATE_NOTOP);
+	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+		skb_queue_purge(&ch->io_queue);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_RXDOWN, dev);
+	} else {
+		ctc_purge_skb_queue(&ch->io_queue);
+		spin_lock(&ch->collect_lock);
+		ctc_purge_skb_queue(&ch->collect_queue);
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * Handle error during setup of channel.
+ * A remote-reset unit check during SETUPWAIT means the peer is simply
+ * not up yet and is retried; every other error drops the channel into
+ * RXERR/TXERR and notifies the interface statemachine.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_setuperr(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(setup, 3, __FUNCTION__);
+	/**
+	 * Special case: Got UC_RCRESET on setmode.
+	 * This means that remote side isn't setup. In this case
+	 * simply retry after some 10 secs...
+	 */
+	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
+	    ((event == CH_EVENT_UC_RCRESET) ||
+	     (event == CH_EVENT_UC_RSRESET))) {
+		fsm_newstate(fi, CH_STATE_STARTRETRY);
+		fsm_deltimer(&ch->timer);
+		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+		/* RX side additionally halts outstanding I/O before retry. */
+		if (CHANNEL_DIRECTION(ch->flags) == READ) {
+			int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
+			if (rc != 0)
+				ccw_check_return_code(
+					ch, rc, "HaltIO in ch_action_setuperr");
+		}
+		return;
+	}
+
+	ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
+		     dev->name, ch_event_names[event],
+		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
+		     fsm_getstate_str(fi));
+	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+		fsm_newstate(fi, CH_STATE_RXERR);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_RXDOWN, dev);
+	} else {
+		fsm_newstate(fi, CH_STATE_TXERR);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * Restart a channel after an error: halt the current I/O and go back
+ * to CH_STATE_STARTWAIT with a fresh timeout. On failure (other than
+ * -EBUSY) the previous state is restored.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_restart(fsm_instance * fi, int event, void *arg)
+{
+	unsigned long saveflags;
+	int oldstate;
+	int rc;
+
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	ctc_pr_debug("%s: %s channel restart\n", dev->name,
+		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+	oldstate = fsm_getstate(fi);
+	fsm_newstate(fi, CH_STATE_STARTWAIT);
+	saveflags = 0;	/* avoids compiler warning with
+			   spin_unlock_irqrestore */
+	/* Non-timer callers already hold the ccwdev lock (IRQ path). */
+	if (event == CH_EVENT_TIMER)	// only for timer not yet locked
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
+	if (event == CH_EVENT_TIMER)
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		if (rc != -EBUSY) {
+			fsm_deltimer(&ch->timer);
+			fsm_newstate(fi, oldstate);
+		}
+		ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
+	}
+}
+
+/**
+ * Handle error during RX initial handshake (exchange of
+ * 0-length block header). A timeout is retried up to 3 times via
+ * ch_action_restart; afterwards the channel goes to RXERR.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(setup, 3, __FUNCTION__);
+	if (event == CH_EVENT_TIMER) {
+		fsm_deltimer(&ch->timer);
+		ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
+		if (ch->retry++ < 3)
+			ch_action_restart(fi, event, arg);
+		else {
+			fsm_newstate(fi, CH_STATE_RXERR);
+			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+				  DEV_EVENT_RXDOWN, dev);
+		}
+	} else
+		/* Non-timer errors are only logged; the FSM handles them. */
+		ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
+}
+
+/**
+ * Notify device statemachine if we gave up initialization
+ * of RX channel. An ATTNBUSY during RX init indicates both sides
+ * are wired as RX (crossed cabling).
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(setup, 3, __FUNCTION__);
+	fsm_newstate(fi, CH_STATE_RXERR);
+	ctc_pr_warn("%s: RX initialization failed\n", dev->name);
+	ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
+	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
+}
+
+/**
+ * Handle RX Unit check remote reset (remote disconnected).
+ * Takes both the RX and the paired WRITE channel down (DTERM) and
+ * halts I/O on both so the connection can be re-initialized.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct channel *ch2;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
+		     dev->name);
+
+	/**
+	 * Notify device statemachine
+	 */
+	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
+	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
+
+	fsm_newstate(fi, CH_STATE_DTERM);
+	ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
+	fsm_newstate(ch2->fsm, CH_STATE_DTERM);
+
+	/* Return codes intentionally ignored; FINSTAT drives recovery. */
+	ccw_device_halt(ch->cdev, (unsigned long) ch);
+	ccw_device_halt(ch2->cdev, (unsigned long) ch2);
+}
+
+/**
+ * Handle error during TX channel initialization. Mirrors
+ * ch_action_rxiniterr: up to 3 timeout-driven restarts, then TXERR.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(setup, 2, __FUNCTION__);
+	if (event == CH_EVENT_TIMER) {
+		fsm_deltimer(&ch->timer);
+		ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
+		if (ch->retry++ < 3)
+			ch_action_restart(fi, event, arg);
+		else {
+			fsm_newstate(fi, CH_STATE_TXERR);
+			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+				  DEV_EVENT_TXDOWN, dev);
+		}
+	} else
+		ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
+}
+
+/**
+ * Handle TX timeout by retrying operation: re-issue the direct-write
+ * channel program (ccw[3..5]) for the skb at the head of the I/O queue.
+ * After more than 3 retries the whole channel is restarted.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_txretry(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+	unsigned long saveflags;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	if (ch->retry++ > 3) {
+		ctc_pr_debug("%s: TX retry failed, restarting channel\n",
+			     dev->name);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_TXDOWN, dev);
+		ch_action_restart(fi, event, arg);
+	} else {
+		struct sk_buff *skb;
+
+		ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
+		/* Peek, don't dequeue: the skb stays queued until txdone. */
+		if ((skb = skb_peek(&ch->io_queue))) {
+			int rc = 0;
+
+			/* Rebuild the IDAL for the write CCW from scratch. */
+			clear_normalized_cda(&ch->ccw[4]);
+			ch->ccw[4].count = skb->len;
+			if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+				ctc_pr_debug(
+					"%s: IDAL alloc failed, chan restart\n",
+					dev->name);
+				fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+					  DEV_EVENT_TXDOWN, dev);
+				ch_action_restart(fi, event, arg);
+				return;
+			}
+			fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
+			saveflags = 0;	/* avoids compiler warning with
+					   spin_unlock_irqrestore */
+			/* IRQ-path callers already hold the ccwdev lock. */
+			if (event == CH_EVENT_TIMER)	// only for TIMER not yet locked
+				spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
+						  saveflags);
+			rc = ccw_device_start(ch->cdev, &ch->ccw[3],
+					      (unsigned long) ch, 0xff, 0);
+			if (event == CH_EVENT_TIMER)
+				spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
+						       saveflags);
+			if (rc != 0) {
+				fsm_deltimer(&ch->timer);
+				ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
+				ctc_purge_skb_queue(&ch->io_queue);
+			}
+		}
+	}
+
+}
+
+/**
+ * Handle fatal errors during an I/O command: move the channel to its
+ * direction-specific error state and notify the interface statemachine.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_iofatal(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+		ctc_pr_debug("%s: RX I/O error\n", dev->name);
+		fsm_newstate(fi, CH_STATE_RXERR);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_RXDOWN, dev);
+	} else {
+		ctc_pr_debug("%s: TX I/O error\n", dev->name);
+		fsm_newstate(fi, CH_STATE_TXERR);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * Treat an I/O error as fatal (via ch_action_iofatal) and additionally
+ * schedule a device restart after one second.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from channel * upon call.
+ */
+static void
+ch_action_reinit(fsm_instance *fi, int event, void *arg)
+{
+ 	struct channel *ch = (struct channel *)arg;
+ 	struct net_device *dev = ch->netdev;
+ 	struct ctc_priv *privptr = dev->priv;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+ 	ch_action_iofatal(fi, event, arg);
+ 	fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
+}
+
+
+/**
+ * The statemachine for a channel: one {state, event, action} triple
+ * per transition. Events not listed for a state are treated as
+ * illegal by the FSM core.
+ */
+static const fsm_node ch_fsm[] = {
+	/* Fully stopped: only START does anything. */
+	{CH_STATE_STOPPED,    CH_EVENT_STOP,       fsm_action_nop       },
+	{CH_STATE_STOPPED,    CH_EVENT_START,      ch_action_start      },
+	{CH_STATE_STOPPED,    CH_EVENT_FINSTAT,    fsm_action_nop       },
+	{CH_STATE_STOPPED,    CH_EVENT_MC_FAIL,    fsm_action_nop       },
+
+	/* Not operational (after machine check); MC_GOOD restarts. */
+	{CH_STATE_NOTOP,      CH_EVENT_STOP,       ch_action_stop       },
+	{CH_STATE_NOTOP,      CH_EVENT_START,      fsm_action_nop       },
+	{CH_STATE_NOTOP,      CH_EVENT_FINSTAT,    fsm_action_nop       },
+	{CH_STATE_NOTOP,      CH_EVENT_MC_FAIL,    fsm_action_nop       },
+	{CH_STATE_NOTOP,      CH_EVENT_MC_GOOD,    ch_action_start      },
+
+	/* Waiting for initial HaltIO to complete. */
+	{CH_STATE_STARTWAIT,  CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_STARTWAIT,  CH_EVENT_START,      fsm_action_nop       },
+	{CH_STATE_STARTWAIT,  CH_EVENT_FINSTAT,    ch_action_setmode    },
+	{CH_STATE_STARTWAIT,  CH_EVENT_TIMER,      ch_action_setuperr   },
+	{CH_STATE_STARTWAIT,  CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
+	{CH_STATE_STARTWAIT,  CH_EVENT_IO_EIO,     ch_action_reinit     },
+	{CH_STATE_STARTWAIT,  CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* Delayed retry after a reset during setup; timer fires setmode. */
+	{CH_STATE_STARTRETRY, CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_STARTRETRY, CH_EVENT_TIMER,      ch_action_setmode    },
+	{CH_STATE_STARTRETRY, CH_EVENT_FINSTAT,    fsm_action_nop       },
+	{CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* Waiting for set-extended-mode to complete. */
+	{CH_STATE_SETUPWAIT,  CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_SETUPWAIT,  CH_EVENT_START,      fsm_action_nop       },
+	{CH_STATE_SETUPWAIT,  CH_EVENT_FINSTAT,    ch_action_firstio    },
+	{CH_STATE_SETUPWAIT,  CH_EVENT_UC_RCRESET, ch_action_setuperr   },
+	{CH_STATE_SETUPWAIT,  CH_EVENT_UC_RSRESET, ch_action_setuperr   },
+	{CH_STATE_SETUPWAIT,  CH_EVENT_TIMER,      ch_action_setmode    },
+	{CH_STATE_SETUPWAIT,  CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
+	{CH_STATE_SETUPWAIT,  CH_EVENT_IO_EIO,     ch_action_reinit     },
+	{CH_STATE_SETUPWAIT,  CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* RX handshake in progress (exchange of 0-length block header). */
+	{CH_STATE_RXINIT,     CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_RXINIT,     CH_EVENT_START,      fsm_action_nop       },
+	{CH_STATE_RXINIT,     CH_EVENT_FINSTAT,    ch_action_rxidle     },
+	{CH_STATE_RXINIT,     CH_EVENT_UC_RCRESET, ch_action_rxiniterr  },
+	{CH_STATE_RXINIT,     CH_EVENT_UC_RSRESET, ch_action_rxiniterr  },
+	{CH_STATE_RXINIT,     CH_EVENT_TIMER,      ch_action_rxiniterr  },
+	{CH_STATE_RXINIT,     CH_EVENT_ATTNBUSY,   ch_action_rxinitfail },
+	{CH_STATE_RXINIT,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
+	{CH_STATE_RXINIT,     CH_EVENT_IO_EIO,     ch_action_reinit     },
+	{CH_STATE_RXINIT,     CH_EVENT_UC_ZERO,    ch_action_firstio    },
+	{CH_STATE_RXINIT,     CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* RX up and ready; FINSTAT/UC_ZERO deliver received data. */
+	{CH_STATE_RXIDLE,     CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_RXIDLE,     CH_EVENT_START,      fsm_action_nop       },
+	{CH_STATE_RXIDLE,     CH_EVENT_FINSTAT,    ch_action_rx         },
+	{CH_STATE_RXIDLE,     CH_EVENT_UC_RCRESET, ch_action_rxdisc     },
+//      {CH_STATE_RXIDLE,     CH_EVENT_UC_RSRESET, ch_action_rxretry    },
+	{CH_STATE_RXIDLE,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
+	{CH_STATE_RXIDLE,     CH_EVENT_IO_EIO,     ch_action_reinit     },
+	{CH_STATE_RXIDLE,     CH_EVENT_MC_FAIL,    ch_action_fail       },
+	{CH_STATE_RXIDLE,     CH_EVENT_UC_ZERO,    ch_action_rx         },
+
+	/* TX handshake in progress. */
+	{CH_STATE_TXINIT,     CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_TXINIT,     CH_EVENT_START,      fsm_action_nop       },
+	{CH_STATE_TXINIT,     CH_EVENT_FINSTAT,    ch_action_txidle     },
+	{CH_STATE_TXINIT,     CH_EVENT_UC_RCRESET, ch_action_txiniterr  },
+	{CH_STATE_TXINIT,     CH_EVENT_UC_RSRESET, ch_action_txiniterr  },
+	{CH_STATE_TXINIT,     CH_EVENT_TIMER,      ch_action_txiniterr  },
+	{CH_STATE_TXINIT,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
+	{CH_STATE_TXINIT,     CH_EVENT_IO_EIO,     ch_action_reinit     },
+	{CH_STATE_TXINIT,     CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* TX up, no transfer in flight. */
+	{CH_STATE_TXIDLE,     CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_TXIDLE,     CH_EVENT_START,      fsm_action_nop       },
+	{CH_STATE_TXIDLE,     CH_EVENT_FINSTAT,    ch_action_firstio    },
+	{CH_STATE_TXIDLE,     CH_EVENT_UC_RCRESET, fsm_action_nop       },
+	{CH_STATE_TXIDLE,     CH_EVENT_UC_RSRESET, fsm_action_nop       },
+	{CH_STATE_TXIDLE,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
+	{CH_STATE_TXIDLE,     CH_EVENT_IO_EIO,     ch_action_reinit     },
+	{CH_STATE_TXIDLE,     CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* Terminating after HaltIO; FINSTAT confirms the stop. */
+	{CH_STATE_TERM,       CH_EVENT_STOP,       fsm_action_nop       },
+	{CH_STATE_TERM,       CH_EVENT_START,      ch_action_restart    },
+	{CH_STATE_TERM,       CH_EVENT_FINSTAT,    ch_action_stopped    },
+	{CH_STATE_TERM,       CH_EVENT_UC_RCRESET, fsm_action_nop       },
+	{CH_STATE_TERM,       CH_EVENT_UC_RSRESET, fsm_action_nop       },
+	{CH_STATE_TERM,       CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* Terminating for re-initialization (remote disconnect). */
+	{CH_STATE_DTERM,      CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_DTERM,      CH_EVENT_START,      ch_action_restart    },
+	{CH_STATE_DTERM,      CH_EVENT_FINSTAT,    ch_action_setmode    },
+	{CH_STATE_DTERM,      CH_EVENT_UC_RCRESET, fsm_action_nop       },
+	{CH_STATE_DTERM,      CH_EVENT_UC_RSRESET, fsm_action_nop       },
+	{CH_STATE_DTERM,      CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* Write in flight; resets/timeouts trigger a bounded retry. */
+	{CH_STATE_TX,         CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_TX,         CH_EVENT_START,      fsm_action_nop       },
+	{CH_STATE_TX,         CH_EVENT_FINSTAT,    ch_action_txdone     },
+	{CH_STATE_TX,         CH_EVENT_UC_RCRESET, ch_action_txretry    },
+	{CH_STATE_TX,         CH_EVENT_UC_RSRESET, ch_action_txretry    },
+	{CH_STATE_TX,         CH_EVENT_TIMER,      ch_action_txretry    },
+	{CH_STATE_TX,         CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
+	{CH_STATE_TX,         CH_EVENT_IO_EIO,     ch_action_reinit     },
+	{CH_STATE_TX,         CH_EVENT_MC_FAIL,    ch_action_fail       },
+
+	/* Error states: only STOP and machine-check failures handled. */
+	{CH_STATE_RXERR,      CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_TXERR,      CH_EVENT_STOP,       ch_action_haltio     },
+	{CH_STATE_TXERR,      CH_EVENT_MC_FAIL,    ch_action_fail       },
+	{CH_STATE_RXERR,      CH_EVENT_MC_FAIL,    ch_action_fail       },
+};
+
+/* Number of transitions in ch_fsm. Dividing by sizeof(ch_fsm[0]) instead
+ * of sizeof(fsm_node) keeps the count correct even if the element type of
+ * the table is ever renamed or changed. */
+static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (ch_fsm[0]);
+
+/**
+ * Functions related to setup and device detection.
+ *****************************************************************************/
+
+/**
+ * Compare two channel ids by their numeric device-number suffix.
+ * Parsing starts 5 characters into each id string (ids are of the
+ * form "ch-<busid>"). NOTE(review): with bus ids like "0.0.xxxx"
+ * this lands on "0.xxxx", so simple_strtoul stops at the first '.'
+ * -- confirm the intended sort key.
+ *
+ * @param id1 First id to compare.
+ * @param id2 Second id to compare.
+ *
+ * @return nonzero if id1's device number is less than id2's.
+ */
+static inline int
+less_than(char *id1, char *id2)
+{
+	int dev1, dev2;
+
+	id1 += 5;
+	id2 += 5;
+	dev1 = simple_strtoul(id1, &id1, 16);
+	dev2 = simple_strtoul(id2, &id2, 16);
+
+	return (dev1 < dev2);
+}
+
+/**
+ * Add a new channel to the list of channels.
+ * Keeps the channel list sorted. If a channel with the same id already
+ * exists, the new allocation is discarded and the old entry is reused.
+ *
+ * @param cdev The ccw_device to be added.
+ * @param type The type class of the new channel.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int
+add_channel(struct ccw_device *cdev, enum channel_types type)
+{
+	struct channel **c = &channels;
+	struct channel *ch;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	/* No cast on kmalloc (void * converts implicitly); sizeof the
+	 * dereferenced pointer keeps size and type in sync. */
+	ch = kmalloc(sizeof (*ch), GFP_KERNEL);
+	if (ch == NULL) {
+		ctc_pr_warn("ctc: Out of memory in add_channel\n");
+		return -1;
+	}
+	memset(ch, 0, sizeof (*ch));
+	/* CCWs are fetched by the channel subsystem; must be DMA memory. */
+	ch->ccw = kmalloc(8 * sizeof (struct ccw1), GFP_KERNEL | GFP_DMA);
+	if (ch->ccw == NULL) {
+		kfree(ch);
+		ctc_pr_warn("ctc: Out of memory in add_channel\n");
+		return -1;
+	}
+
+	memset(ch->ccw, 0, 8 * sizeof (struct ccw1)); // assure all flags and counters are reset
+
+	/**
+	 * "static" ccws are used in the following way:
+	 *
+	 * ccw[0..2] (Channel program for generic I/O):
+	 *           0: prepare
+	 *           1: read or write (depending on direction) with fixed
+	 *              buffer (idal allocated once when buffer is allocated)
+	 *           2: nop
+	 * ccw[3..5] (Channel program for direct write of packets)
+	 *           3: prepare
+	 *           4: write (idal allocated on every write).
+	 *           5: nop
+	 * ccw[6..7] (Channel program for initial channel setup):
+	 *           6: set extended mode
+	 *           7: nop
+	 *
+	 * ch->ccw[0..5] are initialized in ch_action_start because
+	 * the channel's direction is yet unknown here.
+	 */
+	ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
+	ch->ccw[6].flags = CCW_FLAG_SLI;
+
+	ch->ccw[7].cmd_code = CCW_CMD_NOOP;
+	ch->ccw[7].flags = CCW_FLAG_SLI;
+
+	ch->cdev = cdev;
+	snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
+	ch->type = type;
+	/* NOTE(review): this resets the module-global loglevel on every
+	 * channel add, clobbering any user-set value -- confirm intended. */
+	loglevel = CTC_LOGLEVEL_DEFAULT;
+	ch->fsm = init_fsm(ch->id, ch_state_names,
+			   ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
+			   ch_fsm, CH_FSM_LEN, GFP_KERNEL);
+	if (ch->fsm == NULL) {
+		ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
+		kfree(ch->ccw);
+		kfree(ch);
+		return -1;
+	}
+	fsm_newstate(ch->fsm, CH_STATE_IDLE);
+	ch->irb = kmalloc(sizeof (*ch->irb), GFP_KERNEL);
+	if (ch->irb == NULL) {
+		ctc_pr_warn("ctc: Out of memory in add_channel\n");
+		kfree_fsm(ch->fsm);
+		kfree(ch->ccw);
+		kfree(ch);
+		return -1;
+	}
+	memset(ch->irb, 0, sizeof (*ch->irb));
+	/* Find the sorted insertion point; bail out on duplicates. */
+	while (*c && less_than((*c)->id, ch->id))
+		c = &(*c)->next;
+	if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
+		ctc_pr_debug(
+			"ctc: add_channel: device %s already in list, "
+			"using old entry\n", (*c)->id);
+		kfree(ch->irb);
+		kfree_fsm(ch->fsm);
+		kfree(ch->ccw);
+		kfree(ch);
+		return 0;
+	}
+	fsm_settimer(ch->fsm, &ch->timer);
+	skb_queue_head_init(&ch->io_queue);
+	skb_queue_head_init(&ch->collect_queue);
+	ch->next = *c;
+	*c = ch;
+	return 0;
+}
+
+/**
+ * Release a specific channel in the channel list: clears the in-use
+ * flag and resets the channel FSM to idle. Does not free any memory;
+ * see channel_remove for full teardown.
+ *
+ * @param ch Pointer to channel struct to be released.
+ */
+static void
+channel_free(struct channel *ch)
+{
+	ch->flags &= ~CHANNEL_FLAGS_INUSE;
+	fsm_newstate(ch->fsm, CH_STATE_IDLE);
+}
+
+/**
+ * Remove a specific channel from the channel list and free all of its
+ * resources (timer, FSM, IDALs, transfer skb, CCW area, irb).
+ * A NULL argument or a channel not on the list is silently ignored.
+ *
+ * @param ch Pointer to channel struct to be released.
+ */
+static void
+channel_remove(struct channel *ch)
+{
+	struct channel **link;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	if (ch == NULL)
+		return;
+
+	channel_free(ch);
+	/* Walk the singly-linked list via a pointer to the link field so
+	 * unlinking works uniformly for head and interior entries. */
+	for (link = &channels; *link != NULL; link = &(*link)->next) {
+		if (*link != ch)
+			continue;
+		*link = ch->next;
+		fsm_deltimer(&ch->timer);
+		kfree_fsm(ch->fsm);
+		clear_normalized_cda(&ch->ccw[4]);
+		if (ch->trans_skb != NULL) {
+			clear_normalized_cda(&ch->ccw[1]);
+			dev_kfree_skb(ch->trans_skb);
+		}
+		kfree(ch->ccw);
+		kfree(ch->irb);
+		kfree(ch);
+		return;
+	}
+}
+
+/**
+ * Get a specific channel from the channel list and mark it in use
+ * for the given direction.
+ *
+ * @param type      Type of channel we are interested in.
+ * @param id        Id of channel we are interested in.
+ * @param direction Direction we want to use this channel for.
+ *
+ * @return Pointer to a channel or NULL if no matching channel available
+ *         (not found, or found but already in use).
+ */
+static struct channel *
+channel_get(enum channel_types type, char *id, int direction)
+{
+	struct channel *ch = channels;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+#ifdef DEBUG
+	ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
+		     __func__, id, type);
+#endif
+
+	while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
+#ifdef DEBUG
+		ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
+			     __func__, ch, ch->id, ch->type);
+#endif
+		ch = ch->next;
+	}
+#ifdef DEBUG
+	/* Fixed broken format: was "0x%pq" (stray 'q' after %p) and an
+	 * unbalanced "(id=..." without closing paren. */
+	ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
+		     __func__, ch, ch->id, ch->type);
+#endif
+	if (!ch) {
+		ctc_pr_warn("ctc: %s(): channel with id %s "
+			    "and type %d not found in channel list\n",
+			    __func__, id, type);
+	} else {
+		/* An in-use channel cannot be handed out twice. */
+		if (ch->flags & CHANNEL_FLAGS_INUSE)
+			ch = NULL;
+		else {
+			ch->flags |= CHANNEL_FLAGS_INUSE;
+			ch->flags &= ~CHANNEL_FLAGS_RWMASK;
+			ch->flags |= (direction == WRITE)
+			    ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
+			fsm_newstate(ch->fsm, CH_STATE_STOPPED);
+		}
+	}
+	return ch;
+}
+
+/**
+ * Return the channel type by name.
+ *
+ * @param name Name of network interface ("ctc..." or "escon...").
+ *
+ * @return Type class of channel to be used for that interface, or
+ *         channel_type_unknown if the name matches neither prefix.
+ */
+static inline enum channel_types
+extract_channel_media(char *name)
+{
+	enum channel_types ret = channel_type_unknown;
+
+	/* Specifier order fixed: "static inline" must precede the return
+	 * type (the old "static enum channel_types inline" is rejected by
+	 * newer compilers). The prefixes are mutually exclusive, so the
+	 * second test becomes else-if without changing behavior. */
+	if (name != NULL) {
+		if (strncmp(name, "ctc", 3) == 0)
+			ret = channel_type_parallel;
+		else if (strncmp(name, "escon", 5) == 0)
+			ret = channel_type_escon;
+	}
+	return ret;
+}
+
+/**
+ * Check whether the irb pointer passed to the interrupt handler is an
+ * error code in disguise (ERR_PTR). Logs a warning for known codes.
+ *
+ * @param cdev The ccw_device the interrupt was for.
+ * @param irb  Interruption response block pointer (possibly ERR_PTR).
+ *
+ * @return 0 if irb is a valid block, otherwise the negative error code.
+ */
+static long
+__ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+	long err;
+
+	if (!IS_ERR(irb))
+		return 0;
+
+	err = PTR_ERR(irb);
+	switch (err) {
+	case -EIO:
+		ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
+		break;
+	case -ETIMEDOUT:
+		ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
+		break;
+	default:
+		ctc_pr_warn("unknown error %ld on device %s\n", err,
+			    cdev->dev.bus_id);
+		break;
+	}
+	return err;
+}
+
+/**
+ * Main IRQ handler: validates the interrupt, maps it to the RX or TX
+ * channel of the owning device, classifies the status bits and feeds
+ * the matching event into the channel statemachine.
+ *
+ * @param cdev    The ccw_device the interrupt is for.
+ * @param intparm interruption parameter.
+ * @param irb     interruption response block.
+ */
+static void
+ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct channel *ch;
+	struct net_device *dev;
+	struct ctc_priv *priv;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	/* irb may itself be an ERR_PTR-encoded error; bail out if so. */
+	if (__ctc_check_irb_error(cdev, irb))
+		return;
+
+	/* Check for unsolicited interrupts. */
+	if (!cdev->dev.driver_data) {
+		ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
+			    cdev->dev.bus_id, irb->scsw.cstat,
+			    irb->scsw.dstat);
+		return;
+	}
+
+	priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
+		->dev.driver_data;
+
+	/* Try to extract channel from driver data. */
+	if (priv->channel[READ]->cdev == cdev)
+		ch = priv->channel[READ];
+	else if (priv->channel[WRITE]->cdev == cdev)
+		ch = priv->channel[WRITE];
+	else {
+		ctc_pr_err("ctc: Can't determine channel for interrupt, "
+			   "device %s\n", cdev->dev.bus_id);
+		return;
+	}
+
+	dev = (struct net_device *) (ch->netdev);
+	if (dev == NULL) {
+		ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
+			    cdev->dev.bus_id, ch);
+		return;
+	}
+
+#ifdef DEBUG
+	ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
+		     dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
+#endif
+
+	/* Copy interruption response block. */
+	memcpy(ch->irb, irb, sizeof(struct irb));
+
+	/* Check for good subchannel return code, otherwise error message */
+	if (ch->irb->scsw.cstat) {
+		fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
+		ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
+			    dev->name, ch->id, ch->irb->scsw.cstat,
+			    ch->irb->scsw.dstat);
+		return;
+	}
+
+	/* Check the reason-code of a unit check */
+	if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+		ccw_unit_check(ch, ch->irb->ecw[0]);
+		return;
+	}
+	/* Busy, attention and final-status conditions map to FSM events. */
+	if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
+		if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
+			fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
+		else
+			fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
+		return;
+	}
+	if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
+		fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
+		return;
+	}
+	if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
+	    (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
+	    (ch->irb->scsw.stctl ==
+	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
+		fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
+	else
+		fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
+
+}
+
+/**
+ * Actions for interface - statemachine.
+ *****************************************************************************/
+
+/**
+ * Startup channels by sending CH_EVENT_START to each channel.
+ * Cancels a pending restart timer first.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_start(fsm_instance * fi, int event, void *arg)
+{
+	struct net_device *dev = (struct net_device *) arg;
+	struct ctc_priv *privptr = dev->priv;
+	int direction;
+
+	DBF_TEXT(setup, 3, __FUNCTION__);
+	fsm_deltimer(&privptr->restart_timer);
+	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+	/* Kick both the READ and the WRITE channel statemachines. */
+	for (direction = READ; direction <= WRITE; direction++) {
+		struct channel *ch = privptr->channel[direction];
+		fsm_event(ch->fsm, CH_EVENT_START, ch);
+	}
+}
+
+/**
+ * Shutdown channels by sending CH_EVENT_STOP to each channel.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_stop(fsm_instance * fi, int event, void *arg)
+{
+	struct net_device *dev = (struct net_device *) arg;
+	struct ctc_priv *privptr = dev->priv;
+	int direction;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+	/* Stop both the READ and the WRITE channel statemachines. */
+	for (direction = READ; direction <= WRITE; direction++) {
+		struct channel *ch = privptr->channel[direction];
+		fsm_event(ch->fsm, CH_EVENT_STOP, ch);
+	}
+}
+/**
+ * Restart the interface: stop the channels now and schedule a delayed
+ * START event via the restart timer.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_restart(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device *dev = (struct net_device *)arg;
+	struct ctc_priv *privptr = dev->priv;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	ctc_pr_debug("%s: Restarting\n", dev->name);
+	dev_action_stop(fi, event, arg);
+	fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
+	fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
+		     DEV_EVENT_START, dev);
+}
+
+/**
+ * Called from channel statemachine
+ * when a channel is up and running. Advances the interface state;
+ * once both directions are up the device enters RUNNING and (for the
+ * tty protocol) carrier is raised.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event, just happened (DEV_EVENT_RXUP or DEV_EVENT_TXUP).
+ * @param arg   Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_chup(fsm_instance * fi, int event, void *arg)
+{
+	struct net_device *dev = (struct net_device *) arg;
+	struct ctc_priv *privptr = dev->priv;
+	int state;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	state = fsm_getstate(fi);
+	switch (state) {
+	case DEV_STATE_STARTWAIT_RXTX:
+		/* First direction came up; wait for the other one. */
+		fsm_newstate(fi, (event == DEV_EVENT_RXUP)
+			     ? DEV_STATE_STARTWAIT_TX
+			     : DEV_STATE_STARTWAIT_RX);
+		break;
+	case DEV_STATE_STARTWAIT_RX:
+	case DEV_STATE_STARTWAIT_TX:
+		/* The missing direction came up: device is fully running. */
+		if ((state == DEV_STATE_STARTWAIT_RX &&
+		     event == DEV_EVENT_RXUP) ||
+		    (state == DEV_STATE_STARTWAIT_TX &&
+		     event == DEV_EVENT_TXUP)) {
+			fsm_newstate(fi, DEV_STATE_RUNNING);
+			ctc_pr_info("%s: connected with remote side\n",
+				    dev->name);
+			if (privptr->protocol == CTC_PROTO_LINUX_TTY)
+				ctc_tty_setcarrier(dev, 1);
+			ctc_clear_busy(dev);
+		}
+		break;
+	case DEV_STATE_STOPWAIT_TX:
+		if (event == DEV_EVENT_RXUP)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+		break;
+	case DEV_STATE_STOPWAIT_RX:
+		if (event == DEV_EVENT_TXUP)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+		break;
+	}
+}
+
+/**
+ * Called from channel statemachine
+ * when a channel has been shutdown. Steps the interface state back
+ * towards STOPPED / STARTWAIT depending on which direction went down;
+ * dropping out of RUNNING lowers carrier for the tty protocol.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event, just happened (DEV_EVENT_RXDOWN or DEV_EVENT_TXDOWN).
+ * @param arg   Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_chdown(fsm_instance * fi, int event, void *arg)
+{
+	struct net_device *dev = (struct net_device *) arg;
+	struct ctc_priv *privptr = dev->priv;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	switch (fsm_getstate(fi)) {
+	case DEV_STATE_RUNNING:
+		if (privptr->protocol == CTC_PROTO_LINUX_TTY)
+			ctc_tty_setcarrier(dev, 0);
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
+		else
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
+		break;
+	case DEV_STATE_STARTWAIT_RX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+		break;
+	case DEV_STATE_STARTWAIT_TX:
+		if (event == DEV_EVENT_RXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+		break;
+	case DEV_STATE_STOPWAIT_RXTX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
+		else
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
+		break;
+	case DEV_STATE_STOPWAIT_RX:
+		if (event == DEV_EVENT_RXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPPED);
+		break;
+	case DEV_STATE_STOPWAIT_TX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPPED);
+		break;
+	}
+}
+
+/* Jump table of the interface statemachine: {state, event, action}.
+ * Events without an entry for the current state are ignored by the fsm. */
+static const fsm_node dev_fsm[] = {
+	{DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
+
+	{DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_START,   dev_action_start   },
+	{DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RESTART, dev_action_restart },
+
+	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_START,   dev_action_start   },
+	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXUP,    dev_action_chup    },
+	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_TXUP,    dev_action_chup    },
+	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RESTART, dev_action_restart },
+
+	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_START,   dev_action_start   },
+	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RXUP,    dev_action_chup    },
+	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXUP,    dev_action_chup    },
+	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RESTART, dev_action_restart },
+
+	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP,    dev_action_stop    },
+	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP,    dev_action_chup    },
+	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP,    dev_action_chup    },
+	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
+
+	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_STOP,    dev_action_stop    },
+	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXUP,    dev_action_chup    },
+	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_TXUP,    dev_action_chup    },
+	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RESTART, dev_action_restart },
+
+	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_STOP,    dev_action_stop    },
+	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RXUP,    dev_action_chup    },
+	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXUP,    dev_action_chup    },
+	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RESTART, dev_action_restart },
+
+	{DEV_STATE_RUNNING,        DEV_EVENT_STOP,    dev_action_stop    },
+	{DEV_STATE_RUNNING,        DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{DEV_STATE_RUNNING,        DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{DEV_STATE_RUNNING,        DEV_EVENT_TXUP,    fsm_action_nop     },
+	{DEV_STATE_RUNNING,        DEV_EVENT_RXUP,    fsm_action_nop     },
+	{DEV_STATE_RUNNING,        DEV_EVENT_RESTART, dev_action_restart },
+};
+
+/* Number of entries in dev_fsm. */
+static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
+
+/**
+ * Transmit a packet.
+ * This is a helper function for ctc_tx().
+ *
+ * @param ch Channel to be used for sending.
+ * @param skb Pointer to struct sk_buff of packet to send.
+ * The linklevel header has already been set up
+ * by ctc_tx().
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int
+transmit_skb(struct channel *ch, struct sk_buff *skb)
+{
+	unsigned long saveflags;
+	struct ll_header header;
+	int rc = 0;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
+		/* Channel busy: park the skb on the collect queue so it can
+		 * go out with a later (multi-packet) write. */
+		int l = skb->len + LL_HEADER_LENGTH;
+
+		spin_lock_irqsave(&ch->collect_lock, saveflags);
+		if (ch->collect_len + l > ch->max_bufsize - 2)
+			rc = -EBUSY;
+		else {
+			/* Hold a reference while the skb sits on our queue. */
+			atomic_inc(&skb->users);
+			header.length = l;
+			header.type = skb->protocol;
+			header.unused = 0;
+			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
+			       LL_HEADER_LENGTH);
+			skb_queue_tail(&ch->collect_queue, skb);
+			ch->collect_len += l;
+		}
+		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+	} else {
+		__u16 block_len;
+		int ccw_idx;
+		struct sk_buff *nskb;
+		unsigned long hi;
+
+		/**
+		 * Protect skb against beeing free'd by upper
+		 * layers.
+		 */
+		atomic_inc(&skb->users);
+		ch->prof.txlen += skb->len;
+		/* Prepend link-level header plus 2-byte block length. */
+		header.length = skb->len + LL_HEADER_LENGTH;
+		header.type = skb->protocol;
+		header.unused = 0;
+		memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
+		       LL_HEADER_LENGTH);
+		block_len = skb->len + 2;
+		*((__u16 *) skb_push(skb, 2)) = block_len;
+
+		/**
+		 * IDAL support in CTC is broken, so we have to
+		 * care about skb's above 2G ourselves.
+		 */
+		hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
+		if (hi) {
+			/* Copy into a 31-bit-addressable DMA buffer. */
+			nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+			if (!nskb) {
+				/* Undo refcount and header; caller may
+				 * retransmit the untouched skb later. */
+				atomic_dec(&skb->users);
+				skb_pull(skb, LL_HEADER_LENGTH + 2);
+				return -ENOMEM;
+			} else {
+				memcpy(skb_put(nskb, skb->len),
+				       skb->data, skb->len);
+				atomic_inc(&nskb->users);
+				atomic_dec(&skb->users);
+				dev_kfree_skb_irq(skb);
+				skb = nskb;
+			}
+		}
+
+		ch->ccw[4].count = block_len;
+		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+			/**
+			 * idal allocation failed, try via copying to
+			 * trans_skb. trans_skb usually has a pre-allocated
+			 * idal.
+			 */
+			if (ctc_checkalloc_buffer(ch, 1)) {
+				/**
+				 * Remove our header. It gets added
+				 * again on retransmit.
+				 */
+				atomic_dec(&skb->users);
+				skb_pull(skb, LL_HEADER_LENGTH + 2);
+				return -EBUSY;
+			}
+
+			/* Reset trans_skb and copy the payload into it. */
+			ch->trans_skb->tail = ch->trans_skb->data;
+			ch->trans_skb->len = 0;
+			ch->ccw[1].count = skb->len;
+			memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
+			       skb->len);
+			atomic_dec(&skb->users);
+			dev_kfree_skb_irq(skb);
+			ccw_idx = 0;
+		} else {
+			/* Direct I/O from the skb; keep it until TX done. */
+			skb_queue_tail(&ch->io_queue, skb);
+			ccw_idx = 3;
+		}
+		ch->retry = 0;
+		fsm_newstate(ch->fsm, CH_STATE_TX);
+		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+		/* ccw_device_start() must run under the ccw device lock. */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+		ch->prof.send_stamp = xtime;
+		rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
+				      (unsigned long) ch, 0xff, 0);
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+		if (ccw_idx == 3)
+			ch->prof.doios_single++;
+		if (rc != 0) {
+			fsm_deltimer(&ch->timer);
+			ccw_check_return_code(ch, rc, "single skb TX");
+			if (ccw_idx == 3)
+				skb_dequeue_tail(&ch->io_queue);
+			/**
+			 * Remove our header. It gets added
+			 * again on retransmit.
+			 */
+			skb_pull(skb, LL_HEADER_LENGTH + 2);
+		} else {
+			/* trans_skb path: the skb is already consumed, so
+			 * account for it right away. */
+			if (ccw_idx == 0) {
+				struct net_device *dev = ch->netdev;
+				struct ctc_priv *privptr = dev->priv;
+				privptr->stats.tx_packets++;
+				privptr->stats.tx_bytes +=
+				    skb->len - LL_HEADER_LENGTH;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * Interface API for upper network layers
+ *****************************************************************************/
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int
+ctc_open(struct net_device * dev)
+{
+	struct ctc_priv *priv = (struct ctc_priv *) dev->priv;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	/* Kick the interface statemachine; it brings the channels up. */
+	fsm_event(priv->fsm, DEV_EVENT_START, dev);
+	return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int
+ctc_close(struct net_device * dev)
+{
+	struct ctc_priv *priv = (struct ctc_priv *) dev->priv;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	/* Ask the interface statemachine to shut the channels down. */
+	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+	return 0;
+}
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ * @param skb Pointer to buffer containing the packet.
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 if packet consumed, !0 if packet rejected.
+ * Note: If we return !0, then the packet is free'd by
+ * the generic network layer.
+ */
+static int
+ctc_tx(struct sk_buff *skb, struct net_device * dev)
+{
+	int rc = 0;
+	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	/**
+	 * Some sanity checks ...
+	 */
+	if (skb == NULL) {
+		ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
+		privptr->stats.tx_dropped++;
+		return 0;
+	}
+	if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
+		/* NOTE(review): "%ld" assumes LL_HEADER_LENGTH + 2 promotes
+		 * to long -- confirm against the macro's definition. */
+		ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
+			    dev->name, LL_HEADER_LENGTH + 2);
+		dev_kfree_skb(skb);
+		privptr->stats.tx_dropped++;
+		return 0;
+	}
+
+	/**
+	 * If channels are not running, try to restart them
+	 * and throw away packet.
+	 */
+	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
+		fsm_event(privptr->fsm, DEV_EVENT_START, dev);
+		/* tty protocol: signal busy without freeing the skb. */
+		if (privptr->protocol == CTC_PROTO_LINUX_TTY)
+			return -EBUSY;
+		dev_kfree_skb(skb);
+		privptr->stats.tx_dropped++;
+		privptr->stats.tx_errors++;
+		privptr->stats.tx_carrier_errors++;
+		return 0;
+	}
+
+	/* Serialize transmitters on this device. */
+	if (ctc_test_and_set_busy(dev))
+		return -EBUSY;
+
+	dev->trans_start = jiffies;
+	if (transmit_skb(privptr->channel[WRITE], skb) != 0)
+		rc = 1;
+	ctc_clear_busy(dev);
+	return rc;
+}
+
+/**
+ * Sets MTU of an interface.
+ *
+ * @param dev Pointer to interface struct.
+ * @param new_mtu The new MTU to use for this interface.
+ *
+ * @return 0 on success, -EINVAL if MTU is out of valid range.
+ * (valid range is 576 .. 65527). If VM is on the
+ * remote side, maximum MTU is 32760, however this is
+ * <em>not</em> checked here.
+ */
+static int
+ctc_change_mtu(struct net_device * dev, int new_mtu)
+{
+	struct ctc_priv *priv = (struct ctc_priv *) dev->priv;
+	/* The MTU must also fit into the read channel's buffer, minus
+	 * link-level header and 2-byte block length. */
+	int max = priv->channel[READ]->max_bufsize - LL_HEADER_LENGTH - 2;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	if (new_mtu < 576 || new_mtu > 65527 || new_mtu > max)
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	dev->hard_header_len = LL_HEADER_LENGTH + 2;
+	return 0;
+}
+
+/**
+ * Returns interface statistics of a device.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return Pointer to stats struct of this interface.
+ */
+static struct net_device_stats *
+ctc_stats(struct net_device * dev)
+{
+	struct ctc_priv *priv = (struct ctc_priv *) dev->priv;
+
+	return &priv->stats;
+}
+
+/*
+ * sysfs attributes
+ */
+/* sysfs: show the configured buffer size of this device. */
+static ssize_t
+buffer_show(struct device *dev, char *buf)
+{
+	struct ctc_priv *priv = dev->driver_data;
+
+	if (!priv)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", priv->buffer_size);
+}
+
+/* sysfs: set a new buffer size.  Rejects values that exceed the driver
+ * limit, undercut the minimum MTU, or (while running) the current MTU. */
+static ssize_t
+buffer_write(struct device *dev, const char *buf, size_t count)
+{
+	struct ctc_priv *priv;
+	struct net_device *ndev;
+	unsigned int bs1;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	ndev = priv->channel[READ]->netdev;
+	if (!ndev)
+		return -ENODEV;
+	/* "%u" needs an unsigned int (was a plain int), and an unparsable
+	 * string must not leave bs1 uninitialized: check the conversion. */
+	if (sscanf(buf, "%u", &bs1) != 1)
+		return -EINVAL;
+
+	if (bs1 > CTC_BUFSIZE_LIMIT)
+		return -EINVAL;
+	/* A running interface must still fit its MTU into the buffer. */
+	if ((ndev->flags & IFF_RUNNING) &&
+	    (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
+		return -EINVAL;
+	if (bs1 < (576 + LL_HEADER_LENGTH + 2))
+		return -EINVAL;
+
+	priv->buffer_size = bs1;
+	priv->channel[READ]->max_bufsize =
+	    priv->channel[WRITE]->max_bufsize = bs1;
+	/* Not running: shrink the MTU to match the new buffer. */
+	if (!(ndev->flags & IFF_RUNNING))
+		ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
+	priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+	priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+
+	return count;
+}
+
+/* sysfs: show the (global) log level. */
+static ssize_t
+loglevel_show(struct device *dev, char *buf)
+{
+	struct ctc_priv *priv = dev->driver_data;
+
+	if (!priv)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", loglevel);
+}
+
+/* sysfs: set the (global) log level; valid range is 0..CTC_LOGLEVEL_MAX. */
+static ssize_t
+loglevel_write(struct device *dev, const char *buf, size_t count)
+{
+	struct ctc_priv *priv;
+	int ll1;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	/* Check the conversion: the old code ignored sscanf's result and
+	 * could compare an uninitialized value on unparsable input. */
+	if (sscanf(buf, "%i", &ll1) != 1)
+		return -EINVAL;
+
+	if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
+		return -EINVAL;
+	loglevel = ll1;
+	return count;
+}
+
+/* Format device/channel FSM states and TX profiling counters into a
+ * temporary buffer and emit them to the kernel log.  Silently does
+ * nothing if priv is NULL or memory is tight. */
+static void
+ctc_print_statistics(struct ctc_priv *priv)
+{
+	char *sbuf;
+	char *p;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (!priv)
+		return;
+	/* kmalloc() returns void *; no cast needed in C. */
+	sbuf = kmalloc(2048, GFP_KERNEL);
+	if (sbuf == NULL)
+		return;
+	p = sbuf;
+
+	p += sprintf(p, "  Device FSM state: %s\n",
+		     fsm_getstate_str(priv->fsm));
+	p += sprintf(p, "  RX channel FSM state: %s\n",
+		     fsm_getstate_str(priv->channel[READ]->fsm));
+	p += sprintf(p, "  TX channel FSM state: %s\n",
+		     fsm_getstate_str(priv->channel[WRITE]->fsm));
+	p += sprintf(p, "  Max. TX buffer used: %ld\n",
+		     priv->channel[WRITE]->prof.maxmulti);
+	p += sprintf(p, "  Max. chained SKBs: %ld\n",
+		     priv->channel[WRITE]->prof.maxcqueue);
+	p += sprintf(p, "  TX single write ops: %ld\n",
+		     priv->channel[WRITE]->prof.doios_single);
+	p += sprintf(p, "  TX multi write ops: %ld\n",
+		     priv->channel[WRITE]->prof.doios_multi);
+	p += sprintf(p, "  Netto bytes written: %ld\n",
+		     priv->channel[WRITE]->prof.txlen);
+	p += sprintf(p, "  Max. TX IO-time: %ld\n",
+		     priv->channel[WRITE]->prof.tx_time);
+
+	ctc_pr_debug("Statistics for %s:\n%s",
+		     priv->channel[WRITE]->netdev->name, sbuf);
+	kfree(sbuf);
+}
+
+/* sysfs: reading "stats" dumps statistics to the kernel log. */
+static ssize_t
+stats_show(struct device *dev, char *buf)
+{
+	struct ctc_priv *priv = dev->driver_data;
+
+	if (!priv)
+		return -ENODEV;
+	ctc_print_statistics(priv);
+	return sprintf(buf, "0\n");
+}
+
+/* sysfs: any write to "stats" resets the TX profiling counters. */
+static ssize_t
+stats_write(struct device *dev, const char *buf, size_t count)
+{
+	struct ctc_priv *priv = dev->driver_data;
+
+	if (!priv)
+		return -ENODEV;
+	memset(&priv->channel[WRITE]->prof, 0,
+	       sizeof(priv->channel[WRITE]->prof));
+	return count;
+}
+
+/* Per-device sysfs attributes.  loglevel and stats are created in
+ * ctc_add_attributes(); buffer is exported via ctc_attr_group instead. */
+static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
+static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
+static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
+
+/* Create the loglevel/stats sysfs files on the group device.  The buffer
+ * attribute is deliberately not created here: it is part of
+ * ctc_attr_group, which ctc_add_files() registers at probe time. */
+static int
+ctc_add_attributes(struct device *dev)
+{
+// device_create_file(dev, &dev_attr_buffer);
+	device_create_file(dev, &dev_attr_loglevel);
+	device_create_file(dev, &dev_attr_stats);
+	return 0;
+}
+
+/* Remove the files created in ctc_add_attributes() (reverse order). */
+static void
+ctc_remove_attributes(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_stats);
+	device_remove_file(dev, &dev_attr_loglevel);
+// device_remove_file(dev, &dev_attr_buffer);
+}
+
+
+/* Unregister dev from the network (or tty) layer, protocol-dependent. */
+static void
+ctc_netdev_unregister(struct net_device * dev)
+{
+	struct ctc_priv *priv;
+
+	if (!dev)
+		return;
+	priv = (struct ctc_priv *) dev->priv;
+	if (priv->protocol == CTC_PROTO_LINUX_TTY)
+		ctc_tty_unregister_netdev(dev);
+	else
+		unregister_netdev(dev);
+}
+
+/* Register dev with the network (or tty) layer, protocol-dependent. */
+static int
+ctc_netdev_register(struct net_device * dev)
+{
+	struct ctc_priv *priv = (struct ctc_priv *) dev->priv;
+
+	if (priv->protocol == CTC_PROTO_LINUX_TTY)
+		return ctc_tty_register_netdev(dev);
+	return register_netdev(dev);
+}
+
+/* Release the fsm and private data attached to dev and, if requested,
+ * the net_device itself.
+ * NOTE(review): free_netdev() is only compiled for modular builds; a
+ * built-in driver appears to never free the net_device here -- confirm
+ * whether that is intentional. */
+static void
+ctc_free_netdevice(struct net_device * dev, int free_dev)
+{
+	struct ctc_priv *privptr;
+	if (!dev)
+		return;
+	privptr = dev->priv;
+	if (privptr) {
+		if (privptr->fsm)
+			kfree_fsm(privptr->fsm);
+		kfree(privptr);
+	}
+#ifdef MODULE
+	if (free_dev)
+		free_netdev(dev);
+#endif
+}
+
+/**
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static struct net_device *
+ctc_init_netdevice(struct net_device * dev, int alloc_device,
+		   struct ctc_priv *privptr)
+{
+	if (!privptr)
+		return NULL;
+
+	DBF_TEXT(setup, 3, __FUNCTION__);
+	if (alloc_device) {
+		/* NOTE(review): the net_device is raw-kmalloc'ed rather than
+		 * obtained via alloc_netdev(); freeing is equally manual in
+		 * ctc_free_netdevice() -- verify this matches the netdev
+		 * lifetime rules of this kernel version. */
+		dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
+		if (!dev)
+			return NULL;
+		memset(dev, 0, sizeof (struct net_device));
+	}
+
+	dev->priv = privptr;
+	privptr->fsm = init_fsm("ctcdev", dev_state_names,
+				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
+				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
+	if (privptr->fsm == NULL) {
+		if (alloc_device)
+			kfree(dev);
+		return NULL;
+	}
+	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
+	fsm_settimer(privptr->fsm, &privptr->restart_timer);
+	/* Default MTU leaves room for link-level header + block length. */
+	if (dev->mtu == 0)
+		dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
+	dev->hard_start_xmit = ctc_tx;
+	dev->open = ctc_open;
+	dev->stop = ctc_close;
+	dev->get_stats = ctc_stats;
+	dev->change_mtu = ctc_change_mtu;
+	dev->hard_header_len = LL_HEADER_LENGTH + 2;
+	dev->addr_len = 0;
+	dev->type = ARPHRD_SLIP;
+	dev->tx_queue_len = 100;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	SET_MODULE_OWNER(dev);
+	return dev;
+}
+
+/* sysfs: show the configured CTC protocol number. */
+static ssize_t
+ctc_proto_show(struct device *dev, char *buf)
+{
+	struct ctc_priv *priv = dev->driver_data;
+
+	if (!priv)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", priv->protocol);
+}
+
+/* sysfs: set the CTC protocol; valid range is 0..CTC_PROTO_MAX. */
+static ssize_t
+ctc_proto_store(struct device *dev, const char *buf, size_t count)
+{
+	struct ctc_priv *priv;
+	int value;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	pr_debug("%s() called\n", __FUNCTION__);
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	/* Parse as signed int and check the conversion: the old code used
+	 * "%u" into an int and then range-checked a possibly uninitialized
+	 * value when the input was unparsable. */
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if ((value < 0) || (value > CTC_PROTO_MAX))
+		return -EINVAL;
+	priv->protocol = value;
+
+	return count;
+}
+
+static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
+
+/* sysfs: report the channel type of the first (read) subchannel. */
+static ssize_t
+ctc_type_show(struct device *dev, char *buf)
+{
+	struct ccwgroup_device *cgdev = to_ccwgroupdev(dev);
+
+	if (!cgdev)
+		return -ENODEV;
+	return sprintf(buf, "%s\n",
+		       cu3088_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
+
+/* Attribute group registered at probe time by ctc_add_files(); note that
+ * "buffer" is exported here, which is why ctc_add_attributes() keeps its
+ * own device_create_file() for it commented out. */
+static struct attribute *ctc_attr[] = {
+	&dev_attr_protocol.attr,
+	&dev_attr_type.attr,
+	&dev_attr_buffer.attr,
+	NULL,
+};
+
+static struct attribute_group ctc_attr_group = {
+	.attrs = ctc_attr,
+};
+
+/* Register the static attribute group on the group device. */
+static int
+ctc_add_files(struct device *dev)
+{
+	pr_debug("%s() called\n", __FUNCTION__);
+	return sysfs_create_group(&dev->kobj, &ctc_attr_group);
+}
+
+/* Remove the attribute group registered by ctc_add_files(). */
+static void
+ctc_remove_files(struct device *dev)
+{
+	pr_debug("%s() called\n", __FUNCTION__);
+	sysfs_remove_group(&dev->kobj, &ctc_attr_group);
+}
+
+/**
+ * Add ctc specific attributes.
+ * Add ctc private data.
+ *
+ * @param cgdev pointer to ccwgroup_device just added
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+
+static int
+ctc_probe_device(struct ccwgroup_device *cgdev)
+{
+	struct ctc_priv *priv;
+	int rc;
+
+	pr_debug("%s() called\n", __FUNCTION__);
+	DBF_TEXT(trace, 3, __FUNCTION__);
+
+	/* Hold a device reference until ctc_remove_device(). */
+	if (!get_device(&cgdev->dev))
+		return -ENODEV;
+
+	priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
+	if (!priv) {
+		/* Use __FUNCTION__ consistently (rest of this file does;
+		 * this spot used __func__). */
+		ctc_pr_err("%s: Out of memory\n", __FUNCTION__);
+		put_device(&cgdev->dev);
+		return -ENOMEM;
+	}
+
+	memset(priv, 0, sizeof (struct ctc_priv));
+	rc = ctc_add_files(&cgdev->dev);
+	if (rc) {
+		kfree(priv);
+		put_device(&cgdev->dev);
+		return rc;
+	}
+	priv->buffer_size = CTC_BUFSIZE_DEFAULT;
+	/* Both subchannels are serviced by the same interrupt handler. */
+	cgdev->cdev[0]->handler = ctc_irq_handler;
+	cgdev->cdev[1]->handler = ctc_irq_handler;
+	cgdev->dev.driver_data = priv;
+
+	return 0;
+}
+
+/**
+ *
+ * Setup an interface.
+ *
+ * @param cgdev Device to be setup.
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+static int
+ctc_new_device(struct ccwgroup_device *cgdev)
+{
+	char read_id[CTC_ID_SIZE];
+	char write_id[CTC_ID_SIZE];
+	int direction;
+	enum channel_types type;
+	struct ctc_priv *privptr;
+	struct net_device *dev;
+	int ret;
+
+	pr_debug("%s() called\n", __FUNCTION__);
+	DBF_TEXT(setup, 3, __FUNCTION__);
+
+	privptr = cgdev->dev.driver_data;
+	if (!privptr)
+		return -ENODEV;
+
+	type = get_channel_type(&cgdev->cdev[0]->id);
+
+	/* Channel ids are derived from the subchannels' bus ids. */
+	snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
+	snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
+
+	/* NOTE(review): if the second add_channel() fails, the first
+	 * channel is not rolled back here -- confirm cleanup happens
+	 * elsewhere (e.g. on driver removal). */
+	if (add_channel(cgdev->cdev[0], type))
+		return -ENOMEM;
+	if (add_channel(cgdev->cdev[1], type))
+		return -ENOMEM;
+
+	/* Failure to set a subchannel online is only logged, not fatal. */
+	ret = ccw_device_set_online(cgdev->cdev[0]);
+	if (ret != 0) {
+		printk(KERN_WARNING
+		       "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
+	}
+
+	ret = ccw_device_set_online(cgdev->cdev[1]);
+	if (ret != 0) {
+		printk(KERN_WARNING
+		       "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
+	}
+
+	dev = ctc_init_netdevice(NULL, 1, privptr);
+
+	if (!dev) {
+		ctc_pr_warn("ctc_init_netdevice failed\n");
+		goto out;
+	}
+
+	/* The interface name template depends on the protocol. */
+	if (privptr->protocol == CTC_PROTO_LINUX_TTY)
+		strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
+	else
+		strlcpy(dev->name, "ctc%d", IFNAMSIZ);
+
+	/* Claim one channel per direction and attach it to the netdev. */
+	for (direction = READ; direction <= WRITE; direction++) {
+		privptr->channel[direction] =
+		    channel_get(type, direction == READ ? read_id : write_id,
+				direction);
+		if (privptr->channel[direction] == NULL) {
+			if (direction == WRITE)
+				channel_free(privptr->channel[READ]);
+
+			ctc_free_netdevice(dev, 1);
+			goto out;
+		}
+		privptr->channel[direction]->netdev = dev;
+		privptr->channel[direction]->protocol = privptr->protocol;
+		privptr->channel[direction]->max_bufsize = privptr->buffer_size;
+	}
+	/* sysfs magic */
+	SET_NETDEV_DEV(dev, &cgdev->dev);
+
+	if (ctc_netdev_register(dev) != 0) {
+		ctc_free_netdevice(dev, 1);
+		goto out;
+	}
+
+	ctc_add_attributes(&cgdev->dev);
+
+	/* Rename the fsm after the final interface name. */
+	strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
+
+	print_banner();
+
+	ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
+		    dev->name, privptr->channel[READ]->id,
+		    privptr->channel[WRITE]->id, privptr->protocol);
+
+	return 0;
+out:
+	/* Error unwind: take both subchannels offline again. */
+	ccw_device_set_offline(cgdev->cdev[1]);
+	ccw_device_set_offline(cgdev->cdev[0]);
+
+	return -ENODEV;
+}
+
+/**
+ * Shutdown an interface.
+ *
+ * @param cgdev Device to be shut down.
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+/* Shut down an interface: close and unregister the netdev, release and
+ * remove both channels, free the device fsm and take both subchannels
+ * offline again. */
+static int
+ctc_shutdown_device(struct ccwgroup_device *cgdev)
+{
+	struct ctc_priv *priv;
+	struct net_device *ndev;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	pr_debug("%s() called\n", __FUNCTION__);
+
+	priv = cgdev->dev.driver_data;
+	ndev = NULL;
+	if (!priv)
+		return -ENODEV;
+
+	if (priv->channel[READ]) {
+		ndev = priv->channel[READ]->netdev;
+
+		/* Close the device */
+		ctc_close(ndev);
+		ndev->flags &=~IFF_RUNNING;
+
+		ctc_remove_attributes(&cgdev->dev);
+
+		channel_free(priv->channel[READ]);
+	}
+	if (priv->channel[WRITE])
+		channel_free(priv->channel[WRITE]);
+
+	if (ndev) {
+		ctc_netdev_unregister(ndev);
+		/* Clear ndev->priv first so ctc_free_netdevice() does not
+		 * free priv/fsm behind our back; they are released below
+		 * resp. in ctc_remove_device(). */
+		ndev->priv = NULL;
+		ctc_free_netdevice(ndev, 1);
+	}
+
+	if (priv->fsm)
+		kfree_fsm(priv->fsm);
+	/* Clear the pointer to avoid a dangling reference (and a potential
+	 * double kfree_fsm() should this function ever run twice). */
+	priv->fsm = NULL;
+
+	ccw_device_set_offline(cgdev->cdev[1]);
+	ccw_device_set_offline(cgdev->cdev[0]);
+
+	if (priv->channel[READ])
+		channel_remove(priv->channel[READ]);
+	if (priv->channel[WRITE])
+		channel_remove(priv->channel[WRITE]);
+
+	priv->channel[READ] = priv->channel[WRITE] = NULL;
+
+	return 0;
+
+}
+
+/* ccwgroup remove callback: tear down the interface (if still online),
+ * drop sysfs files and private data, release the device reference. */
+static void
+ctc_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct ctc_priv *priv = cgdev->dev.driver_data;
+
+	pr_debug("%s() called\n", __FUNCTION__);
+	DBF_TEXT(trace, 3, __FUNCTION__);
+
+	if (!priv)
+		return;
+	if (cgdev->state == CCWGROUP_ONLINE)
+		ctc_shutdown_device(cgdev);
+	ctc_remove_files(&cgdev->dev);
+	cgdev->dev.driver_data = NULL;
+	kfree(priv);
+	/* Drop the reference taken in ctc_probe_device(). */
+	put_device(&cgdev->dev);
+}
+
+/* ccwgroup discipline glue: two slave CCW devices (read + write channel)
+ * form one CTC interface; registered via register_cu3088_discipline(). */
+static struct ccwgroup_driver ctc_group_driver = {
+	.owner       = THIS_MODULE,
+	.name        = "ctc",
+	.max_slaves  = 2,
+	.driver_id   = 0xC3E3C3,
+	.probe       = ctc_probe_device,
+	.remove      = ctc_remove_device,
+	.set_online  = ctc_new_device,
+	.set_offline = ctc_shutdown_device,
+};
+
+/**
+ * Module related routines
+ *****************************************************************************/
+
+/**
+ * Prepare to be unloaded. Free IRQ's and release all resources.
+ * This is called just before this module is unloaded. It is
+ * <em>not</em> called, if the usage count is !0, so we don't need to check
+ * for that.
+ */
+static void __exit
+ctc_exit(void)
+{
+	/* Reverse order of ctc_init(): drop the cu3088 discipline first so
+	 * no new devices can appear, then tear down tty and debug views. */
+	unregister_cu3088_discipline(&ctc_group_driver);
+	ctc_tty_cleanup();
+	ctc_unregister_dbf_views();
+	ctc_pr_info("CTC driver unloaded\n");
+}
+
+/**
+ * Initialize module.
+ * This is called just after the module is loaded.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int __init
+ctc_init(void)
+{
+	int ret;
+
+	print_banner();
+
+	/* Debug views first; everything else depends on them for tracing. */
+	ret = ctc_register_dbf_views();
+	if (ret) {
+		ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
+		return ret;
+	}
+	ctc_tty_init();
+	ret = register_cu3088_discipline(&ctc_group_driver);
+	if (ret == 0)
+		return 0;
+	/* Discipline registration failed: undo tty and debug setup. */
+	ctc_tty_cleanup();
+	ctc_unregister_dbf_views();
+	return ret;
+}
+
+module_init(ctc_init);
+module_exit(ctc_exit);
+
+/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c
new file mode 100644
index 000000000000..9257d60c7833
--- /dev/null
+++ b/drivers/s390/net/ctctty.c
@@ -0,0 +1,1276 @@
+/*
+ * $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $
+ *
+ * CTC / ESCON network driver, tty interface.
+ *
+ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/serial_reg.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+#include <linux/devfs_fs_kernel.h>
+#include "ctctty.h"
+#include "ctcdbug.h"
+
+#define CTC_TTY_MAJOR 43
+#define CTC_TTY_MAX_DEVICES 64
+
+#define CTC_ASYNC_MAGIC 0x49344C01	/* for paranoia-checking */
+#define CTC_ASYNC_INITIALIZED 0x80000000	/* port was initialized */
+#define CTC_ASYNC_NORMAL_ACTIVE 0x20000000	/* Normal device active */
+#define CTC_ASYNC_CLOSING 0x08000000	/* Serial port is closing */
+#define CTC_ASYNC_CTS_FLOW 0x04000000	/* Do CTS flow control */
+#define CTC_ASYNC_CHECK_CD 0x02000000	/* i.e., CLOCAL */
+#define CTC_ASYNC_HUP_NOTIFY 0x0001	/* Notify tty on hangups/closes */
+#define CTC_ASYNC_NETDEV_OPEN 0x0002	/* Underlying netdev is open */
+#define CTC_ASYNC_TX_LINESTAT 0x0004	/* Must send line status */
+#define CTC_ASYNC_SPLIT_TERMIOS 0x0008	/* Sep. termios for dialin/out */
+#define CTC_TTY_XMIT_SIZE 1024	/* Default bufsize for write */
+#define CTC_SERIAL_XMIT_MAX 4000	/* Maximum bufsize for write */
+
+/* Private data (similar to async_struct in <linux/serial.h>) */
+typedef struct {
+	int magic;			/* CTC_ASYNC_MAGIC when valid */
+	int flags;			/* defined in tty.h */
+	int mcr;			/* Modem control register */
+	int msr;			/* Modem status register */
+	int lsr;			/* Line status register */
+	int line;			/* index into driver->info[] */
+	int count;			/* # of fd on device */
+	int blocked_open;		/* # of blocked opens */
+	struct net_device *netdev;	/* underlying CTC net device */
+	struct sk_buff_head tx_queue;	/* transmit queue */
+	struct sk_buff_head rx_queue;	/* receive queue */
+	struct tty_struct *tty;		/* Pointer to corresponding tty */
+	wait_queue_head_t open_wait;
+	wait_queue_head_t close_wait;
+	struct semaphore write_sem;
+	struct tasklet_struct tasklet;	/* drains rx/tx queues (scheduled
+					 * from rx path and tx_inject) */
+	struct timer_list stoptimer;
+} ctc_tty_info;
+
+/* Description of one CTC-tty */
+typedef struct {
+	struct tty_driver *ctc_tty_device;	/* tty-device */
+	ctc_tty_info info[CTC_TTY_MAX_DEVICES];	/* Private data */
+} ctc_tty_driver;
+
+static ctc_tty_driver *driver;
+
+/* Leave this unchanged unless you know what you do! */
+#define MODEM_PARANOIA_CHECK
+#define MODEM_DO_RESTART
+
+#define CTC_TTY_NAME "ctctty"
+
+/* Magic word prepended to every frame; checked in ctc_tty_netif_rx(). */
+static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC;
+/* Set on module cleanup; makes rx/tx paths drop their work early. */
+static int ctc_tty_shuttingdown = 0;
+
+static spinlock_t ctc_tty_lock;
+
+/* ctc_tty_try_read() is called from within ctc_tty_rcv_skb()
+ * to stuff incoming data directly into a tty's flip-buffer. If the
+ * flip buffer is full, the packet gets queued up.
+ *
+ * Return:
+ * 1 = Success
+ * 0 = Failure, data has to be buffered and later processed by
+ * ctc_tty_readmodem().
+ */
+static int
+ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
+{
+	int c;
+	int len;
+	struct tty_struct *tty;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if ((tty = info->tty)) {
+		/* Honour flow control: only deliver while RTS is raised. */
+		if (info->mcr & UART_MCR_RTS) {
+			c = TTY_FLIPBUF_SIZE - tty->flip.count;
+			len = skb->len;
+			/* Deliver only if the whole skb fits into the flip
+			 * buffer; otherwise the caller queues it up for
+			 * ctc_tty_readmodem(). */
+			if (c >= len) {
+				memcpy(tty->flip.char_buf_ptr, skb->data, len);
+				memset(tty->flip.flag_buf_ptr, 0, len);
+				tty->flip.count += len;
+				tty->flip.char_buf_ptr += len;
+				tty->flip.flag_buf_ptr += len;
+				tty_flip_buffer_push(tty);
+				kfree_skb(skb);
+				return 1;
+			}
+		}
+	}
+	return 0;
+}
+
+/* ctc_tty_readmodem() is called periodically from within timer-interrupt.
+ * It tries getting received data from the receive queue an stuff it into
+ * the tty's flip-buffer.
+ */
+static int
+ctc_tty_readmodem(ctc_tty_info *info)
+{
+	int ret = 1;
+	struct tty_struct *tty;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if ((tty = info->tty)) {
+		/* Honour flow control: only deliver while RTS is raised. */
+		if (info->mcr & UART_MCR_RTS) {
+			int c = TTY_FLIPBUF_SIZE - tty->flip.count;
+			struct sk_buff *skb;
+
+			if ((c > 0) && (skb = skb_dequeue(&info->rx_queue))) {
+				int len = skb->len;
+				/* Partial delivery if the skb is larger
+				 * than the remaining flip-buffer space. */
+				if (len > c)
+					len = c;
+				memcpy(tty->flip.char_buf_ptr, skb->data, len);
+				skb_pull(skb, len);
+				memset(tty->flip.flag_buf_ptr, 0, len);
+				tty->flip.count += len;
+				tty->flip.char_buf_ptr += len;
+				tty->flip.flag_buf_ptr += len;
+				tty_flip_buffer_push(tty);
+				/* Requeue any undelivered remainder. */
+				if (skb->len > 0)
+					skb_queue_head(&info->rx_queue, skb);
+				else {
+					kfree_skb(skb);
+					/* 0 when the queue drained, else
+					 * the number of pending skbs. */
+					ret = skb_queue_len(&info->rx_queue);
+				}
+			}
+		}
+	}
+	return ret;
+}
+
+/* Called from the device statemachine to raise/drop DCD on the tty slot
+ * bound to netdev.  Dropping carrier hangs the tty up if CLOCAL is off. */
+void
+ctc_tty_setcarrier(struct net_device *netdev, int on)
+{
+	int i;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if ((!driver) || ctc_tty_shuttingdown)
+		return;
+	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
+		if (driver->info[i].netdev == netdev) {
+			ctc_tty_info *info = &driver->info[i];
+			if (on)
+				info->msr |= UART_MSR_DCD;
+			else
+				info->msr &= ~UART_MSR_DCD;
+			/* Only hang up when a tty is actually attached;
+			 * info->tty may be NULL when the port is closed
+			 * (the transmit path guards this too). */
+			if ((info->flags & CTC_ASYNC_CHECK_CD) && (!on) &&
+			    info->tty)
+				tty_hangup(info->tty);
+		}
+}
+
+void
+ctc_tty_netif_rx(struct sk_buff *skb)
+{
+	int i;
+	ctc_tty_info *info = NULL;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if (!skb)
+		return;
+	if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	/* Find the tty slot bound to the originating net device. */
+	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
+		if (driver->info[i].netdev == skb->dev) {
+			info = &driver->info[i];
+			break;
+		}
+	if (!info) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	/* NOTE(review): the minimum accepted length is 6, but the header
+	 * pulled below is sizeof(__u32) + sizeof(info->mcr) bytes --
+	 * confirm the intended minimum frame size. */
+	if (skb->len < 6) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	/* Frames not starting with the magic word are not for us. */
+	if (memcmp(skb->data, &ctc_tty_magic, sizeof(__u32))) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	skb_pull(skb, sizeof(__u32));
+
+	/* Next word carries the peer's modem-control bits. */
+	i = *((int *)skb->data);
+	skb_pull(skb, sizeof(info->mcr));
+	/* NOTE(review): info->tty is dereferenced below without a NULL
+	 * check -- confirm CTC_ASYNC_CTS_FLOW implies an attached tty. */
+	if (i & UART_MCR_RTS) {
+		info->msr |= UART_MSR_CTS;
+		if (info->flags & CTC_ASYNC_CTS_FLOW)
+			info->tty->hw_stopped = 0;
+	} else {
+		info->msr &= ~UART_MSR_CTS;
+		if (info->flags & CTC_ASYNC_CTS_FLOW)
+			info->tty->hw_stopped = 1;
+	}
+	if (i & UART_MCR_DTR)
+		info->msr |= UART_MSR_DSR;
+	else
+		info->msr &= ~UART_MSR_DSR;
+	/* Pure status frame: nothing left to deliver. */
+	if (skb->len <= 0) {
+		kfree_skb(skb);
+		return;
+	}
+	/* Try to deliver directly via tty-flip-buf if queue is empty */
+	if (skb_queue_empty(&info->rx_queue))
+		if (ctc_tty_try_read(info, skb))
+			return;
+	/* Direct deliver failed or queue wasn't empty.
+	 * Queue up for later dequeueing via timer-irq.
+	 */
+	skb_queue_tail(&info->rx_queue, skb);
+	/* Schedule dequeuing */
+	tasklet_schedule(&info->tasklet);
+}
+
+/* Transmit one queued skb (or an empty line-status frame) for this port.
+ * Returns 1 if transmission should be retried / the queue is non-empty,
+ * 0 otherwise. */
+static int
+ctc_tty_tint(ctc_tty_info * info)
+{
+	struct sk_buff *skb = skb_dequeue(&info->tx_queue);
+	int stopped;
+	int rc;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	/* info->tty may be NULL when the port is closed; the original code
+	 * dereferenced it unconditionally here although the transmit path
+	 * below checks it before use. */
+	stopped = info->tty && (info->tty->hw_stopped || info->tty->stopped);
+	if (!info->netdev) {
+		if (skb)
+			kfree_skb(skb);
+		return 0;
+	}
+	if (info->flags & CTC_ASYNC_TX_LINESTAT) {
+		int skb_res = info->netdev->hard_header_len +
+			sizeof(info->mcr) + sizeof(__u32);
+		/* If we must update line status,
+		 * create an empty dummy skb and insert it.
+		 */
+		if (skb)
+			skb_queue_head(&info->tx_queue, skb);
+
+		skb = dev_alloc_skb(skb_res);
+		if (!skb) {
+			printk(KERN_WARNING
+			       "ctc_tty: Out of memory in %s%d tint\n",
+			       CTC_TTY_NAME, info->line);
+			return 1;
+		}
+		skb_reserve(skb, skb_res);
+		/* Status frames go out even while flow-stopped. */
+		stopped = 0;
+	}
+	if (!skb)
+		return 0;
+	if (stopped) {
+		/* Flow-controlled: requeue and signal "try again later". */
+		skb_queue_head(&info->tx_queue, skb);
+		return 1;
+	}
+	/* Prepend modem-control word and magic so the peer can distinguish
+	 * our frames (see ctc_tty_netif_rx on the receive side). */
+	memcpy(skb_push(skb, sizeof(info->mcr)), &info->mcr, sizeof(info->mcr));
+	memcpy(skb_push(skb, sizeof(__u32)), &ctc_tty_magic, sizeof(__u32));
+	rc = info->netdev->hard_start_xmit(skb, info->netdev);
+	if (rc) {
+		/* Transmit failed: strip our header and requeue payload. */
+		skb_pull(skb, sizeof(info->mcr) + sizeof(__u32));
+		if (skb->len > 0)
+			skb_queue_head(&info->tx_queue, skb);
+		else
+			kfree_skb(skb);
+	} else {
+		struct tty_struct *tty = info->tty;
+
+		info->flags &= ~CTC_ASYNC_TX_LINESTAT;
+		if (tty) {
+			tty_wakeup(tty);
+		}
+	}
+	return (skb_queue_empty(&info->tx_queue) ? 0 : 1);
+}
+
+/************************************************************
+ *
+ * Modem-functions
+ *
+ * mostly "stolen" from original Linux-serial.c and friends.
+ *
+ ************************************************************/
+
+static inline int
+ctc_tty_paranoia_check(ctc_tty_info * info, char *name, const char *routine)
+{
+#ifdef MODEM_PARANOIA_CHECK
+	/* Sanity-check the info pointer and its magic cookie before use;
+	 * report failure (non-zero) so callers can bail out early. */
+	if (info == NULL) {
+		printk(KERN_WARNING "ctc_tty: null info_struct for %s in %s\n",
+		       name, routine);
+		return 1;
+	}
+	if (CTC_ASYNC_MAGIC != info->magic) {
+		printk(KERN_WARNING "ctc_tty: bad magic for info struct %s in %s\n",
+		       name, routine);
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+/* Queue a single character (e.g. XON/XOFF) ahead of all pending output. */
+static void
+ctc_tty_inject(ctc_tty_info *info, char c)
+{
+	struct sk_buff *skb;
+	int headroom;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ctc_tty_shuttingdown)
+		return;
+	/* Reserve room for the protocol header plus the one-byte payload. */
+	headroom = info->netdev->hard_header_len + sizeof(info->mcr) +
+		sizeof(__u32) + 1;
+	skb = dev_alloc_skb(headroom);
+	if (skb == NULL) {
+		printk(KERN_WARNING
+		       "ctc_tty: Out of memory in %s%d tx_inject\n",
+		       CTC_TTY_NAME, info->line);
+		return;
+	}
+	skb_reserve(skb, headroom);
+	*(skb_put(skb, 1)) = c;
+	/* Head of the queue: flow-control chars overtake queued data. */
+	skb_queue_head(&info->tx_queue, skb);
+	tasklet_schedule(&info->tasklet);
+}
+
+/* Flag a pending line-status (MCR) update and let the tasklet send it
+ * to the peer as a status-only frame. */
+static void
+ctc_tty_transmit_status(ctc_tty_info *info)
+{
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if (ctc_tty_shuttingdown)
+		return;
+	info->flags |= CTC_ASYNC_TX_LINESTAT;
+	tasklet_schedule(&info->tasklet);
+}
+
+/*
+ * Apply the tty's termios settings to the emulated line: raise DTR/RTS
+ * for any non-zero baud rate, drop them for B0 (hangup), and track the
+ * CTS-flow and carrier-check flags.
+ */
+static void
+ctc_tty_change_speed(ctc_tty_info * info)
+{
+	unsigned int cflag;
+	unsigned int quot;
+	int i;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	if (!info->tty || !info->tty->termios)
+		return;
+	cflag = info->tty->termios->c_cflag;
+
+	/* NOTE(review): the baud-index computation below looks vestigial
+	 * (inherited from serial.c): 'i' is never used after this block.
+	 * Its one real effect is clearing an invalid CBAUDEX bit in the
+	 * termios. 'quot' only distinguishes B0 from non-B0. */
+	quot = i = cflag & CBAUD;
+	if (i & CBAUDEX) {
+		i &= ~CBAUDEX;
+		if (i < 1 || i > 2)
+			info->tty->termios->c_cflag &= ~CBAUDEX;
+		else
+			i += 15;
+	}
+	if (quot) {
+		/* Any real speed: assert DTR and RTS towards the peer. */
+		info->mcr |= UART_MCR_DTR;
+		info->mcr |= UART_MCR_RTS;
+		ctc_tty_transmit_status(info);
+	} else {
+		/* B0 means hang up: drop the modem-control lines. */
+		info->mcr &= ~UART_MCR_DTR;
+		info->mcr &= ~UART_MCR_RTS;
+		ctc_tty_transmit_status(info);
+		return;
+	}
+
+	/* CTS flow control flag and modem status interrupts */
+	if (cflag & CRTSCTS) {
+		info->flags |= CTC_ASYNC_CTS_FLOW;
+	} else
+		info->flags &= ~CTC_ASYNC_CTS_FLOW;
+	if (cflag & CLOCAL)
+		info->flags &= ~CTC_ASYNC_CHECK_CD;
+	else {
+		info->flags |= CTC_ASYNC_CHECK_CD;
+	}
+}
+
+/* Bring a line into operational state: raise the virtual modem-control
+ * lines, apply termios settings and open the underlying netdev. */
+static int
+ctc_tty_startup(ctc_tty_info * info)
+{
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	/* Idempotent: a line that is already up needs no work. */
+	if (info->flags & CTC_ASYNC_INITIALIZED)
+		return 0;
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "starting up %s%d ...\n", CTC_TTY_NAME, info->line);
+#endif
+	/*
+	 * Now, initialize the UART
+	 */
+	info->mcr = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
+	if (info->tty)
+		clear_bit(TTY_IO_ERROR, &info->tty->flags);
+	/*
+	 * and set the speed of the serial port
+	 */
+	ctc_tty_change_speed(info);
+
+	info->flags |= CTC_ASYNC_INITIALIZED;
+	/* Open the underlying network interface on first use only. */
+	if ((info->flags & CTC_ASYNC_NETDEV_OPEN) == 0) {
+		info->netdev->open(info->netdev);
+		info->flags |= CTC_ASYNC_NETDEV_OPEN;
+	}
+	return 0;
+}
+
+/*
+ * Timer callback armed by ctc_tty_shutdown(): closes the underlying
+ * netdev 10 seconds after shutdown.  Bails out if the line has been
+ * re-initialized (reopened) in the meantime, so a quick close/reopen
+ * does not tear down an active interface.
+ */
+static void
+ctc_tty_stopdev(unsigned long data)
+{
+	ctc_tty_info *info = (ctc_tty_info *)data;
+
+	if ((!info) || (!info->netdev) ||
+	    (info->flags & CTC_ASYNC_INITIALIZED))
+		return;
+	info->netdev->stop(info->netdev);
+	info->flags &= ~CTC_ASYNC_NETDEV_OPEN;
+}
+
+/*
+ * This routine will shutdown a serial port; interrupts are disabled, and
+ * DTR is dropped if the hangup on close termio flag is on.
+ */
+static void
+ctc_tty_shutdown(ctc_tty_info * info)
+{
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	if (!(info->flags & CTC_ASYNC_INITIALIZED))
+		return;
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "Shutting down %s%d ....\n", CTC_TTY_NAME, info->line);
+#endif
+	/* Clear ring indicator; drop DTR/RTS only if HUPCL asks for it
+	 * (or no tty is attached any more). */
+	info->msr &= ~UART_MSR_RI;
+	if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
+		info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
+	if (info->tty)
+		set_bit(TTY_IO_ERROR, &info->tty->flags);
+	/* Close the underlying netdev lazily, 10s from now, via
+	 * ctc_tty_stopdev() — a reopen within that window cancels it. */
+	mod_timer(&info->stoptimer, jiffies + (10 * HZ));
+	skb_queue_purge(&info->tx_queue);
+	skb_queue_purge(&info->rx_queue);
+	info->flags &= ~CTC_ASYNC_INITIALIZED;
+}
+
+/* ctc_tty_write() is the main send-routine. It is called from the upper
+ * levels within the kernel to perform sending data. Depending on the
+ * online-flag it either directs output to the at-command-interpreter or
+ * to the lower level. Additional tasks done here:
+ * - If online, check for escape-sequence (+++)
+ * - If sending audio-data, call ctc_tty_DLEdown() to parse DLE-codes.
+ * - If receiving audio-data, call ctc_tty_end_vrx() to abort if needed.
+ * - If dialing, abort dial.
+ */
+/*
+ * Main send routine: chops the user buffer into CTC_TTY_XMIT_SIZE
+ * chunks, queues them as skbs on tx_queue and kicks the tasklet.
+ * Returns the number of bytes accepted, or -ENODEV without a netdev.
+ */
+static int
+ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
+{
+	int c;
+	int total = 0;
+	ctc_tty_info *info;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	/* Fix: check tty before dereferencing it; the original read
+	 * tty->driver_data and tty->name before its NULL test. */
+	if (!tty)
+		goto ex;
+	info = (ctc_tty_info *) tty->driver_data;
+	if (ctc_tty_shuttingdown)
+		goto ex;
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
+		goto ex;
+	if (!info->netdev) {
+		total = -ENODEV;
+		goto ex;
+	}
+	while (1) {
+		struct sk_buff *skb;
+		int skb_res;
+
+		/* Send at most CTC_TTY_XMIT_SIZE bytes per frame. */
+		c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE;
+		if (c <= 0)
+			break;
+
+		/* (Fix: removed a stray '+' that made this a unary plus.) */
+		skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
+			sizeof(__u32);
+		skb = dev_alloc_skb(skb_res + c);
+		if (!skb) {
+			printk(KERN_WARNING
+			       "ctc_tty: Out of memory in %s%d write\n",
+			       CTC_TTY_NAME, info->line);
+			break;
+		}
+		skb_reserve(skb, skb_res);
+		memcpy(skb_put(skb, c), buf, c);
+		skb_queue_tail(&info->tx_queue, skb);
+		buf += c;
+		total += c;
+		count -= c;
+	}
+	if (skb_queue_len(&info->tx_queue)) {
+		/* Transmitter no longer empty; let the tasklet drain it. */
+		info->lsr &= ~UART_LSR_TEMT;
+		tasklet_schedule(&info->tasklet);
+	}
+ex:
+	DBF_TEXT(trace, 6, __FUNCTION__);
+	return total;
+}
+
+/* Report writable space: a fixed chunk size, since output is queued in
+ * skbs and only bounded by memory. */
+static int
+ctc_tty_write_room(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	return ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write_room") ?
+		0 : CTC_TTY_XMIT_SIZE;
+}
+
+/* Always reports 0: pending data lives in skbs on tx_queue, not in a
+ * local staging buffer.  The paranoia check is kept for its diagnostics
+ * only — both outcomes return 0. */
+static int
+ctc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	ctc_tty_paranoia_check(info, tty->name, "ctc_tty_chars_in_buffer");
+	return 0;
+}
+
+/* Discard all queued output and wake up writers. */
+static void
+ctc_tty_flush_buffer(struct tty_struct *tty)
+{
+	ctc_tty_info *info;
+	unsigned long flags;
+	int bad = 1;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (tty) {
+		spin_lock_irqsave(&ctc_tty_lock, flags);
+		info = (ctc_tty_info *) tty->driver_data;
+		bad = ctc_tty_paranoia_check(info, tty->name,
+					     "ctc_tty_flush_buffer");
+		if (!bad) {
+			/* Drop everything queued; transmitter is empty now. */
+			skb_queue_purge(&info->tx_queue);
+			info->lsr |= UART_LSR_TEMT;
+		}
+		spin_unlock_irqrestore(&ctc_tty_lock, flags);
+		if (!bad) {
+			wake_up_interruptible(&tty->write_wait);
+			tty_wakeup(tty);
+		}
+	}
+	DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__);
+}
+
+/* Kick the tasklet to push queued output, unless flow-stopped. */
+static void
+ctc_tty_flush_chars(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ctc_tty_shuttingdown ||
+	    ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
+		return;
+	/* Nothing to do unless output is allowed and data is queued. */
+	if (tty->stopped || tty->hw_stopped)
+		return;
+	if (skb_queue_len(&info->tx_queue))
+		tasklet_schedule(&info->tasklet);
+}
+
+/*
+ * ------------------------------------------------------------
+ * ctc_tty_throttle()
+ *
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ * ------------------------------------------------------------
+ */
+static void
+ctc_tty_throttle(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle"))
+		return;
+	/* Drop RTS and, with software flow control, inject a STOP char
+	 * ahead of pending output; then notify the peer. */
+	info->mcr &= ~UART_MCR_RTS;
+	if (I_IXOFF(tty))
+		ctc_tty_inject(info, STOP_CHAR(tty));
+	ctc_tty_transmit_status(info);
+}
+
+static void
+ctc_tty_unthrottle(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle"))
+		return;
+	/* Mirror of ctc_tty_throttle(): raise RTS, inject START char for
+	 * software flow control, and notify the peer. */
+	info->mcr |= UART_MCR_RTS;
+	if (I_IXOFF(tty))
+		ctc_tty_inject(info, START_CHAR(tty));
+	ctc_tty_transmit_status(info);
+}
+
+/*
+ * ------------------------------------------------------------
+ * ctc_tty_ioctl() and friends
+ * ------------------------------------------------------------
+ */
+
+/*
+ * ctc_tty_get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ * the transmit shift register is empty, not be done when the
+ * transmit holding register is empty. This functionality
+ * allows RS485 driver to be written in user space.
+ */
+/*
+ * Copy the (emulated) line status register's TEMT bit to user space.
+ * Returns 0 on success, -EFAULT if the user pointer is bad.
+ */
+static int
+ctc_tty_get_lsr_info(ctc_tty_info * info, uint __user *value)
+{
+	u_char status;
+	uint result;
+	ulong flags;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	spin_lock_irqsave(&ctc_tty_lock, flags);
+	status = info->lsr;
+	spin_unlock_irqrestore(&ctc_tty_lock, flags);
+	result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
+	/* Fix: propagate the put_user() result instead of ignoring it. */
+	return put_user(result, value);
+}
+
+
+/* TIOCMGET: translate the soft MCR/MSR registers into TIOCM_* bits. */
+static int ctc_tty_tiocmget(struct tty_struct *tty, struct file *file)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+	u_char control;
+	u_char status;
+	ulong flags;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
+		return -ENODEV;
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+
+	/* Snapshot both registers (MSR under the driver lock). */
+	control = info->mcr;
+	spin_lock_irqsave(&ctc_tty_lock, flags);
+	status = info->msr;
+	spin_unlock_irqrestore(&ctc_tty_lock, flags);
+	return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
+	    | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
+	    | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
+	    | ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
+	    | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
+	    | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
+}
+
+/* TIOCMSET: map TIOCM_RTS/TIOCM_DTR set/clear requests onto the soft
+ * MCR bits and notify the peer if anything could have changed. */
+static int
+ctc_tty_tiocmset(struct tty_struct *tty, struct file *file,
+		 unsigned int set, unsigned int clear)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
+		return -ENODEV;
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+
+	if (set & TIOCM_RTS)
+		info->mcr |= UART_MCR_RTS;
+	if (clear & TIOCM_RTS)
+		info->mcr &= ~UART_MCR_RTS;
+	if (set & TIOCM_DTR)
+		info->mcr |= UART_MCR_DTR;
+	if (clear & TIOCM_DTR)
+		info->mcr &= ~UART_MCR_DTR;
+
+	if ((set | clear) & (TIOCM_RTS | TIOCM_DTR))
+		ctc_tty_transmit_status(info);
+	return 0;
+}
+
+/*
+ * ioctl handler for the emulated serial line.  Handles the subset of
+ * serial ioctls that make sense here (break is a no-op beyond waiting
+ * for output to drain); everything else goes back to the tty layer
+ * via -ENOIOCTLCMD.
+ */
+static int
+ctc_tty_ioctl(struct tty_struct *tty, struct file *file,
+	      uint cmd, ulong arg)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+	int error;
+	int retval;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
+		return -ENODEV;
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+	switch (cmd) {
+		case TCSBRK:   /* SVID version: non-zero arg --> no break */
+#ifdef CTC_DEBUG_MODEM_IOCTL
+			printk(KERN_DEBUG "%s%d ioctl TCSBRK\n", CTC_TTY_NAME, info->line);
+#endif
+			/* No real break on this hardware: just drain output. */
+			retval = tty_check_change(tty);
+			if (retval)
+				return retval;
+			tty_wait_until_sent(tty, 0);
+			return 0;
+		case TCSBRKP:  /* support for POSIX tcsendbreak() */
+#ifdef CTC_DEBUG_MODEM_IOCTL
+			printk(KERN_DEBUG "%s%d ioctl TCSBRKP\n", CTC_TTY_NAME, info->line);
+#endif
+			retval = tty_check_change(tty);
+			if (retval)
+				return retval;
+			tty_wait_until_sent(tty, 0);
+			return 0;
+		case TIOCGSOFTCAR:
+#ifdef CTC_DEBUG_MODEM_IOCTL
+			printk(KERN_DEBUG "%s%d ioctl TIOCGSOFTCAR\n", CTC_TTY_NAME,
+			       info->line);
+#endif
+			error = put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg);
+			return error;
+		case TIOCSSOFTCAR:
+#ifdef CTC_DEBUG_MODEM_IOCTL
+			printk(KERN_DEBUG "%s%d ioctl TIOCSSOFTCAR\n", CTC_TTY_NAME,
+			       info->line);
+#endif
+			/* NOTE: 'arg' is reused — first as the user pointer,
+			 * then as the value fetched from it. */
+			error = get_user(arg, (ulong __user *) arg);
+			if (error)
+				return error;
+			tty->termios->c_cflag =
+			    ((tty->termios->c_cflag & ~CLOCAL) |
+			     (arg ? CLOCAL : 0));
+			return 0;
+		case TIOCSERGETLSR: /* Get line status register */
+#ifdef CTC_DEBUG_MODEM_IOCTL
+			printk(KERN_DEBUG "%s%d ioctl TIOCSERGETLSR\n", CTC_TTY_NAME,
+			       info->line);
+#endif
+			if (access_ok(VERIFY_WRITE, (void __user *) arg, sizeof(uint)))
+				return ctc_tty_get_lsr_info(info, (uint __user *) arg);
+			else
+				return -EFAULT;
+		default:
+#ifdef CTC_DEBUG_MODEM_IOCTL
+			printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on %s%d\n", cmd,
+			       CTC_TTY_NAME, info->line);
+#endif
+			return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+
+/* React to termios changes: re-apply speed, handle B0 transitions
+ * (drop/raise DTR and RTS) and release a CRTSCTS hardware stop. */
+static void
+ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+	unsigned int new_cflag = tty->termios->c_cflag;
+	unsigned int old_cflag = old_termios->c_cflag;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	ctc_tty_change_speed(info);
+
+	/* Dropping to B0 hangs up: clear DTR and RTS. */
+	if ((old_cflag & CBAUD) && !(new_cflag & CBAUD)) {
+		info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
+		ctc_tty_transmit_status(info);
+	}
+
+	/* Leaving B0 raises DTR (and RTS, unless throttled with CRTSCTS). */
+	if (!(old_cflag & CBAUD) && (new_cflag & CBAUD)) {
+		info->mcr |= UART_MCR_DTR;
+		if (!(new_cflag & CRTSCTS) ||
+		    !test_bit(TTY_THROTTLED, &tty->flags))
+			info->mcr |= UART_MCR_RTS;
+		ctc_tty_transmit_status(info);
+	}
+
+	/* Turning CRTSCTS off releases a hardware flow-control stop. */
+	if ((old_cflag & CRTSCTS) && !(new_cflag & CRTSCTS))
+		tty->hw_stopped = 0;
+}
+
+/*
+ * ------------------------------------------------------------
+ * ctc_tty_open() and friends
+ * ------------------------------------------------------------
+ */
+/*
+ * Open-time wait loop: block the opener until carrier is present (or
+ * CLOCAL / non-blocking mode makes waiting unnecessary).  Lifted from
+ * the classic serial driver; info->count bookkeeping lets close() see
+ * the opener as "gone" while it sleeps.
+ */
+static int
+ctc_tty_block_til_ready(struct tty_struct *tty, struct file *filp, ctc_tty_info *info)
+{
+	DECLARE_WAITQUEUE(wait, NULL);
+	int do_clocal = 0;
+	unsigned long flags;
+	int retval;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	/*
+	 * If the device is in the middle of being closed, then block
+	 * until it's done, and then try again.
+	 */
+	if (tty_hung_up_p(filp) ||
+	    (info->flags & CTC_ASYNC_CLOSING)) {
+		if (info->flags & CTC_ASYNC_CLOSING)
+			wait_event(info->close_wait,
+				   !(info->flags & CTC_ASYNC_CLOSING));
+#ifdef MODEM_DO_RESTART
+		if (info->flags & CTC_ASYNC_HUP_NOTIFY)
+			return -EAGAIN;
+		else
+			return -ERESTARTSYS;
+#else
+		return -EAGAIN;
+#endif
+	}
+	/*
+	 * If non-blocking mode is set, then make the check up front
+	 * and then exit.
+	 */
+	if ((filp->f_flags & O_NONBLOCK) ||
+	    (tty->flags & (1 << TTY_IO_ERROR))) {
+		info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
+		return 0;
+	}
+	/* CLOCAL means we don't wait for carrier at all. */
+	if (tty->termios->c_cflag & CLOCAL)
+		do_clocal = 1;
+	/*
+	 * Block waiting for the carrier detect and the line to become
+	 * free (i.e., not in use by the callout). While we are in
+	 * this loop, info->count is dropped by one, so that
+	 * ctc_tty_close() knows when to free things. We restore it upon
+	 * exit, either normal or abnormal.
+	 */
+	retval = 0;
+	add_wait_queue(&info->open_wait, &wait);
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "ctc_tty_block_til_ready before block: %s%d, count = %d\n",
+	       CTC_TTY_NAME, info->line, info->count);
+#endif
+	spin_lock_irqsave(&ctc_tty_lock, flags);
+	if (!(tty_hung_up_p(filp)))
+		info->count--;
+	spin_unlock_irqrestore(&ctc_tty_lock, flags);
+	info->blocked_open++;
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		/* Hangup or shutdown while we slept: give up. */
+		if (tty_hung_up_p(filp) ||
+		    !(info->flags & CTC_ASYNC_INITIALIZED)) {
+#ifdef MODEM_DO_RESTART
+			if (info->flags & CTC_ASYNC_HUP_NOTIFY)
+				retval = -EAGAIN;
+			else
+				retval = -ERESTARTSYS;
+#else
+			retval = -EAGAIN;
+#endif
+			break;
+		}
+		/* Success: not closing, and carrier present (or ignored). */
+		if (!(info->flags & CTC_ASYNC_CLOSING) &&
+		    (do_clocal || (info->msr & UART_MSR_DCD))) {
+			break;
+		}
+		if (signal_pending(current)) {
+			retval = -ERESTARTSYS;
+			break;
+		}
+#ifdef CTC_DEBUG_MODEM_OPEN
+		printk(KERN_DEBUG "ctc_tty_block_til_ready blocking: %s%d, count = %d\n",
+		       CTC_TTY_NAME, info->line, info->count);
+#endif
+		schedule();
+	}
+	current->state = TASK_RUNNING;
+	remove_wait_queue(&info->open_wait, &wait);
+	if (!tty_hung_up_p(filp))
+		info->count++;
+	info->blocked_open--;
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "ctc_tty_block_til_ready after blocking: %s%d, count = %d\n",
+	       CTC_TTY_NAME, info->line, info->count);
+#endif
+	if (retval)
+		return retval;
+	info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
+	return 0;
+}
+
+/*
+ * This routine is called whenever a serial port is opened. It
+ * enables interrupts for a serial port, linking in its async structure into
+ * the IRQ chain. It also performs the serial-specific
+ * initialization for the tty structure.
+ */
+/*
+ * tty open callback: looks up the line, attaches the tty, starts up
+ * the port and waits for carrier.  Returns 0 or a negative errno.
+ */
+static int
+ctc_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	ctc_tty_info *info;
+	unsigned long saveflags;
+	int retval;
+	int line;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	line = tty->index;
+	/* Fix: reject line == CTC_TTY_MAX_DEVICES as well; the original
+	 * used '>' and admitted an out-of-bounds index into info[]. */
+	if (line < 0 || line >= CTC_TTY_MAX_DEVICES)
+		return -ENODEV;
+	info = &driver->info[line];
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_open"))
+		return -ENODEV;
+	if (!info->netdev)
+		return -ENODEV;
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "ctc_tty_open %s, count = %d\n", tty->name,
+	       info->count);
+#endif
+	/* Attach this tty to the line under the driver lock. */
+	spin_lock_irqsave(&ctc_tty_lock, saveflags);
+	info->count++;
+	tty->driver_data = info;
+	info->tty = tty;
+	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
+	/*
+	 * Start up serial port
+	 */
+	retval = ctc_tty_startup(info);
+	if (retval) {
+#ifdef CTC_DEBUG_MODEM_OPEN
+		printk(KERN_DEBUG "ctc_tty_open return after startup\n");
+#endif
+		return retval;
+	}
+	retval = ctc_tty_block_til_ready(tty, filp, info);
+	if (retval) {
+#ifdef CTC_DEBUG_MODEM_OPEN
+		printk(KERN_DEBUG "ctc_tty_open return after ctc_tty_block_til_ready \n");
+#endif
+		return retval;
+	}
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "ctc_tty_open %s successful...\n", tty->name);
+#endif
+	return 0;
+}
+
+/*
+ * tty close callback: drops one open reference and, on the last close,
+ * drains output, shuts the line down and wakes blocked openers.
+ */
+static void
+ctc_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+	ulong flags;
+	ulong timeout;
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close"))
+		return;
+	spin_lock_irqsave(&ctc_tty_lock, flags);
+	if (tty_hung_up_p(filp)) {
+		spin_unlock_irqrestore(&ctc_tty_lock, flags);
+#ifdef CTC_DEBUG_MODEM_OPEN
+		printk(KERN_DEBUG "ctc_tty_close return after tty_hung_up_p\n");
+#endif
+		return;
+	}
+	if ((tty->count == 1) && (info->count != 1)) {
+		/*
+		 * Uh, oh. tty->count is 1, which means that the tty
+		 * structure will be freed. Info->count should always
+		 * be one in these conditions. If it's greater than
+		 * one, we've got real problems, since it means the
+		 * serial port won't be shutdown.
+		 */
+		printk(KERN_ERR "ctc_tty_close: bad port count; tty->count is 1, "
+		       "info->count is %d\n", info->count);
+		info->count = 1;
+	}
+	if (--info->count < 0) {
+		printk(KERN_ERR "ctc_tty_close: bad port count for %s%d: %d\n",
+		       CTC_TTY_NAME, info->line, info->count);
+		info->count = 0;
+	}
+	if (info->count) {
+		/* Fix: the original called local_irq_restore() here, which
+		 * re-enabled interrupts but left ctc_tty_lock held forever. */
+		spin_unlock_irqrestore(&ctc_tty_lock, flags);
+#ifdef CTC_DEBUG_MODEM_OPEN
+		printk(KERN_DEBUG "ctc_tty_close after info->count != 0\n");
+#endif
+		return;
+	}
+	info->flags |= CTC_ASYNC_CLOSING;
+	tty->closing = 1;
+	/*
+	 * At this point we stop accepting input. To do this, we
+	 * disable the receive line status interrupts, and tell the
+	 * interrupt driver to stop checking the data ready bit in the
+	 * line status register.
+	 */
+	if (info->flags & CTC_ASYNC_INITIALIZED) {
+		tty_wait_until_sent(tty, 30*HZ); /* 30 seconds timeout */
+		/*
+		 * Before we drop DTR, make sure the UART transmitter
+		 * has completely drained; this is especially
+		 * important if there is a transmit FIFO!
+		 */
+		timeout = jiffies + HZ;
+		while (!(info->lsr & UART_LSR_TEMT)) {
+			spin_unlock_irqrestore(&ctc_tty_lock, flags);
+			msleep(500);
+			spin_lock_irqsave(&ctc_tty_lock, flags);
+			if (time_after(jiffies,timeout))
+				break;
+		}
+	}
+	ctc_tty_shutdown(info);
+	if (tty->driver->flush_buffer) {
+		skb_queue_purge(&info->tx_queue);
+		info->lsr |= UART_LSR_TEMT;
+	}
+	tty_ldisc_flush(tty);
+	info->tty = NULL;
+	tty->closing = 0;
+	if (info->blocked_open) {
+		/* Give pending openers a moment before waking them. */
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ/2);
+		wake_up_interruptible(&info->open_wait);
+	}
+	info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING);
+	wake_up_interruptible(&info->close_wait);
+	spin_unlock_irqrestore(&ctc_tty_lock, flags);
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "ctc_tty_close normal exit\n");
+#endif
+}
+
+/*
+ * ctc_tty_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+static void
+ctc_tty_hangup(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
+	unsigned long saveflags;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
+		return;
+	/* Tear the line down, forget all opens, then detach the tty
+	 * under the driver lock and wake anyone blocked in open(). */
+	ctc_tty_shutdown(info);
+	info->count = 0;
+	info->flags &= ~CTC_ASYNC_NORMAL_ACTIVE;
+	spin_lock_irqsave(&ctc_tty_lock, saveflags);
+	info->tty = NULL;
+	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
+	wake_up_interruptible(&info->open_wait);
+}
+
+
+/*
+ * For all online tty's, try sending data to
+ * the lower levels.
+ */
+/* Tasklet body: push queued output and deliver queued input for one
+ * line; reschedules itself while either direction has work pending. */
+static void
+ctc_tty_task(unsigned long arg)
+{
+	ctc_tty_info *info = (void *)arg;
+	unsigned long saveflags;
+	int busy;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	spin_lock_irqsave(&ctc_tty_lock, saveflags);
+	if (info && !ctc_tty_shuttingdown) {
+		busy = ctc_tty_tint(info);
+		/* An empty tx queue means the transmitter is idle again. */
+		if (!busy)
+			info->lsr |= UART_LSR_TEMT;
+		busy |= ctc_tty_readmodem(info);
+		if (busy)
+			tasklet_schedule(&info->tasklet);
+	}
+	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
+}
+
+/* tty_operations dispatch table: wires the serial-line emulation
+ * callbacks above into the tty layer. */
+static struct tty_operations ctc_ops = {
+	.open = ctc_tty_open,
+	.close = ctc_tty_close,
+	.write = ctc_tty_write,
+	.flush_chars = ctc_tty_flush_chars,
+	.write_room = ctc_tty_write_room,
+	.chars_in_buffer = ctc_tty_chars_in_buffer,
+	.flush_buffer = ctc_tty_flush_buffer,
+	.ioctl = ctc_tty_ioctl,
+	.throttle = ctc_tty_throttle,
+	.unthrottle = ctc_tty_unthrottle,
+	.set_termios = ctc_tty_set_termios,
+	.hangup = ctc_tty_hangup,
+	.tiocmget = ctc_tty_tiocmget,
+	.tiocmset = ctc_tty_tiocmset,
+};
+
+/*
+ * Allocate the driver state, register the tty driver and initialize
+ * all per-line structures.  Returns 0 or a negative value on failure.
+ */
+int
+ctc_tty_init(void)
+{
+	int i;
+	ctc_tty_info *info;
+	struct tty_driver *device;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
+	if (driver == NULL) {
+		printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
+		return -ENOMEM;
+	}
+	memset(driver, 0, sizeof(ctc_tty_driver));
+	device = alloc_tty_driver(CTC_TTY_MAX_DEVICES);
+	if (!device) {
+		/* Fix: don't leave the global pointing at freed memory. */
+		kfree(driver);
+		driver = NULL;
+		printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
+		return -ENOMEM;
+	}
+
+	device->devfs_name = "ctc/" CTC_TTY_NAME;
+	device->name = CTC_TTY_NAME;
+	device->major = CTC_TTY_MAJOR;
+	device->minor_start = 0;
+	device->type = TTY_DRIVER_TYPE_SERIAL;
+	device->subtype = SERIAL_TYPE_NORMAL;
+	device->init_termios = tty_std_termios;
+	device->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	device->flags = TTY_DRIVER_REAL_RAW;
+	/* Fix: this statement ended in a comma (comma operator chaining it
+	 * to the next call) in the original; use a proper semicolon. */
+	device->driver_name = "ctc_tty";
+	tty_set_operations(device, &ctc_ops);
+	if (tty_register_driver(device)) {
+		printk(KERN_WARNING "ctc_tty: Couldn't register serial-device\n");
+		put_tty_driver(device);
+		kfree(driver);
+		driver = NULL;
+		return -1;
+	}
+	driver->ctc_tty_device = device;
+	/* Initialize every line: locks, queues, wait queues and the lazy
+	 * netdev-stop timer. */
+	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) {
+		info = &driver->info[i];
+		init_MUTEX(&info->write_sem);
+		tasklet_init(&info->tasklet, ctc_tty_task,
+			     (unsigned long) info);
+		info->magic = CTC_ASYNC_MAGIC;
+		info->line = i;
+		info->tty = NULL;
+		info->count = 0;
+		info->blocked_open = 0;
+		init_waitqueue_head(&info->open_wait);
+		init_waitqueue_head(&info->close_wait);
+		skb_queue_head_init(&info->tx_queue);
+		skb_queue_head_init(&info->rx_queue);
+		init_timer(&info->stoptimer);
+		info->stoptimer.function = ctc_tty_stopdev;
+		info->stoptimer.data = (unsigned long)info;
+		info->mcr = UART_MCR_RTS;
+	}
+	return 0;
+}
+
+/*
+ * Bind a CTC network device to the tty line whose number matches the
+ * numeric suffix of dev->name.  Returns 0, a negative errno from name
+ * allocation, or -1 on invalid/duplicate registration.
+ */
+int
+ctc_tty_register_netdev(struct net_device *dev) {
+	int ttynum;
+	char *err;
+	char *p;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	if ((!dev) || (!dev->name)) {
+		printk(KERN_WARNING
+		       "ctc_tty_register_netdev called "
+		       "with NULL dev or NULL dev-name\n");
+		return -1;
+	}
+
+	/*
+	 * If the name is a format string the caller wants us to
+	 * do a name allocation : format string must end with %d
+	 */
+	if (strchr(dev->name, '%'))
+	{
+		/* Fix: renamed from 'err', which shadowed the outer
+		 * 'char *err' used for strtoul parsing below. */
+		int rc = dev_alloc_name(dev, dev->name); // dev->name is changed by this
+		if (rc < 0) {
+			printk(KERN_DEBUG "dev_alloc returned error %d\n", rc);
+			return rc;
+		}
+
+	}
+
+	/* Skip to the first digit and parse the unit number. */
+	for (p = dev->name; p && ((*p < '0') || (*p > '9')); p++);
+	ttynum = simple_strtoul(p, &err, 0);
+	if ((ttynum < 0) || (ttynum >= CTC_TTY_MAX_DEVICES) ||
+	    (err && *err)) {
+		/* Fix: message previously read "with number in name". */
+		printk(KERN_WARNING
+		       "ctc_tty_register_netdev called "
+		       "with invalid number in name '%s'\n", dev->name);
+		return -1;
+	}
+	if (driver->info[ttynum].netdev) {
+		printk(KERN_WARNING
+		       "ctc_tty_register_netdev called "
+		       "for already registered device '%s'\n",
+		       dev->name);
+		return -1;
+	}
+	driver->info[ttynum].netdev = dev;
+	return 0;
+}
+
+/* Detach a netdev from its tty line and discard any queued traffic. */
+void
+ctc_tty_unregister_netdev(struct net_device *dev) {
+	int i;
+	unsigned long saveflags;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	spin_lock_irqsave(&ctc_tty_lock, saveflags);
+	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) {
+		ctc_tty_info *line = &driver->info[i];
+
+		if (line->netdev != dev)
+			continue;
+		line->netdev = NULL;
+		skb_queue_purge(&line->tx_queue);
+		skb_queue_purge(&line->rx_queue);
+		break;
+	}
+	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
+}
+
+/* Module teardown: stop all activity, then unregister and free the
+ * tty driver state. */
+void
+ctc_tty_cleanup(void) {
+	unsigned long saveflags;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	/* Raise the shutdown flag under the lock so concurrent tasklets
+	 * and writers stop touching the queues first. */
+	spin_lock_irqsave(&ctc_tty_lock, saveflags);
+	ctc_tty_shuttingdown = 1;
+	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
+	tty_unregister_driver(driver->ctc_tty_device);
+	put_tty_driver(driver->ctc_tty_device);
+	kfree(driver);
+	driver = NULL;
+}
diff --git a/drivers/s390/net/ctctty.h b/drivers/s390/net/ctctty.h
new file mode 100644
index 000000000000..84b2f8f23ab3
--- /dev/null
+++ b/drivers/s390/net/ctctty.h
@@ -0,0 +1,37 @@
+/*
+ * $Id: ctctty.h,v 1.4 2003/09/18 08:01:10 mschwide Exp $
+ *
+ * CTC / ESCON network driver, tty interface.
+ *
+ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _CTCTTY_H_
+#define _CTCTTY_H_
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+/* Bind/unbind a CTC network device to one of the emulated tty lines. */
+extern int ctc_tty_register_netdev(struct net_device *);
+extern void ctc_tty_unregister_netdev(struct net_device *);
+/* Hand a received skb to the tty emulation layer. */
+extern void ctc_tty_netif_rx(struct sk_buff *);
+/* Driver-wide setup / teardown of the tty driver and its lines. */
+extern int ctc_tty_init(void);
+extern void ctc_tty_cleanup(void);
+/* Report carrier (DCD) state changes for a bound device. */
+extern void ctc_tty_setcarrier(struct net_device *, int);
+
+#endif
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
new file mode 100644
index 000000000000..1b0a9f16024c
--- /dev/null
+++ b/drivers/s390/net/cu3088.c
@@ -0,0 +1,166 @@
+/*
+ * $Id: cu3088.c,v 1.34 2004/06/15 13:16:27 pavlic Exp $
+ *
+ * CTC / LCS ccw_device driver
+ *
+ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Arnd Bergmann <arndb@de.ibm.com>
+ * Cornelia Huck <cohuck@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#include "cu3088.h"
+
+/* Human-readable names for the channel types; indexed by the
+ * enum channel_types values declared in cu3088.h. */
+const char *cu3088_type[] = {
+	"not a channel",
+	"CTC/A",
+	"ESCON channel",
+	"FICON channel",
+	"P390 LCS card",
+	"OSA LCS card",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
+/* static definitions */
+
+/* 3088 control-unit models handled by this driver; driver_info carries
+ * the channel_types classification for each model. */
+static struct ccw_device_id cu3088_ids[] = {
+	{ CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
+	{ CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
+	{ CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
+	{ CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
+	{ CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
+	{ /* end of list */ }
+};
+
+/* Forward declaration; the driver struct references group_write below. */
+static struct ccw_driver cu3088_driver;
+
+/* sysfs root device all cu3088 ccwgroup devices hang off. */
+struct device *cu3088_root_dev;
+
+/*
+ * sysfs 'group' store: parses "busid1,busid2\n" from user space and
+ * creates a ccwgroup device from the two ccw devices on behalf of the
+ * discipline (ctc or lcs) that owns this driver.
+ * NOTE(review): if a delimiter is missing the whole write is silently
+ * accepted (returns count) without creating anything — presumably
+ * deliberate "ignore malformed input"; verify against callers.
+ */
+static ssize_t
+group_write(struct device_driver *drv, const char *buf, size_t count)
+{
+	const char *start, *end;
+	char bus_ids[2][BUS_ID_SIZE], *argv[2];
+	int i;
+	int ret;
+	struct ccwgroup_driver *cdrv;
+
+	cdrv = to_ccwgroupdrv(drv);
+	if (!cdrv)
+		return -EINVAL;
+	start = buf;
+	/* First bus id ends at ',', second at '\n'. */
+	for (i=0; i<2; i++) {
+		static const char delim[] = {',', '\n'};
+		int len;
+
+		if (!(end = strchr(start, delim[i])))
+			return count;
+		/* +1 leaves room for strlcpy's terminating NUL. */
+		len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1);
+		strlcpy (bus_ids[i], start, len);
+		argv[i] = bus_ids[i];
+		start = end + 1;
+	}
+
+	ret = ccwgroup_create(cu3088_root_dev, cdrv->driver_id,
+			      &cu3088_driver, 2, argv);
+
+	return (ret == 0) ? count : ret;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, group_write);
+
+/* Register-unregister for ctc&lcs */
+/* Register a discipline (ctc or lcs) and expose its 'group' attribute;
+ * the registration is rolled back if the sysfs file cannot be created. */
+int
+register_cu3088_discipline(struct ccwgroup_driver *dcp)
+{
+	int rc;
+
+	if (!dcp)
+		return -EINVAL;
+
+	/* Register discipline.*/
+	rc = ccwgroup_driver_register(dcp);
+	if (rc)
+		return rc;
+
+	rc = driver_create_file(&dcp->driver, &driver_attr_group);
+	if (rc)
+		ccwgroup_driver_unregister(dcp);
+
+	return rc;
+
+}
+
+/* Reverse of register_cu3088_discipline(): remove the 'group' attribute
+ * first, then unregister the discipline driver. */
+void
+unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
+{
+	if (!dcp)
+		return;
+
+	driver_remove_file(&dcp->driver, &driver_attr_group);
+	ccwgroup_driver_unregister(dcp);
+}
+
+/* ccw_driver matching the 3088 control units listed in cu3088_ids;
+ * probe/remove are delegated to the generic ccwgroup helpers. */
+static struct ccw_driver cu3088_driver = {
+	.owner	     = THIS_MODULE,
+	.ids	     = cu3088_ids,
+	.name        = "cu3088",
+	.probe	     = ccwgroup_probe_ccwdev,
+	.remove	     = ccwgroup_remove_ccwdev,
+};
+
+/* module setup */
+static int __init
+cu3088_init (void)
+{
+	int rc;
+
+	/* Create the sysfs root first; unwind it again if the ccw driver
+	 * fails to register. */
+	cu3088_root_dev = s390_root_dev_register("cu3088");
+	if (IS_ERR(cu3088_root_dev))
+		return PTR_ERR(cu3088_root_dev);
+	rc = ccw_driver_register(&cu3088_driver);
+	if (rc)
+		s390_root_dev_unregister(cu3088_root_dev);
+
+	return rc;
+}
+
+static void __exit
+cu3088_exit (void)
+{
+	/* Tear down in reverse order of cu3088_init(). */
+	ccw_driver_unregister(&cu3088_driver);
+	s390_root_dev_unregister(cu3088_root_dev);
+}
+
+MODULE_DEVICE_TABLE(ccw,cu3088_ids);
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_LICENSE("GPL");
+
+module_init(cu3088_init);
+module_exit(cu3088_exit);
+
+EXPORT_SYMBOL_GPL(cu3088_type);
+EXPORT_SYMBOL_GPL(register_cu3088_discipline);
+EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
new file mode 100644
index 000000000000..0ec49a8b3adc
--- /dev/null
+++ b/drivers/s390/net/cu3088.h
@@ -0,0 +1,41 @@
+#ifndef _CU3088_H
+#define _CU3088_H
+
+/**
+ * Enum for classifying detected devices.
+ */
+enum channel_types {
+ /* Device is not a channel */
+ channel_type_none,
+
+ /* Device is a CTC/A */
+ channel_type_parallel,
+
+ /* Device is a ESCON channel */
+ channel_type_escon,
+
+ /* Device is a FICON channel */
+ channel_type_ficon,
+
+ /* Device is a P390 LCS card */
+ channel_type_p390,
+
+ /* Device is a OSA2 card */
+ channel_type_osa2,
+
+ /* Device is a channel, but we don't know
+ * anything about it */
+ channel_type_unknown,
+
+ /* Device is an unsupported model */
+ channel_type_unsupported,
+
+ /* number of type entries */
+ num_channel_types
+};
+
+extern const char *cu3088_type[num_channel_types];
+extern int register_cu3088_discipline(struct ccwgroup_driver *);
+extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
+
+#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
new file mode 100644
index 000000000000..fa09440d82e5
--- /dev/null
+++ b/drivers/s390/net/fsm.c
@@ -0,0 +1,220 @@
+/**
+ * $Id: fsm.c,v 1.6 2003/10/15 11:37:29 mschwide Exp $
+ *
+ * A generic FSM based on fsm used in isdn4linux
+ *
+ */
+
+#include "fsm.h"
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+
+MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION("Finite state machine helper functions");
+MODULE_LICENSE("GPL");
+
+/*
+ * init_fsm - allocate and initialize a finite state machine instance
+ *
+ * Allocates the instance, the fsm description and the jump table via
+ * kmalloc(..., order), i.e. @order is really a set of gfp allocation
+ * flags.  The jump table is then filled from @tmpl; a template entry
+ * whose state or event is out of range aborts the whole setup.
+ * Returns the new instance, or NULL on any failure (everything already
+ * allocated is released via kfree_fsm()).
+ */
+fsm_instance *
+init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
+	 int nr_events, const fsm_node *tmpl, int tmpl_len, int order)
+{
+	int i;
+	fsm_instance *this;
+	fsm_function_t *m;
+	fsm *f;
+
+	this = (fsm_instance *)kmalloc(sizeof(fsm_instance), order);
+	if (this == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc instance\n", name);
+		return NULL;
+	}
+	memset(this, 0, sizeof(fsm_instance));
+	strlcpy(this->name, name, sizeof(this->name));
+
+	f = (fsm *)kmalloc(sizeof(fsm), order);
+	if (f == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc fsm\n", name);
+		/* kfree_fsm copes with a partially constructed instance. */
+		kfree_fsm(this);
+		return NULL;
+	}
+	memset(f, 0, sizeof(fsm));
+	f->nr_events = nr_events;
+	f->nr_states = nr_states;
+	f->event_names = event_names;
+	f->state_names = state_names;
+	this->f = f;
+
+	m = (fsm_function_t *)kmalloc(
+		sizeof(fsm_function_t) * nr_states * nr_events, order);
+	if (m == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc jumptable\n", name);
+		kfree_fsm(this);
+		return NULL;
+	}
+	memset(m, 0, sizeof(fsm_function_t) * f->nr_states * f->nr_events);
+	f->jumpmatrix = m;
+
+	/* Jump table layout is m[nr_states * event + state]; fsm_event()
+	 * in fsm.h indexes it the same way. */
+	for (i = 0; i < tmpl_len; i++) {
+		if ((tmpl[i].cond_state >= nr_states) ||
+		    (tmpl[i].cond_event >= nr_events)   ) {
+			printk(KERN_ERR
+				"fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n",
+				name, i, (long)tmpl[i].cond_state, (long)f->nr_states,
+				(long)tmpl[i].cond_event, (long)f->nr_events);
+			kfree_fsm(this);
+			return NULL;
+		} else
+			m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] =
+				tmpl[i].function;
+	}
+	return this;
+}
+
+/*
+ * kfree_fsm - release an instance created by init_fsm()
+ *
+ * Safe on a partially constructed instance (NULL f or NULL jumpmatrix,
+ * as happens on init_fsm() error paths); a NULL instance only produces
+ * a warning.
+ */
+void
+kfree_fsm(fsm_instance *this)
+{
+	if (this) {
+		if (this->f) {
+			if (this->f->jumpmatrix)
+				kfree(this->f->jumpmatrix);
+			kfree(this->f);
+		}
+		kfree(this);
+	} else
+		printk(KERN_WARNING
+			"fsm: kfree_fsm called with NULL argument\n");
+}
+
+#if FSM_DEBUG_HISTORY
+/*
+ * fsm_print_history - dump the recorded state/event ring buffer
+ *
+ * Once the buffer has wrapped (history_size >= FSM_HISTORY_SIZE) the
+ * oldest entry sits at history_index, otherwise at 0.  Entries with
+ * event == -1 are plain state changes recorded by fsm_newstate().
+ * The history is cleared after printing.
+ */
+void
+fsm_print_history(fsm_instance *fi)
+{
+	int idx = 0;
+	int i;
+
+	if (fi->history_size >= FSM_HISTORY_SIZE)
+		idx = fi->history_index;
+
+	printk(KERN_DEBUG "fsm(%s): History:\n", fi->name);
+	for (i = 0; i < fi->history_size; i++) {
+		int e = fi->history[idx].event;
+		int s = fi->history[idx++].state;
+		idx %= FSM_HISTORY_SIZE;
+		if (e == -1)
+			printk(KERN_DEBUG "  S=%s\n",
+			       fi->f->state_names[s]);
+		else
+			printk(KERN_DEBUG "  S=%s E=%s\n",
+			       fi->f->state_names[s],
+			       fi->f->event_names[e]);
+	}
+	fi->history_size = fi->history_index = 0;
+}
+
+/*
+ * fsm_record_history - append one state/event pair to the ring buffer
+ * (event == -1 marks a plain state change from fsm_newstate()).
+ */
+void
+fsm_record_history(fsm_instance *fi, int state, int event)
+{
+	fi->history[fi->history_index].state = state;
+	fi->history[fi->history_index++].event = event;
+	fi->history_index %= FSM_HISTORY_SIZE;
+	if (fi->history_size < FSM_HISTORY_SIZE)
+		fi->history_size++;
+}
+#endif
+
+/*
+ * fsm_getstate_str - name of the FSM's current state, or "Invalid"
+ * if the stored state number is out of range.
+ */
+const char *
+fsm_getstate_str(fsm_instance *fi)
+{
+	int st = atomic_read(&fi->state);
+	if (st >= fi->f->nr_states)
+		return "Invalid";
+	return fi->f->state_names[st];
+}
+
+/*
+ * fsm_expire_timer - timer callback: deliver the stored expire_event
+ * (with its event_arg) to the owning FSM.  Runs in timer context.
+ */
+static void
+fsm_expire_timer(fsm_timer *this)
+{
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
+		this->fi->name, this);
+#endif
+	fsm_event(this->fi, this->expire_event, this->event_arg);
+}
+
+/*
+ * fsm_settimer - bind a timer to an FSM and initialize it
+ *
+ * Sets fsm_expire_timer() as the callback; the timer is not armed here
+ * (see fsm_addtimer()).  NOTE(review): .function/.data are assigned
+ * before init_timer() is called — fsm_addtimer() re-assigns both, so
+ * the ordering appears harmless, but verify init_timer() does not rely
+ * on being called first.
+ */
+void
+fsm_settimer(fsm_instance *fi, fsm_timer *this)
+{
+	this->fi = fi;
+	this->tl.function = (void *)fsm_expire_timer;
+	this->tl.data = (long)this;
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
+		this);
+#endif
+	init_timer(&this->tl);
+}
+
+/* fsm_deltimer - cancel a pending FSM timer (wraps del_timer()). */
+void
+fsm_deltimer(fsm_timer *this)
+{
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
+		this);
+#endif
+	del_timer(&this->tl);
+}
+
+/*
+ * fsm_addtimer - arm @this to deliver @event with @arg after @millisec ms
+ *
+ * Always returns 0; the "-1 if timer is already active" case documented
+ * in fsm.h is not implemented here.
+ */
+int
+fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
+		this->fi->name, this, millisec);
+#endif
+
+	init_timer(&this->tl);
+	this->tl.function = (void *)fsm_expire_timer;
+	this->tl.data = (long)this;
+	this->expire_event = event;
+	this->event_arg = arg;
+	/* Convert milliseconds to jiffies. */
+	this->tl.expires = jiffies + (millisec * HZ) / 1000;
+	add_timer(&this->tl);
+	return 0;
+}
+
+/* FIXME: this function is never used, why */
+/*
+ * fsm_modtimer - restart a timer: like fsm_addtimer(), but a possibly
+ * pending timer is cancelled first.
+ */
+void
+fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
+		this->fi->name, this, millisec);
+#endif
+
+	del_timer(&this->tl);
+	init_timer(&this->tl);
+	this->tl.function = (void *)fsm_expire_timer;
+	this->tl.data = (long)this;
+	this->expire_event = event;
+	this->event_arg = arg;
+	this->tl.expires = jiffies + (millisec * HZ) / 1000;
+	add_timer(&this->tl);
+}
+
+EXPORT_SYMBOL(init_fsm);
+EXPORT_SYMBOL(kfree_fsm);
+EXPORT_SYMBOL(fsm_settimer);
+EXPORT_SYMBOL(fsm_deltimer);
+EXPORT_SYMBOL(fsm_addtimer);
+EXPORT_SYMBOL(fsm_modtimer);
+EXPORT_SYMBOL(fsm_getstate_str);
+
+#if FSM_DEBUG_HISTORY
+EXPORT_SYMBOL(fsm_print_history);
+EXPORT_SYMBOL(fsm_record_history);
+#endif
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
new file mode 100644
index 000000000000..f9a011001eb6
--- /dev/null
+++ b/drivers/s390/net/fsm.h
@@ -0,0 +1,265 @@
+/* $Id: fsm.h,v 1.1.1.1 2002/03/13 19:33:09 mschwide Exp $
+ */
+#ifndef _FSM_H_
+#define _FSM_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <asm/atomic.h>
+
+/**
+ * Define this to get debugging messages.
+ */
+#define FSM_DEBUG 0
+
+/**
+ * Define this to get debugging messages for
+ * timer handling.
+ */
+#define FSM_TIMER_DEBUG 0
+
+/**
+ * Define these to record a history of
+ * Events/Statechanges and print it if a
+ * action_function is not found.
+ */
+#define FSM_DEBUG_HISTORY 0
+#define FSM_HISTORY_SIZE 40
+
+struct fsm_instance_t;
+
+/**
+ * Definition of an action function, called by a FSM
+ */
+typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *);
+
+/**
+ * Internal jump table for a FSM
+ */
+typedef struct {
+ fsm_function_t *jumpmatrix;
+ int nr_events;
+ int nr_states;
+ const char **event_names;
+ const char **state_names;
+} fsm;
+
+#if FSM_DEBUG_HISTORY
+/**
+ * Element of State/Event history used for debugging.
+ */
+typedef struct {
+ int state;
+ int event;
+} fsm_history;
+#endif
+
+/**
+ * Representation of a FSM
+ */
+typedef struct fsm_instance_t {
+ fsm *f;
+ atomic_t state;
+ char name[16];
+ void *userdata;
+ int userint;
+#if FSM_DEBUG_HISTORY
+ int history_index;
+ int history_size;
+ fsm_history history[FSM_HISTORY_SIZE];
+#endif
+} fsm_instance;
+
+/**
+ * Description of a state-event combination
+ */
+typedef struct {
+ int cond_state;
+ int cond_event;
+ fsm_function_t function;
+} fsm_node;
+
+/**
+ * Description of a FSM Timer.
+ */
+typedef struct {
+ fsm_instance *fi;
+ struct timer_list tl;
+ int expire_event;
+ void *event_arg;
+} fsm_timer;
+
+/**
+ * Creates an FSM
+ *
+ * @param name Name of this instance for logging purposes.
+ * @param state_names An array of names for all states for logging purposes.
+ * @param event_names An array of names for all events for logging purposes.
+ * @param nr_states Number of states for this instance.
+ * @param nr_events Number of events for this instance.
+ * @param tmpl An array of fsm_nodes, describing this FSM.
+ * @param tmpl_len Length of the describing array.
+ * @param order Parameter for allocation of the FSM data structs.
+ */
+extern fsm_instance *
+init_fsm(char *name, const char **state_names,
+ const char **event_names,
+ int nr_states, int nr_events, const fsm_node *tmpl,
+ int tmpl_len, int order);
+
+/**
+ * Releases an FSM
+ *
+ * @param fi Pointer to an FSM, previously created with init_fsm.
+ */
+extern void kfree_fsm(fsm_instance *fi);
+
+#if FSM_DEBUG_HISTORY
+extern void
+fsm_print_history(fsm_instance *fi);
+
+extern void
+fsm_record_history(fsm_instance *fi, int state, int event);
+#endif
+
+/**
+ * Emits an event to a FSM.
+ * If an action function is defined for the current state/event combination,
+ * this function is called.
+ *
+ * @param fi Pointer to FSM which should receive the event.
+ * @param event The event to be delivered.
+ * @param arg A generic argument, handed to the action function.
+ *
+ * @return 0 on success,
+ * 1 if current state or event is out of range
+ * !0 if state and event in range, but no action defined.
+ */
+extern __inline__ int
+fsm_event(fsm_instance *fi, int event, void *arg)
+{
+	fsm_function_t r;
+	int state = atomic_read(&fi->state);
+
+	/* Range-check both indices before touching the jump table. */
+	if ((state >= fi->f->nr_states) ||
+	    (event >= fi->f->nr_events) ) {
+		printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n",
+			fi->name, (long)state,(long)fi->f->nr_states, event,
+			(long)fi->f->nr_events);
+#if FSM_DEBUG_HISTORY
+		fsm_print_history(fi);
+#endif
+		return 1;
+	}
+	/* Same [event][state] layout as filled in by init_fsm(). */
+	r = fi->f->jumpmatrix[fi->f->nr_states * event + state];
+	if (r) {
+#if FSM_DEBUG
+		printk(KERN_DEBUG "fsm(%s): state %s event %s\n",
+			fi->name, fi->f->state_names[state],
+			fi->f->event_names[event]);
+#endif
+#if FSM_DEBUG_HISTORY
+		fsm_record_history(fi, state, event);
+#endif
+		/* Run the action function for this state/event pair. */
+		r(fi, event, arg);
+		return 0;
+	} else {
+#if FSM_DEBUG || FSM_DEBUG_HISTORY
+		printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n",
+			fi->name, fi->f->event_names[event],
+			fi->f->state_names[state]);
+#endif
+#if FSM_DEBUG_HISTORY
+		fsm_print_history(fi);
+#endif
+		/* In range, but no action registered for the pair. */
+		return !0;
+	}
+}
+
+/**
+ * Modifies the state of an FSM.
+ * This does <em>not</em> trigger an event or calls an action function.
+ *
+ * @param fi Pointer to FSM
+ * @param state The new state for this FSM.
+ */
+extern __inline__ void
+fsm_newstate(fsm_instance *fi, int newstate)
+{
+	/* Plain state change: no action function is invoked. */
+	atomic_set(&fi->state,newstate);
+#if FSM_DEBUG_HISTORY
+	/* Recorded with event -1 so fsm_print_history() prints "S=" only. */
+	fsm_record_history(fi, newstate, -1);
+#endif
+#if FSM_DEBUG
+	printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
+		fi->f->state_names[newstate]);
+#endif
+}
+
+/**
+ * Retrieves the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM.
+ */
+extern __inline__ int
+fsm_getstate(fsm_instance *fi)
+{
+	/* Atomic read; the raw state number, not its name. */
+	return atomic_read(&fi->state);
+}
+
+/**
+ * Retrieves the name of the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM in a human readable form.
+ */
+extern const char *fsm_getstate_str(fsm_instance *fi);
+
+/**
+ * Initializes a timer for an FSM.
+ * This prepares an fsm_timer for usage with fsm_addtimer.
+ *
+ * @param fi Pointer to FSM
+ * @param timer The timer to be initialized.
+ */
+extern void fsm_settimer(fsm_instance *fi, fsm_timer *);
+
+/**
+ * Clears a pending timer of an FSM instance.
+ *
+ * @param timer The timer to clear.
+ */
+extern void fsm_deltimer(fsm_timer *timer);
+
+/**
+ * Adds and starts a timer to an FSM instance.
+ *
+ * @param timer The timer to be added. The field fi of that timer
+ * must have been set to point to the instance.
+ * @param millisec Duration, after which the timer should expire.
+ * @param event Event, to trigger if timer expires.
+ * @param arg Generic argument, provided to expiry function.
+ *
+ * @return 0 on success, -1 if timer is already active.
+ */
+extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
+
+/**
+ * Modifies a timer of an FSM.
+ *
+ * @param timer The timer to modify.
+ * @param millisec Duration, after which the timer should expire.
+ * @param event Event, to trigger if timer expires.
+ * @param arg Generic argument, provided to expiry function.
+ */
+extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
+
+#endif /* _FSM_H_ */
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
new file mode 100644
index 000000000000..1ac6563ee3e0
--- /dev/null
+++ b/drivers/s390/net/iucv.c
@@ -0,0 +1,2567 @@
+/*
+ * $Id: iucv.c,v 1.43 2005/02/09 14:47:43 braunu Exp $
+ *
+ * IUCV network driver
+ *
+ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ * Original source:
+ * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
+ * Xenia Tkatschow (xenia@us.ibm.com)
+ * 2Gb awareness and general cleanup:
+ * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ *
+ * Documentation used:
+ * The original source
+ * CP Programming Service, IBM document # SC24-5760
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.43 $
+ *
+ */
+
+/* #define DEBUG */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/config.h>
+
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+#include "iucv.h"
+#include <asm/io.h>
+#include <asm/s390_ext.h>
+#include <asm/ebcdic.h>
+#include <asm/smp.h>
+#include <asm/ccwdev.h> //for root device stuff
+
+/* FLAGS:
+ * All flags are defined in the field IPFLAGS1 of each function
+ * and can be found in CP Programming Services.
+ * IPSRCCLS - Indicates you have specified a source class
+ * IPFGMCL - Indicates you have specified a target class
+ * IPFGPID - Indicates you have specified a pathid
+ * IPFGMID - Indicates you have specified a message ID
+ * IPANSLST - Indicates that you are using an address list for
+ * reply data
+ * IPBUFLST - Indicates that you are using an address list for
+ * message data
+ */
+
+#define IPSRCCLS 0x01
+#define IPFGMCL 0x01
+#define IPFGPID 0x02
+#define IPFGMID 0x04
+#define IPANSLST 0x08
+#define IPBUFLST 0x40
+
+/*
+ * iucv_bus_match - match callback of the iucv bus.  Always returns 0,
+ * so the driver core never auto-binds devices on this bus.
+ */
+static int
+iucv_bus_match (struct device *dev, struct device_driver *drv)
+{
+	return 0;
+}
+
+struct bus_type iucv_bus = {
+ .name = "iucv",
+ .match = iucv_bus_match,
+};
+
+struct device *iucv_root;
+
+/* General IUCV interrupt structure */
+typedef struct {
+ __u16 ippathid;
+ __u8 res1;
+ __u8 iptype;
+ __u32 res2;
+ __u8 ipvmid[8];
+ __u8 res3[24];
+} iucv_GeneralInterrupt;
+
+static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL;
+
+/* Spin Lock declaration */
+
+static DEFINE_SPINLOCK(iucv_lock);
+
+static int messagesDisabled = 0;
+
+/***************INTERRUPT HANDLING ***************/
+
+typedef struct {
+ struct list_head queue;
+ iucv_GeneralInterrupt data;
+} iucv_irqdata;
+
+static struct list_head iucv_irq_queue;
+static DEFINE_SPINLOCK(iucv_irq_queue_lock);
+
+/*
+ *Internal function prototypes
+ */
+static void iucv_tasklet_handler(unsigned long);
+static void iucv_irq_handler(struct pt_regs *, __u16);
+
+static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0);
+
+/************ FUNCTION ID'S ****************************/
+
+#define ACCEPT 10
+#define CONNECT 11
+#define DECLARE_BUFFER 12
+#define PURGE 9
+#define QUERY 0
+#define QUIESCE 13
+#define RECEIVE 5
+#define REJECT 8
+#define REPLY 6
+#define RESUME 14
+#define RETRIEVE_BUFFER 2
+#define SEND 4
+#define SETMASK 16
+#define SEVER 15
+
+/**
+ * Structure: handler
+ * members: list - list management.
+ * structure: id
+ * userid - 8 char array of machine identification
+ * user_data - 16 char array for user identification
+ * mask - 24 char array used to compare the 2 previous
+ * interrupt_table - vector of interrupt functions.
+ * pgm_data - ulong, application data that is passed
+ * to the interrupt handlers
+*/
+typedef struct handler_t {
+ struct list_head list;
+ struct {
+ __u8 userid[8];
+ __u8 user_data[16];
+ __u8 mask[24];
+ } id;
+ iucv_interrupt_ops_t *interrupt_table;
+ void *pgm_data;
+} handler;
+
+/**
+ * iucv_handler_table: List of registered handlers.
+ */
+static struct list_head iucv_handler_table;
+
+/**
+ * iucv_pathid_table: an array of *handler pointing into
+ * iucv_handler_table for fast indexing by pathid;
+ */
+static handler **iucv_pathid_table;
+
+static unsigned long max_connections;
+
+/**
+ * iucv_cpuid: contains the logical cpu number of the cpu which
+ * has declared the iucv buffer by issuing DECLARE_BUFFER.
+ * If no cpu has done the initialization iucv_cpuid contains -1.
+ */
+static int iucv_cpuid = -1;
+/**
+ * register_flag: is 0 when external interrupt has not been registered
+ */
+static int register_flag;
+
+/****************FIVE 40-BYTE PARAMETER STRUCTURES******************/
+/* Data struct 1: iparml_control
+ * Used for iucv_accept
+ * iucv_connect
+ * iucv_quiesce
+ * iucv_resume
+ * iucv_sever
+ * iucv_retrieve_buffer
+ * Data struct 2: iparml_dpl (data in parameter list)
+ * Used for iucv_send_prmmsg
+ * iucv_send2way_prmmsg
+ * iucv_send2way_prmmsg_array
+ * iucv_reply_prmmsg
+ * Data struct 3: iparml_db (data in a buffer)
+ * Used for iucv_receive
+ * iucv_receive_array
+ * iucv_reject
+ * iucv_reply
+ * iucv_reply_array
+ * iucv_send
+ * iucv_send_array
+ * iucv_send2way
+ * iucv_send2way_array
+ * iucv_declare_buffer
+ * Data struct 4: iparml_purge
+ * Used for iucv_purge
+ * iucv_query
+ * Data struct 5: iparml_set_mask
+ * Used for iucv_set_mask
+ */
+
+typedef struct {
+ __u16 ippathid;
+ __u8 ipflags1;
+ __u8 iprcode;
+ __u16 ipmsglim;
+ __u16 res1;
+ __u8 ipvmid[8];
+ __u8 ipuser[16];
+ __u8 iptarget[8];
+} iparml_control;
+
+typedef struct {
+ __u16 ippathid;
+ __u8 ipflags1;
+ __u8 iprcode;
+ __u32 ipmsgid;
+ __u32 iptrgcls;
+ __u8 iprmmsg[8];
+ __u32 ipsrccls;
+ __u32 ipmsgtag;
+ __u32 ipbfadr2;
+ __u32 ipbfln2f;
+ __u32 res;
+} iparml_dpl;
+
+typedef struct {
+ __u16 ippathid;
+ __u8 ipflags1;
+ __u8 iprcode;
+ __u32 ipmsgid;
+ __u32 iptrgcls;
+ __u32 ipbfadr1;
+ __u32 ipbfln1f;
+ __u32 ipsrccls;
+ __u32 ipmsgtag;
+ __u32 ipbfadr2;
+ __u32 ipbfln2f;
+ __u32 res;
+} iparml_db;
+
+typedef struct {
+ __u16 ippathid;
+ __u8 ipflags1;
+ __u8 iprcode;
+ __u32 ipmsgid;
+ __u8 ipaudit[3];
+ __u8 res1[5];
+ __u32 res2;
+ __u32 ipsrccls;
+ __u32 ipmsgtag;
+ __u32 res3[3];
+} iparml_purge;
+
+typedef struct {
+ __u8 ipmask;
+ __u8 res1[2];
+ __u8 iprcode;
+ __u32 res2[9];
+} iparml_set_mask;
+
+typedef struct {
+ union {
+ iparml_control p_ctrl;
+ iparml_dpl p_dpl;
+ iparml_db p_db;
+ iparml_purge p_purge;
+ iparml_set_mask p_set_mask;
+ } param;
+ atomic_t in_use;
+ __u32 res;
+} __attribute__ ((aligned(8))) iucv_param;
+#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param))
+
+static iucv_param * iucv_param_pool;
+
+MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Debugging stuff
+ *******************************************************************************/
+
+
+#ifdef DEBUG
+static int debuglevel = 0;
+
+module_param(debuglevel, int, 0);
+MODULE_PARM_DESC(debuglevel,
+ "Specifies the debug level (0=off ... 3=all)");
+
+static void
+iucv_dumpit(char *title, void *buf, int len)
+{
+ int i;
+ __u8 *p = (__u8 *)buf;
+
+ if (debuglevel < 3)
+ return;
+
+ printk(KERN_DEBUG "%s\n", title);
+ printk(" ");
+ for (i = 0; i < len; i++) {
+ if (!(i % 16) && i != 0)
+ printk ("\n ");
+ else if (!(i % 4) && i != 0)
+ printk(" ");
+ printk("%02X", *p++);
+ }
+ if (len % 16)
+ printk ("\n");
+ return;
+}
+#define iucv_debug(lvl, fmt, args...) \
+do { \
+ if (debuglevel >= lvl) \
+ printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \
+} while (0)
+
+#else
+
+#define iucv_debug(lvl, fmt, args...)
+#define iucv_dumpit(title, buf, len)
+
+#endif
+
+/*
+ * Internal functions
+ *******************************************************************************/
+
+/**
+ * print start banner
+ */
+static void
+iucv_banner(void)
+{
+	char vbuf[] = "$Revision: 1.43 $";
+	char *version = vbuf;
+
+	/* Extract "<spc>1.43<spc>" from the CVS $Revision$ keyword by
+	 * keeping the text between ':' and the trailing '$'. */
+	if ((version = strchr(version, ':'))) {
+		char *p = strchr(version + 1, '$');
+		if (p)
+			*p = '\0';
+	} else
+		version = " ??? ";
+	printk(KERN_INFO
+	       "IUCV lowlevel driver Version%s initialized\n", version);
+}
+
+/**
+ * iucv_init - Initialization
+ *
+ * Allocates and initializes various data structures.
+ */
+static int
+iucv_init(void)
+{
+	int ret;
+
+	/* A non-NULL interrupt buffer means init already completed. */
+	if (iucv_external_int_buffer)
+		return 0;
+
+	if (!MACHINE_IS_VM) {
+		printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n");
+		return -EPROTONOSUPPORT;
+	}
+
+	ret = bus_register(&iucv_bus);
+	if (ret) {
+		printk(KERN_ERR "IUCV: failed to register bus.\n");
+		return ret;
+	}
+
+	iucv_root = s390_root_dev_register("iucv");
+	if (IS_ERR(iucv_root)) {
+		printk(KERN_ERR "IUCV: failed to register iucv root.\n");
+		bus_unregister(&iucv_bus);
+		return PTR_ERR(iucv_root);
+	}
+
+	/* Note: GFP_DMA used to get memory below 2G */
+	iucv_external_int_buffer = kmalloc(sizeof(iucv_GeneralInterrupt),
+					   GFP_KERNEL|GFP_DMA);
+	if (!iucv_external_int_buffer) {
+		printk(KERN_WARNING
+		       "%s: Could not allocate external interrupt buffer\n",
+		       __FUNCTION__);
+		s390_root_dev_unregister(iucv_root);
+		bus_unregister(&iucv_bus);
+		return -ENOMEM;
+	}
+	memset(iucv_external_int_buffer, 0, sizeof(iucv_GeneralInterrupt));
+
+	/* Initialize parameter pool (also below 2G, see note above). */
+	iucv_param_pool = kmalloc(sizeof(iucv_param) * PARAM_POOL_SIZE,
+				  GFP_KERNEL|GFP_DMA);
+	if (!iucv_param_pool) {
+		printk(KERN_WARNING "%s: Could not allocate param pool\n",
+		       __FUNCTION__);
+		kfree(iucv_external_int_buffer);
+		iucv_external_int_buffer = NULL;
+		s390_root_dev_unregister(iucv_root);
+		bus_unregister(&iucv_bus);
+		return -ENOMEM;
+	}
+	memset(iucv_param_pool, 0, sizeof(iucv_param) * PARAM_POOL_SIZE);
+
+	/* Initialize irq queue */
+	INIT_LIST_HEAD(&iucv_irq_queue);
+
+	/* Initialize handler table */
+	INIT_LIST_HEAD(&iucv_handler_table);
+
+	iucv_banner();
+	return 0;
+}
+
+/**
+ * iucv_exit - De-Initialization
+ *
+ * Frees everything allocated from iucv_init.
+ */
+static int iucv_retrieve_buffer (void);
+
+/*
+ * iucv_exit - give the interrupt buffer back to CP, then free all
+ * memory allocated by iucv_init().  Pointers are reset to NULL so a
+ * later iucv_init() starts from a clean state.
+ */
+static void
+iucv_exit(void)
+{
+	iucv_retrieve_buffer();
+	if (iucv_external_int_buffer) {
+		kfree(iucv_external_int_buffer);
+		iucv_external_int_buffer = NULL;
+	}
+	if (iucv_param_pool) {
+		kfree(iucv_param_pool);
+		iucv_param_pool = NULL;
+	}
+	s390_root_dev_unregister(iucv_root);
+	bus_unregister(&iucv_bus);
+	printk(KERN_INFO "IUCV lowlevel driver unloaded\n");
+}
+
+/**
+ * grab_param: - Get a parameter buffer from the pre-allocated pool.
+ *
+ * This function searches for an unused element in the pre-allocated pool
+ * of parameter buffers. If one is found, it marks it "in use" and returns
+ * a pointer to it. The calling function is responsible for releasing it
+ * when it has finished its usage.
+ *
+ * Returns: A pointer to iucv_param.
+ */
+static __inline__ iucv_param *
+grab_param(void)
+{
+	iucv_param *ptr;
+	static int hint = 0;
+
+	/* Start scanning just after the last slot handed out.  "hint" is
+	 * updated without locking; a race only costs extra scan steps.
+	 * Slots are claimed atomically via compare-and-swap on in_use.
+	 * NOTE(review): this busy-waits if all slots are in use. */
+	ptr = iucv_param_pool + hint;
+	do {
+		ptr++;
+		if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
+			ptr = iucv_param_pool;
+	} while (atomic_compare_and_swap(0, 1, &ptr->in_use));
+	hint = ptr - iucv_param_pool;
+
+	memset(&ptr->param, 0, sizeof(ptr->param));
+	return ptr;
+}
+
+/**
+ * release_param - Release a parameter buffer.
+ * @p: A pointer to a struct iucv_param, previously obtained by calling
+ * grab_param().
+ *
+ * This function marks the specified parameter buffer "unused".
+ */
+static __inline__ void
+release_param(void *p)
+{
+	/* Mark the pool slot free again; pairs with grab_param(). */
+	atomic_set(&((iucv_param *)p)->in_use, 0);
+}
+
+/**
+ * iucv_add_handler: - Add a new handler
+ * @new_handler: handle that is being entered into chain.
+ *
+ * Places the new handler on iucv_handler_table, if an identical handler is not
+ * found.
+ *
+ * Returns: 0 on success, !0 on failure (handler already in chain).
+ */
+static int
+iucv_add_handler (handler *new)
+{
+	ulong flags;
+
+	iucv_debug(1, "entering");
+	iucv_dumpit("handler:", new, sizeof(handler));
+
+	spin_lock_irqsave (&iucv_lock, flags);
+	if (!list_empty(&iucv_handler_table)) {
+		struct list_head *lh;
+
+		/**
+		 * Search list for handler with identical id. If one
+		 * is found, the new handler is _not_ added.  The id
+		 * (userid + user_data + mask) is compared bytewise.
+		 */
+		list_for_each(lh, &iucv_handler_table) {
+			handler *h = list_entry(lh, handler, list);
+			if (!memcmp(&new->id, &h->id, sizeof(h->id))) {
+				iucv_debug(1, "ret 1");
+				spin_unlock_irqrestore (&iucv_lock, flags);
+				return 1;
+			}
+		}
+	}
+	/**
+	 * If we get here, no identical handler was found; add the new one.
+	 */
+	INIT_LIST_HEAD(&new->list);
+	list_add(&new->list, &iucv_handler_table);
+	spin_unlock_irqrestore (&iucv_lock, flags);
+
+	iucv_debug(1, "exiting");
+	return 0;
+}
+
+/**
+ * b2f0:
+ * @code: identifier of IUCV call to CP.
+ * @parm: pointer to 40 byte iparml area passed to CP
+ *
+ * Calls CP to execute IUCV commands.
+ *
+ * Returns: return code from CP's IUCV call
+ */
+static __inline__ ulong
+b2f0(__u32 code, void *parm)
+{
+	iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
+
+	/* GR1 = real address of the parameter list (LRA translates the
+	 * virtual address), GR0 = IUCV function code, then issue the
+	 * IUCV instruction (opcode 0xb2f0). */
+	asm volatile (
+		"LRA 1,0(%1)\n\t"
+		"LR 0,%0\n\t"
+		".long 0xb2f01000"
+		:
+		: "d" (code), "a" (parm)
+		: "0", "1"
+		);
+
+	iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
+
+	/* iprcode is the 4th byte of every 40-byte iparml layout. */
+	return (unsigned long)*((__u8 *)(parm + 3));
+}
+
+/*
+ * Name: iucv_add_pathid
+ * Purpose: Adds a path id to the system.
+ * Input: pathid - pathid that is going to be entered into system
+ * handle - address of handler that the pathid will be associated
+ * with.
+ * pgm_data - token passed in by application.
+ * Output: 0: successful addition of pathid
+ * - EINVAL - pathid entry is being used by another application
+ * - ENOMEM - storage allocation for a new pathid table failed
+*/
+/* Caller must hold iucv_lock; see the iucv_add_pathid() wrapper. */
+static int
+__iucv_add_pathid(__u16 pathid, handler *handler)
+{
+
+	iucv_debug(1, "entering");
+
+	iucv_debug(1, "handler is pointing to %p", handler);
+
+	/* pathids run from 0 to max_connections - 1. */
+	if (pathid > (max_connections - 1))
+		return -EINVAL;
+
+	if (iucv_pathid_table[pathid]) {
+		iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]);
+		printk(KERN_WARNING
+		       "%s: Pathid being used, error.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	iucv_pathid_table[pathid] = handler;
+
+	iucv_debug(1, "exiting");
+	return 0;
+} /* end of add_pathid function */
+
+/* Locked wrapper around __iucv_add_pathid(). */
+static int
+iucv_add_pathid(__u16 pathid, handler *handler)
+{
+	ulong flags;
+	int rc;
+
+	spin_lock_irqsave (&iucv_lock, flags);
+	rc = __iucv_add_pathid(pathid, handler);
+	spin_unlock_irqrestore (&iucv_lock, flags);
+	return rc;
+}
+
+/* Clear the table slot for @pathid; out-of-range ids are ignored. */
+static void
+iucv_remove_pathid(__u16 pathid)
+{
+	ulong flags;
+
+	if (pathid > (max_connections - 1))
+		return;
+
+	spin_lock_irqsave (&iucv_lock, flags);
+	iucv_pathid_table[pathid] = NULL;
+	spin_unlock_irqrestore (&iucv_lock, flags);
+}
+
+/**
+ * iucv_declare_buffer_cpuid
+ * Register at VM for subsequent IUCV operations. This is executed
+ * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
+ */
+static void
+iucv_declare_buffer_cpuid (void *result)
+{
+	iparml_db *parm;
+
+	parm = (iparml_db *)grab_param();
+	/* CP needs the real (physical) address of the interrupt buffer. */
+	parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
+	/* A b2f0 rc of 1 means the detailed reason is in iprcode —
+	 * presumably a parameter error; report iprcode in that case. */
+	if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1)
+		*((ulong *)result) = parm->iprcode;
+	release_param(parm);
+}
+
+/**
+ * iucv_retrieve_buffer_cpuid:
+ * Unregister IUCV usage at VM. This is always executed on the same
+ * cpu that registered the buffer to VM.
+ * Called from iucv_retrieve_buffer().
+ */
+/* The @cpu argument is unused; it only satisfies the smp_call_function
+ * callback signature.  The b2f0 return code is ignored here. */
+static void
+iucv_retrieve_buffer_cpuid (void *cpu)
+{
+	iparml_control *parm;
+
+	parm = (iparml_control *)grab_param();
+	b2f0(RETRIEVE_BUFFER, parm);
+	release_param(parm);
+}
+
+/**
+ * Name: iucv_declare_buffer
+ * Purpose: Specifies the guests real address of an external
+ * interrupt.
+ * Input: void
+ * Output: iprcode - return code from b2f0 call
+ */
+/*
+ * Declares the interrupt buffer on a reserved cpu (recorded in
+ * iucv_cpuid) so that iucv_retrieve_buffer() can later undo the
+ * declaration on that same cpu.  Returns 0 if the buffer is already
+ * declared, otherwise the result of the DECLARE_BUFFER call; the cpu
+ * reservation is dropped again on failure.
+ */
+static int
+iucv_declare_buffer (void)
+{
+	unsigned long flags;
+	ulong b2f0_result;
+
+	iucv_debug(1, "entering");
+	b2f0_result = -ENODEV;
+	spin_lock_irqsave (&iucv_lock, flags);
+	if (iucv_cpuid == -1) {
+		/* Reserve any cpu for use by iucv. */
+		iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
+		spin_unlock_irqrestore (&iucv_lock, flags);
+		smp_call_function_on(iucv_declare_buffer_cpuid,
+			&b2f0_result, 0, 1, iucv_cpuid);
+		if (b2f0_result) {
+			smp_put_cpu(iucv_cpuid);
+			iucv_cpuid = -1;
+		}
+		iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
+	} else {
+		spin_unlock_irqrestore (&iucv_lock, flags);
+		b2f0_result = 0;
+	}
+	iucv_debug(1, "exiting");
+	return b2f0_result;
+}
+
+/**
+ * iucv_retrieve_buffer:
+ *
+ * Terminates all use of IUCV.
+ * Returns: return code from CP
+ */
+static int
+iucv_retrieve_buffer (void)
+{
+	iucv_debug(1, "entering");
+	if (iucv_cpuid != -1) {
+		/* Must run on the cpu that declared the buffer. */
+		smp_call_function_on(iucv_retrieve_buffer_cpuid,
+				     0, 0, 1, iucv_cpuid);
+		/* Release the cpu reserved by iucv_declare_buffer. */
+		smp_put_cpu(iucv_cpuid);
+		iucv_cpuid = -1;
+	}
+	iucv_debug(1, "exiting");
+	return 0;
+}
+
+/**
+ * iucv_remove_handler:
+ * @users_handler: handler to be removed
+ *
+ * Remove handler when application unregisters.
+ */
+/*
+ * Unlinks @handler from iucv_handler_table.  When the table becomes
+ * empty, the 0x4000 external interrupt is unregistered as well.
+ * Note: the handler structure itself is not freed here.
+ */
+static void
+iucv_remove_handler(handler *handler)
+{
+	unsigned long flags;
+
+	if ((!iucv_pathid_table) || (!handler))
+		return;
+
+	iucv_debug(1, "entering");
+
+	spin_lock_irqsave (&iucv_lock, flags);
+	list_del(&handler->list);
+	if (list_empty(&iucv_handler_table)) {
+		if (register_flag) {
+			unregister_external_interrupt(0x4000, iucv_irq_handler);
+			register_flag = 0;
+		}
+	}
+	spin_unlock_irqrestore (&iucv_lock, flags);
+
+	iucv_debug(1, "exiting");
+	return;
+}
+
+/**
+ * iucv_register_program:
+ * @pgmname: user identification
+ * @userid: machine identification
+ * @pgmmask: Indicates which bits in the pgmname and userid combined will be
+ * used to determine who is given control.
+ * @ops: Address of interrupt handler table.
+ * @pgm_data: Application data to be passed to interrupt handlers.
+ *
+ * Registers an application with IUCV.
+ * Returns:
+ * The address of handler, or NULL on failure.
+ * NOTE on pgmmask:
+ * If pgmname, userid and pgmmask are provided, pgmmask is entered into the
+ * handler as is.
+ * If pgmmask is NULL, the internal mask is set to all 0xff's
+ * When userid is NULL, the first 8 bytes of the internal mask are forced
+ * to 0x00.
+ * If pgmmask and userid are NULL, the first 8 bytes of the internal mask
+ * are forced to 0x00 and the last 16 bytes to 0xff.
+ */
+
+iucv_handle_t
+iucv_register_program (__u8 pgmname[16],
+		       __u8 userid[8],
+		       __u8 pgmmask[24],
+		       iucv_interrupt_ops_t * ops, void *pgm_data)
+{
+	ulong rc = 0;		/* return code from function calls */
+	handler *new_handler;
+
+	iucv_debug(1, "entering");
+
+	if (ops == NULL) {
+		/* interrupt table is not defined */
+		printk(KERN_WARNING "%s: Interrupt table is not defined, "
+		       "exiting\n", __FUNCTION__);
+		return NULL;
+	}
+	if (!pgmname) {
+		printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__);
+		return NULL;
+	}
+
+	/* Allocate handler entry (no cast needed on kmalloc in C). */
+	new_handler = kmalloc(sizeof(handler), GFP_ATOMIC);
+	if (new_handler == NULL) {
+		printk(KERN_WARNING "%s: storage allocation for new handler "
+		       "failed.\n", __FUNCTION__);
+		return NULL;
+	}
+
+	/* First registration: initialize IUCV and the pathid table. */
+	if (!iucv_pathid_table) {
+		if (iucv_init()) {
+			kfree(new_handler);
+			return NULL;
+		}
+
+		max_connections = iucv_query_maxconn();
+		iucv_pathid_table = kmalloc(max_connections * sizeof(handler *),
+				       GFP_ATOMIC);
+		if (iucv_pathid_table == NULL) {
+			printk(KERN_WARNING "%s: iucv_pathid_table storage "
+			       "allocation failed\n", __FUNCTION__);
+			kfree(new_handler);
+			return NULL;
+		}
+		memset (iucv_pathid_table, 0, max_connections * sizeof(handler *));
+	}
+	memset(new_handler, 0, sizeof (handler));
+	memcpy(new_handler->id.user_data, pgmname,
+		sizeof (new_handler->id.user_data));
+	/*
+	 * The mask handling is the same whether or not a userid was
+	 * given (the old code duplicated it in both branches): take the
+	 * caller's mask if provided, otherwise default to all 0xff.
+	 */
+	if (pgmmask)
+		memcpy (new_handler->id.mask, pgmmask,
+			sizeof (new_handler->id.mask));
+	else
+		memset (new_handler->id.mask, 0xFF,
+			sizeof (new_handler->id.mask));
+	if (userid) {
+		/* Convert the userid to upper-case EBCDIC for CP. */
+		memcpy (new_handler->id.userid, userid,
+			sizeof (new_handler->id.userid));
+		ASCEBC (new_handler->id.userid,
+			sizeof (new_handler->id.userid));
+		EBC_TOUPPER (new_handler->id.userid,
+			     sizeof (new_handler->id.userid));
+	} else
+		memset (new_handler->id.userid, 0x00,
+			sizeof (new_handler->id.userid));
+	/* fill in the rest of handler */
+	new_handler->pgm_data = pgm_data;
+	new_handler->interrupt_table = ops;
+
+	/*
+	 * Check if someone else is registered with same pgmname, userid
+	 * and mask. If someone is already registered with same pgmname,
+	 * userid and mask, registration will fail and NULL will be returned
+	 * to the application.
+	 * If identical handler not found, then handler is added to list.
+	 */
+	rc = iucv_add_handler(new_handler);
+	if (rc) {
+		printk(KERN_WARNING "%s: Someone already registered with same "
+		       "pgmname, userid, pgmmask\n", __FUNCTION__);
+		kfree (new_handler);
+		return NULL;
+	}
+
+	rc = iucv_declare_buffer();
+	if (rc) {
+		/* Map the CP return code to a readable reason. */
+		char *err = "Unknown";
+		iucv_remove_handler(new_handler);
+		kfree(new_handler);
+		switch(rc) {
+		case 0x03:
+			err = "Directory error";
+			break;
+		case 0x0a:
+			err = "Invalid length";
+			break;
+		case 0x13:
+			err = "Buffer already exists";
+			break;
+		case 0x3e:
+			err = "Buffer overlap";
+			break;
+		case 0x5c:
+			err = "Paging or storage error";
+			break;
+		}
+		printk(KERN_WARNING "%s: iucv_declare_buffer "
+		       "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
+		return NULL;
+	}
+	if (!register_flag) {
+		/* request the 0x4000 external interrupt */
+		rc = register_external_interrupt (0x4000, iucv_irq_handler);
+		if (rc) {
+			iucv_remove_handler(new_handler);
+			kfree (new_handler);
+			printk(KERN_WARNING "%s: "
+			       "register_external_interrupt returned %ld\n",
+			       __FUNCTION__, rc);
+			return NULL;
+
+		}
+		register_flag = 1;
+	}
+	iucv_debug(1, "exiting");
+	return new_handler;
+} /* end of register function */
+
+/**
+ * iucv_unregister_program:
+ * @handle: address of handler
+ *
+ * Unregister application with IUCV.
+ * Returns:
+ * 0 on success, -EINVAL, if specified handle is invalid.
+ */
+
+int
+iucv_unregister_program (iucv_handle_t handle)
+{
+	handler *h = NULL;
+	struct list_head *lh;
+	int i;
+	ulong flags;
+
+	iucv_debug(1, "entering");
+	/* Fix: log the handle under test — h is still NULL here, so the
+	 * old debug line always printed a NULL pointer. */
+	iucv_debug(1, "address of handler is %p", handle);
+
+	/* Checking if handle is valid */
+	spin_lock_irqsave (&iucv_lock, flags);
+	list_for_each(lh, &iucv_handler_table) {
+		if ((handler *)handle == list_entry(lh, handler, list)) {
+			h = (handler *)handle;
+			break;
+		}
+	}
+	if (!h) {
+		spin_unlock_irqrestore (&iucv_lock, flags);
+		if (handle)
+			printk(KERN_WARNING
+			       "%s: Handler not found in iucv_handler_table.\n",
+			       __FUNCTION__);
+		else
+			printk(KERN_WARNING
+			       "%s: NULL handle passed by application.\n",
+			       __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/**
+	 * First, walk thru iucv_pathid_table and sever any pathid which is
+	 * still pointing to the handler to be removed.
+	 */
+	for (i = 0; i < max_connections; i++)
+		if (iucv_pathid_table[i] == h) {
+			/* The lock is released around the sever call
+			 * (iucv_sever issues a CP request). */
+			spin_unlock_irqrestore (&iucv_lock, flags);
+			iucv_sever(i, h->id.user_data);
+			spin_lock_irqsave(&iucv_lock, flags);
+		}
+	spin_unlock_irqrestore (&iucv_lock, flags);
+
+	iucv_remove_handler(h);
+	kfree(h);
+
+	iucv_debug(1, "exiting");
+	return 0;
+}
+
+/**
+ * iucv_accept:
+ * @pathid: Path identification number
+ * @msglim_reqstd: The number of outstanding messages requested.
+ * @user_data: Data specified by the iucv_connect function.
+ * @flags1: Contains options for this path.
+ * - IPPRTY (0x20) Specifies if you want to send priority message.
+ * - IPRMDATA (0x80) Specifies whether your program can handle a message
+ * in the parameter list.
+ * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
+ * established.
+ * @handle: Address of handler.
+ * @pgm_data: Application data passed to interrupt handlers.
+ * @flags1_out: Pointer to an int. If not NULL, on return the options for
+ * the path are stored at the given location:
+ * - IPPRTY (0x20) Indicates you may send a priority message.
+ * @msglim: Pointer to an __u16. If not NULL, on return the maximum
+ * number of outstanding messages is stored at the given
+ * location.
+ *
+ * This function is issued after the user receives a Connection Pending external
+ * interrupt and now wishes to complete the IUCV communication path.
+ * Returns:
+ * return code from CP
+ */
+int
+iucv_accept(__u16 pathid, __u16 msglim_reqstd,
+	    __u8 user_data[16], int flags1,
+	    iucv_handle_t handle, void *pgm_data,
+	    int *flags1_out, __u16 * msglim)
+{
+	iparml_control *parm;
+	struct list_head *lh;
+	handler *h = NULL;
+	ulong rc;
+	ulong flags;
+
+	iucv_debug(1, "entering");
+	iucv_debug(1, "pathid = %d", pathid);
+
+	/* The handle must name a handler on the registered list. */
+	spin_lock_irqsave (&iucv_lock, flags);
+	list_for_each(lh, &iucv_handler_table) {
+		if (list_entry(lh, handler, list) == (handler *)handle) {
+			h = (handler *)handle;
+			break;
+		}
+	}
+	spin_unlock_irqrestore (&iucv_lock, flags);
+
+	if (!h) {
+		if (handle)
+			printk(KERN_WARNING
+			       "%s: Handler not found in iucv_handler_table.\n",
+			       __FUNCTION__);
+		else
+			printk(KERN_WARNING
+			       "%s: NULL handle passed by application.\n",
+			       __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* Build the ACCEPT parameter block and hand it to CP. */
+	parm = (iparml_control *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipmsglim = msglim_reqstd;
+	parm->ipflags1 = (__u8)flags1;
+	if (user_data)
+		memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
+
+	rc = b2f0(ACCEPT, parm);
+
+	if (!rc) {
+		/* Report the negotiated path attributes back. */
+		if (msglim)
+			*msglim = parm->ipmsglim;
+		if (pgm_data)
+			h->pgm_data = pgm_data;
+		if (flags1_out)
+			*flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
+	}
+	release_param(parm);
+
+	iucv_debug(1, "exiting");
+	return rc;
+}
+
+/**
+ * iucv_connect:
+ * @pathid: Path identification number
+ * @msglim_reqstd: Number of outstanding messages requested
+ * @user_data: 16-byte user data
+ * @userid: 8-byte of user identification
+ * @system_name: 8-byte identifying the system name
+ * @flags1: Specifies options for this path:
+ * - IPPRTY (0x20) Specifies if you want to send priority message.
+ * - IPRMDATA (0x80) Specifies whether your program can handle a message
+ * in the parameter list.
+ * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
+ * established.
+ * - IPLOCAL (0x01) Allows an application to force the partner to be on the
+ * local system. If local is specified then target class
+ * cannot be specified.
+ * @flags1_out: Pointer to an int. If not NULL, on return the options for
+ * the path are stored at the given location:
+ * - IPPRTY (0x20) Indicates you may send a priority message.
+ * @msglim: Pointer to an __u16. If not NULL, on return the maximum
+ * number of outstanding messages is stored at the given
+ * location.
+ * @handle: Address of handler.
+ * @pgm_data: Application data to be passed to interrupt handlers.
+ *
+ * This function establishes an IUCV path. Although the connect may complete
+ * successfully, you are not able to use the path until you receive an IUCV
+ * Connection Complete external interrupt.
+ * Returns: return code from CP, or one of the following
+ * - ENOMEM
+ * - return code from iucv_declare_buffer
+ * - EINVAL - invalid handle passed by application
+ * - EINVAL - pathid address is NULL
+ * - ENOMEM - pathid table storage allocation failed
+ * - return code from internal function add_pathid
+ */
+int
+iucv_connect (__u16 *pathid, __u16 msglim_reqstd,
+	      __u8 user_data[16], __u8 userid[8],
+	      __u8 system_name[8], int flags1,
+	      int *flags1_out, __u16 * msglim,
+	      iucv_handle_t handle, void *pgm_data)
+{
+	iparml_control *parm;
+	iparml_control local_parm;	/* stack copy of the CONNECT results */
+	struct list_head *lh;
+	ulong b2f0_result = 0;
+	ulong flags;
+	int add_pathid_result = 0;
+	handler *h = NULL;
+	__u8 no_memory[16] = "NO MEMORY";	/* user_data for emergency sever */
+
+	iucv_debug(1, "entering");
+
+	/* Checking if handle is valid */
+	spin_lock_irqsave (&iucv_lock, flags);
+	list_for_each(lh, &iucv_handler_table) {
+		if ((handler *)handle == list_entry(lh, handler, list)) {
+			h = (handler *)handle;
+			break;
+		}
+	}
+	spin_unlock_irqrestore (&iucv_lock, flags);
+
+	if (!h) {
+		if (handle)
+			printk(KERN_WARNING
+			       "%s: Handler not found in iucv_handler_table.\n",
+			       __FUNCTION__);
+		else
+			printk(KERN_WARNING
+			       "%s: NULL handle passed by application.\n",
+			       __FUNCTION__);
+		return -EINVAL;
+	}
+
+	if (pathid == NULL) {
+		printk(KERN_WARNING "%s: NULL pathid pointer\n",
+		       __FUNCTION__);
+		return -EINVAL;
+	}
+
+	parm = (iparml_control *)grab_param();
+
+	parm->ipmsglim = msglim_reqstd;
+
+	if (user_data)
+		memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
+
+	if (userid) {
+		/* CP expects the ids in upper-case EBCDIC. */
+		memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid));
+		ASCEBC(parm->ipvmid, sizeof(parm->ipvmid));
+		EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid));
+	}
+
+	if (system_name) {
+		memcpy(parm->iptarget, system_name, sizeof(parm->iptarget));
+		ASCEBC(parm->iptarget, sizeof(parm->iptarget));
+		EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget));
+	}
+
+	/* In order to establish an IUCV connection, the procedure is:
+	 *
+	 * b2f0(CONNECT)
+	 * take the ippathid from the b2f0 call
+	 * register the handler to the ippathid
+	 *
+	 * Unfortunately, the ConnectionEstablished message gets sent after the
+	 * b2f0(CONNECT) call but before the register is handled.
+	 *
+	 * In order for this race condition to be eliminated, the IUCV Control
+	 * Interrupts must be disabled for the above procedure.
+	 *
+	 * David Kennedy <dkennedy@linuxcare.com>
+	 */
+
+	/* Enable everything but IUCV Control messages */
+	iucv_setmask(~(AllInterrupts));
+	messagesDisabled = 1;
+
+	/* CONNECT and the pathid registration are done under the lock so
+	 * they look atomic to the interrupt path; the results are copied
+	 * to the stack so the shared parameter block can be released
+	 * before the lock is dropped. */
+	spin_lock_irqsave (&iucv_lock, flags);
+	parm->ipflags1 = (__u8)flags1;
+	b2f0_result = b2f0(CONNECT, parm);
+	memcpy(&local_parm, parm, sizeof(local_parm));
+	release_param(parm);
+	parm = &local_parm;
+	if (!b2f0_result)
+		add_pathid_result = __iucv_add_pathid(parm->ippathid, h);
+	spin_unlock_irqrestore (&iucv_lock, flags);
+
+	if (b2f0_result) {
+		/* CONNECT failed: re-enable all interrupts and bail out. */
+		iucv_setmask(~0);
+		messagesDisabled = 0;
+		return b2f0_result;
+	}
+
+	*pathid = parm->ippathid;
+
+	/* Enable everything again */
+	/* NOTE(review): messagesDisabled is left set on this path —
+	 * presumably cleared by the interrupt code; verify. */
+	iucv_setmask(IUCVControlInterruptsFlag);
+
+	if (msglim)
+		*msglim = parm->ipmsglim;
+	if (flags1_out)
+		*flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
+
+	if (add_pathid_result) {
+		/* Could not track the new path; sever it again. */
+		iucv_sever(*pathid, no_memory);
+		printk(KERN_WARNING "%s: add_pathid failed with rc ="
+			" %d\n", __FUNCTION__, add_pathid_result);
+		return(add_pathid_result);
+	}
+
+	iucv_debug(1, "exiting");
+	return b2f0_result;
+}
+
+/**
+ * iucv_purge:
+ * @pathid: Path identification number
+ * @msgid: Message ID of message to purge.
+ * @srccls: Message class of the message to purge.
+ * @audit: Pointer to an __u32. If not NULL, on return, information about
+ * asynchronous errors that may have affected the normal completion
+ * of this message is stored at the given location.
+ *
+ * Cancels a message you have sent.
+ * Returns: return code from CP
+ */
+int
+iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
+{
+	iparml_purge *parm;
+	ulong rc;
+
+	iucv_debug(1, "entering");
+	iucv_debug(1, "pathid = %d", pathid);
+
+	/* Identify the message to cancel by path, id and source class. */
+	parm = (iparml_purge *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipmsgid = msgid;
+	parm->ipsrccls = srccls;
+	parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
+
+	rc = b2f0(PURGE, parm);
+
+	if (!rc && audit) {
+		memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit));
+		/* parm->ipaudit has only 3 bytes */
+		*audit >>= 8;
+	}
+
+	release_param(parm);
+
+	iucv_debug(1, "b2f0_result = %ld", rc);
+	iucv_debug(1, "exiting");
+	return rc;
+}
+
+/**
+ * iucv_query_generic:
+ * @want_maxconn: Flag, describing which value is to be returned.
+ *
+ * Helper function for iucv_query_maxconn() and iucv_query_bufsize().
+ *
+ * Returns: The buffersize, if want_maxconn is 0; the maximum number of
+ * connections, if want_maxconn is 1 or an error-code < 0 on failure.
+ */
+static int
+iucv_query_generic(int want_maxconn)
+{
+	iparml_purge *parm = (iparml_purge *)grab_param();
+	int bufsize, maxconn;
+	int ccode;
+
+	/**
+	 * Call b2f0 and store R0 (max buffer size),
+	 * R1 (max connections) and CC.
+	 * %3 is the QUERY function code loaded into R0, %4 is the
+	 * parameter block (LRA puts its real address into R1).
+	 */
+	asm volatile (
+		"LRA 1,0(%4)\n\t"
+		"LR 0,%3\n\t"
+		".long 0xb2f01000\n\t"
+		"IPM %0\n\t"
+		"SRL %0,28\n\t"		/* isolate the condition code */
+		"ST 0,%1\n\t"
+		"ST 1,%2\n\t"
+		: "=d" (ccode), "=m" (bufsize), "=m" (maxconn)
+		: "d" (QUERY), "a" (parm)
+		: "0", "1", "cc"
+	);
+	release_param(parm);
+
+	/* Non-zero condition code: CP rejected the query. */
+	if (ccode)
+		return -EPERM;
+	if (want_maxconn)
+		return maxconn;
+	return bufsize;
+}
+
+/**
+ * iucv_query_maxconn:
+ *
+ * Determines the maximum number of connections that may be established.
+ *
+ * Returns: Maximum number of connections that can be established.
+ */
+ulong
+iucv_query_maxconn(void)
+{
+	return iucv_query_generic(1);
+}
+
+/**
+ * iucv_query_bufsize:
+ *
+ * Determines the size of the external interrupt buffer.
+ *
+ * Returns: Size of external interrupt buffer, in bytes.
+ */
+ulong
+iucv_query_bufsize (void)
+{
+	return iucv_query_generic(0);
+}
+
+/**
+ * iucv_quiesce:
+ * @pathid: Path identification number
+ * @user_data: 16-byte user data
+ *
+ * Temporarily suspends incoming messages on an IUCV path.
+ * You can later reactivate the path by invoking the iucv_resume function.
+ * Returns: return code from CP
+ */
+int
+iucv_quiesce (__u16 pathid, __u8 user_data[16])
+{
+	iparml_control *parm;
+	ulong rc;
+
+	iucv_debug(1, "entering");
+	iucv_debug(1, "pathid = %d", pathid);
+
+	parm = (iparml_control *)grab_param();
+
+	parm->ippathid = pathid;
+	memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
+
+	rc = b2f0(QUIESCE, parm);
+	release_param(parm);
+
+	iucv_debug(1, "b2f0_result = %ld", rc);
+	iucv_debug(1, "exiting");
+
+	return rc;
+}
+
+/**
+ * iucv_receive:
+ * @pathid: Path identification number.
+ * @buffer: Address of buffer to receive. Must be below 2G.
+ * @buflen: Length of buffer to receive.
+ * @msgid: Specifies the message ID.
+ * @trgcls: Specifies target class.
+ * @flags1_out: Receives options for path on return.
+ * - IPNORPY (0x10) Specifies whether a reply is required
+ * - IPPRTY (0x20) Specifies if you want to send priority message
+ * - IPRMDATA (0x80) Specifies the data is contained in the parameter list
+ * @residual_buffer: Receives the address of buffer updated by the number
+ * of bytes you have received on return.
+ * @residual_length: On return, receives one of the following values:
+ * - 0 If the receive buffer is the same length as
+ * the message.
+ * - Remaining bytes in buffer If the receive buffer is longer than the
+ * message.
+ * - Remaining bytes in message If the receive buffer is shorter than the
+ * message.
+ *
+ * This function receives messages that are being sent to you over established
+ * paths.
+ * Returns: return code from CP IUCV call; If the receive buffer is shorter
+ * than the message, always 5
+ * -EINVAL - buffer address is pointing to NULL
+ */
+int
+iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls,
+	      void *buffer, ulong buflen,
+	      int *flags1_out, ulong * residual_buffer, ulong * residual_length)
+{
+	iparml_db *parm;
+	ulong b2f0_result;
+	int moved = 0;	/* number of bytes moved from parmlist to buffer */
+
+	iucv_debug(2, "entering");
+
+	if (!buffer)
+		return -EINVAL;
+
+	parm = (iparml_db *)grab_param();
+
+	parm->ipbfadr1 = (__u32) (addr_t) buffer;
+	parm->ipbfln1f = (__u32) ((ulong) buflen);
+	parm->ipmsgid = msgid;
+	parm->ippathid = pathid;
+	parm->iptrgcls = trgcls;
+	parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL);
+
+	b2f0_result = b2f0(RECEIVE, parm);
+
+	/* rc 5 = buffer shorter than the message; partial data received. */
+	if (!b2f0_result || b2f0_result == 5) {
+		if (flags1_out) {
+			iucv_debug(2, "*flags1_out = %d", *flags1_out);
+			/* mask off the low three (internal) flag bits */
+			*flags1_out = (parm->ipflags1 & (~0x07));
+			iucv_debug(2, "*flags1_out = %d", *flags1_out);
+		}
+
+		if (!(parm->ipflags1 & IPRMDATA)) {	/*msg not in parmlist */
+			if (residual_length)
+				*residual_length = parm->ipbfln1f;
+
+			if (residual_buffer)
+				*residual_buffer = parm->ipbfadr1;
+		} else {
+			/* Message text was delivered in the parameter list
+			 * itself (at most 8 bytes starting at ipbfadr1);
+			 * copy it out to the user buffer. */
+			moved = min_t (unsigned long, buflen, 8);
+
+			memcpy ((char *) buffer,
+				(char *) &parm->ipbfadr1, moved);
+
+			if (buflen < 8)
+				b2f0_result = 5;
+
+			if (residual_length)
+				*residual_length = abs (buflen - 8);
+
+			if (residual_buffer)
+				*residual_buffer = (ulong) (buffer + moved);
+		}
+	}
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+	return b2f0_result;
+}
+
+/*
+ * Name: iucv_receive_array
+ * Purpose: This function receives messages that are being sent to you
+ * over established paths.
+ * Input: pathid - path identification number
+ * buffer - address of array of buffers
+ * buflen - total length of buffers
+ * msgid - specifies the message ID.
+ * trgcls - specifies target class
+ * Output:
+ * flags1_out: Options for path.
+ * IPNORPY - 0x10 specifies whether a reply is required
+ * IPPRTY - 0x20 specifies if you want to send priority message
+ * IPRMDATA - 0x80 specifies the data is contained in the parameter list
+ * residual_buffer - address points to the current list entry IUCV
+ * is working on.
+ * residual_length -
+ * Contains one of the following values, if the receive buffer is:
+ * The same length as the message, this field is zero.
+ * Longer than the message, this field contains the number of
+ * bytes remaining in the buffer.
+ * Shorter than the message, this field contains the residual
+ * count (that is, the number of bytes remaining in the
+ * message that does not fit into the buffer. In this case
+ * b2f0_result = 5.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer address is NULL
+ */
+int
+iucv_receive_array (__u16 pathid,
+		    __u32 msgid, __u32 trgcls,
+		    iucv_array_t * buffer, ulong buflen,
+		    int *flags1_out,
+		    ulong * residual_buffer, ulong * residual_length)
+{
+	iparml_db *parm;
+	ulong b2f0_result;
+	int i = 0, moved = 0, need_to_move = 8, dyn_len;
+
+	iucv_debug(2, "entering");
+
+	if (!buffer)
+		return -EINVAL;
+
+	parm = (iparml_db *)grab_param();
+
+	parm->ipbfadr1 = (__u32) ((ulong) buffer);
+	parm->ipbfln1f = (__u32) buflen;
+	parm->ipmsgid = msgid;
+	parm->ippathid = pathid;
+	parm->iptrgcls = trgcls;
+	parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL);
+
+	b2f0_result = b2f0(RECEIVE, parm);
+
+	/* rc 5 = receive buffers shorter than the message. */
+	if (!b2f0_result || b2f0_result == 5) {
+
+		if (flags1_out) {
+			iucv_debug(2, "*flags1_out = %d", *flags1_out);
+			/* mask off the low three (internal) flag bits */
+			*flags1_out = (parm->ipflags1 & (~0x07));
+			iucv_debug(2, "*flags1_out = %d", *flags1_out);
+		}
+
+		if (!(parm->ipflags1 & IPRMDATA)) {	/*msg not in parmlist */
+
+			if (residual_length)
+				*residual_length = parm->ipbfln1f;
+
+			if (residual_buffer)
+				*residual_buffer = parm->ipbfadr1;
+
+		} else {
+			/* copy msg from parmlist to users array. */
+
+			/* At most 8 bytes live in the parameter list
+			 * (starting at ipbfadr1); spread them over the
+			 * caller's scatter-list entries. */
+			while ((moved < 8) && (moved < buflen)) {
+				dyn_len =
+				    min_t (unsigned int,
+					   (buffer + i)->length, need_to_move);
+
+				memcpy ((char *)((ulong)((buffer + i)->address)),
+					((char *) &parm->ipbfadr1) + moved,
+					dyn_len);
+
+				moved += dyn_len;
+				need_to_move -= dyn_len;
+
+				/* advance this array element past the
+				 * bytes just copied */
+				(buffer + i)->address =
+					(__u32)
+					((ulong)(__u8 *) ((ulong)(buffer + i)->address)
+					+ dyn_len);
+
+				(buffer + i)->length -= dyn_len;
+				i++;
+			}
+
+			if (need_to_move)	/* buflen < 8 bytes */
+				b2f0_result = 5;
+
+			if (residual_length)
+				*residual_length = abs (buflen - 8);
+
+			if (residual_buffer) {
+				if (!moved)
+					*residual_buffer = (ulong) buffer;
+				else
+					*residual_buffer =
+					    (ulong) (buffer + (i - 1));
+			}
+
+		}
+	}
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+	return b2f0_result;
+}
+
+/**
+ * iucv_reject:
+ * @pathid: Path identification number.
+ * @msgid: Message ID of the message to reject.
+ * @trgcls: Target class of the message to reject.
+ * Returns: return code from CP
+ *
+ * Refuses a specified message. Between the time you are notified of a
+ * message and the time that you complete the message, the message may
+ * be rejected.
+ */
+int
+iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls)
+{
+	iparml_db *parm;
+	ulong rc;
+
+	iucv_debug(1, "entering");
+	iucv_debug(1, "pathid = %d", pathid);
+
+	/* Identify the message fully: path, id and target class. */
+	parm = (iparml_db *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipmsgid = msgid;
+	parm->iptrgcls = trgcls;
+	parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID);
+
+	rc = b2f0(REJECT, parm);
+	release_param(parm);
+
+	iucv_debug(1, "b2f0_result = %ld", rc);
+	iucv_debug(1, "exiting");
+
+	return rc;
+}
+
+/*
+ * Name: iucv_reply
+ * Purpose: This function responds to the two-way messages that you
+ * receive. You must identify completely the message to
+ * which you wish to reply. ie, pathid, msgid, and trgcls.
+ * Input: pathid - path identification number
+ * msgid - specifies the message ID.
+ * trgcls - specifies target class
+ * flags1 - option for path
+ * IPPRTY- 0x20 - specifies if you want to send priority message
+ * buffer - address of reply buffer
+ * buflen - length of reply buffer
+ * Output: ipbfadr2 - Address of buffer updated by the number
+ * of bytes you have moved.
+ * ipbfln2f - Contains one of the following values:
+ * If the answer buffer is the same length as the reply, this field
+ * contains zero.
+ * If the answer buffer is longer than the reply, this field contains
+ * the number of bytes remaining in the buffer.
+ * If the answer buffer is shorter than the reply, this field contains
+ * a residual count (that is, the number of bytes remaining in the
+ * reply that does not fit into the buffer. In this
+ * case b2f0_result = 5.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer address is NULL
+ */
+int
+iucv_reply (__u16 pathid,
+	    __u32 msgid, __u32 trgcls,
+	    int flags1,
+	    void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
+{
+	iparml_db *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	if (!buffer)
+		return -EINVAL;
+
+	parm = (iparml_db *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipmsgid = msgid;
+	parm->iptrgcls = trgcls;
+	parm->ipbfadr2 = (__u32) ((ulong) buffer);
+	parm->ipbfln2f = (__u32) buflen;	/* length of message */
+	parm->ipflags1 = (__u8) flags1;	/* priority message */
+
+	rc = b2f0(REPLY, parm);
+
+	/* rc 5: answer buffer too small, data still moved partially. */
+	if (!rc || rc == 5) {
+		if (ipbfadr2)
+			*ipbfadr2 = parm->ipbfadr2;
+		if (ipbfln2f)
+			*ipbfln2f = parm->ipbfln2f;
+	}
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+
+	return rc;
+}
+
+/*
+ * Name: iucv_reply_array
+ * Purpose: This function responds to the two-way messages that you
+ * receive. You must identify completely the message to
+ * which you wish to reply. ie, pathid, msgid, and trgcls.
+ * The array identifies a list of addresses and lengths of
+ * discontiguous buffers that contains the reply data.
+ * Input: pathid - path identification number
+ * msgid - specifies the message ID.
+ * trgcls - specifies target class
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * buffer - address of array of reply buffers
+ * buflen - total length of reply buffers
+ * Output: ipbfadr2 - Address of buffer which IUCV is currently working on.
+ * ipbfln2f - Contains one of the following values:
+ * If the answer buffer is the same length as the reply, this field
+ * contains zero.
+ * If the answer buffer is longer than the reply, this field contains
+ * the number of bytes remaining in the buffer.
+ * If the answer buffer is shorter than the reply, this field contains
+ * a residual count (that is, the number of bytes remaining in the
+ * reply that does not fit into the buffer. In this
+ * case b2f0_result = 5.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer address is NULL
+*/
+int
+iucv_reply_array (__u16 pathid,
+		  __u32 msgid, __u32 trgcls,
+		  int flags1,
+		  iucv_array_t * buffer,
+		  ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
+{
+	iparml_db *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	if (!buffer)
+		return -EINVAL;
+
+	parm = (iparml_db *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipmsgid = msgid;
+	parm->iptrgcls = trgcls;
+	parm->ipbfadr2 = (__u32) ((ulong) buffer);
+	parm->ipbfln2f = buflen;	/* length of message */
+	/* IPANSLST: the reply comes from a scatter list of buffers. */
+	parm->ipflags1 = (IPANSLST | flags1);
+
+	rc = b2f0(REPLY, parm);
+
+	/* rc 5: answer buffers too small, data still moved partially. */
+	if (!rc || rc == 5) {
+		if (ipbfadr2)
+			*ipbfadr2 = parm->ipbfadr2;
+		if (ipbfln2f)
+			*ipbfln2f = parm->ipbfln2f;
+	}
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+
+	return rc;
+}
+
+/*
+ * Name: iucv_reply_prmmsg
+ * Purpose: This function responds to the two-way messages that you
+ * receive. You must identify completely the message to
+ * which you wish to reply. ie, pathid, msgid, and trgcls.
+ * Prmmsg signifies the data is moved into the
+ * parameter list.
+ * Input: pathid - path identification number
+ * msgid - specifies the message ID.
+ * trgcls - specifies target class
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * prmmsg - 8-bytes of data to be placed into the parameter
+ * list.
+ * Output: NA
+ * Return: b2f0_result - return code from CP
+*/
+int
+iucv_reply_prmmsg (__u16 pathid,
+		   __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8])
+{
+	iparml_dpl *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	parm = (iparml_dpl *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipmsgid = msgid;
+	parm->iptrgcls = trgcls;
+	/* IPRMDATA: the 8-byte reply travels in the parameter list. */
+	parm->ipflags1 = (IPRMDATA | flags1);
+	memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg));
+
+	rc = b2f0(REPLY, parm);
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+
+	return rc;
+}
+
+/**
+ * iucv_resume:
+ * @pathid: Path identification number
+ * @user_data: 16-byte of user data
+ *
+ * This function restores communication over a quiesced path.
+ * Returns: return code from CP
+ */
+int
+iucv_resume (__u16 pathid, __u8 user_data[16])
+{
+	iparml_control *parm;
+	ulong b2f0_result = 0;
+
+	iucv_debug(1, "entering");
+	iucv_debug(1, "pathid = %d", pathid);
+
+	parm = (iparml_control *)grab_param();
+
+	/* Fix: copy all 16 bytes of user data. The old code used
+	 * sizeof(*user_data), which is 1 (user_data decays to __u8 *),
+	 * so only the first byte was ever copied. Matches iucv_quiesce. */
+	memcpy (parm->ipuser, user_data, sizeof (parm->ipuser));
+	parm->ippathid = pathid;
+
+	b2f0_result = b2f0(RESUME, parm);
+	release_param(parm);
+
+	iucv_debug(1, "exiting");
+
+	return b2f0_result;
+}
+
+/*
+ * Name: iucv_send
+ * Purpose: sends messages
+ * Input: pathid - ushort, pathid
+ * msgid - ulong *, id of message returned to caller
+ * trgcls - ulong, target message class
+ * srccls - ulong, source message class
+ * msgtag - ulong, message tag
+ * flags1 - Contains options for this path.
+ * IPPRTY - Ox20 - specifies if you want to send a priority message.
+ * buffer - pointer to buffer
+ * buflen - ulong, length of buffer
+ * Output: b2f0_result - return code from b2f0 call
+ * msgid - returns message id
+ */
+int
+iucv_send (__u16 pathid, __u32 * msgid,
+	   __u32 trgcls, __u32 srccls,
+	   __u32 msgtag, int flags1, void *buffer, ulong buflen)
+{
+	iparml_db *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	if (!buffer)
+		return -EINVAL;
+
+	parm = (iparml_db *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->iptrgcls = trgcls;
+	parm->ipsrccls = srccls;
+	parm->ipmsgtag = msgtag;
+	parm->ipbfadr1 = (__u32) ((ulong) buffer);
+	parm->ipbfln1f = (__u32) buflen;	/* length of message */
+	/* IPNORPY: one-way message, no reply expected. */
+	parm->ipflags1 = (IPNORPY | flags1);
+
+	rc = b2f0(SEND, parm);
+
+	if (!rc && msgid)
+		*msgid = parm->ipmsgid;
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+
+	return rc;
+}
+
+/*
+ * Name: iucv_send_array
+ * Purpose: This function transmits data to another application.
+ * The contents of buffer is the address of the array of
+ * addresses and lengths of discontiguous buffers that hold
+ * the message text. This is a one-way message and the
+ * receiver will not reply to the message.
+ * Input: pathid - path identification number
+ * trgcls - specifies target class
+ * srccls - specifies the source message class
+ * msgtag - specifies a tag to be associated with the message
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * buffer - address of array of send buffers
+ * buflen - total length of send buffers
+ * Output: msgid - specifies the message ID.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer address is NULL
+ */
+int
+iucv_send_array (__u16 pathid,
+		 __u32 * msgid,
+		 __u32 trgcls,
+		 __u32 srccls,
+		 __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen)
+{
+	iparml_db *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	if (!buffer)
+		return -EINVAL;
+
+	parm = (iparml_db *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->iptrgcls = trgcls;
+	parm->ipsrccls = srccls;
+	parm->ipmsgtag = msgtag;
+	parm->ipbfadr1 = (__u32) ((ulong) buffer);
+	parm->ipbfln1f = (__u32) buflen;	/* length of message */
+	/* One-way message from a scatter list of buffers. */
+	parm->ipflags1 = (IPNORPY | IPBUFLST | flags1);
+	rc = b2f0(SEND, parm);
+
+	if (!rc && msgid)
+		*msgid = parm->ipmsgid;
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+	return rc;
+}
+
+/*
+ * Name: iucv_send_prmmsg
+ * Purpose: This function transmits data to another application.
+ * Prmmsg specifies that the 8-bytes of data are to be moved
+ * into the parameter list. This is a one-way message and the
+ * receiver will not reply to the message.
+ * Input: pathid - path identification number
+ * trgcls - specifies target class
+ * srccls - specifies the source message class
+ * msgtag - specifies a tag to be associated with the message
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * prmmsg - 8-bytes of data to be placed into parameter list
+ * Output: msgid - specifies the message ID.
+ * Return: b2f0_result - return code from CP
+*/
+int
+iucv_send_prmmsg (__u16 pathid,
+		  __u32 * msgid,
+		  __u32 trgcls,
+		  __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8])
+{
+	iparml_dpl *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	parm = (iparml_dpl *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->iptrgcls = trgcls;
+	parm->ipsrccls = srccls;
+	parm->ipmsgtag = msgtag;
+	/* One-way message; the 8 bytes ride in the parameter list. */
+	parm->ipflags1 = (IPRMDATA | IPNORPY | flags1);
+	memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
+
+	rc = b2f0(SEND, parm);
+
+	if (!rc && msgid)
+		*msgid = parm->ipmsgid;
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+
+	return rc;
+}
+
+/*
+ * iucv_send2way
+ * Send a buffer-resident message over an established path and supply
+ * a second buffer into which IUCV moves the partner's reply.
+ *
+ * pathid: path identification number
+ * msgid:  out - message ID assigned by CP (may be NULL)
+ * trgcls: target message class
+ * srccls: source message class
+ * msgtag: tag associated with the message
+ * flags1: path options (IPPRTY to send a priority message)
+ * buffer/buflen: send buffer address and length
+ * ansbuf/anslen: reply buffer address and length
+ *
+ * Returns the b2f0 return code from CP, or -EINVAL when buffer or
+ * ansbuf is NULL.
+ */
+int
+iucv_send2way (__u16 pathid,
+	       __u32 * msgid,
+	       __u32 trgcls,
+	       __u32 srccls,
+	       __u32 msgtag,
+	       int flags1,
+	       void *buffer, ulong buflen, void *ansbuf, ulong anslen)
+{
+	iparml_db *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	if (!buffer || !ansbuf)
+		return -EINVAL;
+
+	parm = (iparml_db *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipflags1 = flags1;	/* priority message */
+	parm->iptrgcls = trgcls;
+	parm->ipsrccls = srccls;
+	parm->ipmsgtag = msgtag;
+	/* outgoing message text ... */
+	parm->ipbfadr1 = (__u32) ((ulong) buffer);
+	parm->ipbfln1f = (__u32) buflen;
+	/* ... and where IUCV is to place the reply */
+	parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
+	parm->ipbfln2f = (__u32) anslen;
+
+	rc = b2f0(SEND, parm);
+	if (!rc && msgid)
+		*msgid = parm->ipmsgid;
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+
+	return rc;
+}
+
+/*
+ * iucv_send2way_array
+ * Two-way send using scatter lists: "buffer" points to an array of
+ * (address, length) descriptors holding the message text (IPBUFLST),
+ * and "ansbuf" points to a descriptor array into which IUCV moves the
+ * partner's reply (IPANSLST).
+ *
+ * pathid: path identification number
+ * msgid:  out - message ID assigned by CP (may be NULL)
+ * trgcls: target message class
+ * srccls: source message class
+ * msgtag: tag to be associated with the message
+ * flags1: path options (IPPRTY to send a priority message)
+ * buffer/buflen: send descriptor array and total send length
+ * ansbuf/anslen: reply descriptor array and total reply length
+ *
+ * Returns the b2f0 return code from CP, or -EINVAL when buffer or
+ * ansbuf is NULL.
+ */
+int
+iucv_send2way_array (__u16 pathid,
+		     __u32 * msgid,
+		     __u32 trgcls,
+		     __u32 srccls,
+		     __u32 msgtag,
+		     int flags1,
+		     iucv_array_t * buffer,
+		     ulong buflen, iucv_array_t * ansbuf, ulong anslen)
+{
+	iparml_db *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	if (!buffer || !ansbuf)
+		return -EINVAL;
+
+	parm = (iparml_db *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipflags1 = (IPBUFLST | IPANSLST | flags1);
+	parm->iptrgcls = trgcls;
+	parm->ipsrccls = srccls;
+	parm->ipmsgtag = msgtag;
+	parm->ipbfadr1 = (__u32) ((ulong) buffer);
+	parm->ipbfln1f = (__u32) buflen;	/* length of message */
+	parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
+	parm->ipbfln2f = (__u32) anslen;
+
+	rc = b2f0(SEND, parm);
+	if (!rc && msgid)
+		*msgid = parm->ipmsgid;
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+	return rc;
+}
+
+/*
+ * iucv_send2way_prmmsg
+ * Send a two-way message whose 8 bytes of payload travel inside the
+ * IUCV parameter list (IPRMDATA).  The receiver is expected to reply,
+ * and IUCV moves that reply into ansbuf.
+ *
+ * Input: pathid - path identification number
+ *        trgcls - specifies target class
+ *        srccls - specifies the source message class
+ *        msgtag - specifies a tag to be associated with the message
+ *        flags1 - option for path
+ *                 IPPRTY- specifies if you want to send priority message
+ *                 NOTE(review): declared ulong here while every other
+ *                 send variant takes int flags1 - confirm against the
+ *                 iucv.h prototype.
+ *        prmmsg - 8-bytes of data to be placed in parameter list
+ *        ansbuf - address of buffer to reply with
+ *        anslen - length of buffer to reply with
+ * Output: msgid - specifies the message ID.
+ * Return: b2f0_result - return code from CP
+ *         (-EINVAL) - ansbuf address is NULL
+*/
+int
+iucv_send2way_prmmsg (__u16 pathid,
+		      __u32 * msgid,
+		      __u32 trgcls,
+		      __u32 srccls,
+		      __u32 msgtag,
+		      ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen)
+{
+	iparml_dpl *parm;
+	ulong b2f0_result;
+
+	iucv_debug(2, "entering");
+
+	if (!ansbuf)
+		return -EINVAL;
+
+	parm = (iparml_dpl *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->iptrgcls = trgcls;
+	parm->ipsrccls = srccls;
+	parm->ipmsgtag = msgtag;
+	/* buffer 2 receives the reply */
+	parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
+	parm->ipbfln2f = (__u32) anslen;
+	parm->ipflags1 = (IPRMDATA | flags1);	/* message in prmlist */
+	memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
+
+	b2f0_result = b2f0(SEND, parm);
+
+	/* hand the CP-assigned message ID back only on success */
+	if ((!b2f0_result) && (msgid))
+		*msgid = parm->ipmsgid;
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+
+	return b2f0_result;
+}
+
+/*
+ * iucv_send2way_prmmsg_array
+ * Two-way send with the 8-byte payload carried in the parameter list
+ * (IPRMDATA) and the reply scattered into an array of (address,
+ * length) buffer descriptors (IPANSLST).
+ *
+ * pathid: path identification number
+ * msgid:  out - message ID assigned by CP (may be NULL)
+ * trgcls: target message class
+ * srccls: source message class
+ * msgtag: tag to be associated with the message
+ * flags1: path options (IPPRTY to send a priority message)
+ * prmmsg: 8 bytes of data placed into the parameter list
+ * ansbuf/anslen: reply descriptor array and total reply length
+ *
+ * Returns the b2f0 return code from CP, or -EINVAL when ansbuf is
+ * NULL.
+ */
+int
+iucv_send2way_prmmsg_array (__u16 pathid,
+			    __u32 * msgid,
+			    __u32 trgcls,
+			    __u32 srccls,
+			    __u32 msgtag,
+			    int flags1,
+			    __u8 prmmsg[8],
+			    iucv_array_t * ansbuf, ulong anslen)
+{
+	iparml_dpl *parm;
+	ulong rc;
+
+	iucv_debug(2, "entering");
+
+	if (!ansbuf)
+		return -EINVAL;
+
+	parm = (iparml_dpl *)grab_param();
+
+	parm->ippathid = pathid;
+	parm->ipflags1 = (IPRMDATA | IPANSLST | flags1);
+	parm->iptrgcls = trgcls;
+	parm->ipsrccls = srccls;
+	parm->ipmsgtag = msgtag;
+	parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
+	parm->ipbfln2f = (__u32) anslen;
+	memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
+
+	rc = b2f0(SEND, parm);
+	if (!rc && msgid)
+		*msgid = parm->ipmsgid;
+	release_param(parm);
+
+	iucv_debug(2, "exiting");
+	return rc;
+}
+
+/*
+ * iucv_setmask_cpuid
+ * Set the IUCV interrupt mask; intended to be run on a specific CPU
+ * (iucv_setmask() invokes it through smp_call_function_on).
+ *
+ * "result" is used in both directions: on entry its first byte holds
+ * the mask to establish, and on return the whole ulong is overwritten
+ * with the b2f0(SETMASK) return code.  The input byte is consumed
+ * before the return code is stored, so the aliasing is safe - do not
+ * reorder these statements.
+ */
+void
+iucv_setmask_cpuid (void *result)
+{
+	iparml_set_mask *parm;
+
+	iucv_debug(1, "entering");
+	parm = (iparml_set_mask *)grab_param();
+	parm->ipmask = *((__u8*)result);	/* read input byte first */
+	*((ulong *)result) = b2f0(SETMASK, parm);	/* then store rc */
+	release_param(parm);
+
+	iucv_debug(1, "b2f0_result = %ld", *((ulong *)result));
+	iucv_debug(1, "exiting");
+}
+
+/*
+ * Name: iucv_setmask
+ * Purpose: This function enables or disables the following IUCV
+ *          external interruptions: nonpriority and priority message
+ *          interrupts, nonpriority and priority reply interrupts.
+ *          The mask change is executed on the CPU named by iucv_cpuid
+ *          via smp_call_function_on(iucv_setmask_cpuid).
+ * Input: SetMaskFlag - options for interrupts
+ *           0x80 - Nonpriority_MessagePendingInterruptsFlag
+ *           0x40 - Priority_MessagePendingInterruptsFlag
+ *           0x20 - Nonpriority_MessageCompletionInterruptsFlag
+ *           0x10 - Priority_MessageCompletionInterruptsFlag
+ *           0x08 - IUCVControlInterruptsFlag
+ * Output: NA
+ * Return: b2f0_result - return code from CP
+*/
+int
+iucv_setmask (int SetMaskFlag)
+{
+	/* in: first byte carries the mask; out: whole ulong carries
+	 * the b2f0 return code (see iucv_setmask_cpuid). */
+	union {
+		ulong result;
+		__u8 param;
+	} u;
+
+	u.param = SetMaskFlag;
+	/* get_cpu() is only needed to disable preemption around the
+	 * cross-CPU call; its return value was stored in an unused
+	 * local before, which has been removed. */
+	get_cpu();
+	smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid);
+	put_cpu();
+
+	return u.result;
+}
+
+/**
+ * iucv_sever:
+ * @pathid: Path identification number
+ * @user_data: 16-byte of user data
+ *
+ * Terminates an iucv path and, when CP reports success, removes the
+ * pathid from the internal pathid table.
+ * Returns: return code from CP
+ */
+int
+iucv_sever(__u16 pathid, __u8 user_data[16])
+{
+	iparml_control *parm;
+	ulong rc;
+
+	iucv_debug(1, "entering");
+
+	parm = (iparml_control *)grab_param();
+	parm->ippathid = pathid;
+	memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
+
+	rc = b2f0(SEVER, parm);
+	if (!rc)
+		iucv_remove_pathid(pathid);
+	release_param(parm);
+
+	iucv_debug(1, "exiting");
+	return rc;
+}
+
+/*
+ * Interrupt Handlers
+ *******************************************************************************/
+
+/**
+ * iucv_irq_handler:
+ * @regs: Current registers
+ * @code: irq code
+ *
+ * Handles external interrupts coming in from CP.
+ * Copies the external interrupt buffer into a freshly allocated queue
+ * element and schedules iucv_tasklet_handler() to process it outside
+ * of interrupt context.
+ */
+static void
+iucv_irq_handler(struct pt_regs *regs, __u16 code)
+{
+	iucv_irqdata *irqdata;
+
+	/* external-interrupt context: allocation must be GFP_ATOMIC */
+	irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC);
+	if (!irqdata) {
+		/* NOTE(review): on allocation failure this interrupt is
+		 * dropped entirely - confirm that is acceptable. */
+		printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__);
+		return;
+	}
+
+	/* snapshot the shared interrupt buffer for deferred handling */
+	memcpy(&irqdata->data, iucv_external_int_buffer,
+	       sizeof(iucv_GeneralInterrupt));
+
+	spin_lock(&iucv_irq_queue_lock);
+	list_add_tail(&irqdata->queue, &iucv_irq_queue);
+	spin_unlock(&iucv_irq_queue_lock);
+
+	tasklet_schedule(&iucv_tasklet);
+}
+
+/**
+ * iucv_do_int:
+ * @int_buf: Pointer to copy of external interrupt buffer
+ *
+ * The workhorse for handling interrupts queued by iucv_irq_handler().
+ * This function is called from the bottom half iucv_tasklet_handler().
+ * For connection-pending interrupts it searches the registered-handler
+ * table for a matching program; for every other type it looks the
+ * handler up by pathid and dispatches to the matching callback.
+ */
+static void
+iucv_do_int(iucv_GeneralInterrupt * int_buf)
+{
+	handler *h = NULL;
+	struct list_head *lh;
+	ulong flags;
+	iucv_interrupt_ops_t *interrupt = NULL;	/* interrupt addresses */
+	__u8 temp_buff1[24], temp_buff2[24];	/* masked handler id. */
+	int rc = 0, j = 0;
+	__u8 no_listener[16] = "NO LISTENER";
+
+	iucv_debug(2, "entering, pathid %d, type %02X",
+		   int_buf->ippathid, int_buf->iptype);
+	iucv_dumpit("External Interrupt Buffer:",
+		    int_buf, sizeof(iucv_GeneralInterrupt));
+
+	/* "NO LISTENER" is handed to CP as sever user data below, so it
+	 * must be converted to EBCDIC. */
+	ASCEBC (no_listener, 16);
+
+	/* For everything except connection-pending (type 0x01) the path
+	 * should already be registered: look up its handler. */
+	if (int_buf->iptype != 01) {
+		if ((int_buf->ippathid) > (max_connections - 1)) {
+			printk(KERN_WARNING "%s: Got interrupt with pathid %d"
+			       " > max_connections (%ld)\n", __FUNCTION__,
+			       int_buf->ippathid, max_connections - 1);
+		} else {
+			h = iucv_pathid_table[int_buf->ippathid];
+			/* NOTE(review): h is dereferenced here without a
+			 * NULL check although the switch below guards
+			 * with "if (h)" - confirm the table entry can
+			 * never be NULL when CP raises an interrupt. */
+			interrupt = h->interrupt_table;
+			iucv_dumpit("Handler:", h, sizeof(handler));
+		}
+	}
+
+	/* end of if statement */
+	switch (int_buf->iptype) {
+	case 0x01:		/* connection pending */
+		if (messagesDisabled) {
+			iucv_setmask(~0);	/* re-enable all IUCV irqs */
+			messagesDisabled = 0;
+		}
+		/* Match the connecting partner's (vmid, user data)
+		 * against each registered program's id under the
+		 * handler's 24-byte mask. */
+		spin_lock_irqsave(&iucv_lock, flags);
+		list_for_each(lh, &iucv_handler_table) {
+			h = list_entry(lh, handler, list);
+			memcpy(temp_buff1, &(int_buf->ipvmid), 24);
+			memcpy(temp_buff2, &(h->id.userid), 24);
+			for (j = 0; j < 24; j++) {
+				temp_buff1[j] &= (h->id.mask)[j];
+				temp_buff2[j] &= (h->id.mask)[j];
+			}
+
+			iucv_dumpit("temp_buff1:",
+				    temp_buff1, sizeof(temp_buff1));
+			iucv_dumpit("temp_buff2",
+				    temp_buff2, sizeof(temp_buff2));
+
+			if (!memcmp (temp_buff1, temp_buff2, 24)) {
+
+				iucv_debug(2,
+					   "found a matching handler");
+				break;
+			} else
+				h = NULL;
+		}
+		spin_unlock_irqrestore (&iucv_lock, flags);
+		if (h) {
+			/* ADD PATH TO PATHID TABLE */
+			rc = iucv_add_pathid(int_buf->ippathid, h);
+			if (rc) {
+				iucv_sever (int_buf->ippathid,
+					    no_listener);
+				iucv_debug(1,
+					   "add_pathid failed, rc = %d",
+					   rc);
+			} else {
+				interrupt = h->interrupt_table;
+				if (interrupt->ConnectionPending) {
+					/* vmid is shown to the callback
+					 * in ASCII */
+					EBCASC (int_buf->ipvmid, 8);
+					interrupt->ConnectionPending(
+						(iucv_ConnectionPending *)int_buf,
+						h->pgm_data);
+				} else
+					iucv_sever(int_buf->ippathid,
+						   no_listener);
+			}
+		} else
+			iucv_sever(int_buf->ippathid, no_listener);
+		break;
+
+	case 0x02:		/*connection complete */
+		if (messagesDisabled) {
+			iucv_setmask(~0);
+			messagesDisabled = 0;
+		}
+		if (h) {
+			if (interrupt->ConnectionComplete)
+			{
+				interrupt->ConnectionComplete(
+					(iucv_ConnectionComplete *)int_buf,
+					h->pgm_data);
+			}
+			else
+				iucv_debug(1,
+					   "ConnectionComplete not called");
+		} else
+			iucv_sever(int_buf->ippathid, no_listener);
+		break;
+
+	case 0x03:		/* connection severed */
+		if (messagesDisabled) {
+			iucv_setmask(~0);
+			messagesDisabled = 0;
+		}
+		if (h) {
+			if (interrupt->ConnectionSevered)
+				interrupt->ConnectionSevered(
+					(iucv_ConnectionSevered *)int_buf,
+					h->pgm_data);
+
+			else
+				iucv_sever (int_buf->ippathid, no_listener);
+		} else
+			iucv_sever(int_buf->ippathid, no_listener);
+		break;
+
+	case 0x04:		/* connection quiesced */
+		if (messagesDisabled) {
+			iucv_setmask(~0);
+			messagesDisabled = 0;
+		}
+		if (h) {
+			if (interrupt->ConnectionQuiesced)
+				interrupt->ConnectionQuiesced(
+					(iucv_ConnectionQuiesced *)int_buf,
+					h->pgm_data);
+			else
+				iucv_debug(1,
+					   "ConnectionQuiesced not called");
+		}
+		break;
+
+	case 0x05:		/* connection resumed */
+		if (messagesDisabled) {
+			iucv_setmask(~0);
+			messagesDisabled = 0;
+		}
+		if (h) {
+			if (interrupt->ConnectionResumed)
+				interrupt->ConnectionResumed(
+					(iucv_ConnectionResumed *)int_buf,
+					h->pgm_data);
+			else
+				iucv_debug(1,
+					   "ConnectionResumed not called");
+		}
+		break;
+
+	case 0x06:		/* priority message complete */
+	case 0x07:		/* nonpriority message complete */
+		if (h) {
+			if (interrupt->MessageComplete)
+				interrupt->MessageComplete(
+					(iucv_MessageComplete *)int_buf,
+					h->pgm_data);
+			else
+				iucv_debug(2,
+					   "MessageComplete not called");
+		}
+		break;
+
+	case 0x08:		/* priority message pending  */
+	case 0x09:		/* nonpriority message pending  */
+		if (h) {
+			if (interrupt->MessagePending)
+				interrupt->MessagePending(
+					(iucv_MessagePending *) int_buf,
+					h->pgm_data);
+			else
+				iucv_debug(2,
+					   "MessagePending not called");
+		}
+		break;
+	default:		/* unknown iucv type */
+		printk(KERN_WARNING "%s: unknown iucv interrupt\n",
+		       __FUNCTION__);
+		break;
+	}			/* end switch */
+
+	iucv_debug(2, "exiting pathid %d, type %02X",
+		   int_buf->ippathid, int_buf->iptype);
+
+	return;
+}
+
+/**
+ * iucv_tasklet_handler:
+ *
+ * This function loops over the queue of irq buffers and runs iucv_do_int()
+ * on every queue element, freeing each element afterwards.
+ */
+static void
+iucv_tasklet_handler(unsigned long ignored)
+{
+	struct list_head head;
+	struct list_head *next;
+	ulong flags;
+
+	/* Splice the whole pending queue onto the local list head so the
+	 * lock is held only briefly and the irq handler can keep
+	 * queueing new entries while we process these. */
+	spin_lock_irqsave(&iucv_irq_queue_lock, flags);
+	list_add(&head, &iucv_irq_queue);
+	list_del_init(&iucv_irq_queue);
+	spin_unlock_irqrestore (&iucv_irq_queue_lock, flags);
+
+	/* Walk the detached list; advance "next" before kfree()ing the
+	 * current entry. */
+	next = head.next;
+	while (next != &head) {
+		iucv_irqdata *p = list_entry(next, iucv_irqdata, queue);
+
+		next = next->next;
+		iucv_do_int(&p->data);
+		kfree(p);
+	}
+
+	return;
+}
+
+subsys_initcall(iucv_init);
+module_exit(iucv_exit);
+
+/**
+ * Export all public stuff
+ */
+EXPORT_SYMBOL (iucv_bus);
+EXPORT_SYMBOL (iucv_root);
+EXPORT_SYMBOL (iucv_accept);
+EXPORT_SYMBOL (iucv_connect);
+#if 0
+EXPORT_SYMBOL (iucv_purge);
+EXPORT_SYMBOL (iucv_query_maxconn);
+EXPORT_SYMBOL (iucv_query_bufsize);
+EXPORT_SYMBOL (iucv_quiesce);
+#endif
+EXPORT_SYMBOL (iucv_receive);
+#if 0
+EXPORT_SYMBOL (iucv_receive_array);
+#endif
+EXPORT_SYMBOL (iucv_reject);
+#if 0
+EXPORT_SYMBOL (iucv_reply);
+EXPORT_SYMBOL (iucv_reply_array);
+EXPORT_SYMBOL (iucv_resume);
+#endif
+EXPORT_SYMBOL (iucv_reply_prmmsg);
+EXPORT_SYMBOL (iucv_send);
+#if 0
+EXPORT_SYMBOL (iucv_send2way);
+EXPORT_SYMBOL (iucv_send2way_array);
+EXPORT_SYMBOL (iucv_send_array);
+EXPORT_SYMBOL (iucv_send2way_prmmsg);
+EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
+EXPORT_SYMBOL (iucv_send_prmmsg);
+EXPORT_SYMBOL (iucv_setmask);
+#endif
+EXPORT_SYMBOL (iucv_sever);
+EXPORT_SYMBOL (iucv_register_program);
+EXPORT_SYMBOL (iucv_unregister_program);
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h
new file mode 100644
index 000000000000..198330217eff
--- /dev/null
+++ b/drivers/s390/net/iucv.h
@@ -0,0 +1,849 @@
+/*
+ * drivers/s390/net/iucv.h
+ * IUCV base support.
+ *
+ * S390 version
+ * Copyright (C) 2000 IBM Corporation
+ * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
+ * Xenia Tkatschow (xenia@us.ibm.com)
+ *
+ *
+ * Functionality:
+ * To explore any of the IUCV functions, one must first register
+ * their program using iucv_register_program(). Once your program has
+ * successfully completed a register, it can exploit the other functions.
+ * For further reference on all IUCV functionality, refer to the
+ * CP Programming Services book, also available on the web
+ * thru www.ibm.com/s390/vm/pubs, manual # SC24-5760
+ *
+ * Definition of Return Codes
+ * -All positive return codes including zero are reflected back
+ * from CP except for iucv_register_program. The definition of each
+ * return code can be found in CP Programming Services book.
+ * Also available on the web thru www.ibm.com/s390/vm/pubs, manual # SC24-5760
+ * - Return Code of:
+ * (-EINVAL) Invalid value
+ * (-ENOMEM) storage allocation failed
+ * pgmask defined in iucv_register_program will be set depending on input
+ * parameters.
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/debug.h>
+
+/**
+ * Debug Facility stuff
+ */
+#define IUCV_DBF_SETUP_NAME "iucv_setup"
+#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_INDEX 1
+#define IUCV_DBF_SETUP_NR_AREAS 1
+#define IUCV_DBF_SETUP_LEVEL 3
+
+#define IUCV_DBF_DATA_NAME "iucv_data"
+#define IUCV_DBF_DATA_LEN 128
+#define IUCV_DBF_DATA_INDEX 1
+#define IUCV_DBF_DATA_NR_AREAS 1
+#define IUCV_DBF_DATA_LEVEL 2
+
+#define IUCV_DBF_TRACE_NAME "iucv_trace"
+#define IUCV_DBF_TRACE_LEN 16
+#define IUCV_DBF_TRACE_INDEX 2
+#define IUCV_DBF_TRACE_NR_AREAS 1
+#define IUCV_DBF_TRACE_LEVEL 3
+
+/* Log a plain text event into the given iucv debug area. */
+#define IUCV_DBF_TEXT(name,level,text) \
+	do { \
+		debug_text_event(iucv_dbf_##name,level,text); \
+	} while (0)
+
+/* Log len raw bytes at addr into the given iucv debug area. */
+#define IUCV_DBF_HEX(name,level,addr,len) \
+	do { \
+		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
+	} while (0)
+
+DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+/*
+ * Format a message into the per-cpu text buffer and log it as a text
+ * event.  The format is bounded with snprintf to the buffer size
+ * (char[256], see DECLARE_PER_CPU above); the previous unbounded
+ * sprintf could overrun the buffer for long formatted messages.
+ */
+#define IUCV_DBF_TEXT_(name,level,text...) \
+	do { \
+		char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
+		snprintf(iucv_dbf_txt_buf, 256, text); \
+		debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
+		put_cpu_var(iucv_dbf_txt_buf); \
+	} while (0)
+
+/*
+ * Log a formatted event to the trace debug area.  The original macro
+ * invoked debug_sprintf_event twice with identical arguments, so every
+ * event was recorded twice; the duplicate call has been removed.
+ * Note: "name" is accepted for symmetry with the other IUCV_DBF_*
+ * macros but the event always goes to iucv_dbf_trace.
+ */
+#define IUCV_DBF_SPRINTF(name,level,text...) \
+	do { \
+		debug_sprintf_event(iucv_dbf_trace, level, text ); \
+	} while (0)
+
+/**
+ * some more debug stuff
+ */
+/*
+ * Dump the first 32 bytes at "ptr" as two 16-byte hex lines.
+ * Bytes are read through unsigned char: with plain (possibly signed)
+ * char, values >= 0x80 would be sign-extended by integer promotion
+ * and "%02x" would print e.g. "ffffff80" instead of "80".
+ */
+#define IUCV_HEXDUMP16(importance,header,ptr) \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+		   *(((unsigned char*)ptr)),*(((unsigned char*)ptr)+1), \
+		   *(((unsigned char*)ptr)+2),*(((unsigned char*)ptr)+3), \
+		   *(((unsigned char*)ptr)+4),*(((unsigned char*)ptr)+5), \
+		   *(((unsigned char*)ptr)+6),*(((unsigned char*)ptr)+7), \
+		   *(((unsigned char*)ptr)+8),*(((unsigned char*)ptr)+9), \
+		   *(((unsigned char*)ptr)+10),*(((unsigned char*)ptr)+11), \
+		   *(((unsigned char*)ptr)+12),*(((unsigned char*)ptr)+13), \
+		   *(((unsigned char*)ptr)+14),*(((unsigned char*)ptr)+15)); \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+		   *(((unsigned char*)ptr)+16),*(((unsigned char*)ptr)+17), \
+		   *(((unsigned char*)ptr)+18),*(((unsigned char*)ptr)+19), \
+		   *(((unsigned char*)ptr)+20),*(((unsigned char*)ptr)+21), \
+		   *(((unsigned char*)ptr)+22),*(((unsigned char*)ptr)+23), \
+		   *(((unsigned char*)ptr)+24),*(((unsigned char*)ptr)+25), \
+		   *(((unsigned char*)ptr)+26),*(((unsigned char*)ptr)+27), \
+		   *(((unsigned char*)ptr)+28),*(((unsigned char*)ptr)+29), \
+		   *(((unsigned char*)ptr)+30),*(((unsigned char*)ptr)+31));
+
+/* Print "len" bytes from "buf" as space-separated hex, 16 per line. */
+static inline void
+iucv_hex_dump(unsigned char *buf, size_t len)
+{
+	size_t pos;
+
+	for (pos = 0; pos < len; pos++) {
+		if (pos && (pos % 16) == 0)
+			printk("\n");
+		printk("%02x ", buf[pos]);
+	}
+	printk("\n");
+}
+/**
+ * end of debug stuff
+ */
+
+#define uchar unsigned char
+#define ushort unsigned short
+#define ulong unsigned long
+#define iucv_handle_t void *
+
+/* flags1:
+ * All flags are defined in the field IPFLAGS1 of each function
+ * and can be found in CP Programming Services.
+ * IPLOCAL - Indicates the connect can only be satisfied on the
+ * local system
+ * IPPRTY - Indicates a priority message
+ * IPQUSCE - Indicates you do not want to receive messages on a
+ * path until an iucv_resume is issued
+ * IPRMDATA - Indicates that the message is in the parameter list
+ */
+#define IPLOCAL 0x01
+#define IPPRTY 0x20
+#define IPQUSCE 0x40
+#define IPRMDATA 0x80
+
+/* flags1_out:
+ * All flags are defined in the output field of IPFLAGS1 for each function
+ * and can be found in CP Programming Services.
+ * IPNORPY - Specifies this is a one-way message and no reply is expected.
+ * IPPRTY - Indicates a priority message is permitted. Defined in flags1.
+ */
+#define IPNORPY 0x10
+
+#define Nonpriority_MessagePendingInterruptsFlag 0x80
+#define Priority_MessagePendingInterruptsFlag 0x40
+#define Nonpriority_MessageCompletionInterruptsFlag 0x20
+#define Priority_MessageCompletionInterruptsFlag 0x10
+#define IUCVControlInterruptsFlag 0x08
+#define AllInterrupts 0xf8
+/*
+ * Mapping of external interrupt buffers should be used with the corresponding
+ * interrupt types.
+ * Names: iucv_ConnectionPending -> connection pending
+ * iucv_ConnectionComplete -> connection complete
+ * iucv_ConnectionSevered -> connection severed
+ * iucv_ConnectionQuiesced -> connection quiesced
+ * iucv_ConnectionResumed -> connection resumed
+ * iucv_MessagePending -> message pending
+ * iucv_MessageComplete -> message complete
+ */
+/* External interrupt buffer layout: connection pending (see the
+ * mapping comment above; iptype 0x01 in iucv_do_int). */
+typedef struct {
+	u16 ippathid;
+	uchar ipflags1;
+	uchar iptype;
+	u16 ipmsglim;
+	u16 res1;
+	uchar ipvmid[8];
+	uchar ipuser[16];
+	u32 res3;
+	uchar ippollfg;
+	uchar res4[3];
+} iucv_ConnectionPending;
+
+/* External interrupt buffer layout: connection complete (iptype 0x02). */
+typedef struct {
+	u16 ippathid;
+	uchar ipflags1;
+	uchar iptype;
+	u16 ipmsglim;
+	u16 res1;
+	uchar res2[8];
+	uchar ipuser[16];
+	u32 res3;
+	uchar ippollfg;
+	uchar res4[3];
+} iucv_ConnectionComplete;
+
+/* External interrupt buffer layout: connection severed (iptype 0x03). */
+typedef struct {
+	u16 ippathid;
+	uchar res1;
+	uchar iptype;
+	u32 res2;
+	uchar res3[8];
+	uchar ipuser[16];
+	u32 res4;
+	uchar ippollfg;
+	uchar res5[3];
+} iucv_ConnectionSevered;
+
+/* External interrupt buffer layout: connection quiesced (iptype 0x04). */
+typedef struct {
+	u16 ippathid;
+	uchar res1;
+	uchar iptype;
+	u32 res2;
+	uchar res3[8];
+	uchar ipuser[16];
+	u32 res4;
+	uchar ippollfg;
+	uchar res5[3];
+} iucv_ConnectionQuiesced;
+
+/* External interrupt buffer layout: connection resumed (iptype 0x05). */
+typedef struct {
+	u16 ippathid;
+	uchar res1;
+	uchar iptype;
+	u32 res2;
+	uchar res3[8];
+	uchar ipuser[16];
+	u32 res4;
+	uchar ippollfg;
+	uchar res5[3];
+} iucv_ConnectionResumed;
+
+/* External interrupt buffer layout: message pending (iptype 0x08/0x09).
+ * The unions overlay the in-parameter-list message bytes with the
+ * buffer-length field, depending on how the message was sent. */
+typedef struct {
+	u16 ippathid;
+	uchar ipflags1;
+	uchar iptype;
+	u32 ipmsgid;
+	u32 iptrgcls;
+	union u2 {
+		u32 iprmmsg1_u32;
+		uchar iprmmsg1[4];
+	} ln1msg1;
+	union u1 {
+		u32 ipbfln1f;
+		uchar iprmmsg2[4];
+	} ln1msg2;
+	u32 res1[3];
+	u32 ipbfln2f;
+	uchar ippollfg;
+	uchar res2[3];
+} iucv_MessagePending;
+
+/* External interrupt buffer layout: message complete (iptype 0x06/0x07). */
+typedef struct {
+	u16 ippathid;
+	uchar ipflags1;
+	uchar iptype;
+	u32 ipmsgid;
+	u32 ipaudit;
+	uchar iprmmsg[8];
+	u32 ipsrccls;
+	u32 ipmsgtag;
+	u32 res;
+	u32 ipbfln2f;
+	uchar ippollfg;
+	uchar res2[3];
+} iucv_MessageComplete;
+
+/*
+ * iucv_interrupt_ops_t: Is a vector of functions that handle
+ * IUCV interrupts.
+ * Parameter list:
+ *         eib - is a pointer to a 40-byte area described
+ *               with one of the structures above.
+ *         pgm_data - this data is strictly for the
+ *                    interrupt handler that is passed by
+ *                    the application. This may be an address
+ *                    or token.
+ * Any entry may be NULL, in which case the corresponding interrupt is
+ * either ignored or answered with a sever (see iucv_do_int).
+*/
+typedef struct {
+	void (*ConnectionPending) (iucv_ConnectionPending * eib,
+				   void *pgm_data);
+	void (*ConnectionComplete) (iucv_ConnectionComplete * eib,
+				    void *pgm_data);
+	void (*ConnectionSevered) (iucv_ConnectionSevered * eib,
+				   void *pgm_data);
+	void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib,
+				    void *pgm_data);
+	void (*ConnectionResumed) (iucv_ConnectionResumed * eib,
+				   void *pgm_data);
+	void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data);
+	void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data);
+} iucv_interrupt_ops_t;
+
+/*
+ *iucv_array_t : Defines buffer array.
+ * Inside the array may be 31- bit addresses and 31-bit lengths.
+ * 8-byte alignment is required by the IUCV buffer-list interface.
+*/
+typedef struct {
+	u32 address;
+	u32 length;
+} iucv_array_t __attribute__ ((aligned (8)));
+
+extern struct bus_type iucv_bus;
+extern struct device *iucv_root;
+
+/* -prototypes- */
+/*
+ * Name: iucv_register_program
+ * Purpose: Registers an application with IUCV
+ * Input: prmname - user identification
+ * userid - machine identification
+ * pgmmask - indicates which bits in the prmname and userid combined will be
+ * used to determine who is given control
+ * ops - address of vector of interrupt handlers
+ * pgm_data- application data passed to interrupt handlers
+ * Output: NA
+ * Return: address of handler
+ * (0) - Error occurred, registration not completed.
+ * NOTE: Exact cause of failure will be recorded in syslog.
+*/
+iucv_handle_t iucv_register_program (uchar pgmname[16],
+ uchar userid[8],
+ uchar pgmmask[24],
+ iucv_interrupt_ops_t * ops,
+ void *pgm_data);
+
+/*
+ * Name: iucv_unregister_program
+ * Purpose: Unregister application with IUCV
+ * Input: address of handler
+ * Output: NA
+ * Return: (0) - Normal return
+ * (-EINVAL) - Internal error, wild pointer
+*/
+int iucv_unregister_program (iucv_handle_t handle);
+
+/*
+ * Name: iucv_accept
+ * Purpose: This function is issued after the user receives a Connection Pending external
+ * interrupt and now wishes to complete the IUCV communication path.
+ * Input: pathid - u16 , Path identification number
+ * msglim_reqstd - u16, The number of outstanding messages requested.
+ * user_data - uchar[16], Data specified by the iucv_connect function.
+ * flags1 - int, Contains options for this path.
+ * -IPPRTY - 0x20- Specifies if you want to send priority message.
+ * -IPRMDATA - 0x80, Specifies whether your program can handle a message
+ * in the parameter list.
+ * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
+ * established.
+ * handle - iucv_handle_t, Address of handler.
+ * pgm_data - void *, Application data passed to interrupt handlers.
+ * flags1_out - int * Contains information about the path
+ * - IPPRTY - 0x20, Indicates you may send priority messages.
+ * msglim - *u16, Number of outstanding messages.
+ * Output: return code from CP IUCV call.
+*/
+
+int iucv_accept (u16 pathid,
+ u16 msglim_reqstd,
+ uchar user_data[16],
+ int flags1,
+ iucv_handle_t handle,
+ void *pgm_data, int *flags1_out, u16 * msglim);
+
+/*
+ * Name: iucv_connect
+ * Purpose: This function establishes an IUCV path. Although the connect may complete
+ * successfully, you are not able to use the path until you receive an IUCV
+ * Connection Complete external interrupt.
+ * Input: pathid - u16 *, Path identification number
+ * msglim_reqstd - u16, Number of outstanding messages requested
+ * user_data - uchar[16], 16-byte user data
+ * userid - uchar[8], User identification
+ * system_name - uchar[8], 8-byte identifying the system name
+ * flags1 - int, Contains options for this path.
+ * -IPPRTY - 0x20, Specifies if you want to send priority message.
+ * -IPRMDATA - 0x80, Specifies whether your program can handle a message
+ * in the parameter list.
+ * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
+ * established.
+ * -IPLOCAL - 0X01, Allows an application to force the partner to be on
+ * the local system. If local is specified then target class cannot be
+ * specified.
+ * flags1_out - int * Contains information about the path
+ * - IPPRTY - 0x20, Indicates you may send priority messages.
+ * msglim - * u16, Number of outstanding messages
+ * handle - iucv_handle_t, Address of handler
+ * pgm_data - void *, Application data passed to interrupt handlers
+ * Output: return code from CP IUCV call
+ * rc - return code from iucv_declare_buffer
+ * -EINVAL - Invalid handle passed by application
+ * -EINVAL - Pathid address is NULL
+ * add_pathid_result - Return code from internal function add_pathid
+*/
+int
+ iucv_connect (u16 * pathid,
+ u16 msglim_reqstd,
+ uchar user_data[16],
+ uchar userid[8],
+ uchar system_name[8],
+ int flags1,
+ int *flags1_out,
+ u16 * msglim, iucv_handle_t handle, void *pgm_data);
+
+/*
+ * Name: iucv_purge
+ * Purpose: This function cancels a message that you have sent.
+ * Input: pathid - Path identification number.
+ * msgid - Specifies the message ID of the message to be purged.
+ * srccls - Specifies the source message class.
+ * Output: audit - Contains information about asynchronous error
+ * that may have affected the normal completion
+ * of this message.
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit);
+/*
+ * Name: iucv_query_maxconn
+ * Purpose: This function determines the maximum number of communication paths you
+ * may establish.
+ * Return: maxconn - ulong, Maximum number of connection the virtual machine may
+ * establish.
+*/
+ulong iucv_query_maxconn (void);
+
+/*
+ * Name: iucv_query_bufsize
+ * Purpose: This function determines how large an external interrupt
+ * buffer IUCV requires to store information.
+ * Return: bufsize - ulong, Size of external interrupt buffer.
+ */
+ulong iucv_query_bufsize (void);
+
+/*
+ * Name: iucv_quiesce
+ * Purpose: This function temporarily suspends incoming messages on an
+ * IUCV path. You can later reactivate the path by invoking
+ * the iucv_resume function.
+ * Input: pathid - Path identification number
+ * user_data - 16-bytes of user data
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_quiesce (u16 pathid, uchar user_data[16]);
+
+/*
+ * Name: iucv_receive
+ * Purpose: This function receives messages that are being sent to you
+ * over established paths. Data will be returned in buffer for length of
+ * buflen.
+ * Input:
+ * pathid - Path identification number.
+ * buffer - Address of buffer to receive.
+ * buflen - Length of buffer to receive.
+ * msgid - Specifies the message ID.
+ * trgcls - Specifies target class.
+ * Output:
+ * flags1_out: int *, Contains information about this path.
+ * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
+ * expected.
+ * IPPRTY - 0x20 Specifies if you want to send priority message.
+ * IPRMDATA - 0x80 specifies the data is contained in the parameter list
+ * residual_buffer - address of buffer updated by the number
+ * of bytes you have received.
+ * residual_length -
+ * Contains one of the following values, if the receive buffer is:
+ * The same length as the message, this field is zero.
+ * Longer than the message, this field contains the number of
+ * bytes remaining in the buffer.
+ * Shorter than the message, this field contains the residual
+ * count (that is, the number of bytes remaining in the
+ * message that does not fit into the buffer. In this
+ * case b2f0_result = 5.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - buffer address is pointing to NULL
+*/
+int iucv_receive (u16 pathid,
+ u32 msgid,
+ u32 trgcls,
+ void *buffer,
+ ulong buflen,
+ int *flags1_out,
+ ulong * residual_buffer, ulong * residual_length);
+
+ /*
+ * Name: iucv_receive_array
+ * Purpose: This function receives messages that are being sent to you
+ * over established paths. Data will be returned in first buffer for
+ * length of first buffer.
+ * Input: pathid - Path identification number.
+ * msgid - specifies the message ID.
+ * trgcls - Specifies target class.
+ * buffer - Address of array of buffers.
+ * buflen - Total length of buffers.
+ * Output:
+ * flags1_out: int *, Contains information about this path.
+ * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
+ * expected.
+ * IPPRTY - 0x20 Specifies if you want to send priority message.
+ * IPRMDATA - 0x80 specifies the data is contained in the parameter list
+ * residual_buffer - address points to the current list entry IUCV
+ * is working on.
+ * residual_length -
+ * Contains one of the following values, if the receive buffer is:
+ * The same length as the message, this field is zero.
+ * Longer than the message, this field contains the number of
+ * bytes remaining in the buffer.
+ * Shorter than the message, this field contains the residual
+ * count (that is, the number of bytes remaining in the
+ * message that does not fit into the buffer. In this
+ * case b2f0_result = 5.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Buffer address is NULL.
+ */
+int iucv_receive_array (u16 pathid,
+ u32 msgid,
+ u32 trgcls,
+ iucv_array_t * buffer,
+ ulong buflen,
+ int *flags1_out,
+ ulong * residual_buffer, ulong * residual_length);
+
+/*
+ * Name: iucv_reject
+ * Purpose: The reject function refuses a specified message. Between the
+ * time you are notified of a message and the time that you
+ * complete the message, the message may be rejected.
+ * Input: pathid - Path identification number.
+ * msgid - Specifies the message ID.
+ * trgcls - Specifies target class.
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_reject (u16 pathid, u32 msgid, u32 trgcls);
+
+/*
+ * Name: iucv_reply
+ * Purpose: This function responds to the two-way messages that you
+ * receive. You must identify completely the message to
+ * which you wish to reply. ie, pathid, msgid, and trgcls.
+ * Input: pathid - Path identification number.
+ * msgid - Specifies the message ID.
+ * trgcls - Specifies target class.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20, Specifies if you want to send priority message.
+ * buffer - Address of reply buffer.
+ * buflen - Length of reply buffer.
+ * Output: residual_buffer - Address of buffer updated by the number
+ * of bytes you have moved.
+ * residual_length - Contains one of the following values:
+ * If the answer buffer is the same length as the reply, this field
+ * contains zero.
+ * If the answer buffer is longer than the reply, this field contains
+ * the number of bytes remaining in the buffer.
+ * If the answer buffer is shorter than the reply, this field contains
+ *            a residual count (that is, the number of bytes remaining in the
+ * reply that does not fit into the buffer. In this
+ * case b2f0_result = 5.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_reply (u16 pathid,
+ u32 msgid,
+ u32 trgcls,
+ int flags1,
+ void *buffer, ulong buflen, ulong * residual_buffer,
+ ulong * residual_length);
+
+/*
+ * Name: iucv_reply_array
+ * Purpose: This function responds to the two-way messages that you
+ * receive. You must identify completely the message to
+ * which you wish to reply. ie, pathid, msgid, and trgcls.
+ * The array identifies a list of addresses and lengths of
+ * discontiguous buffers that contains the reply data.
+ * Input: pathid - Path identification number
+ * msgid - Specifies the message ID.
+ * trgcls - Specifies target class.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20, Specifies if you want to send priority message.
+ * buffer - Address of array of reply buffers.
+ * buflen - Total length of reply buffers.
+ * Output: residual_buffer - Address of buffer which IUCV is currently working on.
+ * residual_length - Contains one of the following values:
+ * If the answer buffer is the same length as the reply, this field
+ * contains zero.
+ * If the answer buffer is longer than the reply, this field contains
+ * the number of bytes remaining in the buffer.
+ * If the answer buffer is shorter than the reply, this field contains
+ *            a residual count (that is, the number of bytes remaining in the
+ * reply that does not fit into the buffer. In this
+ * case b2f0_result = 5.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_reply_array (u16 pathid,
+ u32 msgid,
+ u32 trgcls,
+ int flags1,
+ iucv_array_t * buffer,
+ ulong buflen, ulong * residual_address,
+ ulong * residual_length);
+
+/*
+ * Name: iucv_reply_prmmsg
+ * Purpose: This function responds to the two-way messages that you
+ * receive. You must identify completely the message to
+ * which you wish to reply. ie, pathid, msgid, and trgcls.
+ * Prmmsg signifies the data is moved into the
+ * parameter list.
+ * Input: pathid - Path identification number.
+ * msgid - Specifies the message ID.
+ * trgcls - Specifies target class.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20 Specifies if you want to send priority message.
+ * prmmsg - 8-bytes of data to be placed into the parameter.
+ * list.
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_reply_prmmsg (u16 pathid,
+ u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]);
+
+/*
+ * Name: iucv_resume
+ * Purpose: This function restores communications over a quiesced path
+ * Input: pathid - Path identification number.
+ * user_data - 16-bytes of user data.
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_resume (u16 pathid, uchar user_data[16]);
+
+/*
+ * Name: iucv_send
+ * Purpose: This function transmits data to another application.
+ * Data to be transmitted is in a buffer and this is a
+ * one-way message and the receiver will not reply to the
+ * message.
+ * Input: pathid - Path identification number.
+ * trgcls - Specifies target class.
+ * srccls - Specifies the source message class.
+ * msgtag - Specifies a tag to be associated with the message.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20 Specifies if you want to send priority message.
+ * buffer - Address of send buffer.
+ * buflen - Length of send buffer.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_send (u16 pathid,
+ u32 * msgid,
+ u32 trgcls,
+ u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen);
+
+/*
+ * Name: iucv_send_array
+ * Purpose: This function transmits data to another application.
+ * The contents of buffer is the address of the array of
+ * addresses and lengths of discontiguous buffers that hold
+ * the message text. This is a one-way message and the
+ * receiver will not reply to the message.
+ * Input: pathid - Path identification number.
+ * trgcls - Specifies target class.
+ * srccls - Specifies the source message class.
+ *        msgtag - Specifies a tag to be associated with the message.
+ * flags1 - Option for path.
+ * IPPRTY- specifies if you want to send priority message.
+ * buffer - Address of array of send buffers.
+ * buflen - Total length of send buffers.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_send_array (u16 pathid,
+ u32 * msgid,
+ u32 trgcls,
+ u32 srccls,
+ u32 msgtag,
+ int flags1, iucv_array_t * buffer, ulong buflen);
+
+/*
+ * Name: iucv_send_prmmsg
+ * Purpose: This function transmits data to another application.
+ * Prmmsg specifies that the 8-bytes of data are to be moved
+ * into the parameter list. This is a one-way message and the
+ * receiver will not reply to the message.
+ * Input: pathid - Path identification number.
+ * trgcls - Specifies target class.
+ * srccls - Specifies the source message class.
+ * msgtag - Specifies a tag to be associated with the message.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20 specifies if you want to send priority message.
+ * prmmsg - 8-bytes of data to be placed into parameter list.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_send_prmmsg (u16 pathid,
+ u32 * msgid,
+ u32 trgcls,
+ u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]);
+
+/*
+ * Name: iucv_send2way
+ * Purpose: This function transmits data to another application.
+ * Data to be transmitted is in a buffer. The receiver
+ * of the send is expected to reply to the message and
+ * a buffer is provided into which IUCV moves the reply
+ * to this message.
+ * Input: pathid - Path identification number.
+ * trgcls - Specifies target class.
+ * srccls - Specifies the source message class.
+ * msgtag - Specifies a tag associated with the message.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20 Specifies if you want to send priority message.
+ * buffer - Address of send buffer.
+ * buflen - Length of send buffer.
+ * ansbuf - Address of buffer into which IUCV moves the reply of
+ * this message.
+ * anslen - Address of length of buffer.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Buffer or ansbuf address is NULL.
+*/
+int iucv_send2way (u16 pathid,
+ u32 * msgid,
+ u32 trgcls,
+ u32 srccls,
+ u32 msgtag,
+ int flags1,
+ void *buffer, ulong buflen, void *ansbuf, ulong anslen);
+
+/*
+ * Name: iucv_send2way_array
+ * Purpose: This function transmits data to another application.
+ * The contents of buffer is the address of the array of
+ * addresses and lengths of discontiguous buffers that hold
+ * the message text. The receiver of the send is expected to
+ * reply to the message and a buffer is provided into which
+ * IUCV moves the reply to this message.
+ * Input: pathid - Path identification number.
+ * trgcls - Specifies target class.
+ * srccls - Specifies the source message class.
+ * msgtag - Specifies a tag to be associated with the message.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20 Specifies if you want to send priority message.
+ *        buffer - Address of array of send buffers.
+ * buflen - Total length of send buffers.
+ * ansbuf - Address of array of buffer into which IUCV moves the reply
+ * of this message.
+ * anslen - Address of length reply buffers.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_send2way_array (u16 pathid,
+ u32 * msgid,
+ u32 trgcls,
+ u32 srccls,
+ u32 msgtag,
+ int flags1,
+ iucv_array_t * buffer,
+ ulong buflen, iucv_array_t * ansbuf, ulong anslen);
+
+/*
+ * Name: iucv_send2way_prmmsg
+ * Purpose: This function transmits data to another application.
+ * Prmmsg specifies that the 8-bytes of data are to be moved
+ * into the parameter list. This is a two-way message and the
+ * receiver of the message is expected to reply. A buffer
+ * is provided into which IUCV moves the reply to this
+ * message.
+ * Input: pathid - Path identification number.
+ * trgcls - Specifies target class.
+ * srccls - Specifies the source message class.
+ * msgtag - Specifies a tag to be associated with the message.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20 Specifies if you want to send priority message.
+ * prmmsg - 8-bytes of data to be placed in parameter list.
+ * ansbuf - Address of buffer into which IUCV moves the reply of
+ * this message.
+ * anslen - Address of length of buffer.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_send2way_prmmsg (u16 pathid,
+ u32 * msgid,
+ u32 trgcls,
+ u32 srccls,
+ u32 msgtag,
+ ulong flags1,
+ uchar prmmsg[8], void *ansbuf, ulong anslen);
+
+/*
+ * Name: iucv_send2way_prmmsg_array
+ * Purpose: This function transmits data to another application.
+ * Prmmsg specifies that the 8-bytes of data are to be moved
+ * into the parameter list. This is a two-way message and the
+ * receiver of the message is expected to reply. A buffer
+ * is provided into which IUCV moves the reply to this
+ * message. The contents of ansbuf is the address of the
+ * array of addresses and lengths of discontiguous buffers
+ * that contain the reply.
+ * Input: pathid - Path identification number.
+ * trgcls - Specifies target class.
+ * srccls - Specifies the source message class.
+ * msgtag - Specifies a tag to be associated with the message.
+ * flags1 - Option for path.
+ * IPPRTY- 0x20 specifies if you want to send priority message.
+ * prmmsg - 8-bytes of data to be placed into the parameter list.
+ * ansbuf - Address of array of buffer into which IUCV moves the reply
+ * of this message.
+ * anslen - Address of length of reply buffers.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ * (-EINVAL) - Ansbuf address is NULL.
+*/
+int iucv_send2way_prmmsg_array (u16 pathid,
+ u32 * msgid,
+ u32 trgcls,
+ u32 srccls,
+ u32 msgtag,
+ int flags1,
+ uchar prmmsg[8],
+ iucv_array_t * ansbuf, ulong anslen);
+
+/*
+ * Name: iucv_setmask
+ * Purpose: This function enables or disables the following IUCV
+ * external interruptions: Nonpriority and priority message
+ * interrupts, nonpriority and priority reply interrupts.
+ * Input: SetMaskFlag - options for interrupts
+ * 0x80 - Nonpriority_MessagePendingInterruptsFlag
+ * 0x40 - Priority_MessagePendingInterruptsFlag
+ * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
+ * 0x10 - Priority_MessageCompletionInterruptsFlag
+ * 0x08 - IUCVControlInterruptsFlag
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_setmask (int SetMaskFlag);
+
+/*
+ * Name: iucv_sever
+ * Purpose: This function terminates an IUCV path.
+ * Input: pathid - Path identification number.
+ * user_data - 16-bytes of user data.
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Internal error, wild pointer.
+*/
+int iucv_sever (u16 pathid, uchar user_data[16]);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
new file mode 100644
index 000000000000..0f76e945b984
--- /dev/null
+++ b/drivers/s390/net/lcs.c
@@ -0,0 +1,2347 @@
+/*
+ * linux/drivers/s390/net/lcs.c
+ *
+ * Linux for S/390 Lan Channel Station Network Driver
+ *
+ * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Original Code written by
+ * DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ * Rewritten by
+ * Frank Pavlic (pavlic@de.ibm.com) and
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * $Revision: 1.96 $ $Date: 2004/11/11 13:42:33 $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/trdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/inetdevice.h>
+#include <linux/in.h>
+#include <linux/igmp.h>
+#include <linux/delay.h>
+#include <net/arp.h>
+#include <net/ip.h>
+
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/timex.h>
+#include <linux/device.h>
+#include <asm/ccwgroup.h>
+
+#include "lcs.h"
+#include "cu3088.h"
+
+
+#if !defined(CONFIG_NET_ETHERNET) && \
+ !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
+#error Cannot compile lcs.c without some net devices switched on.
+#endif
+
+/**
+ * initialization string for output
+ */
+#define VERSION_LCS_C "$Revision: 1.96 $"
+
+static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
+static char debug_buffer[255];
+
+/**
+ * Some prototypes.
+ */
+static void lcs_tasklet(unsigned long);
+static void lcs_start_kernel_thread(struct lcs_card *card);
+static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
+static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
+
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *lcs_dbf_setup;
+static debug_info_t *lcs_dbf_trace;
+
+/**
+ * LCS Debug Facility functions
+ */
+/* Unregister the LCS s390 debug areas; tolerates a partially failed setup. */
+static void
+lcs_unregister_debug_facility(void)
+{
+	if (lcs_dbf_setup)
+		debug_unregister(lcs_dbf_setup);
+	if (lcs_dbf_trace)
+		debug_unregister(lcs_dbf_trace);
+}
+
+/*
+ * Register the "lcs_setup" and "lcs_trace" s390 debug areas and attach a
+ * hex/ascii view to each.  Returns 0 on success or -ENOMEM; a partial
+ * registration is rolled back before returning the error.
+ */
+static int
+lcs_register_debug_facility(void)
+{
+	lcs_dbf_setup = debug_register("lcs_setup", 1, 1, 8);
+	lcs_dbf_trace = debug_register("lcs_trace", 1, 2, 8);
+	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
+		PRINT_ERR("Not enough memory for debug facility.\n");
+		lcs_unregister_debug_facility();
+		return -ENOMEM;
+	}
+	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(lcs_dbf_setup, 4);
+	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(lcs_dbf_trace, 4);
+	return 0;
+}
+
+/**
+ * Allocate io buffers.
+ * Allocates LCS_NUM_BUFFS zeroed, DMA-capable io buffers of
+ * LCS_IOBUFFERSIZE bytes for one channel.  Returns 0 or -ENOMEM;
+ * buffers already allocated are freed again on failure.
+ */
+static int
+lcs_alloc_channel(struct lcs_channel *channel)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ichalloc");
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		/* alloc memory for iobuffer */
+		channel->iob[cnt].data = (void *)
+			kmalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
+		if (channel->iob[cnt].data == NULL)
+			break;
+		memset(channel->iob[cnt].data, 0, LCS_IOBUFFERSIZE);
+		channel->iob[cnt].state = BUF_STATE_EMPTY;
+	}
+	if (cnt < LCS_NUM_BUFFS) {
+		/* Not all io buffers could be allocated. */
+		LCS_DBF_TEXT(2, setup, "echalloc");
+		while (cnt-- > 0)
+			kfree(channel->iob[cnt].data);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * Free io buffers.
+ * Releases all io buffers of a channel.  kfree(NULL) is a no-op, so a
+ * NULL guard around the call is redundant; pointers are reset to NULL
+ * afterwards so a repeated cleanup stays harmless.
+ */
+static void
+lcs_free_channel(struct lcs_channel *channel)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ichfree");
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		kfree(channel->iob[cnt].data);
+		channel->iob[cnt].data = NULL;
+	}
+}
+
+/*
+ * Cleanup channel.
+ * Kills the channel's irq tasklet and releases its io buffers.
+ */
+static void
+lcs_cleanup_channel(struct lcs_channel *channel)
+{
+	LCS_DBF_TEXT(3, setup, "cleanch");
+	/* Kill write channel tasklets. */
+	tasklet_kill(&channel->irq_tasklet);
+	/* Free channel buffers. */
+	lcs_free_channel(channel);
+}
+
+/**
+ * LCS free memory for card and channels.
+ * Releases only the card structure itself; the channel io buffers must
+ * already have been freed (see lcs_cleanup_channel / lcs_free_channel).
+ */
+static void
+lcs_free_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, setup, "remcard");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+	kfree(card);
+}
+
+/**
+ * LCS alloc memory for card and channels
+ * Allocates a zeroed lcs_card (GFP_DMA, since the embedded CCWs/buffers
+ * are used for channel I/O), sets the defaults and allocates the io
+ * buffers of both channels.  Returns the card or NULL on failure, with
+ * all partial allocations undone.
+ */
+static struct lcs_card *
+lcs_alloc_card(void)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(2, setup, "alloclcs");
+
+	card = kmalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
+	if (card == NULL)
+		return NULL;
+	memset(card, 0, sizeof(struct lcs_card));
+	card->lan_type = LCS_FRAME_TYPE_AUTO;
+	card->pkt_seq = 0;
+	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
+	/* Allocate io buffers for the read channel. */
+	rc = lcs_alloc_channel(&card->read);
+	if (rc){
+		LCS_DBF_TEXT(2, setup, "iccwerr");
+		lcs_free_card(card);
+		return NULL;
+	}
+	/* Allocate io buffers for the write channel. */
+	rc = lcs_alloc_channel(&card->write);
+	if (rc) {
+		LCS_DBF_TEXT(2, setup, "iccwerr");
+		lcs_cleanup_channel(&card->read);
+		lcs_free_card(card);
+		return NULL;
+	}
+
+	/* NOTE(review): ipm_list is initialized again in lcs_setup_card —
+	 * one of the two INIT_LIST_HEADs looks redundant; confirm. */
+#ifdef CONFIG_IP_MULTICAST
+	INIT_LIST_HEAD(&card->ipm_list);
+#endif
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+	return card;
+}
+
+/*
+ * Setup read channel.
+ * Builds the read CCW ring: LCS_NUM_BUFFS read CCWs chained with
+ * command-chaining + PCI, followed by a tic back to the first CCW.
+ * The last data CCW is suspended so the channel program stops there
+ * until the driver makes more buffers ready.
+ */
+static void
+lcs_setup_read_ccws(struct lcs_card *card)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ireadccw");
+	/* Setup read ccws. */
+	memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
+		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
+		card->read.ccws[cnt].flags =
+			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
+		/*
+		 * Note: we have allocated the buffer with GFP_DMA, so
+		 * we do not need to do set_normalized_cda.
+		 */
+		card->read.ccws[cnt].cda =
+			(__u32) __pa(card->read.iob[cnt].data);
+		((struct lcs_header *)
+		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
+		card->read.iob[cnt].callback = lcs_get_frames_cb;
+		card->read.iob[cnt].state = BUF_STATE_READY;
+		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
+	}
+	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
+	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
+	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
+	/* Last ccw is a tic (transfer in channel). */
+	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+	card->read.ccws[LCS_NUM_BUFFS].cda =
+		(__u32) __pa(card->read.ccws);
+	/* Set initial state of the read channel. */
+	card->read.state = CH_STATE_INIT;
+
+	card->read.io_idx = 0;
+	card->read.buf_idx = 0;
+}
+
+/* Initialize the read channel: CCW ring, irq tasklet and waitqueue. */
+static void
+lcs_setup_read(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "initread");
+
+	lcs_setup_read_ccws(card);
+	/* Initialize read channel tasklet. */
+	card->read.irq_tasklet.data = (unsigned long) &card->read;
+	card->read.irq_tasklet.func = lcs_tasklet;
+	/* Initialize waitqueue. */
+	init_waitqueue_head(&card->read.wait_q);
+}
+
+/*
+ * Setup write channel.
+ * Builds the write CCW ring: LCS_NUM_BUFFS write CCWs (all initially
+ * suspended, count filled in when a buffer is made ready) followed by
+ * a tic back to the first CCW.
+ */
+static void
+lcs_setup_write_ccws(struct lcs_card *card)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(3, setup, "iwritccw");
+	/* Setup write ccws.  Clear all LCS_NUM_BUFFS write ccws plus the
+	 * trailing tic ccw; the "+ 1" must be inside the parentheses so
+	 * the tic ccw is cleared as well (cf. lcs_setup_read_ccws). */
+	memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
+		card->write.ccws[cnt].count = 0;
+		card->write.ccws[cnt].flags =
+			CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
+		/*
+		 * Note: we have allocated the buffer with GFP_DMA, so
+		 * we do not need to do set_normalized_cda.
+		 */
+		card->write.ccws[cnt].cda =
+			(__u32) __pa(card->write.iob[cnt].data);
+	}
+	/* Last ccw is a tic (transfer in channel). */
+	card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+	card->write.ccws[LCS_NUM_BUFFS].cda =
+		(__u32) __pa(card->write.ccws);
+	/* Set initial state of the write channel (not card->read). */
+	card->write.state = CH_STATE_INIT;
+
+	card->write.io_idx = 0;
+	card->write.buf_idx = 0;
+}
+
+/* Initialize the write channel: CCW ring, irq tasklet and waitqueue. */
+static void
+lcs_setup_write(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "initwrit");
+
+	lcs_setup_write_ccws(card);
+	/* Initialize write channel tasklet. */
+	card->write.irq_tasklet.data = (unsigned long) &card->write;
+	card->write.irq_tasklet.func = lcs_tasklet;
+	/* Initialize waitqueue. */
+	init_waitqueue_head(&card->write.wait_q);
+}
+
+/* Replace the mask of threads that may be started and wake up waiters. */
+static void
+lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	card->thread_allowed_mask = threads;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+/* Return the subset of 'threads' that is currently marked as running. */
+static inline int
+lcs_threads_running(struct lcs_card *card, unsigned long threads)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	rc = (card->thread_running_mask & threads);
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
+
+/* Sleep (interruptibly) until none of the given threads is running. */
+static int
+lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
+{
+	return wait_event_interruptible(card->wait_q,
+			lcs_threads_running(card, threads) == 0);
+}
+
+/*
+ * Request a thread start.  Fails with -EPERM if the thread is not in
+ * the allowed mask or a start for it is already pending.
+ */
+static inline int
+lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	if ( !(card->thread_allowed_mask & thread) ||
+	      (card->thread_start_mask & thread) ) {
+		spin_unlock_irqrestore(&card->mask_lock, flags);
+		return -EPERM;
+	}
+	card->thread_start_mask |= thread;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return 0;
+}
+
+/* Clear a thread's running bit and wake up anybody waiting for it. */
+static void
+lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	card->thread_running_mask &= ~thread;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+
+/*
+ * Try to transition a thread from "start requested" to "running".
+ * Returns 1 on success, 0 if no start is pending (caller keeps waiting),
+ * -EPERM if the thread is not allowed or already running.
+ */
+static inline int
+__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	if (card->thread_start_mask & thread){
+		if ((card->thread_allowed_mask & thread) &&
+		    !(card->thread_running_mask & thread)){
+			rc = 1;
+			card->thread_start_mask &= ~thread;
+			card->thread_running_mask |= thread;
+		} else
+			rc = -EPERM;
+	}
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
+
+/* Wait until __lcs_do_run_thread gives a definite answer (rc >= 0). */
+static int
+lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+{
+	int rc = 0;
+	wait_event(card->wait_q,
+		   (rc = __lcs_do_run_thread(card, thread)) >= 0);
+	return rc;
+}
+
+/* Check, under the mask lock, whether a start is pending for 'thread'. */
+static int
+lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	LCS_DBF_TEXT_(4, trace, "  %02x%02x%02x",
+			(u8) card->thread_start_mask,
+			(u8) card->thread_allowed_mask,
+			(u8) card->thread_running_mask);
+	rc = (card->thread_start_mask & thread);
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
+
+/**
+ * Initialize channels,card and state machines.
+ * Sets up both channels, clears the thread masks, and initializes the
+ * locks, waitqueue and command/multicast lists of a freshly allocated
+ * card.
+ */
+static void
+lcs_setup_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, setup, "initcard");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+
+	lcs_setup_read(card);
+	lcs_setup_write(card);
+	/* Set cards initial state. */
+	card->state = DEV_STATE_DOWN;
+	card->tx_buffer = NULL;
+	card->tx_emitted = 0;
+
+	/* Initialize kernel thread task used for LGW commands. */
+	INIT_WORK(&card->kernel_thread_starter,
+		  (void *)lcs_start_kernel_thread,card);
+	card->thread_start_mask = 0;
+	card->thread_allowed_mask = 0;
+	card->thread_running_mask = 0;
+	init_waitqueue_head(&card->wait_q);
+	spin_lock_init(&card->lock);
+	spin_lock_init(&card->ipm_lock);
+	spin_lock_init(&card->mask_lock);
+#ifdef CONFIG_IP_MULTICAST
+	INIT_LIST_HEAD(&card->ipm_list);
+#endif
+	INIT_LIST_HEAD(&card->lancmd_waiters);
+}
+
+/*
+ * Drain and free the multicast list.  Entries that were already set on
+ * the card (state != SET_REQUIRED) are deleted on the LAN first; the
+ * ipm_lock is dropped around lcs_send_delipm — presumably because the
+ * lancmd round-trip can sleep (TODO confirm).
+ */
+static inline void
+lcs_clear_multicast_list(struct lcs_card *card)
+{
+#ifdef	CONFIG_IP_MULTICAST
+	struct lcs_ipm_list *ipm;
+	unsigned long flags;
+
+	/* Free multicast list. */
+	LCS_DBF_TEXT(3, setup, "clmclist");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	while (!list_empty(&card->ipm_list)){
+		ipm = list_entry(card->ipm_list.next,
+				 struct lcs_ipm_list, list);
+		list_del(&ipm->list);
+		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			lcs_send_delipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+		}
+		kfree(ipm);
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+#endif
+}
+/**
+ * Cleanup channels,card and state machines.
+ * Frees the net_device (if one was allocated) and tears down both
+ * channels; the card structure itself is freed by lcs_free_card.
+ */
+static void
+lcs_cleanup_card(struct lcs_card *card)
+{
+
+	LCS_DBF_TEXT(3, setup, "cleancrd");
+	LCS_DBF_HEX(2,setup,&card,sizeof(void*));
+
+	if (card->dev != NULL)
+		free_netdev(card->dev);
+	/* Cleanup channels. */
+	lcs_cleanup_channel(&card->write);
+	lcs_cleanup_channel(&card->read);
+}
+
+/**
+ * Start channel.
+ * Kicks off the channel program at the current io_idx under the ccw
+ * device lock; on success the channel enters CH_STATE_RUNNING.  Returns
+ * the ccw_device_start return code.
+ */
+static int
+lcs_start_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT_(4,trace,"ssch%s", channel->ccwdev->dev.bus_id);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start(channel->ccwdev,
+			      channel->ccws + channel->io_idx, 0, 0,
+			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
+	if (rc == 0)
+		channel->state = CH_STATE_RUNNING;
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
+		PRINT_ERR("Error in starting channel, rc=%d!\n", rc);
+	}
+	return rc;
+}
+
+/*
+ * Issue a clear on the channel and wait for the interrupt handler to
+ * report CH_STATE_CLEARED, then mark the channel stopped.  May sleep.
+ */
+static int
+lcs_clear_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4,trace,"clearch");
+	LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
+		return rc;
+	}
+	wait_event(channel->wait_q, (channel->state == CH_STATE_CLEARED));
+	channel->state = CH_STATE_STOPPED;
+	return rc;
+}
+
+
+/**
+ * Stop channel.
+ * Halts the channel, waits for the halt to complete, then clears the
+ * subchannel.  A channel already in CH_STATE_STOPPED is left alone.
+ */
+static int
+lcs_stop_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	if (channel->state == CH_STATE_STOPPED)
+		return 0;
+	LCS_DBF_TEXT(4,trace,"haltsch");
+	LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
+	channel->state = CH_STATE_INIT;
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4,trace,"ehsc%s", channel->ccwdev->dev.bus_id);
+		return rc;
+	}
+	/* Asynchronous halt initiated. Wait for its completion. */
+	wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED));
+	lcs_clear_channel(channel);
+	return 0;
+}
+
+/**
+ * start read and write channel
+ * On write-channel failure the already started read channel is stopped
+ * again so no half-started state is left behind.
+ */
+static int
+lcs_start_channels(struct lcs_card *card)
+{
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "chstart");
+	/* start read channel */
+	rc = lcs_start_channel(&card->read);
+	if (rc)
+		return rc;
+	/* start write channel */
+	rc = lcs_start_channel(&card->write);
+	if (rc)
+		lcs_stop_channel(&card->read);
+	return rc;
+}
+
+/**
+ * stop read and write channel
+ * Best effort: both channels are stopped regardless of individual
+ * failures; always returns 0.
+ */
+static int
+lcs_stop_channels(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, trace, "chhalt");
+	lcs_stop_channel(&card->read);
+	lcs_stop_channel(&card->write);
+	return 0;
+}
+
+/**
+ * Get empty buffer.
+ * Scans the buffer ring once, starting at io_idx, for an empty buffer
+ * and locks it.  Returns NULL if the full ring was scanned without
+ * finding one.  Caller must hold the ccw device lock.
+ */
+static struct lcs_buffer *
+__lcs_get_buffer(struct lcs_channel *channel)
+{
+	int index;
+
+	LCS_DBF_TEXT(5, trace, "_getbuff");
+	index = channel->io_idx;
+	do {
+		if (channel->iob[index].state == BUF_STATE_EMPTY) {
+			channel->iob[index].state = BUF_STATE_LOCKED;
+			return channel->iob + index;
+		}
+		/* LCS_NUM_BUFFS is a power of two, so masking wraps. */
+		index = (index + 1) & (LCS_NUM_BUFFS - 1);
+	} while (index != channel->io_idx);
+	return NULL;
+}
+
+/* Locked wrapper around __lcs_get_buffer. */
+static struct lcs_buffer *
+lcs_get_buffer(struct lcs_channel *channel)
+{
+	struct lcs_buffer *buffer;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(5, trace, "getbuff");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer = __lcs_get_buffer(channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	return buffer;
+}
+
+/**
+ * Resume channel program if the channel is suspended.
+ * No-op if the channel is not suspended, or if the CCW at io_idx still
+ * carries the suspend flag (nothing new to process).  Called with the
+ * ccw device lock held or from the interrupt handler.
+ */
+static int
+__lcs_resume_channel(struct lcs_channel *channel)
+{
+	int rc;
+
+	if (channel->state != CH_STATE_SUSPENDED)
+		return 0;
+	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
+		return 0;
+	LCS_DBF_TEXT_(5, trace, "rsch%s", channel->ccwdev->dev.bus_id);
+	rc = ccw_device_resume(channel->ccwdev);
+	if (rc) {
+		LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id);
+		PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
+	} else
+		channel->state = CH_STATE_RUNNING;
+	return rc;
+
+}
+
+/**
+ * Make a buffer ready for processing.
+ * Adjusts the PCI/suspend flags of the CCW at 'index' relative to its
+ * ring neighbours: the suspend bit is only cleared while the next
+ * buffer is still suspended, so the channel program always has a stop
+ * point and cannot loop through the tic endlessly.
+ */
+static inline void
+__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
+{
+	int prev, next;
+
+	LCS_DBF_TEXT(5, trace, "rdybits");
+	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
+	next = (index + 1) & (LCS_NUM_BUFFS - 1);
+	/* Check if we may clear the suspend bit of this buffer. */
+	if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
+		/* Check if we have to set the PCI bit. */
+		if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
+			/* Suspend bit of the previous buffer is not set. */
+			channel->ccws[index].flags |= CCW_FLAG_PCI;
+		/* Suspend bit of the next buffer is set. */
+		channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
+	}
+}
+
+/*
+ * Hand a locked/processed buffer back to the channel program: set its
+ * CCW length, fix up the PCI/suspend bits and resume the channel if it
+ * was suspended.  BUGs on a buffer in an unexpected state.
+ */
+static int
+lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	unsigned long flags;
+	int index, rc;
+
+	LCS_DBF_TEXT(5, trace, "rdybuff");
+	if (buffer->state != BUF_STATE_LOCKED &&
+	    buffer->state != BUF_STATE_PROCESSED)
+		BUG();
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer->state = BUF_STATE_READY;
+	index = buffer - channel->iob;
+	/* Set length. */
+	channel->ccws[index].count = buffer->count;
+	/* Check relevant PCI/suspend bits. */
+	__lcs_ready_buffer_bits(channel, index);
+	rc = __lcs_resume_channel(channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	return rc;
+}
+
+/**
+ * Mark the buffer as processed. Take care of the suspend bit
+ * of the previous buffer. This function is called from
+ * interrupt context, so the lock must not be taken.
+ * Re-suspends this CCW, clears stray PCI bits, re-evaluates a ready
+ * previous buffer, and resumes the channel.
+ */
+static int
+__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	int index, prev, next;
+
+	LCS_DBF_TEXT(5, trace, "prcsbuff");
+	if (buffer->state != BUF_STATE_READY)
+		BUG();
+	buffer->state = BUF_STATE_PROCESSED;
+	index = buffer - channel->iob;
+	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
+	next = (index + 1) & (LCS_NUM_BUFFS - 1);
+	/* Set the suspend bit and clear the PCI bit of this buffer. */
+	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
+	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
+	/* Check the suspend bit of the previous buffer. */
+	if (channel->iob[prev].state == BUF_STATE_READY) {
+		/*
+		 * Previous buffer is in state ready. It might have
+		 * happened in lcs_ready_buffer that the suspend bit
+		 * has not been cleared to avoid an endless loop.
+		 * Do it now.
+		 */
+		__lcs_ready_buffer_bits(channel, prev);
+	}
+	/* Clear PCI bit of next buffer. */
+	channel->ccws[next].flags &= ~CCW_FLAG_PCI;
+	return __lcs_resume_channel(channel);
+}
+
+/**
+ * Put a processed buffer back to state empty.
+ * The buffer must currently be LOCKED or PROCESSED; the state change
+ * is done under the ccwdev lock.
+ */
+static void
+lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	unsigned long flags;
+
+	LCS_DBF_TEXT(5, trace, "relbuff");
+	if (buffer->state != BUF_STATE_LOCKED &&
+	    buffer->state != BUF_STATE_PROCESSED)
+		BUG();
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer->state = BUF_STATE_EMPTY;
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+}
+
+/**
+ * Get buffer for a lan command.
+ * Sleeps until a write buffer is available. A terminating zero __u16
+ * is appended after @count payload bytes (plus the lcs_header), and
+ * the command header (offset/type/slot) is pre-initialized.
+ */
+static struct lcs_buffer *
+lcs_get_lancmd(struct lcs_card *card, int count)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(4, trace, "getlncmd");
+	/* Get buffer and wait if none is available. */
+	wait_event(card->write.wait_q,
+		   ((buffer = lcs_get_buffer(&card->write)) != NULL));
+	count += sizeof(struct lcs_header);
+	/* Zero __u16 after the command terminates the frame chain. */
+	*(__u16 *)(buffer->data + count) = 0;
+	buffer->count = count + sizeof(__u16);
+	buffer->callback = lcs_release_buffer;
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->offset = count;
+	cmd->type = LCS_FRAME_TYPE_CONTROL;
+	cmd->slot = 0;
+	return buffer;
+}
+
+
+/**
+ * Take an additional reference on a lancmd reply object.
+ */
+static void
+lcs_get_reply(struct lcs_reply *reply)
+{
+	WARN_ON(atomic_read(&reply->refcnt) <= 0);
+	atomic_inc(&reply->refcnt);
+}
+
+/**
+ * Drop a reference on a lancmd reply object; the object is freed
+ * when the last reference goes away.
+ */
+static void
+lcs_put_reply(struct lcs_reply *reply)
+{
+	WARN_ON(atomic_read(&reply->refcnt) <= 0);
+	if (atomic_dec_and_test(&reply->refcnt))
+		kfree(reply);
+}
+
+/**
+ * Allocate and initialize a reply object for the given command.
+ * Starts with a refcount of 1 (owned by the sender). GFP_ATOMIC is
+ * used - presumably so callers need not be able to sleep; confirm.
+ * Returns NULL on allocation failure.
+ */
+static struct lcs_reply *
+lcs_alloc_reply(struct lcs_cmd *cmd)
+{
+	struct lcs_reply *reply;
+
+	LCS_DBF_TEXT(4, trace, "getreply");
+
+	reply = kmalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
+	if (!reply)
+		return NULL;
+	memset(reply,0,sizeof(struct lcs_reply));
+	atomic_set(&reply->refcnt,1);
+	/* Replies are matched to commands via the sequence number. */
+	reply->sequence_no = cmd->sequence_no;
+	reply->received = 0;
+	reply->rc = 0;
+	init_waitqueue_head(&reply->wait_q);
+
+	return reply;
+}
+
+/**
+ * Notifier function for lancmd replies. Called from read irq.
+ * Matches the incoming command against the waiter list by sequence
+ * number, runs the registered callback (still under card->lock) and
+ * wakes the sleeping sender.
+ */
+static void
+lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	struct list_head *l, *n;
+	struct lcs_reply *reply;
+
+	LCS_DBF_TEXT(4, trace, "notiwait");
+	spin_lock(&card->lock);
+	list_for_each_safe(l, n, &card->lancmd_waiters) {
+		reply = list_entry(l, struct lcs_reply, list);
+		if (reply->sequence_no == cmd->sequence_no) {
+			/* Hold a reference while we use the reply. */
+			lcs_get_reply(reply);
+			list_del_init(&reply->list);
+			if (reply->callback != NULL)
+				reply->callback(card, cmd);
+			reply->received = 1;
+			reply->rc = cmd->return_code;
+			wake_up(&reply->wait_q);
+			lcs_put_reply(reply);
+			break;
+		}
+	}
+	spin_unlock(&card->lock);
+}
+
+/**
+ * Timer callback for lan commands: if the reply is still on the
+ * waiter list when the timeout expires, detach it and wake up the
+ * sender with rc = -ETIME.
+ */
+void
+lcs_lancmd_timeout(unsigned long data)
+{
+	struct lcs_reply *reply, *list_reply, *r;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "timeout");
+	reply = (struct lcs_reply *) data;
+	spin_lock_irqsave(&reply->card->lock, flags);
+	list_for_each_entry_safe(list_reply, r,
+				 &reply->card->lancmd_waiters,list) {
+		/* Only act if our reply is still queued (not answered yet). */
+		if (reply == list_reply) {
+			lcs_get_reply(reply);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&reply->card->lock, flags);
+			reply->received = 1;
+			reply->rc = -ETIME;
+			wake_up(&reply->wait_q);
+			lcs_put_reply(reply);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&reply->card->lock, flags);
+}
+
+/**
+ * Send a lan command and wait (with timeout) for the reply.
+ * @reply_callback is run from the read irq when the answer arrives.
+ * Returns 0 on success, -ENOMEM if no reply object could be
+ * allocated, the submit rc if the buffer could not be made ready,
+ * or -EIO if the card reported a non-zero return code / timeout.
+ */
+static int
+lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
+		void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
+{
+	struct lcs_reply *reply;
+	struct lcs_cmd *cmd;
+	struct timer_list timer;
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4, trace, "sendcmd");
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->return_code = 0;
+	cmd->sequence_no = card->sequence_no++;
+	reply = lcs_alloc_reply(cmd);
+	if (!reply)
+		return -ENOMEM;
+	reply->callback = reply_callback;
+	reply->card = card;
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->lancmd_waiters);
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	buffer->callback = lcs_release_buffer;
+	rc = lcs_ready_buffer(&card->write, buffer);
+	if (rc) {
+		/*
+		 * Submit failed: nothing will ever answer or wake this
+		 * reply, so take it off the waiter list and drop the
+		 * initial reference to avoid leaking it.
+		 */
+		spin_lock_irqsave(&card->lock, flags);
+		list_del_init(&reply->list);
+		spin_unlock_irqrestore(&card->lock, flags);
+		lcs_put_reply(reply);
+		return rc;
+	}
+	init_timer(&timer);
+	timer.function = lcs_lancmd_timeout;
+	timer.data = (unsigned long) reply;
+	timer.expires = jiffies + HZ*card->lancmd_timeout;
+	add_timer(&timer);
+	wait_event(reply->wait_q, reply->received);
+	del_timer_sync(&timer);
+	LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
+	rc = reply->rc;
+	lcs_put_reply(reply);
+	return rc ? -EIO : 0;
+}
+
+/**
+ * Issue the LCS STARTUP command, announcing our I/O buffer size
+ * to the adapter.
+ */
+static int
+lcs_send_startup(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "startup");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->initiator = initiator;
+	cmd->cmd_code = LCS_CMD_STARTUP;
+	cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * Issue the LCS SHUTDOWN command (always TCP/IP initiated).
+ */
+static int
+lcs_send_shutdown(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "shutdown");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd_code = LCS_CMD_SHUTDOWN;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * LANSTAT reply callback: record the adapter MAC address reported
+ * by the card. Runs from the read irq under card->lock.
+ */
+static void
+__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "statcb");
+	memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
+}
+
+/**
+ * Issue the LANSTAT command; its reply callback saves the adapter
+ * MAC address into card->mac.
+ */
+static int
+lcs_send_lanstat(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2,trace, "cmdstat");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	/* Setup lanstat command. */
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd_code = LCS_CMD_LANSTAT;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
+}
+
+/**
+ * Issue the STOPLAN command for the currently configured lan type
+ * and port number.
+ */
+static int
+lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdstpln");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->initiator = initiator;
+	cmd->cmd_code = LCS_CMD_STOPLAN;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * STARTLAN reply callback: the card echoes back the lan type and
+ * port number it actually started; store them on the card.
+ * Runs from the read irq under card->lock.
+ */
+static void
+__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "srtlancb");
+	card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
+	card->portno = cmd->cmd.lcs_std_cmd.portno;
+}
+
+/**
+ * Issue the STARTLAN command for the configured lan type/port; the
+ * reply callback records what the card actually started.
+ */
+static int
+lcs_send_startlan(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdstaln");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->initiator = initiator;
+	cmd->cmd_code = LCS_CMD_STARTLAN;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
+}
+
+#ifdef CONFIG_IP_MULTICAST
+/**
+ * send setipm command (Multicast)
+ * Registers one IP/MAC multicast pair with the card via the
+ * QIPASSIST control message layout.
+ */
+static int
+lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdsetim");
+	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_SETIPM;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	/* Single IP/MAC pair per command. */
+	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
+	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
+	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * send delipm command (Multicast)
+ * Removes one IP/MAC multicast pair from the card; mirror image of
+ * lcs_send_setipm() with cmd_code LCS_CMD_DELIPM.
+ */
+static int
+lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmddelim");
+	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_DELIPM;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
+	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
+	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * QIPASSIST reply callback: store the supported/enabled IP assist
+ * masks reported by the card.
+ */
+static void
+__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "chkmccb");
+	card->ip_assists_supported =
+		cmd->cmd.lcs_qipassist.ip_assists_supported;
+	card->ip_assists_enabled =
+		cmd->cmd.lcs_qipassist.ip_assists_enabled;
+}
+
+/**
+ * Query the card's IP assist capabilities and report whether
+ * multicast is supported. Returns 0 if multicast assist is
+ * available, -EOPNOTSUPP otherwise (including query failure).
+ */
+static int
+lcs_check_multicast_support(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "cmdqipa");
+	/* Send query ipassist. */
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_QIPASSIST;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
+	if (rc != 0) {
+		PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n");
+		return -EOPNOTSUPP;
+	}
+	/* Print out supported assists: IPv6 */
+	PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
+		   (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
+		   "with" : "without");
+	/* Print out supported assist: Multicast */
+	PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
+		   (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
+		   "with" : "without");
+	if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
+		return 0;
+	return -EOPNOTSUPP;
+}
+
+/**
+ * set or del multicast address on LCS card
+ * Walks the ipm_list and pushes pending SET/DEL requests to the
+ * card. The ipm_lock must be dropped around the (sleeping) lancmds,
+ * so the walk restarts from the top (goto list_modified) after every
+ * modification of the list.
+ */
+static void
+lcs_fix_multicast_list(struct lcs_card *card)
+{
+	struct list_head failed_list;
+	struct lcs_ipm_list *ipm, *tmp;
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4,trace, "fixipm");
+	INIT_LIST_HEAD(&failed_list);
+	spin_lock_irqsave(&card->ipm_lock, flags);
+list_modified:
+	list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
+		switch (ipm->ipm_state) {
+		case LCS_IPM_STATE_SET_REQUIRED:
+			/* del from ipm_list so noone else can tamper with
+			 * this entry */
+			list_del_init(&ipm->list);
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			rc = lcs_send_setipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+			if (rc) {
+				PRINT_INFO("Adding multicast address failed."
+					   "Table possibly full!\n");
+				/* store ipm in failed list -> will be added
+				 * to ipm_list again, so a retry will be done
+				 * during the next call of this function */
+				list_add_tail(&ipm->list, &failed_list);
+			} else {
+				ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
+				/* re-insert into ipm_list */
+				list_add_tail(&ipm->list, &card->ipm_list);
+			}
+			goto list_modified;
+		case LCS_IPM_STATE_DEL_REQUIRED:
+			list_del(&ipm->list);
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			lcs_send_delipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+			kfree(ipm);
+			goto list_modified;
+		case LCS_IPM_STATE_ON_CARD:
+			break;
+		}
+	}
+	/* re-insert all entries from the failed_list into ipm_list */
+	list_for_each_entry(ipm, &failed_list, list) {
+		list_del_init(&ipm->list);
+		list_add_tail(&ipm->list, &card->ipm_list);
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+	if (card->state == DEV_STATE_UP)
+		netif_wake_queue(card->dev);
+}
+
+/**
+ * Map an IPv4 multicast address to the corresponding hardware
+ * multicast MAC for the device's link type (token ring or ethernet).
+ */
+static void
+lcs_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
+{
+	LCS_DBF_TEXT(4,trace, "getmac");
+	if (dev->type != ARPHRD_IEEE802_TR)
+		ip_eth_mc_map(ipm, mac);
+	else
+		ip_tr_mc_map(ipm, mac);
+}
+
+/**
+ * Mark every entry of the card's ipm_list that no longer appears in
+ * the device's current IPv4 multicast list as DEL_REQUIRED; the
+ * actual delipm is sent later by lcs_fix_multicast_list().
+ */
+static inline void
+lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
+{
+	struct ip_mc_list *im4;
+	struct list_head *l;
+	struct lcs_ipm_list *ipm;
+	unsigned long flags;
+	char buf[MAX_ADDR_LEN];
+
+	LCS_DBF_TEXT(4, trace, "remmclst");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	list_for_each(l, &card->ipm_list) {
+		ipm = list_entry(l, struct lcs_ipm_list, list);
+		for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
+			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
+			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
+			     (memcmp(buf, &ipm->ipm.mac_addr,
+				     LCS_MAC_LENGTH) == 0) )
+				break;
+		}
+		/* No match in the kernel's list -> schedule removal. */
+		if (im4 == NULL)
+			ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+}
+
+/**
+ * Look up a multicast entry by IP address and mapped MAC.
+ * Walks ipm_list under ipm_lock; returns the matching entry or NULL.
+ */
+static inline struct lcs_ipm_list *
+lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
+{
+	struct lcs_ipm_list *tmp, *ipm = NULL;
+	struct list_head *l;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "chkmcent");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	list_for_each(l, &card->ipm_list) {
+		tmp = list_entry(l, struct lcs_ipm_list, list);
+		if (tmp->ipm.ip_addr == im4->multiaddr &&
+		    memcmp(buf, &tmp->ipm.mac_addr, LCS_MAC_LENGTH) == 0) {
+			ipm = tmp;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+	return ipm;
+}
+
+/**
+ * For every IPv4 multicast address on the device that is not yet in
+ * the card's ipm_list, allocate a new entry in state SET_REQUIRED;
+ * the actual setipm is sent later by lcs_fix_multicast_list().
+ */
+static inline void
+lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
+{
+
+	struct ip_mc_list *im4;
+	struct lcs_ipm_list *ipm;
+	char buf[MAX_ADDR_LEN];
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "setmclst");
+	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
+		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
+		ipm = lcs_check_addr_entry(card, im4, buf);
+		if (ipm != NULL)
+			continue;	/* Address already in list. */
+		/* GFP_ATOMIC - caller holds mc_list_lock (read side). */
+		ipm = (struct lcs_ipm_list *)
+			kmalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
+		if (ipm == NULL) {
+			PRINT_INFO("Not enough memory to add "
+				   "new multicast entry!\n");
+			break;
+		}
+		memset(ipm, 0, sizeof(struct lcs_ipm_list));
+		memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
+		ipm->ipm.ip_addr = im4->multiaddr;
+		ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
+		spin_lock_irqsave(&card->ipm_lock, flags);
+		list_add(&ipm->list, &card->ipm_list);
+		spin_unlock_irqrestore(&card->ipm_lock, flags);
+	}
+}
+
+/**
+ * Kernel-thread entry point for the LCS_SET_MC_THREAD: reconcile the
+ * device's IPv4 multicast list with the card (remove stale entries,
+ * add new ones, then push the changes).
+ */
+static int
+lcs_register_mc_addresses(void *data)
+{
+	struct lcs_card *card;
+	struct in_device *in4_dev;
+
+	card = (struct lcs_card *) data;
+	daemonize("regipm");
+
+	if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
+		return 0;
+	LCS_DBF_TEXT(4, trace, "regmulti");
+
+	in4_dev = in_dev_get(card->dev);
+	if (in4_dev == NULL)
+		goto out;
+	/* Snapshot the kernel's mc_list while holding its read lock. */
+	read_lock(&in4_dev->mc_list_lock);
+	lcs_remove_mc_addresses(card,in4_dev);
+	lcs_set_mc_addresses(card, in4_dev);
+	read_unlock(&in4_dev->mc_list_lock);
+	in_dev_put(in4_dev);
+
+	lcs_fix_multicast_list(card);
+out:
+	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
+	return 0;
+}
+/**
+ * net_device set_multicast_list hook. The real work is deferred to
+ * the LCS_SET_MC_THREAD kernel thread via the thread starter work
+ * item, since lan commands may sleep.
+ */
+static void
+lcs_set_multicast_list(struct net_device *dev)
+{
+	struct lcs_card *card = (struct lcs_card *) dev->priv;
+
+	LCS_DBF_TEXT(4, trace, "setmulti");
+	if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
+		schedule_work(&card->kernel_thread_starter);
+}
+
+#endif /* CONFIG_IP_MULTICAST */
+
+/**
+ * Check whether the common I/O layer handed us an error pointer
+ * instead of a real irb. Returns 0 for a valid irb, otherwise logs
+ * the error and returns the (negative) PTR_ERR value.
+ */
+static long
+lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+	if (!IS_ERR(irb))
+		return 0;
+
+	switch (PTR_ERR(irb)) {
+	case -EIO:
+		PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
+		LCS_DBF_TEXT(2, trace, "ckirberr");
+		LCS_DBF_TEXT_(2, trace, "  rc%d", -EIO);
+		break;
+	case -ETIMEDOUT:
+		PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
+		LCS_DBF_TEXT(2, trace, "ckirberr");
+		LCS_DBF_TEXT_(2, trace, "  rc%d", -ETIMEDOUT);
+		break;
+	default:
+		PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
+			   cdev->dev.bus_id);
+		LCS_DBF_TEXT(2, trace, "ckirberr");
+		LCS_DBF_TEXT(2, trace, "  rc???");
+	}
+	return PTR_ERR(irb);
+}
+
+
+/**
+ * IRQ Handler for LCS channels.
+ *
+ * Determines how far the channel program has progressed, marks every
+ * completed ring buffer as processed, records the resulting channel
+ * state (stopped/suspended/halted/cleared) and defers the remaining
+ * work to the per-channel tasklet.
+ */
+static void
+lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct lcs_card *card;
+	struct lcs_channel *channel;
+	int index;
+
+	if (lcs_check_irb_error(cdev, irb))
+		return;
+
+	card = CARD_FROM_DEV(cdev);
+	if (card->read.ccwdev == cdev)
+		channel = &card->read;
+	else
+		channel = &card->write;
+
+	LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id);
+	LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat);
+	LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl);
+
+	/* How far in the ccw chain have we processed? */
+	if ((channel->state != CH_STATE_INIT) &&
+	    (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
+		index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
+			- channel->ccws;
+		/*
+		 * Fix: test the PCI status bit with '&'. The original
+		 * code used '|', which made this condition always true
+		 * (SCHN_STAT_PCI is a non-zero constant), so the index
+		 * was decremented unconditionally.
+		 */
+		if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) ||
+		    (irb->scsw.cstat & SCHN_STAT_PCI))
+			/* Bloody io subsystem tells us lies about cpa... */
+			index = (index - 1) & (LCS_NUM_BUFFS - 1);
+		while (channel->io_idx != index) {
+			__lcs_processed_buffer(channel,
+					       channel->iob + channel->io_idx);
+			channel->io_idx =
+				(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
+		}
+	}
+
+	if ((irb->scsw.dstat & DEV_STAT_DEV_END) ||
+	    (irb->scsw.dstat & DEV_STAT_CHN_END) ||
+	    (irb->scsw.dstat & DEV_STAT_UNIT_CHECK))
+		/* Mark channel as stopped. */
+		channel->state = CH_STATE_STOPPED;
+	else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED)
+		/* CCW execution stopped on a suspend bit. */
+		channel->state = CH_STATE_SUSPENDED;
+
+	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
+		if (irb->scsw.cc != 0) {
+			/* Halt was deferred (cc != 0); retry it. */
+			ccw_device_halt(channel->ccwdev, (addr_t) channel);
+			return;
+		}
+		/* The channel has been stopped by halt_IO. */
+		channel->state = CH_STATE_HALTED;
+	}
+
+	if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
+		channel->state = CH_STATE_CLEARED;
+	}
+	/* Do the rest in the tasklet. */
+	tasklet_schedule(&channel->irq_tasklet);
+}
+
+/**
+ * Tasklet for IRQ handler: run the callbacks of all buffers that the
+ * irq handler marked as processed, restart a stopped channel, resume
+ * a suspended one if work is pending, and wake up anyone waiting for
+ * a channel state change.
+ */
+static void
+lcs_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	struct lcs_channel *channel;
+	struct lcs_buffer *iob;
+	int buf_idx;
+	int rc;
+
+	channel = (struct lcs_channel *) data;
+	LCS_DBF_TEXT_(5, trace, "tlet%s",channel->ccwdev->dev.bus_id);
+
+	/* Check for processed buffers. */
+	iob = channel->iob;
+	buf_idx = channel->buf_idx;
+	while (iob[buf_idx].state == BUF_STATE_PROCESSED) {
+		/* Do the callback thing. */
+		if (iob[buf_idx].callback != NULL)
+			iob[buf_idx].callback(channel, iob + buf_idx);
+		buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
+	}
+	channel->buf_idx = buf_idx;
+
+	if (channel->state == CH_STATE_STOPPED)
+		// FIXME: what if rc != 0 ??
+		rc = lcs_start_channel(channel);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	if (channel->state == CH_STATE_SUSPENDED &&
+	    channel->iob[channel->io_idx].state == BUF_STATE_READY) {
+		// FIXME: what if rc != 0 ??
+		rc = __lcs_resume_channel(channel);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	/* Something happened on the channel. Wake up waiters. */
+	wake_up(&channel->wait_q);
+}
+
+/**
+ * Finish current tx buffer and make it ready for transmit.
+ * Appends the terminating zero __u16 (hence count += 2), submits the
+ * buffer to the write channel and detaches it from the card.
+ * NOTE(review): callers appear to hold card->lock - confirm.
+ */
+static void
+__lcs_emit_txbuffer(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(5, trace, "emittx");
+	*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
+	card->tx_buffer->count += 2;
+	lcs_ready_buffer(&card->write, card->tx_buffer);
+	card->tx_buffer = NULL;
+	card->tx_emitted++;
+}
+
+/**
+ * Callback for finished tx buffers: release the buffer, decrement
+ * the in-flight count and, when the last in-flight buffer completed
+ * while a partially filled buffer is pending, emit that one too.
+ */
+static void
+lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(5, trace, "txbuffcb");
+	/* Put buffer back to pool. */
+	lcs_release_buffer(channel, buffer);
+	/* Recover the owning card from the embedded write channel. */
+	card = (struct lcs_card *)
+		((char *) channel - offsetof(struct lcs_card, write));
+	spin_lock(&card->lock);
+	card->tx_emitted--;
+	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
+		/*
+		 * Last running tx buffer has finished. Submit partially
+		 * filled current buffer.
+		 */
+		__lcs_emit_txbuffer(card);
+	spin_unlock(&card->lock);
+}
+
+/**
+ * Packet transmit function called by network stack.
+ * Copies the skb into the (possibly shared) current tx buffer behind
+ * an lcs_header; the buffer is emitted immediately if it is the only
+ * one in flight, otherwise it is flushed lazily by lcs_txbuffer_cb.
+ * NOTE(review): called with card->lock held by lcs_start_xmit.
+ */
+static int
+__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+		 struct net_device *dev)
+{
+	struct lcs_header *header;
+
+	LCS_DBF_TEXT(5, trace, "hardxmit");
+	if (skb == NULL) {
+		card->stats.tx_dropped++;
+		card->stats.tx_errors++;
+		return -EIO;
+	}
+	if (card->state != DEV_STATE_UP) {
+		/* Card is down: drop the packet silently. */
+		dev_kfree_skb(skb);
+		card->stats.tx_dropped++;
+		card->stats.tx_errors++;
+		card->stats.tx_carrier_errors++;
+		return 0;
+	}
+	if (netif_queue_stopped(dev) ) {
+		card->stats.tx_dropped++;
+		return -EBUSY;
+	}
+	if (card->tx_buffer != NULL &&
+	    card->tx_buffer->count + sizeof(struct lcs_header) +
+	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
+		/* skb too big for current tx buffer. */
+		__lcs_emit_txbuffer(card);
+	if (card->tx_buffer == NULL) {
+		/* Get new tx buffer */
+		card->tx_buffer = lcs_get_buffer(&card->write);
+		if (card->tx_buffer == NULL) {
+			card->stats.tx_dropped++;
+			return -EBUSY;
+		}
+		card->tx_buffer->callback = lcs_txbuffer_cb;
+		card->tx_buffer->count = 0;
+	}
+	header = (struct lcs_header *)
+		(card->tx_buffer->data + card->tx_buffer->count);
+	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
+	/* offset = end of this frame relative to buffer start. */
+	header->offset = card->tx_buffer->count;
+	header->type = card->lan_type;
+	header->slot = card->portno;
+	memcpy(header + 1, skb->data, skb->len);
+	card->stats.tx_bytes += skb->len;
+	card->stats.tx_packets++;
+	dev_kfree_skb(skb);
+	if (card->tx_emitted <= 0)
+		/* If this is the first tx buffer emit it immediately. */
+		__lcs_emit_txbuffer(card);
+	return 0;
+}
+
+/**
+ * net_device hard_start_xmit hook: serialize on card->lock and
+ * delegate to __lcs_start_xmit().
+ */
+static int
+lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct lcs_card *card = (struct lcs_card *) dev->priv;
+	int rc;
+
+	LCS_DBF_TEXT(5, trace, "pktxmit");
+	spin_lock(&card->lock);
+	rc = __lcs_start_xmit(card, skb, dev);
+	spin_unlock(&card->lock);
+	return rc;
+}
+
+/**
+ * send startlan and lanstat command to make LCS device ready
+ * Auto-detect the lan type by trying, in order, every frame type
+ * compiled into the kernel (ethernet, token ring, FDDI). The first
+ * STARTLAN that succeeds wins; card->lan_type keeps that value.
+ */
+static int
+lcs_startlan_auto(struct lcs_card *card)
+{
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "strtauto");
+#ifdef CONFIG_NET_ETHERNET
+	card->lan_type = LCS_FRAME_TYPE_ENET;
+	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	if (rc == 0)
+		return 0;
+
+#endif
+#ifdef CONFIG_TR
+	card->lan_type = LCS_FRAME_TYPE_TR;
+	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	if (rc == 0)
+		return 0;
+#endif
+#ifdef CONFIG_FDDI
+	card->lan_type = LCS_FRAME_TYPE_FDDI;
+	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	if (rc == 0)
+		return 0;
+#endif
+	return -EIO;
+}
+
+/**
+ * Start the lan: use the configured port, or probe ports when none
+ * was configured, auto-detecting the lan type if requested. On
+ * success a LANSTAT is issued to fetch the MAC address.
+ * NOTE(review): the probe loop runs i = 0..16 inclusive (17 ports);
+ * verify against the adapter's actual maximum port number.
+ */
+static int
+lcs_startlan(struct lcs_card *card)
+{
+	int rc, i;
+
+	LCS_DBF_TEXT(2, trace, "startlan");
+	rc = 0;
+	if (card->portno != LCS_INVALID_PORT_NO) {
+		if (card->lan_type == LCS_FRAME_TYPE_AUTO)
+			rc = lcs_startlan_auto(card);
+		else
+			rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	} else {
+		for (i = 0; i <= 16; i++) {
+			card->portno = i;
+			if (card->lan_type != LCS_FRAME_TYPE_AUTO)
+				rc = lcs_send_startlan(card,
+						       LCS_INITIATOR_TCPIP);
+			else
+				/* autodetecting lan type */
+				rc = lcs_startlan_auto(card);
+			if (rc == 0)
+				break;
+		}
+	}
+	if (rc == 0)
+		return lcs_send_lanstat(card);
+	return rc;
+}
+
+/**
+ * LCS detect function
+ * setup channels and make them I/O ready
+ * Full bring-up sequence: stop channels, restart them, send STARTUP
+ * and start the lan. On success the card is marked DEV_STATE_UP;
+ * on failure it is marked down and the channels reset to INIT.
+ */
+static int
+lcs_detect(struct lcs_card *card)
+{
+	int rc = 0;
+
+	LCS_DBF_TEXT(2, setup, "lcsdetct");
+	/* start/reset card */
+	if (card->dev)
+		netif_stop_queue(card->dev);
+	rc = lcs_stop_channels(card);
+	if (rc == 0) {
+		rc = lcs_start_channels(card);
+		if (rc == 0) {
+			rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
+			if (rc == 0)
+				rc = lcs_startlan(card);
+		}
+	}
+	if (rc == 0) {
+		card->state = DEV_STATE_UP;
+	} else {
+		card->state = DEV_STATE_DOWN;
+		card->write.state = CH_STATE_INIT;
+		card->read.state =  CH_STATE_INIT;
+	}
+	return rc;
+}
+
+/**
+ * Reset the card: retry lcs_detect() up to 10 times with a 3 second
+ * pause between attempts. Returns 0 once the card came back up,
+ * -EIO if all attempts failed.
+ */
+static int
+lcs_resetcard(struct lcs_card *card)
+{
+	int retries;
+
+	LCS_DBF_TEXT(2, trace, "rescard");
+	for (retries = 0; retries < 10; retries++) {
+		if (lcs_detect(card) == 0) {
+			netif_wake_queue(card->dev);
+			card->state = DEV_STATE_UP;
+			PRINT_INFO("LCS device %s successfully restarted!\n",
+				   card->dev->name);
+			return 0;
+		}
+		msleep(3000);
+	}
+	/* Fix: spelling of the error message ("Reseting"). */
+	PRINT_ERR("Error in Resetting LCS card!\n");
+	return -EIO;
+}
+
+
+/**
+ * LCS Stop card
+ */
+static int
+lcs_stopcard(struct lcs_card *card)
+{
+ int rc;
+
+ LCS_DBF_TEXT(3, setup, "stopcard");
+
+ if (card->read.state != CH_STATE_STOPPED &&
+ card->write.state != CH_STATE_STOPPED &&
+ card->state == DEV_STATE_UP) {
+ lcs_clear_multicast_list(card);
+ rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
+ rc = lcs_send_shutdown(card);
+ }
+ rc = lcs_stop_channels(card);
+ card->state = DEV_STATE_DOWN;
+
+ return rc;
+}
+
+/**
+ * LGW initiated commands
+ * Kernel-thread entry point: restart the lan after a lan gateway
+ * initiated STARTLAN request; wakes the tx queue on success.
+ */
+static int
+lcs_lgw_startlan_thread(void *data)
+{
+	struct lcs_card *card;
+
+	card = (struct lcs_card *) data;
+	daemonize("lgwstpln");
+
+	if (!lcs_do_run_thread(card, LCS_STARTLAN_THREAD))
+		return 0;
+	LCS_DBF_TEXT(4, trace, "lgwstpln");
+	if (card->dev)
+		netif_stop_queue(card->dev);
+	if (lcs_startlan(card) == 0) {
+		netif_wake_queue(card->dev);
+		card->state = DEV_STATE_UP;
+		PRINT_INFO("LCS Startlan for device %s succeeded!\n",
+			   card->dev->name);
+
+	} else
+		PRINT_ERR("LCS Startlan for device %s failed!\n",
+			  card->dev->name);
+	lcs_clear_thread_running_bit(card, LCS_STARTLAN_THREAD);
+	return 0;
+}
+
+/**
+ * Send startup command initiated by Lan Gateway
+ * Kernel-thread entry point: issue STARTUP (resetting the card if
+ * that fails) and then restart the lan.
+ */
+static int
+lcs_lgw_startup_thread(void *data)
+{
+	int rc;
+
+	struct lcs_card *card;
+
+	card = (struct lcs_card *) data;
+	daemonize("lgwstaln");
+
+	if (!lcs_do_run_thread(card, LCS_STARTUP_THREAD))
+		return 0;
+	LCS_DBF_TEXT(4, trace, "lgwstaln");
+	if (card->dev)
+		netif_stop_queue(card->dev);
+	rc = lcs_send_startup(card, LCS_INITIATOR_LGW);
+	if (rc != 0) {
+		PRINT_ERR("Startup for LCS device %s initiated " \
+			  "by LGW failed!\nReseting card ...\n",
+			  card->dev->name);
+		/* do a card reset */
+		rc = lcs_resetcard(card);
+		/* Reset already brought the lan back up - we are done. */
+		if (rc == 0)
+			goto Done;
+	}
+	rc = lcs_startlan(card);
+	if (rc == 0) {
+		netif_wake_queue(card->dev);
+		card->state = DEV_STATE_UP;
+	}
+Done:
+	if (rc == 0)
+		PRINT_INFO("LCS Startup for device %s succeeded!\n",
+			   card->dev->name);
+	else
+		PRINT_ERR("LCS Startup for device %s failed!\n",
+			  card->dev->name);
+	lcs_clear_thread_running_bit(card, LCS_STARTUP_THREAD);
+	return 0;
+}
+
+
+/**
+ * send stoplan command initiated by Lan Gateway
+ * Kernel-thread entry point: acknowledge the LGW STOPLAN, then try
+ * to reset the card; if the reset fails, stop the card entirely.
+ */
+static int
+lcs_lgw_stoplan_thread(void *data)
+{
+	struct lcs_card *card;
+	int rc;
+
+	card = (struct lcs_card *) data;
+	daemonize("lgwstop");
+
+	if (!lcs_do_run_thread(card, LCS_STOPLAN_THREAD))
+		return 0;
+	LCS_DBF_TEXT(4, trace, "lgwstop");
+	if (card->dev)
+		netif_stop_queue(card->dev);
+	if (lcs_send_stoplan(card, LCS_INITIATOR_LGW) == 0)
+		PRINT_INFO("Stoplan for %s initiated by LGW succeeded!\n",
+			   card->dev->name);
+	else
+		PRINT_ERR("Stoplan %s initiated by LGW failed!\n",
+			  card->dev->name);
+	/*Try to reset the card, stop it on failure */
+	rc = lcs_resetcard(card);
+	if (rc != 0)
+		rc = lcs_stopcard(card);
+	lcs_clear_thread_running_bit(card, LCS_STOPLAN_THREAD);
+	return rc;
+}
+
+/**
+ * Kernel Thread helper functions for LGW initiated commands
+ * Work-queue handler: spawn one kernel thread for every thread-start
+ * bit that is set on the card.
+ */
+static void
+lcs_start_kernel_thread(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(5, trace, "krnthrd");
+	if (lcs_do_start_thread(card, LCS_STARTUP_THREAD))
+		kernel_thread(lcs_lgw_startup_thread, (void *) card, SIGCHLD);
+	if (lcs_do_start_thread(card, LCS_STARTLAN_THREAD))
+		kernel_thread(lcs_lgw_startlan_thread, (void *) card, SIGCHLD);
+	if (lcs_do_start_thread(card, LCS_STOPLAN_THREAD))
+		kernel_thread(lcs_lgw_stoplan_thread, (void *) card, SIGCHLD);
+#ifdef CONFIG_IP_MULTICAST
+	if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
+		kernel_thread(lcs_register_mc_addresses, (void *) card, SIGCHLD);
+#endif
+}
+
+/**
+ * Process control frames.
+ * LGW-initiated commands are handed to kernel threads (they need to
+ * sleep); everything else is a reply to one of our own commands and
+ * wakes the matching lancmd waiter.
+ */
+static void
+lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(5, trace, "getctrl");
+	if (cmd->initiator == LCS_INITIATOR_LGW) {
+		switch(cmd->cmd_code) {
+		case LCS_CMD_STARTUP:
+			if (!lcs_set_thread_start_bit(card,
+						      LCS_STARTUP_THREAD))
+				schedule_work(&card->kernel_thread_starter);
+			break;
+		case LCS_CMD_STARTLAN:
+			if (!lcs_set_thread_start_bit(card,
+						      LCS_STARTLAN_THREAD))
+				schedule_work(&card->kernel_thread_starter);
+			break;
+		case LCS_CMD_STOPLAN:
+			if (!lcs_set_thread_start_bit(card,
+						      LCS_STOPLAN_THREAD))
+				schedule_work(&card->kernel_thread_starter);
+			break;
+		default:
+			PRINT_INFO("UNRECOGNIZED LGW COMMAND\n");
+			break;
+		}
+	} else
+		lcs_notify_lancmd_waiters(card, cmd);
+}
+
+/**
+ * Unpack network packet.
+ * Copies @skb_len bytes of received frame data into a fresh skb,
+ * tags it with a per-card sequence number (in skb->cb) and passes it
+ * to the network stack. Drops silently if the card is not up.
+ */
+static void
+lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
+{
+	struct sk_buff *skb;
+
+	LCS_DBF_TEXT(5, trace, "getskb");
+	if (card->dev == NULL ||
+	    card->state != DEV_STATE_UP)
+		/* The card isn't up. Ignore the packet. */
+		return;
+
+	skb = dev_alloc_skb(skb_len);
+	if (skb == NULL) {
+		PRINT_ERR("LCS: alloc_skb failed for device=%s\n",
+			  card->dev->name);
+		card->stats.rx_dropped++;
+		return;
+	}
+	skb->dev = card->dev;
+	memcpy(skb_put(skb, skb_len), skb_data, skb_len);
+	/* lan_type_trans sets skb->protocol for the link type. */
+	skb->protocol =	card->lan_type_trans(skb, card->dev);
+	card->stats.rx_bytes += skb_len;
+	card->stats.rx_packets++;
+	*((__u32 *)skb->cb) = ++card->pkt_seq;
+	netif_rx(skb);
+}
+
+/**
+ * LCS main routine to get packets and lancmd replies from the buffers
+ * Walks the chain of lcs_headers in a read buffer (each header's
+ * offset points at the start of the NEXT frame; 0 terminates the
+ * chain), dispatches control frames and network packets, then puts
+ * the buffer back on the read channel.
+ */
+static void
+lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	struct lcs_card *card;
+	struct lcs_header *lcs_hdr;
+	__u16 offset;
+
+	LCS_DBF_TEXT(5, trace, "lcsgtpkt");
+	lcs_hdr = (struct lcs_header *) buffer->data;
+	if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
+		LCS_DBF_TEXT(4, trace, "-eiogpkt");
+		return;
+	}
+	/* Recover the owning card from the embedded read channel. */
+	card = (struct lcs_card *)
+		((char *) channel - offsetof(struct lcs_card, read));
+	offset = 0;
+	while (lcs_hdr->offset != 0) {
+		/* NOTE(review): offset looks unsigned, so "<= 0" appears
+		 * equivalent to "== 0" (already excluded) - confirm. */
+		if (lcs_hdr->offset <= 0 ||
+		    lcs_hdr->offset > LCS_IOBUFFERSIZE ||
+		    lcs_hdr->offset < offset) {
+			/* Offset invalid. */
+			card->stats.rx_length_errors++;
+			card->stats.rx_errors++;
+			return;
+		}
+		/* What kind of frame is it? */
+		if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
+			/* Control frame. */
+			lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
+		else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
+			 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
+			 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
+			/* Normal network packet. */
+			lcs_get_skb(card, (char *)(lcs_hdr + 1),
+				    lcs_hdr->offset - offset -
+				    sizeof(struct lcs_header));
+		else
+			/* Unknown frame type. */
+			; // FIXME: error message ?
+		/* Proceed to next frame. */
+		offset = lcs_hdr->offset;
+		/* Poison the consumed header so stale data is detectable. */
+		lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
+		lcs_hdr = (struct lcs_header *) (buffer->data + offset);
+	}
+	/* The buffer is now empty. Make it ready again. */
+	lcs_ready_buffer(&card->read, buffer);
+}
+
+/**
+ * get network statistics for ifconfig and other user programs
+ *
+ * net_device->get_stats callback; returns the per-card counters that
+ * the rx/tx paths maintain in card->stats.
+ */
+static struct net_device_stats *
+lcs_getstats(struct net_device *dev)
+{
+ struct lcs_card *card;
+
+ LCS_DBF_TEXT(4, trace, "netstats");
+ card = (struct lcs_card *) dev->priv;
+ return &card->stats;
+}
+
+/**
+ * stop lcs device
+ * This function will be called by user doing ifconfig xxx down
+ *
+ * Stops the tx queue, clears IFF_UP and shuts the card down.
+ * Returns the lcs_stopcard() result; a nonzero rc leaves the device
+ * half-stopped (queue stopped, IFF_UP cleared) — caller must retry.
+ */
+static int
+lcs_stop_device(struct net_device *dev)
+{
+ struct lcs_card *card;
+ int rc;
+
+ LCS_DBF_TEXT(2, trace, "stopdev");
+ card = (struct lcs_card *) dev->priv;
+ netif_stop_queue(dev);
+ dev->flags &= ~IFF_UP;
+ rc = lcs_stopcard(card);
+ if (rc)
+  PRINT_ERR("Try it again!\n ");
+ return rc;
+}
+
+/**
+ * start lcs device and make it runnable
+ * This function will be called by user doing ifconfig xxx up
+ *
+ * Re-detects the card; on success marks the interface IFF_UP, wakes
+ * the tx queue and moves the card to DEV_STATE_UP.
+ */
+static int
+lcs_open_device(struct net_device *dev)
+{
+ struct lcs_card *card;
+ int rc;
+
+ LCS_DBF_TEXT(2, trace, "opendev");
+ card = (struct lcs_card *) dev->priv;
+ /* Re-run card detection; brings the LAN back up. */
+ rc = lcs_detect(card);
+ if (rc) {
+  PRINT_ERR("LCS:Error in opening device!\n");
+
+ } else {
+  dev->flags |= IFF_UP;
+  netif_wake_queue(dev);
+  card->state = DEV_STATE_UP;
+ }
+ return rc;
+}
+
+/**
+ * show function for portno called by cat or similar things
+ *
+ * sysfs 'portno' show callback; prints the relative port number.
+ * Returns 0 (empty read) if no card is attached to the device yet.
+ */
+static ssize_t
+lcs_portno_show (struct device *dev, char *buf)
+{
+ struct lcs_card *card;
+
+ card = (struct lcs_card *)dev->driver_data;
+
+ if (!card)
+  return 0;
+
+ return sprintf(buf, "%d\n", card->portno);
+}
+
+/**
+ * store the value which is piped to file portno
+ *
+ * sysfs 'portno' store callback.
+ * NOTE(review): the sscanf() return value is not checked, so 'value'
+ * is used uninitialized when the input does not parse; also '%u' is
+ * paired with a signed int.  Input validation is still a TODO.
+ */
+static ssize_t
+lcs_portno_store (struct device *dev, const char *buf, size_t count)
+{
+ struct lcs_card *card;
+ int value;
+
+ card = (struct lcs_card *)dev->driver_data;
+
+ if (!card)
+  return 0;
+
+ sscanf(buf, "%u", &value);
+ /* TODO: sanity checks */
+ card->portno = value;
+
+ return count;
+
+}
+
+static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
+
+/**
+ * sysfs 'type' show callback: prints the cu3088 channel type string
+ * derived from the first slave ccw device's driver_info.
+ * NOTE(review): driver_info is used as an index into cu3088_type[]
+ * without a range check — presumed safe because cu3088 probing set it;
+ * verify against the cu3088 layer.
+ */
+static ssize_t
+lcs_type_show(struct device *dev, char *buf)
+{
+ struct ccwgroup_device *cgdev;
+
+ cgdev = to_ccwgroupdev(dev);
+ if (!cgdev)
+  return -ENODEV;
+
+ return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
+
+/**
+ * sysfs 'lancmd_timeout' show callback: prints the LAN command timeout
+ * (seconds); returns 0 (empty read) when no card is attached yet.
+ */
+static ssize_t
+lcs_timeout_show(struct device *dev, char *buf)
+{
+ struct lcs_card *card;
+
+ card = (struct lcs_card *)dev->driver_data;
+
+ return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
+}
+
+/**
+ * sysfs 'lancmd_timeout' store callback.
+ * NOTE(review): like lcs_portno_store, the sscanf() result is not
+ * checked ('value' may be used uninitialized on bad input) and no
+ * range validation is done yet.
+ */
+static ssize_t
+lcs_timeout_store (struct device *dev, const char *buf, size_t count)
+{
+ struct lcs_card *card;
+ int value;
+
+ card = (struct lcs_card *)dev->driver_data;
+
+ if (!card)
+  return 0;
+
+ sscanf(buf, "%u", &value);
+ /* TODO: sanity checks */
+ card->lancmd_timeout = value;
+
+ return count;
+
+}
+
+/* NOTE(review): unlike dev_attr_portno/type above, this DEVICE_ATTR
+ * lacks 'static', so dev_attr_lancmd_timeout leaks into the global
+ * kernel namespace. */
+DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
+
+/* All LCS sysfs attributes, registered as one group per ccwgroup dev. */
+static struct attribute * lcs_attrs[] = {
+ &dev_attr_portno.attr,
+ &dev_attr_type.attr,
+ &dev_attr_lancmd_timeout.attr,
+ NULL,
+};
+
+static struct attribute_group lcs_attr_group = {
+ .attrs = lcs_attrs,
+};
+
+/**
+ * lcs_probe_device is called on establishing a new ccwgroup_device.
+ *
+ * Takes a reference on the group device, allocates the lcs_card,
+ * creates the sysfs attribute group and installs the interrupt
+ * handler on both slave ccw devices.  All acquired resources are
+ * released again on each failure path.
+ */
+static int
+lcs_probe_device(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+ int ret;
+
+ if (!get_device(&ccwgdev->dev))
+  return -ENODEV;
+
+ LCS_DBF_TEXT(2, setup, "add_dev");
+ card = lcs_alloc_card();
+ if (!card) {
+  PRINT_ERR("Allocation of lcs card failed\n");
+  put_device(&ccwgdev->dev);
+  return -ENOMEM;
+ }
+ ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
+ if (ret) {
+  PRINT_ERR("Creating attributes failed");
+  lcs_free_card(card);
+  put_device(&ccwgdev->dev);
+  return ret;
+ }
+ /* Hang the card off the group device; both slave ccw devices
+  * (read = cdev[0], write = cdev[1]) share the same irq handler. */
+ ccwgdev->dev.driver_data = card;
+ ccwgdev->cdev[0]->handler = lcs_irq;
+ ccwgdev->cdev[1]->handler = lcs_irq;
+ return 0;
+}
+
+/**
+ * Register the card's net_device with the network core, unless it has
+ * been registered before (idempotent for the recovery path).
+ */
+static int
+lcs_register_netdev(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+
+ LCS_DBF_TEXT(2, setup, "regnetdv");
+ card = (struct lcs_card *)ccwgdev->dev.driver_data;
+ if (card->dev->reg_state != NETREG_UNINITIALIZED)
+  return 0;
+ SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
+ return register_netdev(card->dev);
+}
+
+/**
+ * lcs_new_device will be called by setting the group device online.
+ *
+ * Brings both ccw channels online, (re)detects the card, allocates
+ * the net_device matching the detected LAN type on first online, and
+ * either resumes a recovering interface or leaves the card stopped
+ * until ifconfig-up.
+ *
+ * NOTE(review): the return codes of ccw_device_set_online() are
+ * ignored, and SET_MODULE_OWNER() is passed the local 'dev', which is
+ * still NULL on the 'samedev' recovery path — verify SET_MODULE_OWNER
+ * is a no-op on this kernel.
+ */
+
+static int
+lcs_new_device(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+ struct net_device *dev=NULL;
+ enum lcs_dev_states recover_state;
+ int rc;
+
+ card = (struct lcs_card *)ccwgdev->dev.driver_data;
+ if (!card)
+  return -ENODEV;
+
+ LCS_DBF_TEXT(2, setup, "newdev");
+ LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+ card->read.ccwdev = ccwgdev->cdev[0];
+ card->write.ccwdev = ccwgdev->cdev[1];
+
+ recover_state = card->state;
+ ccw_device_set_online(card->read.ccwdev);
+ ccw_device_set_online(card->write.ccwdev);
+
+ LCS_DBF_TEXT(3, setup, "lcsnewdv");
+
+ lcs_setup_card(card);
+ rc = lcs_detect(card);
+ if (rc) {
+  LCS_DBF_TEXT(2, setup, "dtctfail");
+  PRINT_WARN("Detection of LCS card failed with return code "
+      "%d (0x%x)\n", rc, rc);
+  lcs_stopcard(card);
+  goto out;
+ }
+ if (card->dev) {
+  /* Recovery: the net_device from the previous online phase is
+   * reused; skip the allocation below. */
+  LCS_DBF_TEXT(2, setup, "samedev");
+  LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+  goto netdev_out;
+ }
+ /* First online: allocate a net_device of the detected LAN type
+  * (only the types compiled into this kernel are supported). */
+ switch (card->lan_type) {
+#ifdef CONFIG_NET_ETHERNET
+ case LCS_FRAME_TYPE_ENET:
+  card->lan_type_trans = eth_type_trans;
+  dev = alloc_etherdev(0);
+  break;
+#endif
+#ifdef CONFIG_TR
+ case LCS_FRAME_TYPE_TR:
+  card->lan_type_trans = tr_type_trans;
+  dev = alloc_trdev(0);
+  break;
+#endif
+#ifdef CONFIG_FDDI
+ case LCS_FRAME_TYPE_FDDI:
+  card->lan_type_trans = fddi_type_trans;
+  dev = alloc_fddidev(0);
+  break;
+#endif
+ default:
+  LCS_DBF_TEXT(3, setup, "errinit");
+  PRINT_ERR("LCS: Initialization failed\n");
+  PRINT_ERR("LCS: No device found!\n");
+  goto out;
+ }
+ if (!dev)
+  goto out;
+ card->dev = dev;
+netdev_out:
+ card->dev->priv = card;
+ card->dev->open = lcs_open_device;
+ card->dev->stop = lcs_stop_device;
+ card->dev->hard_start_xmit = lcs_start_xmit;
+ card->dev->get_stats = lcs_getstats;
+ SET_MODULE_OWNER(dev);
+ if (lcs_register_netdev(ccwgdev) != 0)
+  goto out;
+ memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
+#ifdef CONFIG_IP_MULTICAST
+ if (!lcs_check_multicast_support(card))
+  card->dev->set_multicast_list = lcs_set_multicast_list;
+#endif
+ netif_stop_queue(card->dev);
+ lcs_set_allowed_threads(card,0xffffffff);
+ if (recover_state == DEV_STATE_RECOVER) {
+  /* Interface was up before the offline/online cycle:
+   * restore multicast state and bring it straight back up. */
+  lcs_set_multicast_list(card->dev);
+  card->dev->flags |= IFF_UP;
+  netif_wake_queue(card->dev);
+  card->state = DEV_STATE_UP;
+ } else
+  lcs_stopcard(card);
+
+ return 0;
+out:
+
+ ccw_device_set_offline(card->read.ccwdev);
+ ccw_device_set_offline(card->write.ccwdev);
+ return -ENODEV;
+}
+
+/**
+ * lcs_shutdown_device, called when setting the group device offline.
+ *
+ * Blocks further worker threads, waits for an in-flight multicast
+ * thread, then stops the net_device and takes both channels offline.
+ * A previously-up card is marked DEV_STATE_RECOVER so a later online
+ * resumes it automatically.
+ *
+ * NOTE(review): 'ret' is overwritten three times in a row — the
+ * results of lcs_stop_device() and of the read-channel set_offline
+ * are silently discarded; only the write-channel rc is returned.
+ * This looks unintended.
+ */
+static int
+lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+ enum lcs_dev_states recover_state;
+ int ret;
+
+ LCS_DBF_TEXT(3, setup, "shtdndev");
+ card = (struct lcs_card *)ccwgdev->dev.driver_data;
+ if (!card)
+  return -ENODEV;
+ lcs_set_allowed_threads(card, 0);
+ if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
+  return -ERESTARTSYS;
+ LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+ recover_state = card->state;
+
+ ret = lcs_stop_device(card->dev);
+ ret = ccw_device_set_offline(card->read.ccwdev);
+ ret = ccw_device_set_offline(card->write.ccwdev);
+ if (recover_state == DEV_STATE_UP) {
+  card->state = DEV_STATE_RECOVER;
+ }
+ if (ret)
+  return ret;
+ return 0;
+}
+
+/**
+ * lcs_remove_device, free buffers and card
+ *
+ * ccwgroup remove callback: shuts the device down if still online,
+ * unregisters the net_device, removes the sysfs group and releases
+ * the card and the device reference taken in lcs_probe_device().
+ */
+static void
+lcs_remove_device(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+
+ card = (struct lcs_card *)ccwgdev->dev.driver_data;
+ if (!card)
+  return;
+
+ PRINT_INFO("Removing lcs group device ....\n");
+ LCS_DBF_TEXT(3, setup, "remdev");
+ LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+ if (ccwgdev->state == CCWGROUP_ONLINE) {
+  lcs_shutdown_device(ccwgdev);
+ }
+ if (card->dev)
+  unregister_netdev(card->dev);
+ sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
+ lcs_cleanup_card(card);
+ lcs_free_card(card);
+ /* Drop the reference taken by lcs_probe_device(). */
+ put_device(&ccwgdev->dev);
+}
+
+/**
+ * LCS ccwgroup driver registration
+ *
+ * Two slave ccw devices per group (read + write channel); the
+ * driver_id is the cu3088 discipline identifier for LCS.
+ */
+static struct ccwgroup_driver lcs_group_driver = {
+ .owner = THIS_MODULE,
+ .name = "lcs",
+ .max_slaves = 2,
+ .driver_id = 0xD3C3E2,
+ .probe = lcs_probe_device,
+ .remove = lcs_remove_device,
+ .set_online = lcs_new_device,
+ .set_offline = lcs_shutdown_device,
+};
+
+/**
+ * LCS Module/Kernel initialization function
+ *
+ * Registers the s390 debug facility and the cu3088 discipline.
+ * NOTE(review): if register_cu3088_discipline() fails, the debug
+ * facility registered just above is not unregistered — small leak on
+ * the error path.
+ */
+static int
+__init lcs_init_module(void)
+{
+ int rc;
+
+ PRINT_INFO("Loading %s\n",version);
+ rc = lcs_register_debug_facility();
+ LCS_DBF_TEXT(0, setup, "lcsinit");
+ if (rc) {
+  PRINT_ERR("Initialization failed\n");
+  return rc;
+ }
+
+ rc = register_cu3088_discipline(&lcs_group_driver);
+ if (rc) {
+  PRINT_ERR("Initialization failed\n");
+  return rc;
+ }
+
+ return 0;
+}
+
+
+/**
+ * LCS module cleanup function
+ *
+ * Reverses lcs_init_module(): unregisters the cu3088 discipline
+ * (which tears down all group devices) and the debug facility.
+ */
+static void
+__exit lcs_cleanup_module(void)
+{
+ PRINT_INFO("Terminating lcs module.\n");
+ LCS_DBF_TEXT(0, trace, "cleanup");
+ unregister_cu3088_discipline(&lcs_group_driver);
+ lcs_unregister_debug_facility();
+}
+
+module_init(lcs_init_module);
+module_exit(lcs_cleanup_module);
+
+MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
new file mode 100644
index 000000000000..a7f348ef1b08
--- /dev/null
+++ b/drivers/s390/net/lcs.h
@@ -0,0 +1,321 @@
+/*lcs.h*/
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <asm/ccwdev.h>
+
+#define VERSION_LCS_H "$Revision: 1.19 $"
+
+/* Debug-facility helpers: log a literal text, a hex dump, or a
+ * printf-formatted text into the 'lcs_dbf_<name>' debug area.
+ * NOTE(review): LCS_DBF_TEXT_ formats into a shared 'debug_buffer'
+ * with no locking visible here — presumably serialized by the caller
+ * or per-cpu; confirm before using it from concurrent contexts. */
+#define LCS_DBF_TEXT(level, name, text) \
+ do { \
+  debug_text_event(lcs_dbf_##name, level, text); \
+ } while (0)
+
+#define LCS_DBF_HEX(level,name,addr,len) \
+do { \
+ debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
+} while (0)
+
+#define LCS_DBF_TEXT_(level,name,text...) \
+do { \
+ sprintf(debug_buffer, text); \
+ debug_text_event(lcs_dbf_##name,level, debug_buffer);\
+} while (0)
+
+/**
+ * some more definitions for debug or output stuff
+ */
+#define PRINTK_HEADER " lcs: "
+
+/**
+ * sysfs related stuff
+ *
+ * Map a ccw device to its lcs_card via the enclosing ccwgroup device.
+ * NOTE(review): the macro body ends in a semicolon, so it can only be
+ * used as a full statement RHS — using it inside an expression would
+ * not compile.  Consider dropping the trailing ';'.
+ */
+#define CARD_FROM_DEV(cdev) \
+ (struct lcs_card *) \
+ ((struct ccwgroup_device *)cdev->dev.driver_data)->dev.driver_data;
+/**
+ * CCW commands used in this driver
+ */
+#define LCS_CCW_WRITE 0x01
+#define LCS_CCW_READ 0x02
+#define LCS_CCW_TRANSFER 0x08
+
+/**
+ * LCS device status primitives
+ */
+#define LCS_CMD_STARTLAN 0x01
+#define LCS_CMD_STOPLAN 0x02
+#define LCS_CMD_LANSTAT 0x04
+#define LCS_CMD_STARTUP 0x07
+#define LCS_CMD_SHUTDOWN 0x08
+#define LCS_CMD_QIPASSIST 0xb2
+#define LCS_CMD_SETIPM 0xb4
+#define LCS_CMD_DELIPM 0xb5
+
+#define LCS_INITIATOR_TCPIP 0x00
+#define LCS_INITIATOR_LGW 0x01
+#define LCS_STD_CMD_SIZE 16
+#define LCS_MULTICAST_CMD_SIZE 404
+
+/**
+ * LCS IPASSIST MASKS,only used when multicast is switched on
+ */
+/* Not supported by LCS */
+#define LCS_IPASS_ARP_PROCESSING 0x0001
+#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002
+#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004
+#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
+#define LCS_IPASS_IP_FILTERING 0x0010
+/* Supported by lcs 3172 */
+#define LCS_IPASS_IPV6_SUPPORT 0x0020
+#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
+
+/**
+ * LCS sense byte definitions
+ */
+#define LCS_SENSE_INTERFACE_DISCONNECT 0x01
+#define LCS_SENSE_EQUIPMENT_CHECK 0x10
+#define LCS_SENSE_BUS_OUT_CHECK 0x20
+#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
+#define LCS_SENSE_CMD_REJECT 0x80
+#define LCS_SENSE_RESETTING_EVENT 0x0080
+#define LCS_SENSE_DEVICE_ONLINE 0x0020
+
+/**
+ * LCS packet type definitions
+ */
+#define LCS_FRAME_TYPE_CONTROL 0
+#define LCS_FRAME_TYPE_ENET 1
+#define LCS_FRAME_TYPE_TR 2
+#define LCS_FRAME_TYPE_FDDI 7
+#define LCS_FRAME_TYPE_AUTO -1
+
+/**
+ * some more definitions,we will sort them later
+ */
+#define LCS_ILLEGAL_OFFSET 0xffff
+#define LCS_IOBUFFERSIZE 0x5000
+#define LCS_NUM_BUFFS 8 /* needs to be power of 2 */
+#define LCS_MAC_LENGTH 6
+#define LCS_INVALID_PORT_NO -1
+#define LCS_LANCMD_TIMEOUT_DEFAULT 5
+
+/**
+ * Multicast state
+ */
+#define LCS_IPM_STATE_SET_REQUIRED 0
+#define LCS_IPM_STATE_DEL_REQUIRED 1
+#define LCS_IPM_STATE_ON_CARD 2
+
+/**
+ * LCS IP Assist declarations
+ * seems to be only used for multicast
+ *
+ * NOTE(review): these duplicate the LCS_IPASS_* mask block defined
+ * earlier in this header (same values, partly different names, e.g.
+ * IN_CHECKSUM_SUPPORT vs INBOUND_CSUM_SUPP) — one of the two blocks
+ * should be removed.
+ */
+#define LCS_IPASS_ARP_PROCESSING 0x0001
+#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002
+#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004
+#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
+#define LCS_IPASS_IP_FILTERING 0x0010
+#define LCS_IPASS_IPV6_SUPPORT 0x0020
+#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
+
+/**
+ * LCS Buffer states
+ */
+enum lcs_buffer_states {
+ BUF_STATE_EMPTY, /* buffer is empty */
+ BUF_STATE_LOCKED, /* buffer is locked, don't touch */
+ BUF_STATE_READY, /* buffer is ready for read/write */
+ BUF_STATE_PROCESSED, /* data consumed, awaiting requeue */
+};
+
+/**
+ * LCS Channel State Machine declarations
+ */
+enum lcs_channel_states {
+ CH_STATE_INIT,
+ CH_STATE_HALTED,
+ CH_STATE_STOPPED,
+ CH_STATE_RUNNING,
+ CH_STATE_SUSPENDED,
+ CH_STATE_CLEARED,
+};
+
+/**
+ * LCS device state machine
+ */
+enum lcs_dev_states {
+ DEV_STATE_DOWN,
+ DEV_STATE_UP,
+ DEV_STATE_RECOVER, /* was up; resume on next set_online */
+};
+
+/* Bit flags for the card's thread start/running/allowed masks. */
+enum lcs_threads {
+ LCS_SET_MC_THREAD = 1,
+ LCS_STARTLAN_THREAD = 2,
+ LCS_STOPLAN_THREAD = 4,
+ LCS_STARTUP_THREAD = 8,
+};
+/**
+ * LCS struct declarations
+ */
+/* On-wire frame header: 'offset' points to the next frame within the
+ * I/O buffer (0 terminates the chain, see lcs_get_frames_cb). */
+struct lcs_header {
+ __u16 offset;
+ __u8 type;
+ __u8 slot;
+} __attribute__ ((packed));
+
+/* On-wire IP/MAC pair used by the multicast SETIPM/DELIPM commands. */
+struct lcs_ip_mac_pair {
+ __u32 ip_addr;
+ __u8 mac_addr[LCS_MAC_LENGTH];
+ __u8 reserved[2];
+} __attribute__ ((packed));
+
+/* Host-side bookkeeping entry for one multicast address, with its
+ * LCS_IPM_STATE_* progress flag. */
+struct lcs_ipm_list {
+ struct list_head list;
+ struct lcs_ip_mac_pair ipm;
+ __u8 ipm_state;
+};
+
+/* On-wire LCS command frame: common header (mirrors lcs_header plus
+ * command code, initiator, sequence and return code) followed by a
+ * per-command payload union.
+ * NOTE(review): '__attribute ((packed))' below is missing the trailing
+ * underscores — GCC accepts it, but it should be normalized to
+ * '__attribute__' for consistency. */
+struct lcs_cmd {
+ __u16 offset;
+ __u8 type;
+ __u8 slot;
+ __u8 cmd_code;
+ __u8 initiator;
+ __u16 sequence_no;
+ __u16 return_code;
+ union {
+  struct {
+   __u8 lan_type;
+   __u8 portno;
+   __u16 parameter_count;
+   __u8 operator_flags[3];
+   __u8 reserved[3];
+  } lcs_std_cmd;
+  struct {
+   __u16 unused1;
+   __u16 buff_size;
+   __u8 unused2[6];
+  } lcs_startup;
+  struct {
+   __u8 lan_type;
+   __u8 portno;
+   __u8 unused[10];
+   __u8 mac_addr[LCS_MAC_LENGTH];
+   __u32 num_packets_deblocked;
+   __u32 num_packets_blocked;
+   __u32 num_packets_tx_on_lan;
+   __u32 num_tx_errors_detected;
+   __u32 num_tx_packets_disgarded;
+   __u32 num_packets_rx_from_lan;
+   __u32 num_rx_errors_detected;
+   __u32 num_rx_discarded_nobuffs_avail;
+   __u32 num_rx_packets_too_large;
+  } lcs_lanstat_cmd;
+#ifdef CONFIG_IP_MULTICAST
+  struct {
+   __u8 lan_type;
+   __u8 portno;
+   __u16 num_ip_pairs;
+   __u16 ip_assists_supported;
+   __u16 ip_assists_enabled;
+   __u16 version;
+   struct {
+    struct lcs_ip_mac_pair
+    ip_mac_pair[32];
+    __u32 response_data;
+   } lcs_ipass_ctlmsg __attribute ((packed));
+  } lcs_qipassist __attribute__ ((packed));
+#endif /*CONFIG_IP_MULTICAST */
+ } cmd __attribute__ ((packed));
+} __attribute__ ((packed));
+
+/**
+ * Forward declarations.
+ */
+struct lcs_card;
+struct lcs_channel;
+
+/**
+ * Definition of an lcs buffer.
+ */
+struct lcs_buffer {
+ enum lcs_buffer_states state;
+ void *data;
+ int count;
+ /* Callback for completion notification. */
+ void (*callback)(struct lcs_channel *, struct lcs_buffer *);
+};
+
+/* Refcounted tracker for one outstanding LAN command awaiting its
+ * reply; waiters sleep on wait_q until 'received' is set. */
+struct lcs_reply {
+ struct list_head list;
+ __u16 sequence_no;
+ atomic_t refcnt;
+ /* Callback for completion notification. */
+ void (*callback)(struct lcs_card *, struct lcs_cmd *);
+ wait_queue_head_t wait_q;
+ struct lcs_card *card;
+ int received;
+ int rc;
+};
+
+/**
+ * Definition of an lcs channel
+ *
+ * One CCW subchannel (read or write) with its ring of LCS_NUM_BUFFS
+ * I/O buffers; io_idx/buf_idx index into the ring, and the extra
+ * ccws[] slot holds the chaining CCW.
+ */
+struct lcs_channel {
+ enum lcs_channel_states state;
+ struct ccw_device *ccwdev;
+ struct ccw1 ccws[LCS_NUM_BUFFS + 1];
+ wait_queue_head_t wait_q;
+ struct tasklet_struct irq_tasklet;
+ struct lcs_buffer iob[LCS_NUM_BUFFS];
+ int io_idx;
+ int buf_idx;
+};
+
+
+/**
+ * definition of the lcs card
+ *
+ * Central per-device state: one read and one write channel, the
+ * net_device and its statistics, LAN command bookkeeping, worker
+ * thread masks and (optional) multicast address list.
+ */
+struct lcs_card {
+ spinlock_t lock;
+ spinlock_t ipm_lock;
+ enum lcs_dev_states state;
+ struct net_device *dev;
+ struct net_device_stats stats;
+ /* LAN-type specific protocol resolver (eth/tr/fddi_type_trans). */
+ unsigned short (*lan_type_trans)(struct sk_buff *skb,
+      struct net_device *dev);
+ struct lcs_channel read;
+ struct lcs_channel write;
+ struct lcs_buffer *tx_buffer;
+ int tx_emitted;
+ struct list_head lancmd_waiters;
+ int lancmd_timeout;
+
+ struct work_struct kernel_thread_starter;
+ spinlock_t mask_lock;
+ unsigned long thread_start_mask;
+ unsigned long thread_running_mask;
+ unsigned long thread_allowed_mask;
+ wait_queue_head_t wait_q;
+
+#ifdef CONFIG_IP_MULTICAST
+ struct list_head ipm_list;
+#endif
+ __u8 mac[LCS_MAC_LENGTH];
+ __u16 ip_assists_supported;
+ __u16 ip_assists_enabled;
+ __s8 lan_type;
+ __u32 pkt_seq;
+ __u16 sequence_no;
+ __s16 portno;
+ /* Some info copied from probeinfo */
+ u8 device_forced;
+ u8 max_port_no;
+ u8 hint_port_no;
+ s16 port_protocol_no;
+} __attribute__ ((aligned(8)));
+
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
new file mode 100644
index 000000000000..16e8e69afb10
--- /dev/null
+++ b/drivers/s390/net/netiucv.c
@@ -0,0 +1,2149 @@
+/*
+ * $Id: netiucv.c,v 1.63 2004/07/27 13:36:05 mschwide Exp $
+ *
+ * IUCV network driver
+ *
+ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ *
+ * Driverfs integration and all bugs therein by Cornelia Huck(cohuck@de.ibm.com)
+ *
+ * Documentation used:
+ * the source of the original IUCV driver by:
+ * Stefan Hegewald <hegewald@de.ibm.com>
+ * Hartmut Penner <hpenner@de.ibm.com>
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * RELEASE-TAG: IUCV network driver $Revision: 1.63 $
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/device.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "iucv.h"
+#include "fsm.h"
+
+MODULE_AUTHOR
+ ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
+
+
+#define PRINTK_HEADER " iucv: " /* for debugging */
+
+/* Pseudo device driver hung off the iucv bus; netiucv interfaces are
+ * registered as children of this driver. */
+static struct device_driver netiucv_driver = {
+ .name = "netiucv",
+ .bus = &iucv_bus,
+};
+
+/**
+ * Per connection profiling data
+ *
+ * Transmit-side counters exposed for tuning: max frames packed into
+ * one multi-send, max collect-queue depth, single vs. multi doio
+ * counts, byte/time totals and pending-send high-water mark.
+ */
+struct connection_profile {
+ unsigned long maxmulti;
+ unsigned long maxcqueue;
+ unsigned long doios_single;
+ unsigned long doios_multi;
+ unsigned long txlen;
+ unsigned long tx_time;
+ struct timespec send_stamp;
+ unsigned long tx_pending;
+ unsigned long tx_max_pending;
+};
+
+/**
+ * Representation of one iucv connection
+ *
+ * Singly linked (via 'next') into the global iucv_connections list.
+ * collect_queue gathers skbs for batched sending (guarded by
+ * collect_lock); commit_queue holds skbs whose send is in flight.
+ * 'userid' is the 8-char z/VM peer id plus NUL.
+ */
+struct iucv_connection {
+ struct iucv_connection *next;
+ iucv_handle_t handle;
+ __u16 pathid;
+ struct sk_buff *rx_buff;
+ struct sk_buff *tx_buff;
+ struct sk_buff_head collect_queue;
+ struct sk_buff_head commit_queue;
+ spinlock_t collect_lock;
+ int collect_len;
+ int max_buffsize;
+ fsm_timer timer;
+ fsm_instance *fsm;
+ struct net_device *netdev;
+ struct connection_profile prof;
+ char userid[9];
+};
+
+/**
+ * Linked list of all connection structs.
+ */
+static struct iucv_connection *iucv_connections;
+
+/**
+ * Representation of event-data for the
+ * connection state machine.
+ *
+ * 'data' carries the raw iucv interrupt buffer for the event.
+ */
+struct iucv_event {
+ struct iucv_connection *conn;
+ void *data;
+};
+
+/**
+ * Private part of the network device structure
+ *
+ * 'tbusy' bit 0 implements the xmit-busy flag (see
+ * netiucv_test_and_set_busy / netiucv_clear_busy).
+ */
+struct netiucv_priv {
+ struct net_device_stats stats;
+ unsigned long tbusy;
+ fsm_instance *fsm;
+ struct iucv_connection *conn;
+ struct device *dev;
+};
+
+/**
+ * Link level header for a packet.
+ *
+ * 'next' is the offset of the following packet inside a multi-packet
+ * iucv buffer; 0 terminates the chain (see netiucv_unpack_skb).
+ */
+typedef struct ll_header_t {
+ __u16 next;
+} ll_header;
+
+#define NETIUCV_HDRLEN (sizeof(ll_header))
+#define NETIUCV_BUFSIZE_MAX 32768
+#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
+#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
+#define NETIUCV_MTU_DEFAULT 9216
+#define NETIUCV_QUEUELEN_DEFAULT 50
+#define NETIUCV_TIMEOUT_5SEC 5000
+
+/**
+ * Compatibility macros for busy handling
+ * of network devices.
+ *
+ * Bit 0 of priv->tbusy is the busy flag: test_and_set stops the tx
+ * queue and reports the previous state; clear re-enables the queue.
+ */
+static __inline__ void netiucv_clear_busy(struct net_device *dev)
+{
+ clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy));
+ netif_wake_queue(dev);
+}
+
+static __inline__ int netiucv_test_and_set_busy(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy);
+}
+
+/* Target host filter (all zeroes = any) and the IUCV connect "magic".
+ * The magic bytes are EBCDIC: 0xF0 = '0', 0x40 = ' ' — presumably the
+ * string "0       " repeated; confirm against the IUCV protocol doc. */
+static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static __u8 iucvMagic[16] = {
+ 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
+};
+
+/**
+ * This mask means the 16-byte IUCV "magic" and the origin userid must
+ * match exactly as specified in order to give connection_pending()
+ * control.
+ */
+static __u8 netiucv_mask[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+/**
+ * Convert an iucv userId to its printable
+ * form (strip whitespace at end).
+ *
+ * @param An iucv userId
+ *
+ * @returns The printable string (static data!!)
+ *
+ * Note: truncates at the FIRST whitespace character, which for
+ * blank-padded 8-char userids equals stripping the tail.  Not
+ * reentrant — the returned buffer is shared static storage.
+ */
+static __inline__ char *
+netiucv_printname(char *name)
+{
+ static char tmp[9];
+ char *p = tmp;
+ memcpy(tmp, name, 8);
+ tmp[8] = '\0';
+ while (*p && (!isspace(*p)))
+  p++;
+ *p = '\0';
+ return tmp;
+}
+
+/**
+ * States of the interface statemachine.
+ */
+enum dev_states {
+ DEV_STATE_STOPPED,
+ DEV_STATE_STARTWAIT,
+ DEV_STATE_STOPWAIT,
+ DEV_STATE_RUNNING,
+ /**
+  * MUST be always the last element!!
+  */
+ NR_DEV_STATES
+};
+
+/* Printable names, indexed by enum dev_states (order must match). */
+static const char *dev_state_names[] = {
+ "Stopped",
+ "StartWait",
+ "StopWait",
+ "Running",
+};
+
+/**
+ * Events of the interface statemachine.
+ */
+enum dev_events {
+ DEV_EVENT_START,
+ DEV_EVENT_STOP,
+ DEV_EVENT_CONUP,
+ DEV_EVENT_CONDOWN,
+ /**
+  * MUST be always the last element!!
+  */
+ NR_DEV_EVENTS
+};
+
+/* Printable names, indexed by enum dev_events (order must match). */
+static const char *dev_event_names[] = {
+ "Start",
+ "Stop",
+ "Connection up",
+ "Connection down",
+};
+
+/**
+ * Events of the connection statemachine
+ */
+enum conn_events {
+ /**
+  * Events, representing callbacks from
+  * lowlevel iucv layer)
+  */
+ CONN_EVENT_CONN_REQ,
+ CONN_EVENT_CONN_ACK,
+ CONN_EVENT_CONN_REJ,
+ CONN_EVENT_CONN_SUS,
+ CONN_EVENT_CONN_RES,
+ CONN_EVENT_RX,
+ CONN_EVENT_TXDONE,
+
+ /**
+  * Events, representing errors return codes from
+  * calls to lowlevel iucv layer
+  */
+
+ /**
+  * Event, representing timer expiry.
+  */
+ CONN_EVENT_TIMER,
+
+ /**
+  * Events, representing commands from upper levels.
+  */
+ CONN_EVENT_START,
+ CONN_EVENT_STOP,
+
+ /**
+  * MUST be always the last element!!
+  */
+ NR_CONN_EVENTS,
+};
+
+/* Printable names, indexed by enum conn_events (order must match). */
+static const char *conn_event_names[] = {
+ "Remote connection request",
+ "Remote connection acknowledge",
+ "Remote connection reject",
+ "Connection suspended",
+ "Connection resumed",
+ "Data received",
+ "Data sent",
+
+ "Timer",
+
+ "Start",
+ "Stop",
+};
+
+/**
+ * States of the connection statemachine.
+ */
+enum conn_states {
+ /**
+  * Connection not assigned to any device,
+  * initial state, invalid
+  */
+ CONN_STATE_INVALID,
+
+ /**
+  * Userid assigned but not operating
+  */
+ CONN_STATE_STOPPED,
+
+ /**
+  * Connection registered,
+  * no connection request sent yet,
+  * no connection request received
+  */
+ CONN_STATE_STARTWAIT,
+
+ /**
+  * Connection registered and connection request sent,
+  * no acknowledge and no connection request received yet.
+  */
+ CONN_STATE_SETUPWAIT,
+
+ /**
+  * Connection up and running idle
+  */
+ CONN_STATE_IDLE,
+
+ /**
+  * Data sent, awaiting CONN_EVENT_TXDONE
+  */
+ CONN_STATE_TX,
+
+ /**
+  * Error during registration.
+  */
+ CONN_STATE_REGERR,
+
+ /**
+  * Error during registration.
+  */
+ CONN_STATE_CONNERR,
+
+ /**
+  * MUST be always the last element!!
+  */
+ NR_CONN_STATES,
+};
+
+/* NOTE(review): this table has NINE labels for the EIGHT states of
+ * enum conn_states — "Terminating" has no matching state, so
+ * CONN_STATE_REGERR prints as "Terminating" and CONN_STATE_CONNERR as
+ * "Registration error".  The extra entry should be removed (or a
+ * terminating state added to the enum). */
+static const char *conn_state_names[] = {
+ "Invalid",
+ "Stopped",
+ "StartWait",
+ "SetupWait",
+ "Idle",
+ "TX",
+ "Terminating",
+ "Registration error",
+ "Connect error",
+};
+
+
+/**
+ * Debug Facility Stuff
+ *
+ * Three s390 debug areas (setup/data/trace) plus a per-cpu scratch
+ * buffer for formatted trace text.
+ */
+static debug_info_t *iucv_dbf_setup = NULL;
+static debug_info_t *iucv_dbf_data = NULL;
+static debug_info_t *iucv_dbf_trace = NULL;
+
+DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+/* Tear down whichever debug areas were registered; safe to call with
+ * partially initialized state (NULL pointers are skipped). */
+static void
+iucv_unregister_dbf_views(void)
+{
+ if (iucv_dbf_setup)
+  debug_unregister(iucv_dbf_setup);
+ if (iucv_dbf_data)
+  debug_unregister(iucv_dbf_data);
+ if (iucv_dbf_trace)
+  debug_unregister(iucv_dbf_trace);
+}
+/* Register all three debug areas, attach a hex/ascii view to each and
+ * set the default levels.  All-or-nothing: on any failure everything
+ * registered so far is unregistered and -ENOMEM returned. */
+static int
+iucv_register_dbf_views(void)
+{
+ iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
+     IUCV_DBF_SETUP_INDEX,
+     IUCV_DBF_SETUP_NR_AREAS,
+     IUCV_DBF_SETUP_LEN);
+ iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
+            IUCV_DBF_DATA_INDEX,
+            IUCV_DBF_DATA_NR_AREAS,
+            IUCV_DBF_DATA_LEN);
+ iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
+     IUCV_DBF_TRACE_INDEX,
+     IUCV_DBF_TRACE_NR_AREAS,
+     IUCV_DBF_TRACE_LEN);
+
+ if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
+     (iucv_dbf_trace == NULL)) {
+  iucv_unregister_dbf_views();
+  return -ENOMEM;
+ }
+ debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
+
+ debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
+ debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
+
+ debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
+ debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
+
+ return 0;
+}
+
+/**
+ * Callback-wrappers, called from lowlevel iucv layer.
+ *
+ * Each wrapper packages the interrupt buffer and the connection
+ * (passed as pgm_data at path registration time) into an iucv_event
+ * and injects the corresponding event into the connection FSM.  The
+ * iucv_event lives on the stack; fsm_event handlers must consume it
+ * synchronously.
+ *****************************************************************************/
+
+static void
+netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data)
+{
+ struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = (void *)eib;
+
+ fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
+}
+
+static void
+netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data)
+{
+ struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = (void *)eib;
+ fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
+}
+
+static void
+netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
+{
+ struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = (void *)eib;
+ fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
+}
+
+static void
+netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data)
+{
+ struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = (void *)eib;
+ fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
+}
+
+static void
+netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
+{
+ struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = (void *)eib;
+ fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
+}
+
+static void
+netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
+{
+ struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = (void *)eib;
+ fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
+}
+
+static void
+netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
+{
+ struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = (void *)eib;
+ fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
+}
+
+/* Interrupt dispatch table handed to the iucv lowlevel layer. */
+static iucv_interrupt_ops_t netiucv_ops = {
+ .ConnectionPending = netiucv_callback_connreq,
+ .ConnectionComplete = netiucv_callback_connack,
+ .ConnectionSevered = netiucv_callback_connrej,
+ .ConnectionQuiesced = netiucv_callback_connsusp,
+ .ConnectionResumed = netiucv_callback_connres,
+ .MessagePending = netiucv_callback_rx,
+ .MessageComplete = netiucv_callback_txdone
+};
+
+/**
+ * Dummy NOP action for all statemachines
+ *
+ * Used as the action for state/event pairs that are legal but need
+ * no work.
+ */
+static void
+fsm_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/**
+ * Actions of the connection statemachine
+ *****************************************************************************/
+
+/**
+ * Helper function for conn_action_rx()
+ * Unpack a just received skb and hand it over to
+ * upper layers.
+ *
+ * @param conn The connection where this skb has been received.
+ * @param pskb The received skb.
+ *
+ * The receive buffer contains a chain of packets, each prefixed by an
+ * ll_header whose 'next' field gives the offset of the following
+ * packet (0 terminates).  Each packet is copied into its own skb and
+ * fed to the stack.
+ */
+//static __inline__ void
+static void
+netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
+{
+ struct net_device *dev = conn->netdev;
+ struct netiucv_priv *privptr = dev->priv;
+ __u16 offset = 0;
+
+ skb_put(pskb, NETIUCV_HDRLEN);
+ pskb->dev = dev;
+ pskb->ip_summed = CHECKSUM_NONE;
+ pskb->protocol = ntohs(ETH_P_IP);
+
+ while (1) {
+  struct sk_buff *skb;
+  ll_header *header = (ll_header *)pskb->data;
+
+  if (!header->next)
+   break;
+
+  /* Convert the absolute 'next' offset into this packet's
+   * payload length and bounds-check it against the buffer. */
+  skb_pull(pskb, NETIUCV_HDRLEN);
+  header->next -= offset;
+  offset += header->next;
+  header->next -= NETIUCV_HDRLEN;
+  if (skb_tailroom(pskb) < header->next) {
+   PRINT_WARN("%s: Illegal next field in iucv header: "
+          "%d > %d\n",
+          dev->name, header->next, skb_tailroom(pskb));
+   IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
+    header->next, skb_tailroom(pskb));
+   return;
+  }
+  skb_put(pskb, header->next);
+  pskb->mac.raw = pskb->data;
+  skb = dev_alloc_skb(pskb->len);
+  if (!skb) {
+   PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
+          dev->name);
+   IUCV_DBF_TEXT(data, 2,
+    "Out of memory in netiucv_unpack_skb\n");
+   privptr->stats.rx_dropped++;
+   return;
+  }
+  memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
+  skb->mac.raw = skb->data;
+  skb->dev = pskb->dev;
+  skb->protocol = pskb->protocol;
+  /* NOTE(review): this marks the SOURCE buffer, after 'skb'
+   * already copied ip_summed above — looks like it was meant
+   * to be skb->ip_summed; verify intent. */
+  pskb->ip_summed = CHECKSUM_UNNECESSARY;
+  /*
+   * Since receiving is always initiated from a tasklet (in iucv.c),
+   * we must use netif_rx_ni() instead of netif_rx()
+   */
+  netif_rx_ni(skb);
+  dev->last_rx = jiffies;
+  privptr->stats.rx_packets++;
+  privptr->stats.rx_bytes += skb->len;
+  /* Advance to the next packet in the chain. */
+  skb_pull(pskb, header->next);
+  skb_put(pskb, NETIUCV_HDRLEN);
+ }
+}
+
+/**
+ * FSM action for CONN_EVENT_RX: pull the pending message from the
+ * iucv layer into conn->rx_buff and unpack it.  Oversized or
+ * unlinked-connection messages are dropped (counted in rx_dropped);
+ * receive failures count as rx_errors.
+ */
+static void
+conn_action_rx(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_connection *conn = ev->conn;
+ iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
+ struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
+
+ __u32 msglen = eib->ln1msg2.ipbfln1f;
+ int rc;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+
+ if (!conn->netdev) {
+  /* FRITZ: How to tell iucv LL to drop the msg? */
+  PRINT_WARN("Received data for unlinked connection\n");
+  IUCV_DBF_TEXT(data, 2,
+   "Received data for unlinked connection\n");
+  return;
+ }
+ if (msglen > conn->max_buffsize) {
+  /* FRITZ: How to tell iucv LL to drop the msg? */
+  privptr->stats.rx_dropped++;
+  PRINT_WARN("msglen %d > max_buffsize %d\n",
+      msglen, conn->max_buffsize);
+  IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
+   msglen, conn->max_buffsize);
+  return;
+ }
+ /* Reset the reusable receive skb before filling it. */
+ conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
+ conn->rx_buff->len = 0;
+ rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
+     conn->rx_buff->data, msglen, NULL, NULL, NULL);
+ /* A message shorter than one ll_header + terminator is bogus. */
+ if (rc || msglen < 5) {
+  privptr->stats.rx_errors++;
+  PRINT_WARN("iucv_receive returned %08x\n", rc);
+  IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
+  return;
+ }
+ netiucv_unpack_skb(conn, conn->rx_buff);
+}
+
/**
 * Handle CONN_EVENT_TXDONE: a previous iucv_send completed.
 * Accounts the finished transmission, then drains the collect queue
 * into one multi-packet buffer and sends it in a single iucv_send.
 */
static void
conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = (struct iucv_event *)arg;
	struct iucv_connection *conn = ev->conn;
	iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data;
	struct netiucv_priv *privptr = NULL;
	/* Shut up, gcc! skb is always below 2G. */
	/* Non-zero tag marks a single-skb send (see netiucv_transmit_skb). */
	__u32 single_flag = eib->ipmsgtag;
	__u32 txbytes = 0;
	__u32 txpackets = 0;
	__u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	ll_header header;

	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);

	if (conn && conn->netdev && conn->netdev->priv)
		privptr = (struct netiucv_priv *)conn->netdev->priv;
	conn->prof.tx_pending--;
	if (single_flag) {
		/* Single send: release the committed skb and account it. */
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			dev_kfree_skb_any(skb);
			if (privptr) {
				privptr->stats.tx_packets++;
				/* skb still carries both ll_headers here. */
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
					 - NETIUCV_HDRLEN);
			}
		}
	}
	/* Reset the tx buffer, then pack all collected skbs into it. */
	conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		/* header.next = offset of the next ll_header in the buffer. */
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len) {
		int rc;

		/* Terminating header: next == 0 ends the chain. */
		header.next = 0;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);

		conn->prof.send_stamp = xtime;
		rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
		conn->prof.doios_multi++;
		conn->prof.txlen += conn->tx_buff->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			/* Send failed: count all packed packets as errors. */
			conn->prof.tx_pending--;
			fsm_newstate(fi, CONN_STATE_IDLE);
			if (privptr)
				privptr->stats.tx_errors += txpackets;
			PRINT_WARN("iucv_send returned %08x\n", rc);
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			if (privptr) {
				privptr->stats.tx_packets += txpackets;
				privptr->stats.tx_bytes += txbytes;
			}
			if (stat_maxcq > conn->prof.maxcqueue)
				conn->prof.maxcqueue = stat_maxcq;
		}
	} else
		/* Nothing collected: connection goes idle. */
		fsm_newstate(fi, CONN_STATE_IDLE);
}
+
+static void
+conn_action_connaccept(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_connection *conn = ev->conn;
+ iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+ int rc;
+ __u16 msglimit;
+ __u8 udata[16];
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
+ conn->handle, conn, NULL, &msglimit);
+ if (rc) {
+ PRINT_WARN("%s: IUCV accept failed with error %d\n",
+ netdev->name, rc);
+ IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
+ return;
+ }
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ conn->pathid = eib->ippathid;
+ netdev->tx_queue_len = msglimit;
+ fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void
+conn_action_connreject(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_connection *conn = ev->conn;
+ struct net_device *netdev = conn->netdev;
+ iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
+ __u8 udata[16];
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ iucv_sever(eib->ippathid, udata);
+ if (eib->ippathid != conn->pathid) {
+ PRINT_INFO("%s: IR Connection Pending; "
+ "pathid %d does not match original pathid %d\n",
+ netdev->name, eib->ippathid, conn->pathid);
+ IUCV_DBF_TEXT_(data, 2,
+ "connreject: IR pathid %d, conn. pathid %d\n",
+ eib->ippathid, conn->pathid);
+ iucv_sever(conn->pathid, udata);
+ }
+}
+
+static void
+conn_action_connack(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_connection *conn = ev->conn;
+ iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ fsm_deltimer(&conn->timer);
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ if (eib->ippathid != conn->pathid) {
+ PRINT_INFO("%s: IR Connection Complete; "
+ "pathid %d does not match original pathid %d\n",
+ netdev->name, eib->ippathid, conn->pathid);
+ IUCV_DBF_TEXT_(data, 2,
+ "connack: IR pathid %d, conn. pathid %d\n",
+ eib->ippathid, conn->pathid);
+ conn->pathid = eib->ippathid;
+ }
+ netdev->tx_queue_len = eib->ipmsglim;
+ fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void
+conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_connection *conn = (struct iucv_connection *)arg;
+ __u8 udata[16];
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ fsm_deltimer(&conn->timer);
+ iucv_sever(conn->pathid, udata);
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+}
+
+static void
+conn_action_connsever(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_connection *conn = ev->conn;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+ __u8 udata[16];
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ fsm_deltimer(&conn->timer);
+ iucv_sever(conn->pathid, udata);
+ PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
+ IUCV_DBF_TEXT(data, 2,
+ "conn_action_connsever: Remote dropped connection\n");
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+ fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
/**
 * Start a connection: register with IUCV on first use, then issue a
 * connect request to the remote userid. Maps iucv_connect return
 * codes onto FSM states (retryable -> STARTWAIT, fatal -> CONNERR).
 */
static void
conn_action_start(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = (struct iucv_event *)arg;
	struct iucv_connection *conn = ev->conn;
	__u16 msglimit;
	int rc;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

	if (!conn->handle) {
		/* First start: register this connection with IUCV. */
		IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
		conn->handle =
			iucv_register_program(iucvMagic, conn->userid,
					      netiucv_mask,
					      &netiucv_ops, conn);
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		if (!conn->handle) {
			fsm_newstate(fi, CONN_STATE_REGERR);
			/* Redundant (already NULL), kept for clarity. */
			conn->handle = NULL;
			IUCV_DBF_TEXT(setup, 2,
				"NULL from iucv_register_program\n");
			return;
		}

		PRINT_DEBUG("%s('%s'): registered successfully\n",
			 conn->netdev->name, conn->userid);
	}

	PRINT_DEBUG("%s('%s'): connecting ...\n",
		 conn->netdev->name, conn->userid);

	/* We must set the state before calling iucv_connect because the callback
	 * handler could be called at any point after the connection request is
	 * sent */

	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
	rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
			  conn->userid, iucv_host, 0, NULL, &msglimit,
			  conn->handle, conn);
	switch (rc) {
		case 0:
			/* Request sent; wait for completion or timeout. */
			conn->netdev->tx_queue_len = msglimit;
			fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
				CONN_EVENT_TIMER, conn);
			return;
		/* Codes 11/12 are transient peer states: stay registered,
		 * retry later from STARTWAIT. */
		case 11:
			PRINT_INFO("%s: User %s is currently not available.\n",
			       conn->netdev->name,
			       netiucv_printname(conn->userid));
			fsm_newstate(fi, CONN_STATE_STARTWAIT);
			return;
		case 12:
			PRINT_INFO("%s: User %s is currently not ready.\n",
			       conn->netdev->name,
			       netiucv_printname(conn->userid));
			fsm_newstate(fi, CONN_STATE_STARTWAIT);
			return;
		/* Codes 13-15 and unknown codes are fatal: fall through to
		 * unregistering below. */
		case 13:
			PRINT_WARN("%s: Too many IUCV connections.\n",
			       conn->netdev->name);
			fsm_newstate(fi, CONN_STATE_CONNERR);
			break;
		case 14:
			PRINT_WARN(
			       "%s: User %s has too many IUCV connections.\n",
			       conn->netdev->name,
			       netiucv_printname(conn->userid));
			fsm_newstate(fi, CONN_STATE_CONNERR);
			break;
		case 15:
			PRINT_WARN(
			       "%s: No IUCV authorization in CP directory.\n",
			       conn->netdev->name);
			fsm_newstate(fi, CONN_STATE_CONNERR);
			break;
		default:
			PRINT_WARN("%s: iucv_connect returned error %d\n",
			       conn->netdev->name, rc);
			fsm_newstate(fi, CONN_STATE_CONNERR);
			break;
	}
	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
	IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
	iucv_unregister_program(conn->handle);
	conn->handle = NULL;
}
+
+static void
+netiucv_purge_skb_queue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(q))) {
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void
+conn_action_stop(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_connection *conn = ev->conn;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ fsm_deltimer(&conn->timer);
+ fsm_newstate(fi, CONN_STATE_STOPPED);
+ netiucv_purge_skb_queue(&conn->collect_queue);
+ if (conn->handle)
+ IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
+ iucv_unregister_program(conn->handle);
+ conn->handle = NULL;
+ netiucv_purge_skb_queue(&conn->commit_queue);
+ fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
+static void
+conn_action_inval(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_connection *conn = ev->conn;
+ struct net_device *netdev = conn->netdev;
+
+ PRINT_WARN("%s: Cannot connect without username\n",
+ netdev->name);
+ IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
+}
+
/*
 * State/event transition table for the connection statemachine.
 * Each entry maps (state, event) to the action handler above.
 */
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	/* Incoming requests are accepted only while (re)connecting. */
	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
        { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

/* Number of entries in conn_fsm, passed to init_fsm(). */
static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
+
+
+/**
+ * Actions for interface - statemachine.
+ *****************************************************************************/
+
+/**
+ * Startup connection by sending CONN_EVENT_START to it.
+ *
+ * @param fi An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_start(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ struct netiucv_priv *privptr = dev->priv;
+ struct iucv_event ev;
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ ev.conn = privptr->conn;
+ fsm_newstate(fi, DEV_STATE_STARTWAIT);
+ fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev);
+}
+
+/**
+ * Shutdown connection by sending CONN_EVENT_STOP to it.
+ *
+ * @param fi An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_stop(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ struct netiucv_priv *privptr = dev->priv;
+ struct iucv_event ev;
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ ev.conn = privptr->conn;
+
+ fsm_newstate(fi, DEV_STATE_STOPWAIT);
+ fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
+}
+
+/**
+ * Called from connection statemachine
+ * when a connection is up and running.
+ *
+ * @param fi An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_connup(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ struct netiucv_priv *privptr = dev->priv;
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ switch (fsm_getstate(fi)) {
+ case DEV_STATE_STARTWAIT:
+ fsm_newstate(fi, DEV_STATE_RUNNING);
+ PRINT_INFO("%s: connected with remote side %s\n",
+ dev->name, privptr->conn->userid);
+ IUCV_DBF_TEXT(setup, 3,
+ "connection is up and running\n");
+ break;
+ case DEV_STATE_STOPWAIT:
+ PRINT_INFO(
+ "%s: got connection UP event during shutdown!\n",
+ dev->name);
+ IUCV_DBF_TEXT(data, 2,
+ "dev_action_connup: in DEV_STATE_STOPWAIT\n");
+ break;
+ }
+}
+
+/**
+ * Called from connection statemachine
+ * when a connection has been shutdown.
+ *
+ * @param fi An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, casted from struct net_device * upon call.
+ */
+static void
+dev_action_conndown(fsm_instance *fi, int event, void *arg)
+{
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+ switch (fsm_getstate(fi)) {
+ case DEV_STATE_RUNNING:
+ fsm_newstate(fi, DEV_STATE_STARTWAIT);
+ break;
+ case DEV_STATE_STOPWAIT:
+ fsm_newstate(fi, DEV_STATE_STOPPED);
+ IUCV_DBF_TEXT(setup, 3, "connection is down\n");
+ break;
+ }
+}
+
/*
 * State/event transition table for the interface (device) statemachine.
 */
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
	/* CONUP while already running is a harmless no-op. */
	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   fsm_action_nop      },
};

/* Number of entries in dev_fsm, passed to init_fsm(). */
static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
+
+/**
+ * Transmit a packet.
+ * This is a helper function for netiucv_tx().
+ *
+ * @param conn Connection to be used for sending.
+ * @param skb Pointer to struct sk_buff of packet to send.
+ * The linklevel header has already been set up
+ * by netiucv_tx().
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int
+netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
+ unsigned long saveflags;
+ ll_header header;
+ int rc = 0;
+
+ if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
+ int l = skb->len + NETIUCV_HDRLEN;
+
+ spin_lock_irqsave(&conn->collect_lock, saveflags);
+ if (conn->collect_len + l >
+ (conn->max_buffsize - NETIUCV_HDRLEN)) {
+ rc = -EBUSY;
+ IUCV_DBF_TEXT(data, 2,
+ "EBUSY from netiucv_transmit_skb\n");
+ } else {
+ atomic_inc(&skb->users);
+ skb_queue_tail(&conn->collect_queue, skb);
+ conn->collect_len += l;
+ }
+ spin_unlock_irqrestore(&conn->collect_lock, saveflags);
+ } else {
+ struct sk_buff *nskb = skb;
+ /**
+ * Copy the skb to a new allocated skb in lowmem only if the
+ * data is located above 2G in memory or tailroom is < 2.
+ */
+ unsigned long hi =
+ ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31;
+ int copied = 0;
+ if (hi || (skb_tailroom(skb) < 2)) {
+ nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
+ NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
+ if (!nskb) {
+ PRINT_WARN("%s: Could not allocate tx_skb\n",
+ conn->netdev->name);
+ IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
+ rc = -ENOMEM;
+ return rc;
+ } else {
+ skb_reserve(nskb, NETIUCV_HDRLEN);
+ memcpy(skb_put(nskb, skb->len),
+ skb->data, skb->len);
+ }
+ copied = 1;
+ }
+ /**
+ * skb now is below 2G and has enough room. Add headers.
+ */
+ header.next = nskb->len + NETIUCV_HDRLEN;
+ memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+ header.next = 0;
+ memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+
+ fsm_newstate(conn->fsm, CONN_STATE_TX);
+ conn->prof.send_stamp = xtime;
+
+ rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */,
+ 0, nskb->data, nskb->len);
+ /* Shut up, gcc! nskb is always below 2G. */
+ conn->prof.doios_single++;
+ conn->prof.txlen += skb->len;
+ conn->prof.tx_pending++;
+ if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+ conn->prof.tx_max_pending = conn->prof.tx_pending;
+ if (rc) {
+ struct netiucv_priv *privptr;
+ fsm_newstate(conn->fsm, CONN_STATE_IDLE);
+ conn->prof.tx_pending--;
+ privptr = (struct netiucv_priv *)conn->netdev->priv;
+ if (privptr)
+ privptr->stats.tx_errors++;
+ if (copied)
+ dev_kfree_skb(nskb);
+ else {
+ /**
+ * Remove our headers. They get added
+ * again on retransmit.
+ */
+ skb_pull(skb, NETIUCV_HDRLEN);
+ skb_trim(skb, skb->len - NETIUCV_HDRLEN);
+ }
+ PRINT_WARN("iucv_send returned %08x\n", rc);
+ IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+ } else {
+ if (copied)
+ dev_kfree_skb(skb);
+ atomic_inc(&nskb->users);
+ skb_queue_tail(&conn->commit_queue, nskb);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * Interface API for upper network layers
+ *****************************************************************************/
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int
+netiucv_open(struct net_device *dev) {
+ fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
+ return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int
+netiucv_close(struct net_device *dev) {
+ fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);
+ return 0;
+}
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ * @param skb Pointer to buffer containing the packet.
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 if packet consumed, !0 if packet rejected.
+ * Note: If we return !0, then the packet is free'd by
+ * the generic network layer.
+ */
+static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int rc = 0;
+ struct netiucv_priv *privptr = dev->priv;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ /**
+ * Some sanity checks ...
+ */
+ if (skb == NULL) {
+ PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
+ IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
+ privptr->stats.tx_dropped++;
+ return 0;
+ }
+ if (skb_headroom(skb) < NETIUCV_HDRLEN) {
+ PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
+ dev->name, NETIUCV_HDRLEN);
+ IUCV_DBF_TEXT(data, 2,
+ "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
+ dev_kfree_skb(skb);
+ privptr->stats.tx_dropped++;
+ return 0;
+ }
+
+ /**
+ * If connection is not running, try to restart it
+ * and throw away packet.
+ */
+ if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
+ fsm_event(privptr->fsm, DEV_EVENT_START, dev);
+ dev_kfree_skb(skb);
+ privptr->stats.tx_dropped++;
+ privptr->stats.tx_errors++;
+ privptr->stats.tx_carrier_errors++;
+ return 0;
+ }
+
+ if (netiucv_test_and_set_busy(dev)) {
+ IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
+ return -EBUSY;
+ }
+ dev->trans_start = jiffies;
+ if (netiucv_transmit_skb(privptr->conn, skb))
+ rc = 1;
+ netiucv_clear_busy(dev);
+ return rc;
+}
+
+/**
+ * Returns interface statistics of a device.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return Pointer to stats struct of this interface.
+ */
+static struct net_device_stats *
+netiucv_stats (struct net_device * dev)
+{
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return &((struct netiucv_priv *)dev->priv)->stats;
+}
+
+/**
+ * Sets MTU of an interface.
+ *
+ * @param dev Pointer to interface struct.
+ * @param new_mtu The new MTU to use for this interface.
+ *
+ * @return 0 on success, -EINVAL if MTU is out of valid range.
+ * (valid range is 576 .. NETIUCV_MTU_MAX).
+ */
+static int
+netiucv_change_mtu (struct net_device * dev, int new_mtu)
+{
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
+ IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/**
+ * attributes in sysfs
+ *****************************************************************************/
+
+static ssize_t
+user_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
+}
+
/* sysfs: set the peer userid. Validates characters, pads the id to
 * 8 chars with blanks, and refuses to change it while the interface
 * is up. */
static ssize_t
user_write (struct device *dev, const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char *p;
	char *tmp;
	char username[10];
	int i;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* At most 8 id characters plus an optional trailing newline. */
	if (count>9) {
		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
		IUCV_DBF_TEXT_(setup, 2,
			"%d is length of username\n", (int)count);
		return -EINVAL;
	}

	/* NOTE(review): casts away const and strsep writes into the sysfs
	 * buffer — apparently relied upon being writable here; verify. */
	tmp = strsep((char **) &buf, "\n");
	for (i=0, p=tmp; i<8 && *p; i++, p++) {
		/* Accept alphanumerics and '$' only. */
		if (isalnum(*p) || (*p == '$'))
			username[i]= *p;
		else if (*p == '\n') {
			/* trailing lf, grr */
			break;
		} else {
			PRINT_WARN("netiucv: Invalid char %c in username!\n",
				*p);
			IUCV_DBF_TEXT_(setup, 2,
				"username: invalid character %c\n",
				*p);
			return -EINVAL;
		}
	}
	/* Blank-pad to 8 chars (indices 0..7 plus a 9th pad byte),
	 * then NUL-terminate. */
	while (i<9)
		username[i++] = ' ';
	username[9] = '\0';

	if (memcmp(username, priv->conn->userid, 8)) {
		/* username changed */
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			PRINT_WARN(
				"netiucv: device %s active, connected to %s\n",
				dev->bus_id, priv->conn->userid);
			PRINT_WARN("netiucv: user cannot be updated\n");
			IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
			return -EBUSY;
		}
	}
	memcpy(priv->conn->userid, username, 9);

	return count;

}

static DEVICE_ATTR(user, 0644, user_show, user_write);
+
+static ssize_t
+buffer_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%d\n", priv->conn->max_buffsize);
+}
+
/* sysfs: set the maximum buffer size. Validates the numeric input
 * against NETIUCV_BUFSIZE_MAX, the current MTU (if running), and the
 * minimum needed for a 576-byte MTU plus two ll_headers. If the
 * interface is down, the MTU is derived from the new buffer size. */
static ssize_t
buffer_write (struct device *dev, const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char *e;
	int bs1;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	if (count >= 39)
		return -EINVAL;

	bs1 = simple_strtoul(buf, &e, 0);

	/* NOTE(review): rejects any non-space trailing char, including
	 * '\0' for input without a newline — confirm intended. */
	if (e && (!isspace(*e))) {
		PRINT_WARN("netiucv: Invalid character in buffer!\n");
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		PRINT_WARN("netiucv: Given buffer size %d too large.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	/* While running, the buffer must still hold the current MTU. */
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		PRINT_WARN("netiucv: Given buffer size %d too small.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	/* Absolute minimum: smallest legal MTU plus two ll_headers. */
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		PRINT_WARN("netiucv: Given buffer size %d too small.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;

}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
+
+static ssize_t
+dev_fsm_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
+}
+
+static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
+
+static ssize_t
+conn_fsm_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
+}
+
+static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
+
+static ssize_t
+maxmulti_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
+}
+
+static ssize_t
+maxmulti_write (struct device *dev, const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ priv->conn->prof.maxmulti = 0;
+ return count;
+}
+
+static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
+
+static ssize_t
+maxcq_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
+}
+
+static ssize_t
+maxcq_write (struct device *dev, const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ priv->conn->prof.maxcqueue = 0;
+ return count;
+}
+
+static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
+
+static ssize_t
+sdoio_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
+}
+
+static ssize_t
+sdoio_write (struct device *dev, const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ priv->conn->prof.doios_single = 0;
+ return count;
+}
+
+static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
+
+static ssize_t
+mdoio_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
+}
+
+static ssize_t
+mdoio_write (struct device *dev, const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ priv->conn->prof.doios_multi = 0;
+ return count;
+}
+
+static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
+
+static ssize_t
+txlen_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
+}
+
+static ssize_t
+txlen_write (struct device *dev, const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ priv->conn->prof.txlen = 0;
+ return count;
+}
+
+static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
+
+static ssize_t
+txtime_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
+}
+
+static ssize_t
+txtime_write (struct device *dev, const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ priv->conn->prof.tx_time = 0;
+ return count;
+}
+
+static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
+
+static ssize_t
+txpend_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
+}
+
+static ssize_t
+txpend_write (struct device *dev, const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ priv->conn->prof.tx_pending = 0;
+ return count;
+}
+
+static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
+
+static ssize_t
+txmpnd_show (struct device *dev, char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
+}
+
+static ssize_t
+txmpnd_write (struct device *dev, const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ priv->conn->prof.tx_max_pending = 0;
+ return count;
+}
+
+static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
+
/* Configuration attributes exposed directly on the device. */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};
+
/* Statistics attributes, grouped under the "stats" subdirectory. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};
+
+static inline int
+netiucv_add_files(struct device *dev)
+{
+ int ret;
+
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
+ if (ret)
+ return ret;
+ ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
+ if (ret)
+ sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
+ return ret;
+}
+
+static inline void
+netiucv_remove_files(struct device *dev)
+{
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
+ sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
+}
+
+/**
+ * Allocate and register a struct device for a netiucv net_device on
+ * the iucv bus and attach its sysfs attribute files.
+ *
+ * Returns 0 on success, -ENOMEM if the device cannot be allocated, or
+ * the error from device_register()/netiucv_add_files().
+ */
+static int
+netiucv_register_device(struct net_device *ndev)
+{
+	struct netiucv_priv *priv = ndev->priv;
+	struct device *dev = kmalloc(sizeof(struct device), GFP_KERNEL);
+	int ret;
+
+
+	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+	if (dev) {
+		memset(dev, 0, sizeof(struct device));
+		snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
+		dev->bus = &iucv_bus;
+		dev->parent = iucv_root;
+		/*
+		 * The release function could be called after the
+		 * module has been unloaded. It's _only_ task is to
+		 * free the struct. Therefore, we specify kfree()
+		 * directly here. (Probably a little bit obfuscating
+		 * but legitime ...).
+		 */
+		dev->release = (void (*)(struct device *))kfree;
+		dev->driver = &netiucv_driver;
+	} else
+		return -ENOMEM;
+
+	ret = device_register(dev);
+
+	if (ret)
+		/*
+		 * NOTE(review): driver-core convention is to drop the
+		 * reference with put_device() when device_register()
+		 * fails; as written the struct device looks leaked here
+		 * -- confirm against the 2.6 driver model rules.
+		 */
+		return ret;
+	ret = netiucv_add_files(dev);
+	if (ret)
+		goto out_unreg;
+	priv->dev = dev;
+	dev->driver_data = priv;
+	return 0;
+
+out_unreg:
+	device_unregister(dev);
+	return ret;
+}
+
+/**
+ * Tear down a netiucv sysfs device: remove the attribute files first,
+ * then unregister the device (its release callback frees the struct).
+ */
+static void
+netiucv_unregister_device(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	netiucv_remove_files(dev);
+	device_unregister(dev);
+}
+
+/**
+ * Allocate and initialize a new connection structure and insert it at
+ * the head of the global list of netiucv connections.
+ *
+ * Allocates the rx/tx skb buffers and the connection FSM; on any
+ * allocation failure everything acquired so far is released and NULL
+ * is returned.  If @username is given, its 9 bytes (blank padded user
+ * id) are copied in and the FSM starts in STOPPED instead of INVALID.
+ *
+ * NOTE(review): the list insertion is not visibly serialized here --
+ * presumably the caller holds whatever protects iucv_connections.
+ */
+static struct iucv_connection *
+netiucv_new_connection(struct net_device *dev, char *username)
+{
+	struct iucv_connection **clist = &iucv_connections;
+	struct iucv_connection *conn =
+		(struct iucv_connection *)
+		kmalloc(sizeof(struct iucv_connection), GFP_KERNEL);
+
+	if (conn) {
+		memset(conn, 0, sizeof(struct iucv_connection));
+		skb_queue_head_init(&conn->collect_queue);
+		skb_queue_head_init(&conn->commit_queue);
+		conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
+		conn->netdev = dev;
+
+		conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
+					  GFP_KERNEL | GFP_DMA);
+		if (!conn->rx_buff) {
+			kfree(conn);
+			return NULL;
+		}
+		conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
+					  GFP_KERNEL | GFP_DMA);
+		if (!conn->tx_buff) {
+			kfree_skb(conn->rx_buff);
+			kfree(conn);
+			return NULL;
+		}
+		conn->fsm = init_fsm("netiucvconn", conn_state_names,
+				     conn_event_names, NR_CONN_STATES,
+				     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
+				     GFP_KERNEL);
+		if (!conn->fsm) {
+			kfree_skb(conn->tx_buff);
+			kfree_skb(conn->rx_buff);
+			kfree(conn);
+			return NULL;
+		}
+		fsm_settimer(conn->fsm, &conn->timer);
+		fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+
+		if (username) {
+			memcpy(conn->userid, username, 9);
+			fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
+		}
+
+		/* push onto the head of the global connection list */
+		conn->next = *clist;
+		*clist = conn;
+	}
+	return conn;
+}
+
+/**
+ * Unlink a connection from the global list and release its resources
+ * (IUCV registration, timer, FSM, rx/tx buffers).
+ *
+ * The struct itself is NOT freed here; only the members are released.
+ * A NULL @conn or a @conn not on the list is silently ignored.
+ */
+static void
+netiucv_remove_connection(struct iucv_connection *conn)
+{
+	struct iucv_connection **clist = &iucv_connections;
+
+	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	if (conn == NULL)
+		return;
+	while (*clist) {
+		if (*clist == conn) {
+			*clist = conn->next;
+			if (conn->handle) {
+				iucv_unregister_program(conn->handle);
+				conn->handle = NULL;
+			}
+			fsm_deltimer(&conn->timer);
+			kfree_fsm(conn->fsm);
+			kfree_skb(conn->rx_buff);
+			kfree_skb(conn->tx_buff);
+			return;
+		}
+		clist = &((*clist)->next);
+	}
+}
+
+/**
+ * Release everything belonging to a netiucv net device: its connection,
+ * its device FSM, and finally the net_device itself.
+ *
+ * Installed as dev->destructor, so it may also run via free_netdev().
+ */
+static void
+netiucv_free_netdevice(struct net_device *dev)
+{
+	struct netiucv_priv *privptr;
+
+	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+	if (!dev)
+		return;
+
+	privptr = (struct netiucv_priv *)dev->priv;
+	if (privptr) {
+		if (privptr->conn)
+			netiucv_remove_connection(privptr->conn);
+		if (privptr->fsm)
+			kfree_fsm(privptr->fsm);
+		/* clear so a second invocation cannot double free */
+		privptr->conn = NULL; privptr->fsm = NULL;
+		/* privptr gets freed by free_netdev() */
+	}
+	free_netdev(dev);
+}
+
+/**
+ * Initialize a netiucv net device: zero the private area and set the
+ * netdev operations, MTU, queue length and point-to-point/no-ARP flags.
+ * (Called from the kernel in alloc_netdev().)
+ */
+static void
+netiucv_setup_netdevice(struct net_device *dev)
+{
+	memset(dev->priv, 0, sizeof(struct netiucv_priv));
+
+	dev->mtu	         = NETIUCV_MTU_DEFAULT;
+	dev->hard_start_xmit     = netiucv_tx;
+	dev->open	         = netiucv_open;
+	dev->stop	         = netiucv_close;
+	dev->get_stats	         = netiucv_stats;
+	dev->change_mtu          = netiucv_change_mtu;
+	dev->destructor          = netiucv_free_netdevice;
+	dev->hard_header_len     = NETIUCV_HDRLEN;
+	dev->addr_len            = 0;
+	dev->type                = ARPHRD_SLIP;
+	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
+	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
+	SET_MODULE_OWNER(dev);
+}
+
+/**
+ * Allocate and initialize everything of a net device: the net_device
+ * itself (named "iucvN"), the device state machine, and the IUCV
+ * connection for @username.  Returns the device or NULL on failure,
+ * with all partially acquired resources released.
+ */
+static struct net_device *
+netiucv_init_netdevice(char *username)
+{
+	struct netiucv_priv *privptr;
+	struct net_device *dev;
+
+	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
+			   netiucv_setup_netdevice);
+	if (!dev)
+		return NULL;
+	if (dev_alloc_name(dev, dev->name) < 0) {
+		free_netdev(dev);
+		return NULL;
+	}
+
+	privptr = (struct netiucv_priv *)dev->priv;
+	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
+				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
+				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
+	if (!privptr->fsm) {
+		free_netdev(dev);
+		return NULL;
+	}
+	privptr->conn = netiucv_new_connection(dev, username);
+	if (!privptr->conn) {
+		kfree_fsm(privptr->fsm);
+		free_netdev(dev);
+		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
+		return NULL;
+	}
+	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
+
+	return dev;
+}
+
+/**
+ * Driver attribute "connection": create a new netiucv interface for a
+ * z/VM peer.
+ *
+ * @buf holds the peer user id: up to 8 alphanumeric or '$' characters,
+ * optionally terminated by a newline.  The id is blank padded before
+ * use.  On success the net_device and its sysfs device are created and
+ * registered and @count is returned; otherwise a negative errno
+ * (-EINVAL for bad input, -ENODEV/registration error otherwise).
+ */
+static ssize_t
+conn_write(struct device_driver *drv, const char *buf, size_t count)
+{
+	char *p;
+	char username[10];
+	int i, ret;
+	struct net_device *dev;
+
+	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	if (count>9) {
+		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
+		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+		return -EINVAL;
+	}
+
+	for (i=0, p=(char *)buf; i<8 && *p; i++, p++) {
+		if (isalnum(*p) || (*p == '$'))
+			username[i]= *p;
+		else if (*p == '\n') {
+			/* trailing lf, grr */
+			break;
+		} else {
+			PRINT_WARN("netiucv: Invalid character in username!\n");
+			IUCV_DBF_TEXT_(setup, 2,
+				"conn_write: invalid character %c\n", *p);
+			return -EINVAL;
+		}
+	}
+	/* blank pad to the fixed-width user id field */
+	while (i<9)
+		username[i++] = ' ';
+	username[9] = '\0';
+	dev = netiucv_init_netdevice(username);
+	if (!dev) {
+		PRINT_WARN(
+		       "netiucv: Could not allocate network device structure "
+		       "for user '%s'\n", netiucv_printname(username));
+		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
+		return -ENODEV;
+	}
+
+	if ((ret = netiucv_register_device(dev))) {
+		IUCV_DBF_TEXT_(setup, 2,
+			"ret %d from netiucv_register_device\n", ret);
+		goto out_free_ndev;
+	}
+
+	/* sysfs magic */
+	SET_NETDEV_DEV(dev,
+			(struct device*)((struct netiucv_priv*)dev->priv)->dev);
+
+	if ((ret = register_netdev(dev))) {
+		netiucv_unregister_device((struct device*)
+			((struct netiucv_priv*)dev->priv)->dev);
+		goto out_free_ndev;
+	}
+
+	PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
+
+	return count;
+
+out_free_ndev:
+	PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
+	IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
+	netiucv_free_netdevice(dev);
+	return ret;
+}
+
+DRIVER_ATTR(connection, 0200, NULL, conn_write);
+
+/**
+ * Driver attribute "remove": detach and destroy the named netiucv
+ * interface.
+ *
+ * Parses an interface name from @buf (terminated by NUL, newline or
+ * blank, truncated to IFNAMSIZ-1 characters), looks it up in the
+ * connection list and, if the device is not up or running, unregisters
+ * both the net device and its sysfs device.
+ *
+ * Returns @count on success, -EBUSY if the interface is still active,
+ * -EINVAL if no such interface exists.
+ */
+static ssize_t
+remove_write (struct device_driver *drv, const char *buf, size_t count)
+{
+	struct iucv_connection **clist = &iucv_connections;
+	struct net_device *ndev;
+	struct netiucv_priv *priv;
+	struct device *dev;
+	char name[IFNAMSIZ];
+	char *p;
+	int i;
+
+	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+	if (count >= IFNAMSIZ)
+		count = IFNAMSIZ-1;
+
+	for (i=0, p=(char *)buf; i<count && *p; i++, p++) {
+		/* logical || (was bitwise |): stop at trailing lf/blank */
+		if ((*p == '\n') || (*p == ' ')) {
+			/* trailing lf, grr */
+			break;
+		} else {
+			name[i]=*p;
+		}
+	}
+	name[i] = '\0';
+
+	while (*clist) {
+		ndev = (*clist)->netdev;
+		priv = (struct netiucv_priv*)ndev->priv;
+		dev = priv->dev;
+
+		if (strncmp(name, ndev->name, count)) {
+			clist = &((*clist)->next);
+			continue;
+		}
+		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
+			PRINT_WARN(
+				"netiucv: net device %s active with peer %s\n",
+				ndev->name, priv->conn->userid);
+			PRINT_WARN("netiucv: %s cannot be removed\n",
+				ndev->name);
+			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
+			return -EBUSY;
+		}
+		unregister_netdev(ndev);
+		netiucv_unregister_device(dev);
+		return count;
+	}
+	PRINT_WARN("netiucv: net device %s unknown\n", name);
+	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
+	return -EINVAL;
+}
+
+DRIVER_ATTR(remove, 0200, NULL, remove_write);
+
+/**
+ * Print the driver banner, extracting the version number from the
+ * CVS "$Revision$" keyword string (text between ':' and the final '$').
+ */
+static void
+netiucv_banner(void)
+{
+	char vbuf[] = "$Revision: 1.63 $";
+	char *version = vbuf;
+
+	if ((version = strchr(version, ':'))) {
+		char *p = strchr(version + 1, '$');
+		if (p)
+			*p = '\0';
+	} else
+		version = " ??? ";
+	PRINT_INFO("NETIUCV driver Version%s initialized\n", version);
+}
+
+/**
+ * Module exit: tear down every remaining connection (each iteration
+ * shortens iucv_connections via the device destructor path), then
+ * remove the driver attribute files, the driver, and the debug views.
+ */
+static void __exit
+netiucv_exit(void)
+{
+	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	while (iucv_connections) {
+		struct net_device *ndev = iucv_connections->netdev;
+		struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
+		struct device *dev = priv->dev;
+
+		unregister_netdev(ndev);
+		netiucv_unregister_device(dev);
+	}
+
+	driver_remove_file(&netiucv_driver, &driver_attr_connection);
+	driver_remove_file(&netiucv_driver, &driver_attr_remove);
+	driver_unregister(&netiucv_driver);
+	iucv_unregister_dbf_views();
+
+	PRINT_INFO("NETIUCV driver unloaded\n");
+	return;
+}
+
+/**
+ * Module init: register the debug views, the driver and the two driver
+ * attribute files ("connection" and "remove").
+ *
+ * Uses goto-based cleanup so every failure path releases exactly what
+ * was acquired before it.  The original code leaked the driver
+ * registration, the "connection" attribute and the dbf views when
+ * creating the "remove" attribute failed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int __init
+netiucv_init(void)
+{
+	int ret;
+
+	ret = iucv_register_dbf_views();
+	if (ret) {
+		PRINT_WARN("netiucv_init failed, "
+			"iucv_register_dbf_views rc = %d\n", ret);
+		return ret;
+	}
+	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	ret = driver_register(&netiucv_driver);
+	if (ret) {
+		PRINT_ERR("NETIUCV: failed to register driver.\n");
+		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
+		goto out_dbf;
+	}
+
+	/* Add entry for specifying connections. */
+	ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
+	if (ret) {
+		PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
+		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
+		goto out_driver;
+	}
+	ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
+	if (ret) {
+		PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
+		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
+		goto out_conn;
+	}
+	netiucv_banner();
+	return 0;
+
+out_conn:
+	driver_remove_file(&netiucv_driver, &driver_attr_connection);
+out_driver:
+	driver_unregister(&netiucv_driver);
+out_dbf:
+	iucv_unregister_dbf_views();
+	return ret;
+}
+
+module_init(netiucv_init);
+module_exit(netiucv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
new file mode 100644
index 000000000000..a341041a6cf7
--- /dev/null
+++ b/drivers/s390/net/qeth.h
@@ -0,0 +1,1162 @@
+#ifndef __QETH_H__
+#define __QETH_H__
+
+#include <linux/if.h>
+#include <linux/if_arp.h>
+
+#include <linux/if_tr.h>
+#include <linux/trdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+
+#include <net/ipv6.h>
+#include <linux/in6.h>
+#include <net/if_inet6.h>
+#include <net/addrconf.h>
+
+
+#include <linux/bitops.h>
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#include "qeth_mpc.h"
+
+#define VERSION_QETH_H "$Revision: 1.135 $"
+
+#ifdef CONFIG_QETH_IPV6
+#define QETH_VERSION_IPV6 ":IPv6"
+#else
+#define QETH_VERSION_IPV6 ""
+#endif
+#ifdef CONFIG_QETH_VLAN
+#define QETH_VERSION_VLAN ":VLAN"
+#else
+#define QETH_VERSION_VLAN ""
+#endif
+
+/**
+ * Debug Facility stuff
+ */
+#define QETH_DBF_SETUP_NAME "qeth_setup"
+#define QETH_DBF_SETUP_LEN 8
+#define QETH_DBF_SETUP_INDEX 3
+#define QETH_DBF_SETUP_NR_AREAS 1
+#define QETH_DBF_SETUP_LEVEL 5
+
+#define QETH_DBF_MISC_NAME "qeth_misc"
+#define QETH_DBF_MISC_LEN 128
+#define QETH_DBF_MISC_INDEX 1
+#define QETH_DBF_MISC_NR_AREAS 1
+#define QETH_DBF_MISC_LEVEL 2
+
+#define QETH_DBF_DATA_NAME "qeth_data"
+#define QETH_DBF_DATA_LEN 96
+#define QETH_DBF_DATA_INDEX 3
+#define QETH_DBF_DATA_NR_AREAS 1
+#define QETH_DBF_DATA_LEVEL 2
+
+#define QETH_DBF_CONTROL_NAME "qeth_control"
+#define QETH_DBF_CONTROL_LEN 256
+#define QETH_DBF_CONTROL_INDEX 3
+#define QETH_DBF_CONTROL_NR_AREAS 2
+#define QETH_DBF_CONTROL_LEVEL 5
+
+#define QETH_DBF_TRACE_NAME "qeth_trace"
+#define QETH_DBF_TRACE_LEN 8
+#define QETH_DBF_TRACE_INDEX 2
+#define QETH_DBF_TRACE_NR_AREAS 2
+#define QETH_DBF_TRACE_LEVEL 3
+extern debug_info_t *qeth_dbf_trace;
+
+#define QETH_DBF_SENSE_NAME "qeth_sense"
+#define QETH_DBF_SENSE_LEN 64
+#define QETH_DBF_SENSE_INDEX 1
+#define QETH_DBF_SENSE_NR_AREAS 1
+#define QETH_DBF_SENSE_LEVEL 2
+
+#define QETH_DBF_QERR_NAME "qeth_qerr"
+#define QETH_DBF_QERR_LEN 8
+#define QETH_DBF_QERR_INDEX 1
+#define QETH_DBF_QERR_NR_AREAS 2
+#define QETH_DBF_QERR_LEVEL 2
+
+#define QETH_DBF_TEXT(name,level,text) \
+ do { \
+ debug_text_event(qeth_dbf_##name,level,text); \
+ } while (0)
+
+#define QETH_DBF_HEX(name,level,addr,len) \
+ do { \
+ debug_event(qeth_dbf_##name,level,(void*)(addr),len); \
+ } while (0)
+
+DECLARE_PER_CPU(char[256], qeth_dbf_txt_buf);
+
+#define QETH_DBF_TEXT_(name,level,text...) \
+ do { \
+ char* dbf_txt_buf = get_cpu_var(qeth_dbf_txt_buf); \
+ sprintf(dbf_txt_buf, text); \
+ debug_text_event(qeth_dbf_##name,level,dbf_txt_buf); \
+ put_cpu_var(qeth_dbf_txt_buf); \
+ } while (0)
+
+/*
+ * Formatted trace entry.  The original expansion invoked
+ * debug_sprintf_event() twice back to back, logging every event twice
+ * into the trace area; the duplicate call is removed.
+ */
+#define QETH_DBF_SPRINTF(name,level,text...) \
+	do { \
+		debug_sprintf_event(qeth_dbf_trace, level, ##text ); \
+	} while (0)
+
+/**
+ * some more debug stuff
+ */
+#define PRINTK_HEADER "qeth: "
+
+#define HEXDUMP16(importance,header,ptr) \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
+ *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
+ *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
+ *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
+ *(((char*)ptr)+12),*(((char*)ptr)+13), \
+ *(((char*)ptr)+14),*(((char*)ptr)+15)); \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)+16),*(((char*)ptr)+17), \
+ *(((char*)ptr)+18),*(((char*)ptr)+19), \
+ *(((char*)ptr)+20),*(((char*)ptr)+21), \
+ *(((char*)ptr)+22),*(((char*)ptr)+23), \
+ *(((char*)ptr)+24),*(((char*)ptr)+25), \
+ *(((char*)ptr)+26),*(((char*)ptr)+27), \
+ *(((char*)ptr)+28),*(((char*)ptr)+29), \
+ *(((char*)ptr)+30),*(((char*)ptr)+31));
+
+/* Dump @len bytes at @buf to the console as hex, 16 bytes per line. */
+static inline void
+qeth_hex_dump(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		if (i && !(i % 16))
+			printk("\n");
+		printk("%02x ", *(buf + i));
+	}
+	printk("\n");
+}
+
+#define SENSE_COMMAND_REJECT_BYTE 0
+#define SENSE_COMMAND_REJECT_FLAG 0x80
+#define SENSE_RESETTING_EVENT_BYTE 1
+#define SENSE_RESETTING_EVENT_FLAG 0x80
+
+/*
+ * NOTE(review): this casts the atomic_t's .counter *value* to int*;
+ * an xchg target would normally be &(a).counter -- verify the call
+ * sites before relying on this macro.
+ */
+#define atomic_swap(a,b) xchg((int *)a.counter, b)
+
+/*
+ * Common IO related definitions
+ */
+extern struct device *qeth_root_dev;
+extern struct ccw_driver qeth_ccw_driver;
+extern struct ccwgroup_driver qeth_ccwgroup_driver;
+
+#define CARD_RDEV(card) card->read.ccwdev
+#define CARD_WDEV(card) card->write.ccwdev
+#define CARD_DDEV(card) card->data.ccwdev
+#define CARD_BUS_ID(card) card->gdev->dev.bus_id
+#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id
+#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id
+#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
+#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
+
+/*
+ * Map a ccw_device back to its qeth_card via two driver_data hops.
+ * NOTE(review): the expansion ends in a semicolon, so the macro only
+ * works where a full statement is legal -- confirm before using it
+ * inside a larger expression.
+ */
+#define CARD_FROM_CDEV(cdev) (struct qeth_card *) \
+		((struct ccwgroup_device *)cdev->dev.driver_data)\
+		->dev.driver_data;
+
+/**
+ * card stuff
+ */
+#ifdef CONFIG_QETH_PERF_STATS
+struct qeth_perf_stats {
+ unsigned int bufs_rec;
+ unsigned int bufs_sent;
+
+ unsigned int skbs_sent_pack;
+ unsigned int bufs_sent_pack;
+
+ unsigned int sc_dp_p;
+ unsigned int sc_p_dp;
+ /* qdio_input_handler: number of times called, time spent in */
+ __u64 inbound_start_time;
+ unsigned int inbound_cnt;
+ unsigned int inbound_time;
+ /* qeth_send_packet: number of times called, time spent in */
+ __u64 outbound_start_time;
+ unsigned int outbound_cnt;
+ unsigned int outbound_time;
+ /* qdio_output_handler: number of times called, time spent in */
+ __u64 outbound_handler_start_time;
+ unsigned int outbound_handler_cnt;
+ unsigned int outbound_handler_time;
+ /* number of calls to and time spent in do_QDIO for inbound queue */
+ __u64 inbound_do_qdio_start_time;
+ unsigned int inbound_do_qdio_cnt;
+ unsigned int inbound_do_qdio_time;
+ /* number of calls to and time spent in do_QDIO for outbound queues */
+ __u64 outbound_do_qdio_start_time;
+ unsigned int outbound_do_qdio_cnt;
+ unsigned int outbound_do_qdio_time;
+ /* eddp data */
+ unsigned int large_send_bytes;
+ unsigned int large_send_cnt;
+ unsigned int sg_skbs_sent;
+ unsigned int sg_frags_sent;
+};
+#endif /* CONFIG_QETH_PERF_STATS */
+
+/* Routing stuff */
+struct qeth_routing_info {
+ enum qeth_routing_types type;
+};
+
+/* IPA stuff */
+struct qeth_ipa_info {
+ __u32 supported_funcs;
+ __u32 enabled_funcs;
+};
+
+/* Nonzero if IPA function @func is reported as supported by the card. */
+static inline int
+qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
+{
+	return (ipa->supported_funcs & func);
+}
+
+/* Nonzero if IPA function @func is both supported and enabled. */
+static inline int
+qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
+{
+	return (ipa->supported_funcs & ipa->enabled_funcs & func);
+}
+
+#define qeth_adp_supported(c,f) \
+ qeth_is_ipa_supported(&c->options.adp, f)
+#define qeth_adp_enabled(c,f) \
+ qeth_is_ipa_enabled(&c->options.adp, f)
+#define qeth_is_supported(c,f) \
+ qeth_is_ipa_supported(&c->options.ipa4, f)
+#define qeth_is_enabled(c,f) \
+ qeth_is_ipa_enabled(&c->options.ipa4, f)
+#ifdef CONFIG_QETH_IPV6
+#define qeth_is_supported6(c,f) \
+ qeth_is_ipa_supported(&c->options.ipa6, f)
+#define qeth_is_enabled6(c,f) \
+ qeth_is_ipa_enabled(&c->options.ipa6, f)
+#else /* CONFIG_QETH_IPV6 */
+#define qeth_is_supported6(c,f) 0
+#define qeth_is_enabled6(c,f) 0
+#endif /* CONFIG_QETH_IPV6 */
+#define qeth_is_ipafunc_supported(c,prot,f) \
+ (prot==QETH_PROT_IPV6)? qeth_is_supported6(c,f):qeth_is_supported(c,f)
+#define qeth_is_ipafunc_enabled(c,prot,f) \
+ (prot==QETH_PROT_IPV6)? qeth_is_enabled6(c,f):qeth_is_enabled(c,f)
+
+
+#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
+#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
+#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
+#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
+
+#define QETH_MODELLIST_ARRAY \
+ {{0x1731,0x01,0x1732,0x01,QETH_CARD_TYPE_OSAE,1, \
+ QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
+ QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
+ QETH_MAX_QUEUES,0}, \
+ {0x1731,0x05,0x1732,0x05,QETH_CARD_TYPE_IQD,0, \
+ QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
+ QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
+ QETH_MAX_QUEUES,0x103}, \
+ {0,0,0,0,0,0,0,0,0}}
+
+#define QETH_REAL_CARD 1
+#define QETH_VLAN_CARD 2
+#define QETH_BUFSIZE 4096
+
+/**
+ * some more defs
+ */
+#define IF_NAME_LEN 16
+#define QETH_TX_TIMEOUT 100 * HZ
+#define QETH_HEADER_SIZE 32
+#define MAX_PORTNO 15
+#define QETH_FAKE_LL_LEN ETH_HLEN
+#define QETH_FAKE_LL_V6_ADDR_POS 24
+
+/*IPv6 address autoconfiguration stuff*/
+#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
+#define UNIQUE_ID_NOT_BY_CARD 0x10000
+
+/*****************************************************************************/
+/* QDIO queue and buffer handling */
+/*****************************************************************************/
+#define QETH_MAX_QUEUES 4
+#define QETH_IN_BUF_SIZE_DEFAULT 65536
+#define QETH_IN_BUF_COUNT_DEFAULT 16
+#define QETH_IN_BUF_COUNT_MIN 8
+#define QETH_IN_BUF_COUNT_MAX 128
+#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
+#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
+ ((card)->qdio.in_buf_pool.buf_count / 2)
+
+/* buffers we have to be behind before we get a PCI */
+#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
+/*enqueued free buffers left before we get a PCI*/
+#define QETH_PCI_THRESHOLD_B(card) 0
+/*not used unless the microcode gets patched*/
+#define QETH_PCI_TIMER_VALUE(card) 3
+
+#define QETH_MIN_INPUT_THRESHOLD 1
+#define QETH_MAX_INPUT_THRESHOLD 500
+#define QETH_MIN_OUTPUT_THRESHOLD 1
+#define QETH_MAX_OUTPUT_THRESHOLD 300
+
+/* priority queing */
+#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
+#define QETH_DEFAULT_QUEUE 2
+#define QETH_NO_PRIO_QUEUEING 0
+#define QETH_PRIO_Q_ING_PREC 1
+#define QETH_PRIO_Q_ING_TOS 2
+#define IP_TOS_LOWDELAY 0x10
+#define IP_TOS_HIGHTHROUGHPUT 0x08
+#define IP_TOS_HIGHRELIABILITY 0x04
+#define IP_TOS_NOTIMPORTANT 0x02
+
+/* Packing */
+#define QETH_LOW_WATERMARK_PACK 2
+#define QETH_HIGH_WATERMARK_PACK 5
+#define QETH_WATERMARK_PACK_FUZZ 1
+
+#define QETH_IP_HEADER_SIZE 40
+
+struct qeth_hdr_layer3 {
+ __u8 id;
+ __u8 flags;
+ __u16 inbound_checksum; /*TSO:__u16 seqno */
+ __u32 token; /*TSO: __u32 reserved */
+ __u16 length;
+ __u8 vlan_prio;
+ __u8 ext_flags;
+ __u16 vlan_id;
+ __u16 frame_offset;
+ __u8 dest_addr[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_layer2 {
+ __u8 id;
+ __u8 flags[3];
+ __u8 port_no;
+ __u8 hdr_length;
+ __u16 pkt_length;
+ __u16 seq_no;
+ __u16 vlan_id;
+ __u32 reserved;
+ __u8 reserved2[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr {
+ union {
+ struct qeth_hdr_layer2 l2;
+ struct qeth_hdr_layer3 l3;
+ } hdr;
+} __attribute__ ((packed));
+
+
+/* flags for qeth_hdr.flags */
+#define QETH_HDR_PASSTHRU 0x10
+#define QETH_HDR_IPV6 0x80
+#define QETH_HDR_CAST_MASK 0x07
+enum qeth_cast_flags {
+ QETH_CAST_UNICAST = 0x06,
+ QETH_CAST_MULTICAST = 0x04,
+ QETH_CAST_BROADCAST = 0x05,
+ QETH_CAST_ANYCAST = 0x07,
+ QETH_CAST_NOCAST = 0x00,
+};
+
+enum qeth_layer2_frame_flags {
+ QETH_LAYER2_FLAG_MULTICAST = 0x01,
+ QETH_LAYER2_FLAG_BROADCAST = 0x02,
+ QETH_LAYER2_FLAG_UNICAST = 0x04,
+ QETH_LAYER2_FLAG_VLAN = 0x10,
+};
+
+enum qeth_header_ids {
+ QETH_HEADER_TYPE_LAYER3 = 0x01,
+ QETH_HEADER_TYPE_LAYER2 = 0x02,
+ QETH_HEADER_TYPE_TSO = 0x03,
+};
+/* flags for qeth_hdr.ext_flags */
+#define QETH_HDR_EXT_VLAN_FRAME 0x01
+#define QETH_HDR_EXT_TOKEN_ID 0x02
+#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
+#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
+#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
+#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
+#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
+
+/* Nonzero if @sbale is flagged as the last entry of its SBAL. */
+static inline int
+qeth_is_last_sbale(struct qdio_buffer_element *sbale)
+{
+	return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
+}
+
+enum qeth_qdio_buffer_states {
+ /*
+ * inbound: read out by driver; owned by hardware in order to be filled
+ * outbound: owned by driver in order to be filled
+ */
+ QETH_QDIO_BUF_EMPTY,
+ /*
+ * inbound: filled by hardware; owned by driver in order to be read out
+ * outbound: filled by driver; owned by hardware in order to be sent
+ */
+ QETH_QDIO_BUF_PRIMED,
+};
+
+enum qeth_qdio_info_states {
+ QETH_QDIO_UNINITIALIZED,
+ QETH_QDIO_ALLOCATED,
+ QETH_QDIO_ESTABLISHED,
+};
+
+struct qeth_buffer_pool_entry {
+ struct list_head list;
+ struct list_head init_list;
+ void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+};
+
+struct qeth_qdio_buffer_pool {
+ struct list_head entry_list;
+ int buf_count;
+};
+
+struct qeth_qdio_buffer {
+ struct qdio_buffer *buffer;
+ volatile enum qeth_qdio_buffer_states state;
+ /* the buffer pool entry currently associated to this buffer */
+ struct qeth_buffer_pool_entry *pool_entry;
+};
+
+struct qeth_qdio_q {
+ struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
+ /*
+ * buf_to_init means "buffer must be initialized by driver and must
+ * be made available for hardware" -> state is set to EMPTY
+ */
+ volatile int next_buf_to_init;
+} __attribute__ ((aligned(256)));
+
+/* possible types of qeth large_send support */
+enum qeth_large_send_types {
+ QETH_LARGE_SEND_NO,
+ QETH_LARGE_SEND_EDDP,
+ QETH_LARGE_SEND_TSO,
+};
+
+struct qeth_qdio_out_buffer {
+ struct qdio_buffer *buffer;
+ atomic_t state;
+ volatile int next_element_to_fill;
+ struct sk_buff_head skb_list;
+ struct list_head ctx_list;
+};
+
+struct qeth_card;
+
+enum qeth_out_q_states {
+ QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ QETH_OUT_Q_LOCKED_FLUSH,
+};
+
+struct qeth_qdio_out_q {
+ struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
+ int queue_no;
+ struct qeth_card *card;
+ atomic_t state;
+ volatile int do_pack;
+ /*
+ * index of buffer to be filled by driver; state EMPTY or PACKING
+ */
+ volatile int next_buf_to_fill;
+ /*
+ * number of buffers that are currently filled (PRIMED)
+ * -> these buffers are hardware-owned
+ */
+ atomic_t used_buffers;
+ /* indicates whether PCI flag must be set (or if one is outstanding) */
+ atomic_t set_pci_flags_count;
+} __attribute__ ((aligned(256)));
+
+struct qeth_qdio_info {
+ volatile enum qeth_qdio_info_states state;
+ /* input */
+ struct qeth_qdio_q *in_q;
+ struct qeth_qdio_buffer_pool in_buf_pool;
+ struct qeth_qdio_buffer_pool init_pool;
+ int in_buf_size;
+
+ /* output */
+ int no_out_queues;
+ struct qeth_qdio_out_q **out_qs;
+
+ /* priority queueing */
+ int do_prio_queueing;
+ int default_out_queue;
+};
+
+enum qeth_send_errors {
+ QETH_SEND_ERROR_NONE,
+ QETH_SEND_ERROR_LINK_FAILURE,
+ QETH_SEND_ERROR_RETRY,
+ QETH_SEND_ERROR_KICK_IT,
+};
+
+#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
+#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
+/* tr mc mac is longer, but that will be enough to detect mc frames */
+#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
+#define QETH_TR_MAC_C 0x0300 /* canonical */
+
+#define DEFAULT_ADD_HHLEN 0
+#define MAX_ADD_HHLEN 1024
+
+/**
+ * buffer stuff for read channel
+ */
+#define QETH_CMD_BUFFER_NO 8
+
+/**
+ * channel state machine
+ */
+enum qeth_channel_states {
+ CH_STATE_UP,
+ CH_STATE_DOWN,
+ CH_STATE_ACTIVATING,
+ CH_STATE_HALTED,
+ CH_STATE_STOPPED,
+};
+/**
+ * card state machine
+ */
+enum qeth_card_states {
+ CARD_STATE_DOWN,
+ CARD_STATE_HARDSETUP,
+ CARD_STATE_SOFTSETUP,
+ CARD_STATE_UP,
+ CARD_STATE_RECOVER,
+};
+
+/**
+ * Protocol versions
+ */
+enum qeth_prot_versions {
+ QETH_PROT_SNA = 0x0001,
+ QETH_PROT_IPV4 = 0x0004,
+ QETH_PROT_IPV6 = 0x0006,
+};
+
+enum qeth_ip_types {
+ QETH_IP_TYPE_NORMAL,
+ QETH_IP_TYPE_VIPA,
+ QETH_IP_TYPE_RXIP,
+ QETH_IP_TYPE_DEL_ALL_MC,
+};
+
+enum qeth_cmd_buffer_state {
+ BUF_STATE_FREE,
+ BUF_STATE_LOCKED,
+ BUF_STATE_PROCESSED,
+};
+/**
+ * IP address and multicast list
+ */
+struct qeth_ipaddr {
+ struct list_head entry;
+ enum qeth_ip_types type;
+ enum qeth_ipa_setdelip_flags set_flags;
+ enum qeth_ipa_setdelip_flags del_flags;
+ int is_multicast;
+ volatile int users;
+ enum qeth_prot_versions proto;
+ unsigned char mac[OSA_ADDR_LEN];
+ union {
+ struct {
+ unsigned int addr;
+ unsigned int mask;
+ } a4;
+ struct {
+ struct in6_addr addr;
+ unsigned int pfxlen;
+ } a6;
+ } u;
+};
+
+struct qeth_ipato_entry {
+ struct list_head entry;
+ enum qeth_prot_versions proto;
+ char addr[16];
+ int mask_bits;
+};
+
+struct qeth_ipato {
+ int enabled;
+ int invert4;
+ int invert6;
+ struct list_head entries;
+};
+
+struct qeth_channel;
+
+struct qeth_cmd_buffer {
+ enum qeth_cmd_buffer_state state;
+ struct qeth_channel *channel;
+ unsigned char *data;
+ int rc;
+ void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
+};
+
+
+/**
+ * definition of a qeth channel, used for read and write
+ */
+struct qeth_channel {
+ enum qeth_channel_states state;
+ struct ccw1 ccw;
+ spinlock_t iob_lock;
+ wait_queue_head_t wait_q;
+ struct tasklet_struct irq_tasklet;
+ struct ccw_device *ccwdev;
+/*command buffer for control data*/
+ struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
+ atomic_t irq_pending;
+ volatile int io_buf_no;
+ volatile int buf_no;
+};
+
+/**
+ * OSA card related definitions
+ */
+struct qeth_token {
+ __u32 issuer_rm_w;
+ __u32 issuer_rm_r;
+ __u32 cm_filter_w;
+ __u32 cm_filter_r;
+ __u32 cm_connection_w;
+ __u32 cm_connection_r;
+ __u32 ulp_filter_w;
+ __u32 ulp_filter_r;
+ __u32 ulp_connection_w;
+ __u32 ulp_connection_r;
+};
+
+struct qeth_seqno {
+ __u32 trans_hdr;
+ __u32 pdu_hdr;
+ __u32 pdu_hdr_ack;
+ __u16 ipa;
+};
+
+struct qeth_reply {
+ struct list_head list;
+ wait_queue_head_t wait_q;
+ int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long);
+ u32 seqno;
+ unsigned long offset;
+ int received;
+ int rc;
+ void *param;
+ struct qeth_card *card;
+ atomic_t refcnt;
+};
+
+#define QETH_BROADCAST_WITH_ECHO 1
+#define QETH_BROADCAST_WITHOUT_ECHO 2
+
+struct qeth_card_blkt {
+ int time_total;
+ int inter_packet;
+ int inter_packet_jumbo;
+};
+
+
+
+struct qeth_card_info {
+ unsigned short unit_addr2;
+ unsigned short cula;
+ unsigned short chpid;
+ __u16 func_level;
+ char mcl_level[QETH_MCL_LENGTH + 1];
+ int guestlan;
+ int layer2_mac_registered;
+ int portname_required;
+ int portno;
+ char portname[9];
+ enum qeth_card_types type;
+ enum qeth_link_types link_type;
+ int is_multicast_different;
+ int initial_mtu;
+ int max_mtu;
+ int broadcast_capable;
+ int unique_id;
+ struct qeth_card_blkt blkt;
+ __u32 csum_mask;
+};
+
+struct qeth_card_options {
+ struct qeth_routing_info route4;
+ struct qeth_ipa_info ipa4;
+ struct qeth_ipa_info adp; /*Adapter parameters*/
+#ifdef CONFIG_QETH_IPV6
+ struct qeth_routing_info route6;
+ struct qeth_ipa_info ipa6;
+#endif /* QETH_IPV6 */
+ enum qeth_checksum_types checksum_type;
+ int broadcast_mode;
+ int macaddr_mode;
+ int fake_broadcast;
+ int add_hhlen;
+ int fake_ll;
+ int layer2;
+ enum qeth_large_send_types large_send;
+};
+
+/*
+ * thread bits for qeth_card thread masks
+ */
+enum qeth_threads {
+ QETH_SET_IP_THREAD = 1,
+ QETH_RECOVER_THREAD = 2,
+};
+
+struct qeth_card {
+ struct list_head list;
+ enum qeth_card_states state;
+ int lan_online;
+ spinlock_t lock;
+/*hardware and sysfs stuff*/
+ struct ccwgroup_device *gdev;
+ struct qeth_channel read;
+ struct qeth_channel write;
+ struct qeth_channel data;
+
+ struct net_device *dev;
+ struct net_device_stats stats;
+
+ struct qeth_card_info info;
+ struct qeth_token token;
+ struct qeth_seqno seqno;
+ struct qeth_card_options options;
+
+ wait_queue_head_t wait_q;
+#ifdef CONFIG_QETH_VLAN
+ spinlock_t vlanlock;
+ struct vlan_group *vlangrp;
+#endif
+ struct work_struct kernel_thread_starter;
+ spinlock_t thread_mask_lock;
+ volatile unsigned long thread_start_mask;
+ volatile unsigned long thread_allowed_mask;
+ volatile unsigned long thread_running_mask;
+ spinlock_t ip_lock;
+ struct list_head ip_list;
+ struct list_head *ip_tbd_list;
+ struct qeth_ipato ipato;
+ struct list_head cmd_waiter_list;
+ /* QDIO buffer handling */
+ struct qeth_qdio_info qdio;
+#ifdef CONFIG_QETH_PERF_STATS
+ struct qeth_perf_stats perf_stats;
+#endif /* CONFIG_QETH_PERF_STATS */
+ int use_hard_stop;
+ int (*orig_hard_header)(struct sk_buff *,struct net_device *,
+ unsigned short,void *,void *,unsigned);
+};
+
+struct qeth_card_list_struct {
+ struct list_head list;
+ rwlock_t rwlock;
+};
+
+extern struct qeth_card_list_struct qeth_card_list;
+
+/*notifier list */
+struct qeth_notify_list_struct {
+ struct list_head list;
+ struct task_struct *task;
+ int signum;
+};
+extern spinlock_t qeth_notify_lock;
+extern struct list_head qeth_notify_list;
+
+/*some helper functions*/
+
+#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+
+/* IPA adapter type code for a link type: 2 for HiperSockets token ring
+ * (HSTR), 1 for everything else. */
+inline static __u8
+qeth_get_ipa_adp_type(enum qeth_link_types link_type)
+{
+	switch (link_type) {
+	case QETH_LINK_TYPE_HSTR:
+		return 2;
+	default:
+		return 1;
+	}
+}
+
+/**
+ * Ensure *skb has at least @size bytes of headroom, reallocating (and
+ * replacing *skb) if necessary.  Returns 0 or -ENOMEM.  On success the
+ * caller owns the (possibly new) skb pointed to by *skb.
+ */
+inline static int
+qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
+{
+	struct sk_buff *new_skb = NULL;
+
+	if (skb_headroom(*skb) < size){
+		new_skb = skb_realloc_headroom(*skb, size);
+		if (!new_skb) {
+			PRINT_ERR("qeth_prepare_skb: could "
+				  "not realloc headroom for qeth_hdr "
+				  "on interface %s", QETH_CARD_IFNAME(card));
+			return -ENOMEM;
+		}
+		*skb = new_skb;
+	}
+	return 0;
+}
+/**
+ * Return an unshared (not cloned) version of @skb: the skb itself if it
+ * is not cloned, otherwise a private copy; the caller's reference to
+ * the original is dropped.  Returns NULL if the copy fails.
+ */
+static inline struct sk_buff *
+qeth_pskb_unshare(struct sk_buff *skb, int pri)
+{
+        struct sk_buff *nskb;
+        if (!skb_cloned(skb))
+                return skb;
+        nskb = skb_copy(skb, pri);
+        kfree_skb(skb); /* free our shared copy */
+        return nskb;
+}
+
+
+/**
+ * Push @size bytes of qeth header space onto *skb and return a pointer
+ * to it, or NULL if header plus the first QETH_IP_HEADER_SIZE bytes of
+ * payload would straddle a 4k page boundary (rejected by the hardware
+ * path, so the packet must be discarded by the caller).
+ */
+inline static void *
+qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
+{
+	void *hdr;
+
+	hdr = (void *) skb_push(*skb, size);
+	/*
+	 * sanity check, the Linux memory allocation scheme should
+	 * never present us cases like this one (the qdio header size plus
+	 * the first 40 bytes of the paket cross a 4k boundary)
+	 */
+	if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
+	    (((unsigned long) hdr + size +
+	      QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
+		PRINT_ERR("qeth_prepare_skb: misaligned "
+			  "packet on interface %s. Discarded.",
+			  QETH_CARD_IFNAME(card));
+		return NULL;
+	}
+	return hdr;
+}
+
+/**
+ * Total header length to reserve for a qeth frame: sizeof(qeth_hdr)
+ * plus the link-layer header appropriate for @link_type and the
+ * configured IPV6/VLAN options.
+ */
+inline static int
+qeth_get_hlen(__u8 link_type)
+{
+#ifdef CONFIG_QETH_IPV6
+	switch (link_type) {
+	case QETH_LINK_TYPE_HSTR:
+	case QETH_LINK_TYPE_LANE_TR:
+		return sizeof(struct qeth_hdr) + TR_HLEN;
+	default:
+#ifdef CONFIG_QETH_VLAN
+		return sizeof(struct qeth_hdr) + VLAN_ETH_HLEN;
+#else
+		return sizeof(struct qeth_hdr) + ETH_HLEN;
+#endif
+	}
+#else  /* CONFIG_QETH_IPV6 */
+#ifdef CONFIG_QETH_VLAN
+	return sizeof(struct qeth_hdr) + VLAN_HLEN;
+#else
+	return sizeof(struct qeth_hdr);
+#endif
+#endif /* CONFIG_QETH_IPV6 */
+}
+
+/*
+ * Initial net_device flags for a card: layer2 mode gets none;
+ * HiperSockets (IQD) never does ARP; without IPv6 support other card
+ * types are also marked IFF_NOARP.
+ */
+inline static unsigned short
+qeth_get_netdev_flags(struct qeth_card *card)
+{
+	if (card->options.layer2)
+		return 0;
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_IQD:
+		return IFF_NOARP;
+#ifdef CONFIG_QETH_IPV6
+	default:
+		return 0;
+#else
+	default:
+		return IFF_NOARP;
+#endif
+	}
+}
+
+/*
+ * Default MTU to configure when the card comes up: IQD uses the
+ * hardware-reported maximum, token-ring style OSA links get 2000,
+ * other OSA links 1492, everything else the Ethernet default 1500.
+ */
+inline static int
+qeth_get_initial_mtu_for_card(struct qeth_card * card)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_UNKNOWN:
+		return 1500;
+	case QETH_CARD_TYPE_IQD:
+		return card->info.max_mtu;
+	case QETH_CARD_TYPE_OSAE:
+		switch (card->info.link_type) {
+		case QETH_LINK_TYPE_HSTR:
+		case QETH_LINK_TYPE_LANE_TR:
+			return 2000;
+		default:
+			return 1492;
+		}
+	default:
+		return 1500;
+	}
+}
+
+/* Upper MTU bound per card type; unrecognized types are capped at the
+ * conservative Ethernet default of 1500. */
+inline static int
+qeth_get_max_mtu_for_card(int cardtype)
+{
+	switch (cardtype) {
+	case QETH_CARD_TYPE_UNKNOWN:
+	case QETH_CARD_TYPE_OSAE:
+		return 61440;
+	case QETH_CARD_TYPE_IQD:
+		return 57344;
+	default:
+		return 1500;
+	}
+}
+
+/* Nonzero when the card type takes its MTU from the MPC exchange
+ * (only HiperSockets/IQD does). */
+inline static int
+qeth_get_mtu_out_of_mpc(int cardtype)
+{
+	return (cardtype == QETH_CARD_TYPE_IQD) ? 1 : 0;
+}
+
+/*
+ * Translate the hardware frame-size code reported by the card into a
+ * usable MTU; returns 0 for codes we do not recognize.
+ */
+inline static int
+qeth_get_mtu_outof_framesize(int framesize)
+{
+	switch (framesize) {
+	case 0x4000:
+		return 8192;
+	case 0x6000:
+		return 16384;
+	case 0xa000:
+		return 32768;
+	case 0xffff:
+		return 57344;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Validate a user-requested MTU against the card's limits.  Unknown
+ * card types accept anything (we cannot judge); the minimum of 576 is
+ * the classic IPv4 minimum reassembly size.
+ */
+inline static int
+qeth_mtu_is_valid(struct qeth_card * card, int mtu)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_OSAE:
+		return ((mtu >= 576) && (mtu <= 61440));
+	case QETH_CARD_TYPE_IQD:
+		return ((mtu >= 576) &&
+			(mtu <= card->info.max_mtu + 4096 - 32));
+	case QETH_CARD_TYPE_UNKNOWN:
+	default:
+		return 1;
+	}
+}
+
+/*
+ * ARP hardware type for the net_device: token-ring style OSA links
+ * report IEEE 802 Token Ring, everything else plain Ethernet.
+ */
+inline static int
+qeth_get_arphdr_type(int cardtype, int linktype)
+{
+	switch (cardtype) {
+	case QETH_CARD_TYPE_OSAE:
+		switch (linktype) {
+		case QETH_LINK_TYPE_LANE_TR:
+		case QETH_LINK_TYPE_HSTR:
+			return ARPHRD_IEEE802_TR;
+		default:
+			return ARPHRD_ETHER;
+		}
+	case QETH_CARD_TYPE_IQD:
+	default:
+		return ARPHRD_ETHER;
+	}
+}
+
+#ifdef CONFIG_QETH_PERF_STATS
+/* Microsecond timestamp for perf statistics: the s390 TOD clock ticks
+ * in units of 2^-12 microseconds, hence the shift by 12. */
+inline static int
+qeth_get_micros(void)
+{
+	return (int) (get_clock() >> 12);
+}
+#endif
+
+/* QDIO queue format code: 2 for HiperSockets (IQD), 0 otherwise. */
+static inline int
+qeth_get_qdio_q_format(struct qeth_card *card)
+{
+	return (card->info.type == QETH_CARD_TYPE_IQD) ? 2 : 0;
+}
+
+/* Render a 4-byte IPv4 address as dotted-quad text into buf
+ * (buf must hold at least 16 bytes). */
+static inline void
+qeth_ipaddr4_to_string(const __u8 *addr, char *buf)
+{
+	sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
+}
+
+/*
+ * Parse a dotted-quad IPv4 string into addr[4].
+ * Returns 0 on success, -EINVAL on malformed input.  Each segment is
+ * limited to 3 characters; previously an over-long segment overran the
+ * 4-byte stack buffer via strncpy()/strcpy().
+ */
+static inline int
+qeth_string_to_ipaddr4(const char *buf, __u8 *addr)
+{
+	const char *start, *end;
+	char abuf[4];
+	char *tmp;
+	int len;
+	int i;
+
+	start = buf;
+	for (i = 0; i < 3; i++) {
+		if (!(end = strchr(start, '.')))
+			return -EINVAL;
+		len = end - start;
+		/* an octet has at most 3 digits; reject longer or empty
+		 * segments instead of overflowing abuf */
+		if (len <= 0 || len > 3)
+			return -EINVAL;
+		memset(abuf, 0, 4);
+		strncpy(abuf, start, len);
+		addr[i] = simple_strtoul(abuf, &tmp, 10);
+		start = end + 1;
+	}
+	/* last octet: bound the copy as well */
+	len = strlen(start);
+	if (len <= 0 || len > 3)
+		return -EINVAL;
+	memset(abuf, 0, 4);
+	strncpy(abuf, start, len);
+	addr[3] = simple_strtoul(abuf, &tmp, 10);
+	return 0;
+}
+
+/* Render a 16-byte IPv6 address as full (non-abbreviated) hex groups
+ * into buf (buf must hold at least 40 bytes). */
+static inline void
+qeth_ipaddr6_to_string(const __u8 *addr, char *buf)
+{
+	sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+		     ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
+		     addr[0], addr[1], addr[2], addr[3],
+		     addr[4], addr[5], addr[6], addr[7],
+		     addr[8], addr[9], addr[10], addr[11],
+		     addr[12], addr[13], addr[14], addr[15]);
+}
+
+/*
+ * Parse a full (non-abbreviated, eight-group) colon-separated IPv6
+ * string into addr[16].  Returns 0 on success, -EINVAL on malformed
+ * input.  Each group is limited to 4 hex digits; previously an
+ * over-long group overran the 5-byte stack buffer via
+ * strncpy()/strcpy().
+ */
+static inline int
+qeth_string_to_ipaddr6(const char *buf, __u8 *addr)
+{
+	const char *start, *end;
+	u16 *tmp_addr;
+	char abuf[5];
+	char *tmp;
+	int len;
+	int i;
+
+	tmp_addr = (u16 *)addr;
+	start = buf;
+	for (i = 0; i < 7; i++) {
+		if (!(end = strchr(start, ':')))
+			return -EINVAL;
+		len = end - start;
+		/* a group has at most 4 hex digits; reject longer or
+		 * empty segments instead of overflowing abuf */
+		if (len <= 0 || len > 4)
+			return -EINVAL;
+		memset(abuf, 0, 5);
+		strncpy(abuf, start, len);
+		tmp_addr[i] = simple_strtoul(abuf, &tmp, 16);
+		start = end + 1;
+	}
+	/* last group: bound the copy as well */
+	len = strlen(start);
+	if (len <= 0 || len > 4)
+		return -EINVAL;
+	memset(abuf, 0, 5);
+	strncpy(abuf, start, len);
+	tmp_addr[7] = simple_strtoul(abuf, &tmp, 16);
+	return 0;
+}
+
+/* Protocol-dispatching wrapper around the v4/v6 address formatters;
+ * buf is left untouched for unknown protocol values. */
+static inline void
+qeth_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
+		      char *buf)
+{
+	if (proto == QETH_PROT_IPV4)
+		return qeth_ipaddr4_to_string(addr, buf);
+	else if (proto == QETH_PROT_IPV6)
+		return qeth_ipaddr6_to_string(addr, buf);
+}
+
+/* Protocol-dispatching wrapper around the v4/v6 address parsers;
+ * returns -EINVAL for unknown protocol values. */
+static inline int
+qeth_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
+		      __u8 *addr)
+{
+	switch (proto) {
+	case QETH_PROT_IPV4:
+		return qeth_string_to_ipaddr4(buf, addr);
+	case QETH_PROT_IPV6:
+		return qeth_string_to_ipaddr6(buf, addr);
+	default:
+		return -EINVAL;
+	}
+}
+
+extern int
+qeth_setrouting_v4(struct qeth_card *);
+extern int
+qeth_setrouting_v6(struct qeth_card *);
+
+extern int
+qeth_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
+
+extern void
+qeth_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions, u8 *, int);
+
+extern int
+qeth_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+
+extern void
+qeth_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+
+extern int
+qeth_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+
+extern void
+qeth_del_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+
+extern int
+qeth_notifier_register(struct task_struct *, int );
+
+extern int
+qeth_notifier_unregister(struct task_struct * );
+
+extern void
+qeth_schedule_recovery(struct qeth_card *);
+
+extern int
+qeth_realloc_buffer_pool(struct qeth_card *, int);
+
+extern int
+qeth_set_large_send(struct qeth_card *);
+
+extern void
+qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
+ struct sk_buff *, int, int);
+extern void
+qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int);
+
+#endif /* __QETH_H__ */
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
new file mode 100644
index 000000000000..7ee1c06ed68a
--- /dev/null
+++ b/drivers/s390/net/qeth_eddp.c
@@ -0,0 +1,643 @@
+/*
+ *
+ * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $)
+ *
+ * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
+ *
+ * Copyright 2004 IBM Corporation
+ *
+ * Author(s): Thomas Spatzier <tspat@de.ibm.com>
+ *
+ * $Revision: 1.11 $ $Date: 2005/03/24 09:04:18 $
+ *
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+#include <linux/kernel.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+#include <linux/skbuff.h>
+
+#include <net/ip.h>
+
+#include "qeth.h"
+#include "qeth_mpc.h"
+#include "qeth_eddp.h"
+
+/*
+ * Count how many output buffers (starting at the queue's next fill
+ * position) are needed to hold all elements of the eddp context.
+ * Returns the buffer count, or -EBUSY when a required buffer is not
+ * empty yet.
+ */
+int
+qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
+				    struct qeth_eddp_context *ctx)
+{
+	int index = queue->next_buf_to_fill;
+	int elements_needed = ctx->num_elements;
+	int elements_in_buffer;
+	int skbs_in_buffer;
+	int buffers_needed = 0;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
+	while(elements_needed > 0) {
+		buffers_needed++;
+		if (atomic_read(&queue->bufs[index].state) !=
+				QETH_QDIO_BUF_EMPTY)
+			return -EBUSY;
+
+		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
+				     queue->bufs[index].next_element_to_fill;
+		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
+		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
+		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
+	}
+	return buffers_needed;
+}
+
+/*
+ * Release all pages, the element array and the context itself.
+ * Called once the reference count has dropped to zero (or on a
+ * partially-constructed context during creation failure).
+ */
+static inline void
+qeth_eddp_free_context(struct qeth_eddp_context *ctx)
+{
+	int i;
+
+	QETH_DBF_TEXT(trace, 5, "eddpfctx");
+	for (i = 0; i < ctx->num_pages; ++i)
+		free_page((unsigned long)ctx->pages[i]);
+	kfree(ctx->pages);
+	/* kfree(NULL) is a no-op; no need to test ctx->elements first */
+	kfree(ctx->elements);
+	kfree(ctx);
+}
+
+
+/* Take an additional reference on the context. */
+static inline void
+qeth_eddp_get_context(struct qeth_eddp_context *ctx)
+{
+	atomic_inc(&ctx->refcnt);
+}
+
+/* Drop one reference; frees the context when the last one goes away. */
+void
+qeth_eddp_put_context(struct qeth_eddp_context *ctx)
+{
+	if (atomic_dec_return(&ctx->refcnt) == 0)
+		qeth_eddp_free_context(ctx);
+}
+
+/*
+ * Drop the buffer's references to all eddp contexts attached to it and
+ * free the reference nodes.  Called when an output buffer is recycled.
+ */
+void
+qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
+{
+	struct qeth_eddp_context_reference *ref;
+
+	QETH_DBF_TEXT(trace, 6, "eddprctx");
+	while (!list_empty(&buf->ctx_list)){
+		ref = list_entry(buf->ctx_list.next,
+				 struct qeth_eddp_context_reference, list);
+		qeth_eddp_put_context(ref->ctx);
+		list_del(&ref->list);
+		kfree(ref);
+	}
+}
+
+/*
+ * Attach the context to the buffer's ctx_list, taking a reference so
+ * the context stays alive until the buffer is released.
+ * Returns 0 on success, -ENOMEM if the reference node cannot be
+ * allocated.
+ */
+static inline int
+qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
+			  struct qeth_eddp_context *ctx)
+{
+	struct qeth_eddp_context_reference *ref;
+
+	QETH_DBF_TEXT(trace, 6, "eddprfcx");
+	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
+	if (ref == NULL)
+		return -ENOMEM;
+	qeth_eddp_get_context(ctx);
+	ref->ctx = ctx;
+	list_add_tail(&ref->list, &buf->ctx_list);
+	return 0;
+}
+
+/*
+ * Distribute all elements of the eddp context over output buffers,
+ * starting at 'index'.  Whole skbs are never split across buffers: if
+ * the remaining space cannot hold elements_per_skb elements, the
+ * current buffer is primed and filling continues in the next one.
+ * Returns the number of buffers set to PRIMED (ready to be flushed),
+ * or -EBUSY when not even the first buffer was usable.  Ordering of
+ * the atomic state checks/sets relative to the fill is significant,
+ * so the code is left exactly as written.
+ */
+int
+qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
+		      struct qeth_eddp_context *ctx,
+		      int index)
+{
+	struct qeth_qdio_out_buffer *buf = NULL;
+	struct qdio_buffer *buffer;
+	int elements = ctx->num_elements;
+	int element = 0;
+	int flush_cnt = 0;
+	int must_refcnt = 1;
+	int i;
+
+	QETH_DBF_TEXT(trace, 5, "eddpfibu");
+	while (elements > 0) {
+		buf = &queue->bufs[index];
+		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY){
+			/* normally this should not happen since we checked for
+			 * available elements in qeth_check_elements_for_context
+			 */
+			if (element == 0)
+				return -EBUSY;
+			else {
+				PRINT_WARN("could only partially fill eddp "
+					   "buffer!\n");
+				goto out;
+			}
+		}
+		/* check if the whole next skb fits into current buffer */
+		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
+					buf->next_element_to_fill)
+				< ctx->elements_per_skb){
+			/* no -> go to next buffer */
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
+			flush_cnt++;
+			/* new buffer, so we have to add ctx to buffer'ctx_list
+			 * and increment ctx's refcnt */
+			must_refcnt = 1;
+			continue;
+		}
+		if (must_refcnt){
+			must_refcnt = 0;
+			if (qeth_eddp_buf_ref_context(buf, ctx)){
+				PRINT_WARN("no memory to create eddp context "
+					   "reference\n");
+				goto out_check;
+			}
+		}
+		buffer = buf->buffer;
+		/* fill one skb into buffer */
+		for (i = 0; i < ctx->elements_per_skb; ++i){
+			buffer->element[buf->next_element_to_fill].addr =
+				ctx->elements[element].addr;
+			buffer->element[buf->next_element_to_fill].length =
+				ctx->elements[element].length;
+			buffer->element[buf->next_element_to_fill].flags =
+				ctx->elements[element].flags;
+			buf->next_element_to_fill++;
+			element++;
+			elements--;
+		}
+	}
+out_check:
+	if (!queue->do_pack) {
+		QETH_DBF_TEXT(trace, 6, "fillbfnp");
+		/* set state to PRIMED -> will be flushed */
+		if (buf->next_element_to_fill > 0){
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt++;
+		}
+	} else {
+#ifdef CONFIG_QETH_PERF_STATS
+		queue->card->perf_stats.skbs_sent_pack++;
+#endif
+		QETH_DBF_TEXT(trace, 6, "fillbfpa");
+		if (buf->next_element_to_fill >=
+				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
+			/*
+			 * packed buffer is full -> set state PRIMED
+			 * -> will be flushed
+			 */
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt++;
+		}
+	}
+out:
+	return flush_cnt;
+}
+
+/* Number of bytes living in the skb's linear data area, i.e. total
+ * length minus all bytes held in page fragments. */
+static inline int
+qeth_get_skb_data_len(struct sk_buff *skb)
+{
+	int len = skb->len;
+	int i;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
+		len -= skb_shinfo(skb)->frags[i].size;
+	return len;
+}
+
+/*
+ * Copy all headers of the current segment (qeth header, optional MAC
+ * header and VLAN tag, network header, transport header) into the
+ * context's page area and start a new scatter element for them.  The
+ * complete header block is kept within one page: if it would cross a
+ * page boundary we skip to the start of the next page.  Records where
+ * the network and transport headers ended up (nh_in_ctx/th_in_ctx) so
+ * the checksum can be patched in later.
+ */
+static inline void
+qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
+			      struct qeth_eddp_data *eddp)
+{
+	u8 *page;
+	int page_remainder;
+	int page_offset;
+	int hdr_len;
+	struct qeth_eddp_element *element;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
+	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+	page_offset = ctx->offset % PAGE_SIZE;
+	element = &ctx->elements[ctx->num_elements];
+	hdr_len = eddp->nhl + eddp->thl;
+	/* FIXME: layer2 and VLAN !!! */
+	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
+		hdr_len += ETH_HLEN;
+	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
+		hdr_len += VLAN_HLEN;
+	/* does complete header fit in current page ? */
+	page_remainder = PAGE_SIZE - page_offset;
+	if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){
+		/* no -> go to start of next page */
+		ctx->offset += page_remainder;
+		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+		page_offset = 0;
+	}
+	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
+	element->addr = page + page_offset;
+	element->length = sizeof(struct qeth_hdr);
+	ctx->offset += sizeof(struct qeth_hdr);
+	page_offset += sizeof(struct qeth_hdr);
+	/* add mac header (?) */
+	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
+		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
+		element->length += ETH_HLEN;
+		ctx->offset += ETH_HLEN;
+		page_offset += ETH_HLEN;
+	}
+	/* add VLAN tag */
+	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)){
+		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
+		element->length += VLAN_HLEN;
+		ctx->offset += VLAN_HLEN;
+		page_offset += VLAN_HLEN;
+	}
+	/* add network header */
+	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
+	element->length += eddp->nhl;
+	eddp->nh_in_ctx = page + page_offset;
+	ctx->offset += eddp->nhl;
+	page_offset += eddp->nhl;
+	/* add transport header */
+	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
+	element->length += eddp->thl;
+	eddp->th_in_ctx = page + page_offset;
+	ctx->offset += eddp->thl;
+}
+
+/*
+ * Copy 'len' payload bytes of the current skb into dst while folding
+ * them into the running checksum *hcsum.  The skb's read position is
+ * tracked in eddp: frag == -1 means we are still consuming the linear
+ * data area; afterwards frag/frag_offset walk the page fragments.
+ */
+static inline void
+qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
+			u32 *hcsum)
+{
+	struct skb_frag_struct *frag;
+	int left_in_frag;
+	int copy_len;
+	u8 *src;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
+	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
+		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
+		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
+				      *hcsum);
+		eddp->skb_offset += len;
+	} else {
+		while (len > 0) {
+			if (eddp->frag < 0) {
+				/* we're in skb->data */
+				left_in_frag = qeth_get_skb_data_len(eddp->skb)
+						- eddp->skb_offset;
+				src = eddp->skb->data + eddp->skb_offset;
+			} else {
+				frag = &skb_shinfo(eddp->skb)->
+					frags[eddp->frag];
+				left_in_frag = frag->size - eddp->frag_offset;
+				src = (u8 *)(
+					(page_to_pfn(frag->page) << PAGE_SHIFT)+
+					frag->page_offset + eddp->frag_offset);
+			}
+			if (left_in_frag <= 0) {
+				eddp->frag++;
+				eddp->frag_offset = 0;
+				continue;
+			}
+			copy_len = min(left_in_frag, len);
+			memcpy(dst, src, copy_len);
+			*hcsum = csum_partial(src, copy_len, *hcsum);
+			dst += copy_len;
+			eddp->frag_offset += copy_len;
+			eddp->skb_offset += copy_len;
+			len -= copy_len;
+		}
+	}
+}
+
+/*
+ * Copy data_len payload bytes of the current segment into the context
+ * pages, splitting into additional scatter elements at each page
+ * boundary and tagging them with FIRST/MIDDLE/LAST fragment flags.
+ * Finally folds the accumulated checksum and writes it into the TCP
+ * header previously placed by qeth_eddp_create_segment_hdrs().
+ */
+static inline void
+qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
+				  struct qeth_eddp_data *eddp, int data_len,
+				  u32 hcsum)
+{
+	u8 *page;
+	int page_remainder;
+	int page_offset;
+	struct qeth_eddp_element *element;
+	int first_lap = 1;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
+	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+	page_offset = ctx->offset % PAGE_SIZE;
+	element = &ctx->elements[ctx->num_elements];
+	while (data_len){
+		page_remainder = PAGE_SIZE - page_offset;
+		if (page_remainder < data_len){
+			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
+						page_remainder, &hcsum);
+			element->length += page_remainder;
+			if (first_lap)
+				element->flags = SBAL_FLAGS_FIRST_FRAG;
+			else
+				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
+			ctx->num_elements++;
+			element++;
+			data_len -= page_remainder;
+			ctx->offset += page_remainder;
+			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+			page_offset = 0;
+			element->addr = page + page_offset;
+		} else {
+			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
+						data_len, &hcsum);
+			element->length += data_len;
+			if (!first_lap)
+				element->flags = SBAL_FLAGS_LAST_FRAG;
+			ctx->num_elements++;
+			ctx->offset += data_len;
+			data_len = 0;
+		}
+		first_lap = 0;
+	}
+	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
+}
+
+/*
+ * Start the TCP checksum for an IPv4 segment: clear the check field,
+ * compute the pseudo-header sum and fold the TCP header into it.  The
+ * payload is folded in later by qeth_eddp_create_segment_data_tcp().
+ */
+static inline u32
+qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
+{
+	u32 phcsum; /* pseudo header checksum */
+
+	QETH_DBF_TEXT(trace, 5, "eddpckt4");
+	eddp->th.tcp.h.check = 0;
+	/* compute pseudo header checksum */
+	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
+				    eddp->thl + data_len, IPPROTO_TCP, 0);
+	/* compute checksum of tcp header */
+	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
+}
+
+/*
+ * Start the TCP checksum for an IPv6 segment: clear the check field
+ * and sum the pseudo-header (source/destination address and protocol).
+ * NOTE(review): unlike qeth_eddp_check_tcp4_hdr() this neither folds
+ * the TCP header itself nor the pseudo-header length field into the
+ * sum — confirm whether the IPv6 path compensates elsewhere.
+ */
+static inline u32
+qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
+{
+	u32 proto;
+	u32 phcsum; /* pseudo header checksum */
+
+	QETH_DBF_TEXT(trace, 5, "eddpckt6");
+	eddp->th.tcp.h.check = 0;
+	/* compute pseudo header checksum */
+	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
+			      sizeof(struct in6_addr), 0);
+	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
+			      sizeof(struct in6_addr), phcsum);
+	proto = htonl(IPPROTO_TCP);
+	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
+	return phcsum;
+}
+
+/*
+ * Allocate and initialize a qeth_eddp_data scratch structure with
+ * copies of the qeth, network and transport headers of the skb being
+ * segmented.  Returns NULL on allocation failure; caller kfree()s it.
+ */
+static inline struct qeth_eddp_data *
+qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
+{
+	struct qeth_eddp_data *eddp;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcrda");
+	eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
+	if (eddp){
+		memset(eddp, 0, sizeof(struct qeth_eddp_data));
+		eddp->nhl = nhl;
+		eddp->thl = thl;
+		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
+		memcpy(&eddp->nh, nh, nhl);
+		memcpy(&eddp->th, th, thl);
+		eddp->frag = -1; /* initially we're in skb->data */
+	}
+	return eddp;
+}
+
+/*
+ * Segment the TSO skb: for each MSS-sized (tso_size) chunk, refresh
+ * the qeth/IP/TCP headers (length fields, IPv4 header checksum, IP id,
+ * TCP sequence number, FIN/PSH only on the last segment), compute the
+ * checksum seed and emit headers plus payload into the context.
+ * NOTE(review): the skb->protocol comparisons use host-order ETH_P_IP;
+ * skb->protocol is network order — this relies on s390 being
+ * big-endian.
+ */
+static inline void
+__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
+			     struct qeth_eddp_data *eddp)
+{
+	struct tcphdr *tcph;
+	int data_len;
+	u32 hcsum;
+
+	QETH_DBF_TEXT(trace, 5, "eddpftcp");
+	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
+	tcph = eddp->skb->h.th;
+	while (eddp->skb_offset < eddp->skb->len) {
+		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
+			       (int)(eddp->skb->len - eddp->skb_offset));
+		/* prepare qdio hdr */
+		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
+			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
+						     eddp->nhl + eddp->thl -
+						     sizeof(struct qeth_hdr);
+#ifdef CONFIG_QETH_VLAN
+			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
+				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
+#endif /* CONFIG_QETH_VLAN */
+		} else
+			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
+						 eddp->thl;
+		/* prepare ip hdr */
+		if (eddp->skb->protocol == ETH_P_IP){
+			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
+						 eddp->thl;
+			eddp->nh.ip4.h.check = 0;
+			eddp->nh.ip4.h.check =
+				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
+						eddp->nh.ip4.h.ihl);
+		} else
+			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
+		/* prepare tcp hdr */
+		if (data_len == (eddp->skb->len - eddp->skb_offset)){
+			/* last segment -> set FIN and PSH flags */
+			eddp->th.tcp.h.fin = tcph->fin;
+			eddp->th.tcp.h.psh = tcph->psh;
+		}
+		if (eddp->skb->protocol == ETH_P_IP)
+			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
+		else
+			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
+		/* fill the next segment into the context */
+		qeth_eddp_create_segment_hdrs(ctx, eddp);
+		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
+		if (eddp->skb_offset >= eddp->skb->len)
+			break;
+		/* prepare headers for next round */
+		if (eddp->skb->protocol == ETH_P_IP)
+			eddp->nh.ip4.h.id++;
+		eddp->th.tcp.h.seq += data_len;
+	}
+}
+
+/*
+ * Set up a qeth_eddp_data scratch structure for the skb (copying the
+ * IP/TCP headers and, in layer2 mode, MAC header and VLAN tag), then
+ * run the actual segmentation via __qeth_eddp_fill_context_tcp().
+ * Returns 0 on success, -ENOMEM if the scratch allocation fails.
+ */
+static inline int
+qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
+			   struct sk_buff *skb, struct qeth_hdr *qhdr)
+{
+	struct qeth_eddp_data *eddp = NULL;
+
+	QETH_DBF_TEXT(trace, 5, "eddpficx");
+	/* create our segmentation headers and copy original headers */
+	if (skb->protocol == ETH_P_IP)
+		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
+				skb->nh.iph->ihl*4,
+				(u8 *)skb->h.th, skb->h.th->doff*4);
+	else
+		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
+				sizeof(struct ipv6hdr),
+				(u8 *)skb->h.th, skb->h.th->doff*4);
+
+	if (eddp == NULL) {
+		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
+		return -ENOMEM;
+	}
+	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
+		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
+#ifdef CONFIG_QETH_VLAN
+		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
+			eddp->vlan[0] = __constant_htons(skb->protocol);
+			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
+		}
+#endif /* CONFIG_QETH_VLAN */
+	}
+	/* the next flags will only be set on the last segment */
+	eddp->th.tcp.h.fin = 0;
+	eddp->th.tcp.h.psh = 0;
+	eddp->skb = skb;
+	/* begin segmentation and fill context */
+	__qeth_eddp_fill_context_tcp(ctx, eddp);
+	kfree(eddp);
+	return 0;
+}
+
+/*
+ * Estimate how many pages and scatter elements the context needs,
+ * given the MSS (tso_size) plus header length of each segment.  When
+ * several segments fit on one page each segment uses one element;
+ * otherwise each segment spans multiple pages/elements.  The +1 on
+ * tso_segs allows for a possible trailing partial segment.
+ */
+static inline void
+qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
+			 int hdr_len)
+{
+	int skbs_per_page;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcanp");
+	/* can we put multiple skbs in one page? */
+	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
+	if (skbs_per_page > 1){
+		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
+				 skbs_per_page + 1;
+		ctx->elements_per_skb = 1;
+	} else {
+		/* no -> how many elements per skb? */
+		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
+				     PAGE_SIZE) >> PAGE_SHIFT;
+		ctx->num_pages = ctx->elements_per_skb *
+				 (skb_shinfo(skb)->tso_segs + 1);
+	}
+	ctx->num_elements = ctx->elements_per_skb *
+			    (skb_shinfo(skb)->tso_segs + 1);
+}
+
+/*
+ * Allocate an eddp context sized for the given skb and per-segment
+ * header length: the context struct, the page-pointer array, the
+ * zeroed pages themselves, and the scatter-element array.  Returns
+ * NULL on any allocation failure (everything allocated so far is
+ * released) or when one segment would need more elements than a
+ * buffer can hold.
+ */
+static inline struct qeth_eddp_context *
+qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
+				 int hdr_len)
+{
+	struct qeth_eddp_context *ctx = NULL;
+	u8 *addr;
+	int i;
+
+	QETH_DBF_TEXT(trace, 5, "creddpcg");
+	/* create the context and allocate pages */
+	ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
+	if (ctx == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
+		return NULL;
+	}
+	memset(ctx, 0, sizeof(struct qeth_eddp_context));
+	ctx->type = QETH_LARGE_SEND_EDDP;
+	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
+	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){
+		QETH_DBF_TEXT(trace, 2, "ceddpcis");
+		kfree(ctx);
+		return NULL;
+	}
+	ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
+	if (ctx->pages == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
+		kfree(ctx);
+		return NULL;
+	}
+	memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
+	for (i = 0; i < ctx->num_pages; ++i){
+		addr = (u8 *)__get_free_page(GFP_ATOMIC);
+		if (addr == NULL){
+			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
+			/* trim num_pages so free_context only frees
+			 * the pages actually allocated */
+			ctx->num_pages = i;
+			qeth_eddp_free_context(ctx);
+			return NULL;
+		}
+		memset(addr, 0, PAGE_SIZE);
+		ctx->pages[i] = addr;
+	}
+	ctx->elements = kmalloc(ctx->num_elements *
+				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
+	if (ctx->elements == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
+		qeth_eddp_free_context(ctx);
+		return NULL;
+	}
+	memset(ctx->elements, 0,
+	       ctx->num_elements * sizeof(struct qeth_eddp_element));
+	/* reset num_elements; will be incremented again in fill_buffer to
+	 * reflect number of actually used elements */
+	ctx->num_elements = 0;
+	return ctx;
+}
+
+/*
+ * Build a complete eddp context for a TCP TSO skb (IPv4 or IPv6):
+ * allocate it sized for the per-segment header length, fill it with
+ * segmented data, and hand it back with refcnt 1.  Returns NULL for
+ * unsupported protocols or on any failure.
+ */
+static inline struct qeth_eddp_context *
+qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
+			     struct qeth_hdr *qhdr)
+{
+	struct qeth_eddp_context *ctx = NULL;
+
+	QETH_DBF_TEXT(trace, 5, "creddpct");
+	if (skb->protocol == ETH_P_IP)
+		ctx = qeth_eddp_create_context_generic(card, skb,
+			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
+			skb->h.th->doff*4);
+	else if (skb->protocol == ETH_P_IPV6)
+		ctx = qeth_eddp_create_context_generic(card, skb,
+			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
+			skb->h.th->doff*4);
+	else
+		QETH_DBF_TEXT(trace, 2, "cetcpinv");
+
+	if (ctx == NULL) {
+		QETH_DBF_TEXT(trace, 2, "creddpnl");
+		return NULL;
+	}
+	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)){
+		QETH_DBF_TEXT(trace, 2, "ceddptfe");
+		qeth_eddp_free_context(ctx);
+		return NULL;
+	}
+	atomic_set(&ctx->refcnt, 1);
+	return ctx;
+}
+
+/*
+ * Public entry point: build an eddp context for the skb, dispatching
+ * on the socket's transport protocol.  Only TCP is supported; NULL is
+ * returned otherwise.
+ * NOTE(review): dereferences skb->sk without a NULL check — confirm
+ * all callers only pass locally-generated (socket-owned) TSO skbs.
+ */
+struct qeth_eddp_context *
+qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
+			 struct qeth_hdr *qhdr)
+{
+	QETH_DBF_TEXT(trace, 5, "creddpc");
+	switch (skb->sk->sk_protocol){
+	case IPPROTO_TCP:
+		return qeth_eddp_create_context_tcp(card, skb, qhdr);
+	default:
+		QETH_DBF_TEXT(trace, 2, "eddpinvp");
+	}
+	return NULL;
+}
+
+
diff --git a/drivers/s390/net/qeth_eddp.h b/drivers/s390/net/qeth_eddp.h
new file mode 100644
index 000000000000..e1b51860bc57
--- /dev/null
+++ b/drivers/s390/net/qeth_eddp.h
@@ -0,0 +1,85 @@
+/*
+ * linux/drivers/s390/net/qeth_eddp.h ($Revision: 1.5 $)
+ *
+ * Header file for qeth enhanced device driver packing.
+ *
+ * Copyright 2004 IBM Corporation
+ *
+ * Author(s): Thomas Spatzier <tspat@de.ibm.com>
+ *
+ * $Revision: 1.5 $ $Date: 2005/03/24 09:04:18 $
+ *
+ */
+#ifndef __QETH_EDDP_H__
+#define __QETH_EDDP_H__
+
+/* One scatter-gather element (SBALE) describing a data area of the
+ * segmented packet inside the context pages. */
+struct qeth_eddp_element {
+	u32 flags;	/* SBAL_FLAGS_* fragment flags */
+	u32 length;	/* length of the area in bytes */
+	void *addr;	/* start of the area within ctx pages */
+};
+
+/* Holds the fully-segmented copy of one TSO skb, shared (refcounted)
+ * between the output buffers it is distributed over. */
+struct qeth_eddp_context {
+	atomic_t refcnt;
+	enum qeth_large_send_types type;
+	int num_pages;			    /* # of allocated pages */
+	u8 **pages;			    /* pointers to pages */
+	int offset;			    /* offset in ctx during creation */
+	int num_elements;		    /* # of required 'SBALEs' */
+	struct qeth_eddp_element *elements; /* array of 'SBALEs' */
+	int elements_per_skb;		    /* # of 'SBALEs' per skb */
+};
+
+/* Node linking an eddp context into an output buffer's ctx_list; each
+ * node holds one reference on the context. */
+struct qeth_eddp_context_reference {
+	struct list_head list;
+	struct qeth_eddp_context *ctx;
+};
+
+extern struct qeth_eddp_context *
+qeth_eddp_create_context(struct qeth_card *,struct sk_buff *,struct qeth_hdr *);
+
+extern void
+qeth_eddp_put_context(struct qeth_eddp_context *);
+
+extern int
+qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,struct qeth_eddp_context *,int);
+
+extern void
+qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
+
+extern int
+qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
+ struct qeth_eddp_context *);
+/*
+ * Scratch data used while fragmenting an IP packet: working copies of
+ * all headers of the skb being segmented, plus the current read
+ * position within the skb (linear area and page fragments).
+ */
+struct qeth_eddp_data {
+	struct qeth_hdr qh;	/* qeth header template */
+	struct ethhdr mac;	/* MAC header (layer2 mode only) */
+	u16 vlan[2];		/* VLAN proto + tag (if 802.1Q) */
+	union {
+		struct {
+			struct iphdr h;
+			u8 options[40];
+		} ip4;
+		struct {
+			struct ipv6hdr h;
+		} ip6;
+	} nh;			/* network header working copy */
+	u8 nhl;			/* network header length */
+	void *nh_in_ctx;	/* address of nh within the ctx */
+	union {
+		struct {
+			struct tcphdr h;
+			u8 options[40];
+		} tcp;
+	} th;			/* transport header working copy */
+	u8 thl;			/* transport header length */
+	void *th_in_ctx;	/* address of th within the ctx */
+	struct sk_buff *skb;	/* skb being segmented */
+	int skb_offset;		/* read offset into skb data */
+	int frag;		/* current fragment; -1 = linear area */
+	int frag_offset;	/* read offset within current fragment */
+} __attribute__ ((packed));
+
+#endif /* __QETH_EDDP_H__ */
diff --git a/drivers/s390/net/qeth_fs.h b/drivers/s390/net/qeth_fs.h
new file mode 100644
index 000000000000..5c9a51ce91b6
--- /dev/null
+++ b/drivers/s390/net/qeth_fs.h
@@ -0,0 +1,163 @@
+/*
+ * linux/drivers/s390/net/qeth_fs.h
+ *
+ * Linux on zSeries OSA Express and HiperSockets support.
+ *
+ * This header file contains definitions related to sysfs and procfs.
+ *
+ * Copyright 2000,2003 IBM Corporation
+ * Author(s): Thomas Spatzier <tspat@de.ibm.com>
+ *
+ */
+#ifndef __QETH_FS_H__
+#define __QETH_FS_H__
+
+#define VERSION_QETH_FS_H "$Revision: 1.9 $"
+
+extern const char *VERSION_QETH_PROC_C;
+extern const char *VERSION_QETH_SYS_C;
+
+#ifdef CONFIG_PROC_FS
+extern int
+qeth_create_procfs_entries(void);
+
+extern void
+qeth_remove_procfs_entries(void);
+#else
+/* No-op stub when procfs support is compiled out; always succeeds. */
+static inline int
+qeth_create_procfs_entries(void)
+{
+	return 0;
+}
+
+/* No-op stub when procfs support is compiled out. */
+static inline void
+qeth_remove_procfs_entries(void)
+{
+}
+#endif /* CONFIG_PROC_FS */
+
+extern int
+qeth_create_device_attributes(struct device *dev);
+
+extern void
+qeth_remove_device_attributes(struct device *dev);
+
+extern int
+qeth_create_driver_attributes(void);
+
+extern void
+qeth_remove_driver_attributes(void);
+
+/*
+ * utility functions used in qeth_proc.c and qeth_sys.c
+ */
+
+/* Checksumming mode of the card as a short string for sysfs/procfs. */
+static inline const char *
+qeth_get_checksum_str(struct qeth_card *card)
+{
+	switch (card->options.checksum_type) {
+	case SW_CHECKSUMMING:
+		return "sw";
+	case HW_CHECKSUMMING:
+		return "hw";
+	default:
+		return "no";
+	}
+}
+
+/* Priority-queueing mode as a short string written into the
+ * caller-supplied buf; returns buf for convenient chaining. */
+static inline const char *
+qeth_get_prioq_str(struct qeth_card *card, char *buf)
+{
+	if (card->qdio.do_prio_queueing == QETH_NO_PRIO_QUEUEING)
+		sprintf(buf, "always_q_%i", card->qdio.default_out_queue);
+	else
+		strcpy(buf, (card->qdio.do_prio_queueing ==
+					QETH_PRIO_Q_ING_PREC)?
+				"by_prec." : "by_ToS");
+	return buf;
+}
+
+/* Inbound buffer size as a short string for sysfs/procfs output;
+ * any unrecognized size reports as "64k". */
+static inline const char *
+qeth_get_bufsize_str(struct qeth_card *card)
+{
+	switch (card->qdio.in_buf_size) {
+	case 16384:
+		return "16k";
+	case 24576:
+		return "24k";
+	case 32768:
+		return "32k";
+	case 40960:
+		return "40k";
+	default:
+		return "64k";
+	}
+}
+
+/* Long card-type name (with leading space) for log messages,
+ * distinguishing guest-LAN from real hardware. */
+static inline const char *
+qeth_get_cardname(struct qeth_card *card)
+{
+	if (card->info.guestlan) {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSAE:
+			return " Guest LAN QDIO";
+		case QETH_CARD_TYPE_IQD:
+			return " Guest LAN Hiper";
+		default:
+			return " unknown";
+		}
+	} else {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSAE:
+			return " OSD Express";
+		case QETH_CARD_TYPE_IQD:
+			return " HiperSockets";
+		default:
+			return " unknown";
+		}
+	}
+	return " n/a"; /* not reached: both branches above return */
+}
+
+/* Short card-type name for sysfs/procfs; for real OSA cards the link
+ * type is encoded too.  Max length returned: 14 characters. */
+static inline const char *
+qeth_get_cardname_short(struct qeth_card *card)
+{
+	if (card->info.guestlan){
+		switch (card->info.type){
+		case QETH_CARD_TYPE_OSAE:
+			return "GuestLAN QDIO";
+		case QETH_CARD_TYPE_IQD:
+			return "GuestLAN Hiper";
+		default:
+			return "unknown";
+		}
+	} else {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSAE:
+			switch (card->info.link_type) {
+			case QETH_LINK_TYPE_FAST_ETH:
+				return "OSD_100";
+			case QETH_LINK_TYPE_HSTR:
+				return "HSTR";
+			case QETH_LINK_TYPE_GBIT_ETH:
+				return "OSD_1000";
+			case QETH_LINK_TYPE_10GBIT_ETH:
+				return "OSD_10GIG";
+			case QETH_LINK_TYPE_LANE_ETH100:
+				return "OSD_FE_LANE";
+			case QETH_LINK_TYPE_LANE_TR:
+				return "OSD_TR_LANE";
+			case QETH_LINK_TYPE_LANE_ETH1000:
+				return "OSD_GbE_LANE";
+			case QETH_LINK_TYPE_LANE:
+				return "OSD_ATM_LANE";
+			default:
+				return "OSD_Express";
+			}
+		case QETH_CARD_TYPE_IQD:
+			return "HiperSockets";
+		default:
+			return "unknown";
+		}
+	}
+	return "n/a"; /* not reached: both branches above return */
+}
+
+#endif /* __QETH_FS_H__ */
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
new file mode 100644
index 000000000000..607b92542df6
--- /dev/null
+++ b/drivers/s390/net/qeth_main.c
@@ -0,0 +1,8236 @@
+/*
+ *
+ * linux/drivers/s390/net/qeth_main.c ($Revision: 1.206 $)
+ *
+ * Linux on zSeries OSA Express and HiperSockets support
+ *
+ * Copyright 2000,2003 IBM Corporation
+ *
+ * Author(s): Original Code written by
+ * Utz Bacher (utz.bacher@de.ibm.com)
+ * Rewritten by
+ * Frank Pavlic (pavlic@de.ibm.com) and
+ * Thomas Spatzier <tspat@de.ibm.com>
+ *
+ * $Revision: 1.206 $ $Date: 2005/03/24 09:04:18 $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/***
+ * eye catcher; just for debugging purposes
+ */
+void volatile /* NOTE(review): 'volatile' qualifying a void return type has no effect in C — presumably meant to keep the symbol/call from being optimized away; confirm intent */
+qeth_eyecatcher(void)
+{
+ return; /* intentionally empty: the symbol itself is the marker */
+}
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/ip.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/igmp.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/mii.h>
+#include <linux/rcupdate.h>
+#include <linux/ethtool.h>
+
+#include <net/arp.h>
+#include <net/ip.h>
+#include <net/route.h>
+
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/qeth.h>
+#include <asm/timex.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+#include "qeth.h"
+#include "qeth_mpc.h"
+#include "qeth_fs.h"
+#include "qeth_eddp.h"
+#include "qeth_tso.h"
+
+#define VERSION_QETH_C "$Revision: 1.206 $"
+static const char *version = "qeth S/390 OSA-Express driver";
+
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *qeth_dbf_setup = NULL;
+static debug_info_t *qeth_dbf_data = NULL;
+static debug_info_t *qeth_dbf_misc = NULL;
+static debug_info_t *qeth_dbf_control = NULL;
+debug_info_t *qeth_dbf_trace = NULL; /* not static — presumably referenced from the other qeth source files; confirm */
+static debug_info_t *qeth_dbf_sense = NULL;
+static debug_info_t *qeth_dbf_qerr = NULL;
+
+DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf); /* per-cpu scratch buffer, presumably for formatted dbf text entries */
+
+/**
+ * some more definitions and declarations
+ */
+static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY; /* recognized dev type/model tuples; consumed by qeth_determine_card_type() */
+
+/* list of our cards */
+struct qeth_card_list_struct qeth_card_list; /* guarded by qeth_card_list.rwlock */
+/*process list want to be notified*/
+spinlock_t qeth_notify_lock; /* protects qeth_notify_list */
+struct list_head qeth_notify_list;
+
+static void qeth_send_control_data_cb(struct qeth_channel *,
+ struct qeth_cmd_buffer *);
+
+/**
+ * here we go with function implementation
+ */
+static void
+qeth_init_qdio_info(struct qeth_card *card);
+
+static int
+qeth_init_qdio_queues(struct qeth_card *card);
+
+static int
+qeth_alloc_qdio_buffers(struct qeth_card *card);
+
+static void
+qeth_free_qdio_buffers(struct qeth_card *);
+
+static void
+qeth_clear_qdio_buffers(struct qeth_card *);
+
+static void
+qeth_clear_ip_list(struct qeth_card *, int, int);
+
+static void
+qeth_clear_ipacmd_list(struct qeth_card *);
+
+static int
+qeth_qdio_clear_card(struct qeth_card *, int);
+
+static void
+qeth_clear_working_pool_list(struct qeth_card *);
+
+static void
+qeth_clear_cmd_buffers(struct qeth_channel *);
+
+static int
+qeth_stop(struct net_device *);
+
+static void
+qeth_clear_ipato_list(struct qeth_card *);
+
+static int
+qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+
+static void
+qeth_irq_tasklet(unsigned long);
+
+static int
+qeth_set_online(struct ccwgroup_device *);
+
+static struct qeth_ipaddr *
+qeth_get_addr_buffer(enum qeth_prot_versions);
+
+static void
+qeth_set_multicast_list(struct net_device *);
+
+/* Deliver the registered signal to every process on the notify list. */
+static void
+qeth_notify_processes(void)
+{
+ struct qeth_notify_list_struct *entry;
+
+ QETH_DBF_TEXT(trace,3,"procnoti");
+ spin_lock(&qeth_notify_lock);
+ list_for_each_entry(entry, &qeth_notify_list, list)
+ send_sig(entry->signum, entry->task, 1);
+ spin_unlock(&qeth_notify_lock);
+}
+/*
+ * Remove task @p from the notifier list, if it is registered.
+ * Always returns 0.
+ */
+int
+qeth_notifier_unregister(struct task_struct *p)
+{
+ struct qeth_notify_list_struct *entry, *next;
+
+ QETH_DBF_TEXT(trace, 2, "notunreg");
+ spin_lock(&qeth_notify_lock);
+ list_for_each_entry_safe(entry, next, &qeth_notify_list, list) {
+ if (entry->task != p)
+ continue;
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ spin_unlock(&qeth_notify_lock);
+ return 0;
+}
+/**
+ * qeth_notifier_register - arrange for signal @signum to be sent to @p
+ *
+ * If @p is already registered, only its signal number is updated.
+ * Returns 0 on success, -ENOMEM if no list entry could be allocated.
+ */
+int
+qeth_notifier_register(struct task_struct *p, int signum)
+{
+ struct qeth_notify_list_struct *n_entry, *tmp;
+
+ /* Allocate up front so the duplicate scan and the insert happen
+ * under a single lock hold. The old code dropped the lock between
+ * the scan and the add, so two concurrent registrations of the
+ * same task could both miss the existing entry and insert
+ * duplicates. (Also drops the needless cast of kmalloc.) */
+ n_entry = kmalloc(sizeof(struct qeth_notify_list_struct), GFP_KERNEL);
+ if (!n_entry)
+ return -ENOMEM;
+ n_entry->task = p;
+ n_entry->signum = signum;
+
+ spin_lock(&qeth_notify_lock);
+ list_for_each_entry(tmp, &qeth_notify_list, list) {
+ if (tmp->task == p) {
+ /* already registered: just update the signal number */
+ tmp->signum = signum;
+ spin_unlock(&qeth_notify_lock);
+ kfree(n_entry);
+ return 0;
+ }
+ }
+ list_add(&n_entry->list, &qeth_notify_list);
+ spin_unlock(&qeth_notify_lock);
+ return 0;
+}
+
+
+/**
+ * free channel command buffers
+ */
+/* Free the command buffers allocated for @channel by qeth_setup_channel(). */
+static void
+qeth_clean_channel(struct qeth_channel *channel)
+{
+ int i;
+
+ QETH_DBF_TEXT(setup, 2, "freech");
+ for (i = 0; i < QETH_CMD_BUFFER_NO; i++)
+ kfree(channel->iob[i].data);
+}
+
+/**
+ * free card
+ */
+static void
+qeth_free_card(struct qeth_card *card) /* release every resource owned by @card, then the card itself */
+{
+
+ QETH_DBF_TEXT(setup, 2, "freecrd");
+ QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
+ qeth_clean_channel(&card->read); /* command buffers of both control channels */
+ qeth_clean_channel(&card->write);
+ if (card->dev)
+ free_netdev(card->dev); /* NULL unless a net_device was allocated */
+ qeth_clear_ip_list(card, 0, 0);
+ qeth_clear_ipato_list(card);
+ kfree(card->ip_tbd_list);
+ qeth_free_qdio_buffers(card);
+ kfree(card);
+}
+
+/**
+ * alloc memory for command buffer per channel
+ */
+static int
+qeth_setup_channel(struct qeth_channel *channel) /* allocate command buffers and init channel state; 0 or -ENOMEM */
+{
+ int cnt;
+
+ QETH_DBF_TEXT(setup, 2, "setupch");
+ for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
+ channel->iob[cnt].data = (char *)
+ kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); /* GFP_DMA: buffer address is later put into a ccw (see qeth_setup_ccw) */
+ if (channel->iob[cnt].data == NULL)
+ break;
+ channel->iob[cnt].state = BUF_STATE_FREE;
+ channel->iob[cnt].channel = channel;
+ channel->iob[cnt].callback = qeth_send_control_data_cb; /* default completion callback */
+ channel->iob[cnt].rc = 0;
+ }
+ if (cnt < QETH_CMD_BUFFER_NO) { /* partial allocation: roll back what we already got */
+ while (cnt-- > 0)
+ kfree(channel->iob[cnt].data);
+ return -ENOMEM;
+ }
+ channel->buf_no = 0;
+ channel->io_buf_no = 0;
+ atomic_set(&channel->irq_pending, 0);
+ spin_lock_init(&channel->iob_lock);
+
+ init_waitqueue_head(&channel->wait_q);
+ channel->irq_tasklet.data = (unsigned long) channel;
+ channel->irq_tasklet.func = qeth_irq_tasklet; /* scheduled from qeth_irq() to run buffer callbacks */
+ return 0;
+}
+
+/**
+ * alloc memory for card structure
+ */
+static struct qeth_card *
+qeth_alloc_card(void) /* allocate a zeroed qeth_card incl. read/write channel buffers; NULL on failure */
+{
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(setup, 2, "alloccrd");
+ card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
+ GFP_DMA|GFP_KERNEL);
+ if (!card)
+ return NULL;
+ QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
+ memset(card, 0, sizeof(struct qeth_card));
+ if (qeth_setup_channel(&card->read)) {
+ kfree(card);
+ return NULL;
+ }
+ if (qeth_setup_channel(&card->write)) {
+ qeth_clean_channel(&card->read); /* undo the first channel if the second fails */
+ kfree(card);
+ return NULL;
+ }
+ return card;
+}
+
+static long
+__qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb) /* 0 if @irb is a real irb; otherwise log and return its PTR_ERR code */
+{
+ if (!IS_ERR(irb))
+ return 0;
+
+ switch (PTR_ERR(irb)) {
+ case -EIO:
+ PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
+ QETH_DBF_TEXT(trace, 2, "ckirberr");
+ QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
+ break;
+ case -ETIMEDOUT:
+ PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
+ QETH_DBF_TEXT(trace, 2, "ckirberr");
+ QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
+ break;
+ default:
+ PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
+ cdev->dev.bus_id);
+ QETH_DBF_TEXT(trace, 2, "ckirberr");
+ QETH_DBF_TEXT(trace, 2, " rc???");
+ }
+ return PTR_ERR(irb);
+}
+
+static int
+qeth_get_problem(struct ccw_device *cdev, struct irb *irb) /* classify an irb; non-zero tells the caller (qeth_irq) to schedule recovery */
+{
+ int dstat,cstat;
+ char *sense;
+
+ sense = (char *) irb->ecw; /* extended control words hold the sense data */
+ cstat = irb->scsw.cstat;
+ dstat = irb->scsw.dstat;
+
+ if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
+ SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { /* any channel check is treated as serious */
+ QETH_DBF_TEXT(trace,2, "CGENCHK");
+ PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
+ cdev->dev.bus_id, dstat, cstat);
+ HEXDUMP16(WARN, "irb: ", irb);
+ HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
+ return 1;
+ }
+
+ if (dstat & DEV_STAT_UNIT_CHECK) { /* inspect the sense bytes to decide severity */
+ if (sense[SENSE_RESETTING_EVENT_BYTE] &
+ SENSE_RESETTING_EVENT_FLAG) {
+ QETH_DBF_TEXT(trace,2,"REVIND");
+ return 1;
+ }
+ if (sense[SENSE_COMMAND_REJECT_BYTE] &
+ SENSE_COMMAND_REJECT_FLAG) {
+ QETH_DBF_TEXT(trace,2,"CMDREJi");
+ return 0; /* command reject: logged but not fatal */
+ }
+ if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
+ QETH_DBF_TEXT(trace,2,"AFFE");
+ return 1;
+ }
+ if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
+ QETH_DBF_TEXT(trace,2,"ZEROSEN");
+ return 0; /* all-zero sense: nothing actionable */
+ }
+ QETH_DBF_TEXT(trace,2,"DGENCHK");
+ return 1; /* any other unit check is serious */
+ }
+ return 0;
+}
+static int qeth_issue_next_read(struct qeth_card *);
+
+/**
+ * interrupt handler
+ */
+static void
+qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) /* ccw interrupt handler shared by the read, write and data channels */
+{
+ int rc;
+ int cstat,dstat;
+ struct qeth_cmd_buffer *buffer;
+ struct qeth_channel *channel;
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(trace,5,"irq");
+
+ if (__qeth_check_irb_error(cdev, irb)) /* irb may encode an error instead of data */
+ return;
+ cstat = irb->scsw.cstat;
+ dstat = irb->scsw.dstat;
+
+ card = CARD_FROM_CDEV(cdev);
+ if (!card)
+ return;
+
+ if (card->read.ccwdev == cdev){ /* map the interrupting device to its channel */
+ channel = &card->read;
+ QETH_DBF_TEXT(trace,5,"read");
+ } else if (card->write.ccwdev == cdev) {
+ channel = &card->write;
+ QETH_DBF_TEXT(trace,5,"write");
+ } else {
+ channel = &card->data;
+ QETH_DBF_TEXT(trace,5,"data");
+ }
+ atomic_set(&channel->irq_pending, 0); /* the outstanding I/O on this channel has completed */
+
+ if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
+ channel->state = CH_STATE_STOPPED;
+
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
+ channel->state = CH_STATE_HALTED;
+
+ /*let's wake up immediately on data channel*/
+ if ((channel == &card->data) && (intparm != 0))
+ goto out;
+
+ if (intparm == QETH_CLEAR_CHANNEL_PARM) {
+ QETH_DBF_TEXT(trace, 6, "clrchpar");
+ /* we don't have to handle this further */
+ intparm = 0;
+ }
+ if (intparm == QETH_HALT_CHANNEL_PARM) {
+ QETH_DBF_TEXT(trace, 6, "hltchpar");
+ /* we don't have to handle this further */
+ intparm = 0;
+ }
+ if ((dstat & DEV_STAT_UNIT_EXCEP) ||
+ (dstat & DEV_STAT_UNIT_CHECK) ||
+ (cstat)) {
+ if (irb->esw.esw0.erw.cons) {
+ /* TODO: we should make this s390dbf */
+ PRINT_WARN("sense data available on channel %s.\n",
+ CHANNEL_ID(channel));
+ PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
+ HEXDUMP16(WARN,"irb: ",irb);
+ HEXDUMP16(WARN,"sense data: ",irb->ecw);
+ }
+ rc = qeth_get_problem(cdev,irb);
+ if (rc) { /* serious problem: trigger recovery, skip normal processing */
+ qeth_schedule_recovery(card);
+ goto out;
+ }
+ }
+
+ if (intparm) { /* a non-special intparm is the address of the completed cmd buffer */
+ buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+ buffer->state = BUF_STATE_PROCESSED;
+ }
+ if (channel == &card->data) /* no cmd-buffer tasklet work for the data channel */
+ return;
+
+ if (channel == &card->read &&
+ channel->state == CH_STATE_UP)
+ qeth_issue_next_read(card); /* keep a read request outstanding */
+
+ tasklet_schedule(&channel->irq_tasklet); /* run buffer callbacks outside irq context */
+ return;
+out:
+ wake_up(&card->wait_q);
+}
+
+/**
+ * tasklet function scheduled from irq handler
+ */
+static void
+qeth_irq_tasklet(unsigned long data) /* run the callbacks of all buffers marked processed by qeth_irq(); @data is the channel */
+{
+ struct qeth_card *card;
+ struct qeth_channel *channel;
+ struct qeth_cmd_buffer *iob;
+ __u8 index;
+
+ QETH_DBF_TEXT(trace,5,"irqtlet");
+ channel = (struct qeth_channel *) data;
+ iob = channel->iob;
+ index = channel->buf_no; /* resume the ring walk where we left off */
+ card = CARD_FROM_CDEV(channel->ccwdev);
+ while (iob[index].state == BUF_STATE_PROCESSED) {
+ if (iob[index].callback !=NULL) {
+ iob[index].callback(channel,iob + index);
+ }
+ index = (index + 1) % QETH_CMD_BUFFER_NO; /* circular buffer ring */
+ }
+ channel->buf_no = index;
+ wake_up(&card->wait_q); /* waiters may be blocked on buffer completion */
+}
+
+static int qeth_stop_card(struct qeth_card *);
+
+static int
+qeth_set_offline(struct ccwgroup_device *cgdev) /* stop the card and take all three subchannels offline */
+{
+ struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
+ int rc = 0;
+ enum qeth_card_states recover_flag;
+
+ QETH_DBF_TEXT(setup, 3, "setoffl");
+ QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
+
+ recover_flag = card->state; /* remember state before the stop */
+ if (qeth_stop_card(card) == -ERESTARTSYS){
+ PRINT_WARN("Stopping card %s interrupted by user!\n",
+ CARD_BUS_ID(card));
+ return -ERESTARTSYS;
+ }
+ if ((rc = ccw_device_set_offline(CARD_DDEV(card))) ||
+ (rc = ccw_device_set_offline(CARD_WDEV(card))) ||
+ (rc = ccw_device_set_offline(CARD_RDEV(card)))) { /* data, write, read; short-circuit keeps first failing rc */
+ QETH_DBF_TEXT_(setup, 2, "1err%d", rc); /* failure is only logged, not propagated */
+ }
+ if (recover_flag == CARD_STATE_UP)
+ card->state = CARD_STATE_RECOVER; /* was up: mark as recoverable instead of plain down */
+ qeth_notify_processes();
+ return 0;
+}
+
+static int
+qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
+
+
+static void
+qeth_remove_device(struct ccwgroup_device *cgdev) /* ccwgroup remove callback: take offline, unlink and free the card */
+{
+ struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
+ unsigned long flags;
+
+ QETH_DBF_TEXT(setup, 3, "rmdev");
+ QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
+
+ if (!card)
+ return;
+
+ if (qeth_wait_for_threads(card, 0xffffffff)) /* wait for every card thread to finish */
+ return;
+
+ if (cgdev->state == CCWGROUP_ONLINE){
+ card->use_hard_stop = 1;
+ qeth_set_offline(cgdev);
+ }
+ /* remove form our internal list */
+ write_lock_irqsave(&qeth_card_list.rwlock, flags);
+ list_del(&card->list);
+ write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
+ if (card->dev)
+ unregister_netdev(card->dev);
+ qeth_remove_device_attributes(&cgdev->dev);
+ qeth_free_card(card);
+ cgdev->dev.driver_data = NULL;
+ put_device(&cgdev->dev); /* drop the reference taken in qeth_probe_device() */
+}
+
+static int
+qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
+static int
+qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
+
+/**
+ * Add/remove address to/from card's ip list, i.e. try to add or remove
+ * reference to/from an IP address that is already registered on the card.
+ * Returns:
+ * 0 address was on card and its reference count has been adjusted,
+ * but is still > 0, so nothing has to be done
+ * also returns 0 if card was not on card and the todo was to delete
+ * the address -> there is also nothing to be done
+ * 1 address was not on card and the todo is to add it to the card's ip
+ * list
+ * -1 address was on card and its reference count has been decremented
+ * to <= 0 by the todo -> address must be removed from card
+ */
+static int
+__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
+ struct qeth_ipaddr **__addr) /* see the contract in the comment block above; *__addr is set only for the -1 case */
+{
+ struct qeth_ipaddr *addr;
+ int found = 0;
+
+ list_for_each_entry(addr, &card->ip_list, entry) {
+ if ((addr->proto == QETH_PROT_IPV4) &&
+ (todo->proto == QETH_PROT_IPV4) &&
+ (addr->type == todo->type) &&
+ (addr->u.a4.addr == todo->u.a4.addr) &&
+ (addr->u.a4.mask == todo->u.a4.mask) ){ /* IPv4 match: type, address and mask */
+ found = 1;
+ break;
+ }
+ if ((addr->proto == QETH_PROT_IPV6) &&
+ (todo->proto == QETH_PROT_IPV6) &&
+ (addr->type == todo->type) &&
+ (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
+ (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
+ sizeof(struct in6_addr)) == 0)) { /* IPv6 match: type, prefix length and full address */
+ found = 1;
+ break;
+ }
+ }
+ if (found){
+ addr->users += todo->users; /* todo->users may be negative for deletes */
+ if (addr->users <= 0){
+ *__addr = addr; /* caller must deregister and remove this entry */
+ return -1;
+ } else {
+ /* for VIPA and RXIP limit refcount to 1 */
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ addr->users = 1;
+ return 0;
+ }
+ }
+ if (todo->users > 0){
+ /* for VIPA and RXIP limit refcount to 1 */
+ if (todo->type != QETH_IP_TYPE_NORMAL)
+ todo->users = 1;
+ return 1; /* not on card yet: caller should add it */
+ } else
+ return 0; /* delete of an address that was never on card: nothing to do */
+}
+
+static inline int
+__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
+ int same_type) /* 1 if @addr is on @list; @same_type selects matching vs. differing entry type */
+{
+ struct qeth_ipaddr *tmp;
+
+ list_for_each_entry(tmp, list, entry) {
+ if ((tmp->proto == QETH_PROT_IPV4) &&
+ (addr->proto == QETH_PROT_IPV4) &&
+ ((same_type && (tmp->type == addr->type)) ||
+ (!same_type && (tmp->type != addr->type)) ) &&
+ (tmp->u.a4.addr == addr->u.a4.addr) ){ /* note: IPv4 compare ignores the mask here */
+ return 1;
+ }
+ if ((tmp->proto == QETH_PROT_IPV6) &&
+ (addr->proto == QETH_PROT_IPV6) &&
+ ((same_type && (tmp->type == addr->type)) ||
+ (!same_type && (tmp->type != addr->type)) ) &&
+ (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
+ sizeof(struct in6_addr)) == 0) ) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Add IP to be added to todo list. If there is already an "add todo"
+ * in this list we just incremenent the reference count.
+ * Returns 0 if we just incremented reference count.
+ */
+static int
+__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add) /* queue add/delete todo for @addr; caller holds ip_lock; 1 = @addr queued (ownership taken), 0 = merged (caller keeps @addr) */
+{
+ struct qeth_ipaddr *tmp, *t;
+ int found = 0;
+
+ list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
+ if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
+ (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
+ return 0; /* only one del-all-multicast todo is ever needed */
+ if ((tmp->proto == QETH_PROT_IPV4) &&
+ (addr->proto == QETH_PROT_IPV4) &&
+ (tmp->type == addr->type) &&
+ (tmp->is_multicast == addr->is_multicast) &&
+ (tmp->u.a4.addr == addr->u.a4.addr) &&
+ (tmp->u.a4.mask == addr->u.a4.mask) ){
+ found = 1;
+ break;
+ }
+ if ((tmp->proto == QETH_PROT_IPV6) &&
+ (addr->proto == QETH_PROT_IPV6) &&
+ (tmp->type == addr->type) &&
+ (tmp->is_multicast == addr->is_multicast) &&
+ (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
+ (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
+ sizeof(struct in6_addr)) == 0) ){
+ found = 1;
+ break;
+ }
+ }
+ if (found){
+ if (addr->users != 0)
+ tmp->users += addr->users; /* merge refcount into the existing todo */
+ else
+ tmp->users += add? 1:-1;
+ if (tmp->users == 0){ /* add and delete cancelled each other out */
+ list_del(&tmp->entry);
+ kfree(tmp);
+ }
+ return 0;
+ } else {
+ if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
+ list_add(&addr->entry, card->ip_tbd_list); /* head: handled before individual entries */
+ else {
+ if (addr->users == 0)
+ addr->users += add? 1:-1;
+ if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
+ qeth_is_addr_covered_by_ipato(card, addr)){
+ QETH_DBF_TEXT(trace, 2, "tkovaddr");
+ addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
+ list_add_tail(&addr->entry, card->ip_tbd_list);
+ }
+ return 1;
+ }
+}
+
+/**
+ * Remove IP address from list
+ */
+/*
+ * Queue a "delete" todo for @addr on the card's pending-IP list.
+ * Returns the result of __qeth_insert_ip_todo(): 1 if a new todo was
+ * queued (ownership of @addr passed on), 0 if it was merged away.
+ */
+static int
+qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+{
+ unsigned long lockflags;
+ int ret;
+
+ QETH_DBF_TEXT(trace,4,"delip");
+ if (addr->proto != QETH_PROT_IPV4) {
+ QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8);
+ QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8);
+ } else {
+ QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
+ }
+ spin_lock_irqsave(&card->ip_lock, lockflags);
+ ret = __qeth_insert_ip_todo(card, addr, 0);
+ spin_unlock_irqrestore(&card->ip_lock, lockflags);
+ return ret;
+}
+
+/*
+ * Queue an "add" todo for @addr on the card's pending-IP list.
+ * Returns the result of __qeth_insert_ip_todo(): 1 if a new todo was
+ * queued (ownership of @addr passed on), 0 if it was merged away.
+ */
+static int
+qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+{
+ unsigned long lockflags;
+ int ret;
+
+ QETH_DBF_TEXT(trace,4,"addip");
+ if (addr->proto != QETH_PROT_IPV4) {
+ QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8);
+ QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8);
+ } else {
+ QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
+ }
+ spin_lock_irqsave(&card->ip_lock, lockflags);
+ ret = __qeth_insert_ip_todo(card, addr, 1);
+ spin_unlock_irqrestore(&card->ip_lock, lockflags);
+ return ret;
+}
+
+static inline void
+__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) /* drop all multicast entries; caller holds ip_lock, released around the deregister call */
+{
+ struct qeth_ipaddr *addr, *tmp;
+ int rc;
+
+ list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
+ if (addr->is_multicast) {
+ spin_unlock_irqrestore(&card->ip_lock, *flags); /* deregister may sleep/relock; list walk stays safe via _safe iterator */
+ rc = qeth_deregister_addr_entry(card, addr);
+ spin_lock_irqsave(&card->ip_lock, *flags);
+ if (!rc) {
+ list_del(&addr->entry);
+ kfree(addr);
+ }
+ }
+ }
+}
+
+static void
+qeth_set_ip_addr_list(struct qeth_card *card) /* apply all queued ip todos to the card: register adds, deregister removes */
+{
+ struct list_head *tbd_list;
+ struct qeth_ipaddr *todo, *addr;
+ unsigned long flags;
+ int rc;
+
+ QETH_DBF_TEXT(trace, 2, "sdiplist");
+ QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
+
+ spin_lock_irqsave(&card->ip_lock, flags);
+ tbd_list = card->ip_tbd_list; /* detach current todo list, start a fresh one */
+ card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); /* GFP_ATOMIC: ip_lock is held */
+ if (!card->ip_tbd_list) {
+ QETH_DBF_TEXT(trace, 0, "silnomem");
+ card->ip_tbd_list = tbd_list; /* restore, try again later */
+ spin_unlock_irqrestore(&card->ip_lock, flags);
+ return;
+ } else
+ INIT_LIST_HEAD(card->ip_tbd_list);
+
+ while (!list_empty(tbd_list)){
+ todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
+ list_del(&todo->entry);
+ if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
+ __qeth_delete_all_mc(card, &flags);
+ kfree(todo);
+ continue;
+ }
+ rc = __qeth_ref_ip_on_card(card, todo, &addr);
+ if (rc == 0) {
+ /* nothing to be done; only adjusted refcount */
+ kfree(todo);
+ } else if (rc == 1) {
+ /* new entry to be added to on-card list */
+ spin_unlock_irqrestore(&card->ip_lock, flags); /* register may sleep */
+ rc = qeth_register_addr_entry(card, todo);
+ spin_lock_irqsave(&card->ip_lock, flags);
+ if (!rc)
+ list_add_tail(&todo->entry, &card->ip_list);
+ else
+ kfree(todo);
+ } else if (rc == -1) {
+ /* on-card entry to be removed */
+ list_del_init(&addr->entry);
+ spin_unlock_irqrestore(&card->ip_lock, flags);
+ rc = qeth_deregister_addr_entry(card, addr);
+ spin_lock_irqsave(&card->ip_lock, flags);
+ if (!rc)
+ kfree(addr);
+ else
+ list_add_tail(&addr->entry, &card->ip_list); /* deregister failed: keep entry on card */
+ kfree(todo);
+ }
+ }
+ spin_unlock_irqrestore(&card->ip_lock, flags);
+ kfree(tbd_list);
+}
+
+static void qeth_delete_mc_addresses(struct qeth_card *);
+static void qeth_add_multicast_ipv4(struct qeth_card *);
+#ifdef CONFIG_QETH_IPV6
+static void qeth_add_multicast_ipv6(struct qeth_card *);
+#endif
+
+static inline int
+qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) /* request @thread to be started; -EPERM if not allowed or already requested */
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ if ( !(card->thread_allowed_mask & thread) ||
+ (card->thread_start_mask & thread) ) {
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return -EPERM;
+ }
+ card->thread_start_mask |= thread;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return 0;
+}
+
+static void
+qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread) /* withdraw a start request and wake anyone waiting on the masks */
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ card->thread_start_mask &= ~thread;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ wake_up(&card->wait_q);
+}
+
+static void
+qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) /* mark @thread finished and wake waiters (e.g. qeth_wait_for_threads) */
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ card->thread_running_mask &= ~thread;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ wake_up(&card->wait_q);
+}
+
+static inline int
+__qeth_do_run_thread(struct qeth_card *card, unsigned long thread) /* 1 = caller now owns @thread, 0 = not requested, -EPERM = requested but blocked */
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ if (card->thread_start_mask & thread){
+ if ((card->thread_allowed_mask & thread) &&
+ !(card->thread_running_mask & thread)){
+ rc = 1;
+ card->thread_start_mask &= ~thread; /* atomically claim: start -> running */
+ card->thread_running_mask |= thread;
+ } else
+ rc = -EPERM;
+ }
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return rc;
+}
+
+static int
+qeth_do_run_thread(struct qeth_card *card, unsigned long thread) /* block until @thread may run (1) or the request is gone (0) */
+{
+ int rc = 0;
+
+ wait_event(card->wait_q,
+ (rc = __qeth_do_run_thread(card, thread)) >= 0); /* -EPERM keeps us waiting */
+ return rc;
+}
+
+static int
+qeth_register_ip_addresses(void *ptr) /* kernel-thread body: flush the pending ip todo list to the card */
+{
+ struct qeth_card *card;
+
+ card = (struct qeth_card *) ptr;
+ daemonize("qeth_reg_ip");
+ QETH_DBF_TEXT(trace,4,"regipth1");
+ if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD)) /* 0: request withdrawn, nothing to do */
+ return 0;
+ QETH_DBF_TEXT(trace,4,"regipth2");
+ qeth_set_ip_addr_list(card);
+ qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
+ return 0;
+}
+
+static int
+qeth_recover(void *ptr) /* kernel-thread body: recover a card by cycling it offline/online */
+{
+ struct qeth_card *card;
+ int rc = 0;
+
+ card = (struct qeth_card *) ptr;
+ daemonize("qeth_recover");
+ QETH_DBF_TEXT(trace,2,"recover1");
+ QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
+ if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+ return 0;
+ QETH_DBF_TEXT(trace,2,"recover2");
+ PRINT_WARN("Recovery of device %s started ...\n",
+ CARD_BUS_ID(card));
+ card->use_hard_stop = 1; /* force a hard stop during the offline phase */
+ qeth_set_offline(card->gdev);
+ rc = qeth_set_online(card->gdev);
+ if (!rc)
+ PRINT_INFO("Device %s successfully recovered!\n",
+ CARD_BUS_ID(card));
+ else
+ PRINT_INFO("Device %s could not be recovered!\n",
+ CARD_BUS_ID(card));
+ /* don't run another scheduled recovery */
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+ return 0;
+}
+
+void
+qeth_schedule_recovery(struct qeth_card *card) /* request the recovery thread; no-op if not allowed or already pending */
+{
+ QETH_DBF_TEXT(trace,2,"startrec");
+
+ if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter); /* runs qeth_start_kernel_thread */
+}
+
+static int
+qeth_do_start_thread(struct qeth_card *card, unsigned long thread) /* non-zero if a start of @thread has been requested */
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
+ (u8) card->thread_start_mask,
+ (u8) card->thread_allowed_mask,
+ (u8) card->thread_running_mask);
+ rc = (card->thread_start_mask & thread);
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return rc;
+}
+
+static void
+qeth_start_kernel_thread(struct qeth_card *card) /* workqueue body: spawn any requested card threads */
+{
+ QETH_DBF_TEXT(trace , 2, "strthrd");
+
+ if (card->read.state != CH_STATE_UP &&
+ card->write.state != CH_STATE_UP) /* NOTE(review): bails only when BOTH control channels are down — confirm this should not be '||' */
+ return;
+
+ if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
+ kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
+ if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
+ kernel_thread(qeth_recover, (void *) card, SIGCHLD);
+}
+
+
+static void
+qeth_set_intial_options(struct qeth_card *card) /* [sic] set default option values for a freshly allocated card */
+{
+ card->options.route4.type = NO_ROUTER;
+#ifdef CONFIG_QETH_IPV6
+ card->options.route6.type = NO_ROUTER;
+#endif /* QETH_IPV6 */
+ card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
+ card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
+ card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
+ card->options.fake_broadcast = 0;
+ card->options.add_hhlen = DEFAULT_ADD_HHLEN;
+ card->options.fake_ll = 0;
+ card->options.layer2 = 0; /* layer-3 mode by default */
+}
+
+/**
+ * initialize channels ,card and all state machines
+ */
+static int
+qeth_setup_card(struct qeth_card *card) /* init channel/card state machines, locks, lists and defaults; 0 or -ENOMEM */
+{
+
+ QETH_DBF_TEXT(setup, 2, "setupcrd");
+ QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
+
+ card->read.state = CH_STATE_DOWN;
+ card->write.state = CH_STATE_DOWN;
+ card->data.state = CH_STATE_DOWN;
+ card->state = CARD_STATE_DOWN;
+ card->lan_online = 0;
+ card->use_hard_stop = 0;
+ card->dev = NULL;
+#ifdef CONFIG_QETH_VLAN
+ spin_lock_init(&card->vlanlock);
+ card->vlangrp = NULL;
+#endif
+ spin_lock_init(&card->ip_lock);
+ spin_lock_init(&card->thread_mask_lock);
+ card->thread_start_mask = 0;
+ card->thread_allowed_mask = 0; /* no threads permitted until explicitly allowed */
+ card->thread_running_mask = 0;
+ INIT_WORK(&card->kernel_thread_starter,
+ (void *)qeth_start_kernel_thread,card);
+ INIT_LIST_HEAD(&card->ip_list);
+ card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); /* pending ip todo list head */
+ if (!card->ip_tbd_list) {
+ QETH_DBF_TEXT(setup, 0, "iptbdnom");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(card->ip_tbd_list);
+ INIT_LIST_HEAD(&card->cmd_waiter_list);
+ init_waitqueue_head(&card->wait_q);
+ /* intial options */
+ qeth_set_intial_options(card);
+ /* IP address takeover */
+ INIT_LIST_HEAD(&card->ipato.entries);
+ card->ipato.enabled = 0;
+ card->ipato.invert4 = 0;
+ card->ipato.invert6 = 0;
+ /* init QDIO stuff */
+ qeth_init_qdio_info(card);
+ return 0;
+}
+
+static int
+is_1920_device (struct qeth_card *card) /* non-zero if the channel path supports only a single output queue */
+{
+ int single_queue = 0;
+ struct ccw_device *ccwdev;
+ struct channelPath_dsc {
+ u8 flags;
+ u8 lsn;
+ u8 desc;
+ u8 chpid;
+ u8 swla;
+ u8 zeroes;
+ u8 chla;
+ u8 chpp;
+ } *chp_dsc;
+
+ QETH_DBF_TEXT(setup, 2, "chk_1920");
+
+ ccwdev = card->data.ccwdev;
+ chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
+ if (chp_dsc != NULL) {
+ /* CHPP field bit 6 == 1 -> single queue */
+ single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
+ kfree(chp_dsc); /* descriptor is caller-owned */
+ }
+ QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
+ return single_queue;
+}
+
+static int
+qeth_determine_card_type(struct qeth_card *card) /* match dev type/model against known_devices and set card type/queue info */
+{
+ int i = 0;
+
+ QETH_DBF_TEXT(setup, 2, "detcdtyp");
+
+ while (known_devices[i][4]) { /* index 4 = card type; 0 terminates the table */
+ if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
+ (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
+ card->info.type = known_devices[i][4];
+ if (is_1920_device(card)) {
+ PRINT_INFO("Priority Queueing not able "
+ "due to hardware limitations!\n");
+ card->qdio.no_out_queues = 1;
+ card->qdio.default_out_queue = 0;
+ } else {
+ card->qdio.no_out_queues = known_devices[i][8]; /* index 8 = number of output queues */
+ }
+ card->info.is_multicast_different = known_devices[i][9];
+ return 0;
+ }
+ i++;
+ }
+ card->info.type = QETH_CARD_TYPE_UNKNOWN;
+ PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
+ return -ENOENT;
+}
+
+/**
+ * qeth_probe_device - ccwgroup probe callback
+ *
+ * Allocates the qeth_card, wires up the interrupt handler on all three
+ * subchannels, creates the sysfs attributes, detects the card type and
+ * links the card into the global card list.
+ * Returns 0 on success or a negative errno; on failure every resource
+ * acquired so far is released again.
+ */
+static int
+qeth_probe_device(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card;
+ struct device *dev;
+ unsigned long flags;
+ int rc;
+
+ QETH_DBF_TEXT(setup, 2, "probedev");
+
+ dev = &gdev->dev;
+ if (!get_device(dev))
+ return -ENODEV;
+
+ card = qeth_alloc_card();
+ if (!card) {
+ put_device(dev);
+ QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
+ return -ENOMEM;
+ }
+ card->read.ccwdev = gdev->cdev[0];
+ card->write.ccwdev = gdev->cdev[1];
+ card->data.ccwdev = gdev->cdev[2];
+
+ if ((rc = qeth_setup_card(card))){
+ QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
+ put_device(dev);
+ qeth_free_card(card);
+ return rc;
+ }
+ gdev->dev.driver_data = card;
+ card->gdev = gdev;
+ gdev->cdev[0]->handler = qeth_irq;
+ gdev->cdev[1]->handler = qeth_irq;
+ gdev->cdev[2]->handler = qeth_irq;
+
+ rc = qeth_create_device_attributes(dev);
+ if (rc) {
+ gdev->dev.driver_data = NULL; /* card is freed below; don't leave a dangling pointer */
+ put_device(dev);
+ qeth_free_card(card);
+ return rc;
+ }
+ if ((rc = qeth_determine_card_type(card))){
+ PRINT_WARN("%s: not a valid card type\n", __func__);
+ QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+ /* undo the attribute creation above; the old code leaked the
+ sysfs attributes (and left driver_data dangling) on this path */
+ qeth_remove_device_attributes(dev);
+ gdev->dev.driver_data = NULL;
+ put_device(dev);
+ qeth_free_card(card);
+ return rc;
+ }
+ /* insert into our internal list */
+ write_lock_irqsave(&qeth_card_list.rwlock, flags);
+ list_add_tail(&card->list, &qeth_card_list.list);
+ write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
+ return rc;
+}
+
+
+static int
+qeth_get_unitaddr(struct qeth_card *card) /* read configuration data and extract chpid/unit address/guestlan flag */
+{
+ int length;
+ char *prcd;
+ int rc;
+
+ QETH_DBF_TEXT(setup, 2, "getunit");
+ rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
+ if (rc) {
+ PRINT_ERR("read_conf_data for device %s returned %i\n",
+ CARD_DDEV_ID(card), rc);
+ return rc;
+ }
+ card->info.chpid = prcd[30];
+ card->info.unit_addr2 = prcd[31];
+ card->info.cula = prcd[63];
+ card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
+ (prcd[0x11] == _ascebc['M'])); /* EBCDIC "VM" marks a z/VM guest LAN */
+ return 0; /* NOTE(review): prcd looks like it is never freed here — possible leak, confirm buffer ownership of read_conf_data */
+}
+
+static void
+qeth_init_tokens(struct qeth_card *card) /* seed the connection-setup token values used during card bringup */
+{
+ card->token.issuer_rm_w = 0x00010103UL;
+ card->token.cm_filter_w = 0x00010108UL;
+ card->token.cm_connection_w = 0x0001010aUL;
+ card->token.ulp_filter_w = 0x0001010bUL;
+ card->token.ulp_connection_w = 0x0001010dUL;
+}
+
+static inline __u16
+raw_devno_from_bus_id(char *id) /* parse the trailing 4 hex digits of a bus id into a device number */
+{
+ id += (strlen(id) - 4);
+ return (__u16) simple_strtoul(id, &id, 16);
+}
+/**
+ * setup channel
+ */
+static void
+qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
+{
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(trace, 4, "setupccw");
+ card = CARD_FROM_CDEV(channel->ccwdev);
+ if (channel == &card->read)
+ memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
+ else
+ memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
+ channel->ccw.count = len;
+ channel->ccw.cda = (__u32) __pa(iob);
+}
+
+/**
+ * get free buffer for ccws (IDX activation, lancmds,ipassists...)
+ */
+static struct qeth_cmd_buffer *
+__qeth_get_buffer(struct qeth_channel *channel)
+{
+ __u8 index;
+
+ QETH_DBF_TEXT(trace, 6, "getbuff");
+ index = channel->io_buf_no;
+ do {
+ if (channel->iob[index].state == BUF_STATE_FREE) {
+ channel->iob[index].state = BUF_STATE_LOCKED;
+ channel->io_buf_no = (channel->io_buf_no + 1) %
+ QETH_CMD_BUFFER_NO;
+ memset(channel->iob[index].data, 0, QETH_BUFSIZE);
+ return channel->iob + index;
+ }
+ index = (index + 1) % QETH_CMD_BUFFER_NO;
+ } while(index != channel->io_buf_no);
+
+ return NULL;
+}
+
+/**
+ * release command buffer
+ */
+static void
+qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
+{
+ unsigned long flags;
+
+ QETH_DBF_TEXT(trace, 6, "relbuff");
+ spin_lock_irqsave(&channel->iob_lock, flags);
+ memset(iob->data, 0, QETH_BUFSIZE);
+ iob->state = BUF_STATE_FREE;
+ iob->callback = qeth_send_control_data_cb;
+ iob->rc = 0;
+ spin_unlock_irqrestore(&channel->iob_lock, flags);
+}
+
+/*
+ * Locked wrapper around __qeth_get_buffer(); may return NULL when no
+ * command buffer is free.
+ */
+static struct qeth_cmd_buffer *
+qeth_get_buffer(struct qeth_channel *channel)
+{
+	struct qeth_cmd_buffer *buffer = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->iob_lock, flags);
+	buffer = __qeth_get_buffer(channel);
+	spin_unlock_irqrestore(&channel->iob_lock, flags);
+	return buffer;
+}
+
+/*
+ * Sleep on channel->wait_q until a command buffer becomes available;
+ * never returns NULL.
+ */
+static struct qeth_cmd_buffer *
+qeth_wait_for_buffer(struct qeth_channel *channel)
+{
+	struct qeth_cmd_buffer *buffer;
+	wait_event(channel->wait_q,
+		   ((buffer = qeth_get_buffer(channel)) != NULL));
+	return buffer;
+}
+
+/*
+ * Force-release every command buffer of the channel and reset the ring
+ * indices; used after errors or timeouts to discard pending commands.
+ */
+static void
+qeth_clear_cmd_buffers(struct qeth_channel *channel)
+{
+	int cnt = 0;
+
+	for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
+		qeth_release_buffer(channel,&channel->iob[cnt]);
+	channel->buf_no = 0;
+	channel->io_buf_no = 0;
+}
+
+/**
+ * start IDX for read and write channel
+ */
+static int
+qeth_idx_activate_get_answer(struct qeth_channel *channel,
+ void (*idx_reply_cb)(struct qeth_channel *,
+ struct qeth_cmd_buffer *))
+{
+ struct qeth_cmd_buffer *iob;
+ unsigned long flags;
+ int rc;
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(setup, 2, "idxanswr");
+ card = CARD_FROM_CDEV(channel->ccwdev);
+ iob = qeth_get_buffer(channel);
+ iob->callback = idx_reply_cb;
+ memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
+ channel->ccw.count = QETH_BUFSIZE;
+ channel->ccw.cda = (__u32) __pa(iob->data);
+
+ wait_event(card->wait_q,
+ atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+ QETH_DBF_TEXT(setup, 6, "noirqpnd");
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_start(channel->ccwdev,
+ &channel->ccw,(addr_t) iob, 0, 0);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+ if (rc) {
+ PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
+ QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
+ atomic_set(&channel->irq_pending, 0);
+ wake_up(&card->wait_q);
+ return rc;
+ }
+ rc = wait_event_interruptible_timeout(card->wait_q,
+ channel->state == CH_STATE_UP, QETH_TIMEOUT);
+ if (rc == -ERESTARTSYS)
+ return rc;
+ if (channel->state != CH_STATE_UP){
+ rc = -ETIME;
+ QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+ qeth_clear_cmd_buffers(channel);
+ } else
+ rc = 0;
+ return rc;
+}
+
+/**
+ * Send IDX_ACTIVATE on @channel (WRITE or READ variant of the template),
+ * carrying the transport-header sequence number, the issuer token, the
+ * function level and the qdio device number / real device address, then
+ * wait for CH_STATE_ACTIVATING and fetch the adapter's answer via
+ * qeth_idx_activate_get_answer().  Only the write channel increments
+ * the transport-header sequence number.
+ */
+static int
+qeth_idx_activate_channel(struct qeth_channel *channel,
+			   void (*idx_reply_cb)(struct qeth_channel *,
+						struct qeth_cmd_buffer *))
+{
+	struct qeth_card *card;
+	struct qeth_cmd_buffer *iob;
+	unsigned long flags;
+	__u16 temp;
+	int rc;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+
+	QETH_DBF_TEXT(setup, 2, "idxactch");
+
+	iob = qeth_get_buffer(channel);
+	iob->callback = idx_reply_cb;
+	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
+	channel->ccw.count = IDX_ACTIVATE_SIZE;
+	channel->ccw.cda = (__u32) __pa(iob->data);
+	if (channel == &card->write) {
+		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
+		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+		card->seqno.trans_hdr++;
+	} else {
+		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
+		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+	}
+	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+	       &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
+	       &card->info.func_level,sizeof(__u16));
+	temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
+	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
+	temp = (card->info.cula << 8) + card->info.unit_addr2;
+	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
+
+	/* claim the channel before starting I/O */
+	wait_event(card->wait_q,
+		   atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+	QETH_DBF_TEXT(setup, 6, "noirqpnd");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start(channel->ccwdev,
+			      &channel->ccw,(addr_t) iob, 0, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc) {
+		PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
+		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+		return rc;
+	}
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_ACTIVATING) {
+		PRINT_WARN("qeth: IDX activate timed out!\n");
+		QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
+		qeth_clear_cmd_buffers(channel);
+		return -ETIME;
+	}
+	return qeth_idx_activate_get_answer(channel,idx_reply_cb);
+}
+
+/*
+ * Derive the function level we expect the peer to report back from the
+ * level we sent: a low byte of 8 answers shifted by +0x400, a value of
+ * 1 in bits 8-9 answers shifted by +0x200; anything else is echoed
+ * unchanged.
+ */
+static int
+qeth_peer_func_level(int level)
+{
+	int low_byte = level & 0xff;
+
+	if (low_byte == 8)
+		return low_byte + 0x400;
+	if (((level >> 8) & 3) == 1)
+		return low_byte + 0x200;
+	return level;
+}
+
+/**
+ * Completion callback for IDX_ACTIVATE on the write channel.  The first
+ * interrupt (state still DOWN) only advances the channel to ACTIVATING;
+ * the answer interrupt validates the positive reply and the peer
+ * function level (bit 0x0100 masked out) before declaring the channel
+ * UP.  Always releases @iob.
+ */
+static void
+qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	__u16 temp;
+
+	QETH_DBF_TEXT(setup ,2, "idxwrcb");
+
+	if (channel->state == CH_STATE_DOWN) {
+		channel->state = CH_STATE_ACTIVATING;
+		goto out;
+	}
+	card = CARD_FROM_CDEV(channel->ccwdev);
+
+	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
+		PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
+			  "reply\n", CARD_WDEV_ID(card));
+		goto out;
+	}
+	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
+		PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
+			   "function level mismatch "
+			   "(sent: 0x%x, received: 0x%x)\n",
+			   CARD_WDEV_ID(card), card->info.func_level, temp);
+		goto out;
+	}
+	channel->state = CH_STATE_UP;
+out:
+	qeth_release_buffer(channel, iob);
+}
+
+/*
+ * Check a received buffer for an IDX TERMINATE indication (both 0xc0
+ * bits set in byte 2).  Returns -EIO on terminate, 0 for a NULL buffer
+ * or clean data.  Cause code 0x22 hints at a portname conflict.
+ */
+static int
+qeth_check_idx_response(unsigned char *buffer)
+{
+	if (!buffer)
+		return 0;
+
+	QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
+	if ((buffer[2] & 0xc0) == 0xc0) {
+		PRINT_WARN("received an IDX TERMINATE "
+			   "with cause code 0x%02x%s\n",
+			   buffer[4],
+			   ((buffer[4] == 0x22) ?
+			    " -- try another portname" : ""));
+		QETH_DBF_TEXT(trace, 2, "ckidxres");
+		QETH_DBF_TEXT(trace, 2, " idxterm");
+		QETH_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * Completion callback for IDX_ACTIVATE on the read channel.  First
+ * interrupt advances DOWN -> ACTIVATING; the answer interrupt checks for
+ * IDX terminate, validates the positive reply and the peer function
+ * level, then records the adapter's issuer token and microcode level and
+ * declares the channel UP.  Always releases @iob.
+ */
+static void
+qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	__u16 temp;
+
+	QETH_DBF_TEXT(setup , 2, "idxrdcb");
+	if (channel->state == CH_STATE_DOWN) {
+		channel->state = CH_STATE_ACTIVATING;
+		goto out;
+	}
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	if (qeth_check_idx_response(iob->data)) {
+		goto out;
+	}
+	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
+		PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
+			  "reply\n", CARD_RDEV_ID(card));
+		goto out;
+	}
+
+/**
+ * Temporary fix for a microcode bug: treat the portname as required for
+ * every OSAE card, not only when the adapter asks for one.  To revert,
+ * replace the OR below by an AND.
+ */
+	if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
+	     (card->info.type == QETH_CARD_TYPE_OSAE) )
+		card->info.portname_required = 1;
+
+	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+	if (temp != qeth_peer_func_level(card->info.func_level)) {
+		PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
+			   "level mismatch (sent: 0x%x, received: 0x%x)\n",
+			   CARD_RDEV_ID(card), card->info.func_level, temp);
+		goto out;
+	}
+	memcpy(&card->token.issuer_rm_r,
+	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	memcpy(&card->info.mcl_level[0],
+	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
+	channel->state = CH_STATE_UP;
+out:
+	qeth_release_buffer(channel,iob);
+}
+
+/*
+ * Re-arm the read channel with a fresh command buffer so the adapter can
+ * deliver the next inbound control message; schedules recovery if the
+ * CCW cannot be started.  Returns 0, -EIO (channel not up) or -ENOMEM
+ * (no free buffer).
+ */
+static int
+qeth_issue_next_read(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(trace,5,"issnxrd");
+	if (card->read.state != CH_STATE_UP)
+		return -EIO;
+	iob = qeth_get_buffer(&card->read);
+	if (!iob) {
+		PRINT_WARN("issue_next_read failed: no iob available!\n");
+		return -ENOMEM;
+	}
+	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
+	wait_event(card->wait_q,
+		   atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
+	QETH_DBF_TEXT(trace, 6, "noirqpnd");
+	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
+			      (addr_t) iob, 0, 0);
+	if (rc) {
+		PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
+		atomic_set(&card->read.irq_pending, 0);
+		qeth_schedule_recovery(card);
+		wake_up(&card->wait_q);
+	}
+	return rc;
+}
+
+/**
+ * Allocate and zero a qeth_reply with a reference count of one; uses
+ * GFP_ATOMIC since callers may run in softirq/timer context.  Returns
+ * NULL on allocation failure; release with qeth_put_reply().
+ */
+static struct qeth_reply *
+qeth_alloc_reply(struct qeth_card *card)
+{
+	struct qeth_reply *reply;
+
+	reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+	if (reply){
+		memset(reply, 0, sizeof(struct qeth_reply));
+		atomic_set(&reply->refcnt, 1);
+		reply->card = card;
+	}
+	return reply;
+}
+
+/* Take an additional reference on @reply. */
+static void
+qeth_get_reply(struct qeth_reply *reply)
+{
+	WARN_ON(atomic_read(&reply->refcnt) <= 0);
+	atomic_inc(&reply->refcnt);
+}
+
+/* Drop a reference on @reply; frees it when the count reaches zero. */
+static void
+qeth_put_reply(struct qeth_reply *reply)
+{
+	WARN_ON(atomic_read(&reply->refcnt) <= 0);
+	if (atomic_dec_and_test(&reply->refcnt))
+		kfree(reply);
+}
+
+/*
+ * Timer callback for a pending control command: if the reply is still on
+ * the card's waiter list, remove it, flag it with -ETIME and wake the
+ * sender.  The extra get/put keeps the reply alive across the wake-up.
+ */
+static void
+qeth_cmd_timeout(unsigned long data)
+{
+	struct qeth_reply *reply, *list_reply, *r;
+	unsigned long flags;
+
+	reply = (struct qeth_reply *) data;
+	spin_lock_irqsave(&reply->card->lock, flags);
+	list_for_each_entry_safe(list_reply, r,
+				 &reply->card->cmd_waiter_list, list) {
+		if (reply == list_reply){
+			qeth_get_reply(reply);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&reply->card->lock, flags);
+			reply->rc = -ETIME;
+			reply->received = 1;
+			wake_up(&reply->wait_q);
+			qeth_put_reply(reply);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&reply->card->lock, flags);
+}
+
+/*
+ * After the link comes back (STARTLAN) discard the registered IP list
+ * and rebuild it.
+ */
+static void
+qeth_reset_ip_addresses(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(trace, 2, "rstipadd");
+
+	qeth_clear_ip_list(card, 0, 1);
+	/* this function will also schedule the SET_IP_THREAD */
+	qeth_set_multicast_list(card->dev);
+}
+
+/**
+ * Classify an inbound control buffer: returns the contained IPA command
+ * if it is a reply to one of ours, NULL otherwise.  Unsolicited
+ * STOPLAN/STARTLAN commands are handled right here (carrier off/on,
+ * address reset); REGISTER/UNREGISTER_LOCAL_ADDR and other unsolicited
+ * commands are only logged.
+ */
+static struct qeth_ipa_cmd *
+qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
+{
+	struct qeth_ipa_cmd *cmd = NULL;
+
+	QETH_DBF_TEXT(trace,5,"chkipad");
+	if (IS_IPA(iob->data)){
+		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+		if (IS_IPA_REPLY(cmd))
+			return cmd;
+		else {
+			switch (cmd->hdr.command) {
+			case IPA_CMD_STOPLAN:
+				PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
+					   "there is a network problem or "
+					   "someone pulled the cable or "
+					   "disabled the port.\n",
+					   QETH_CARD_IFNAME(card),
+					   card->info.chpid);
+				card->lan_online = 0;
+				netif_carrier_off(card->dev);
+				return NULL;
+			case IPA_CMD_STARTLAN:
+				PRINT_INFO("Link reestablished on %s "
+					   "(CHPID 0x%X). Scheduling "
+					   "IP address reset.\n",
+					   QETH_CARD_IFNAME(card),
+					   card->info.chpid);
+				card->lan_online = 1;
+				netif_carrier_on(card->dev);
+				qeth_reset_ip_addresses(card);
+				return NULL;
+			case IPA_CMD_REGISTER_LOCAL_ADDR:
+				QETH_DBF_TEXT(trace,3, "irla");
+				break;
+			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+				QETH_DBF_TEXT(trace,3, "urla");
+				break;
+			default:
+				PRINT_WARN("Received data is IPA "
+					   "but not a reply!\n");
+				break;
+			}
+		}
+	}
+	return cmd;
+}
+
+/**
+ * wake all waiting ipa commands
+ */
+static void
+qeth_clear_ipacmd_list(struct qeth_card *card)
+{
+ struct qeth_reply *reply, *r;
+ unsigned long flags;
+
+ QETH_DBF_TEXT(trace, 4, "clipalst");
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
+ qeth_get_reply(reply);
+ reply->rc = -EIO;
+ reply->received = 1;
+ list_del_init(&reply->list);
+ wake_up(&reply->wait_q);
+ qeth_put_reply(reply);
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/**
+ * Default completion callback for control-data reads.  Rejects IDX
+ * terminate responses (flushes waiters, triggers recovery), lets
+ * qeth_check_ipa_data() filter unsolicited commands, then matches the
+ * response to a waiting reply by sequence number: the reply's callback
+ * runs outside the card lock and may ask (non-zero return) to be
+ * requeued for further parts; otherwise the waiter is completed.  The
+ * PDU ack sequence number is recorded and the buffer released in every
+ * case.
+ */
+static void
+qeth_send_control_data_cb(struct qeth_channel *channel,
+			  struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	struct qeth_reply *reply, *r;
+	struct qeth_ipa_cmd *cmd;
+	unsigned long flags;
+	int keep_reply;
+
+	QETH_DBF_TEXT(trace,4,"sndctlcb");
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	if (qeth_check_idx_response(iob->data)) {
+		qeth_clear_ipacmd_list(card);
+		qeth_schedule_recovery(card);
+		goto out;
+	}
+
+	cmd = qeth_check_ipa_data(card, iob);
+	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
+		goto out;
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
+		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
+		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
+			qeth_get_reply(reply);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&card->lock, flags);
+			keep_reply = 0;
+			if (reply->callback != NULL) {
+				if (cmd) {
+					reply->offset = (__u16)((char*)cmd -
+							(char *)iob->data);
+					keep_reply = reply->callback(card,
+							reply,
+							(unsigned long)cmd);
+				}
+				else
+					keep_reply = reply->callback(card,
+							reply,
+							(unsigned long)iob);
+			}
+			if (cmd)
+				reply->rc = (u16) cmd->hdr.return_code;
+			else if (iob->rc)
+				reply->rc = iob->rc;
+			if (keep_reply) {
+				/* callback expects more parts: requeue */
+				spin_lock_irqsave(&card->lock, flags);
+				list_add_tail(&reply->list,
+					      &card->cmd_waiter_list);
+				spin_unlock_irqrestore(&card->lock, flags);
+			} else {
+				reply->received = 1;
+				wake_up(&reply->wait_q);
+			}
+			qeth_put_reply(reply);
+			goto out;
+		}
+	}
+	spin_unlock_irqrestore(&card->lock, flags);
+out:
+	memcpy(&card->seqno.pdu_hdr_ack,
+		QETH_PDU_HEADER_SEQ_NO(iob->data),
+		QETH_SEQ_NO_LENGTH);
+	qeth_release_buffer(channel,iob);
+}
+
+/**
+ * Send a control command of @len bytes on the write channel and sleep
+ * until the reply arrives or the command timer fires (QETH_IPA_TIMEOUT
+ * for IPA commands, QETH_TIMEOUT otherwise).  Transport/PDU sequence
+ * numbers are stamped into the buffer, a qeth_reply is queued on
+ * cmd_waiter_list and @reply_cb is invoked from the read path.
+ * Returns the reply's rc, -ENOMEM, or a ccw_device_start() error.
+ */
+static int
+qeth_send_control_data(struct qeth_card *card, int len,
+		       struct qeth_cmd_buffer *iob,
+		       int (*reply_cb)
+		       (struct qeth_card *, struct qeth_reply*, unsigned long),
+		       void *reply_param)
+
+{
+	int rc;
+	unsigned long flags;
+	struct qeth_reply *reply;
+	struct timer_list timer;
+
+	QETH_DBF_TEXT(trace, 2, "sendctl");
+
+	qeth_setup_ccw(&card->write,iob->data,len);
+
+	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+	card->seqno.trans_hdr++;
+
+	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
+	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
+	card->seqno.pdu_hdr++;
+	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
+	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
+	iob->callback = qeth_release_buffer;
+
+	reply = qeth_alloc_reply(card);
+	if (!reply) {
+		PRINT_WARN("Could not alloc qeth_reply!\n");
+		return -ENOMEM;
+	}
+	reply->callback = reply_cb;
+	reply->param = reply_param;
+	if (card->state == CARD_STATE_DOWN)
+		reply->seqno = QETH_IDX_COMMAND_SEQNO;
+	else
+		reply->seqno = card->seqno.ipa++;
+	init_timer(&timer);
+	timer.function = qeth_cmd_timeout;
+	timer.data = (unsigned long) reply;
+	if (IS_IPA(iob->data))
+		timer.expires = jiffies + QETH_IPA_TIMEOUT;
+	else
+		timer.expires = jiffies + QETH_TIMEOUT;
+	init_waitqueue_head(&reply->wait_q);
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->cmd_waiter_list);
+	spin_unlock_irqrestore(&card->lock, flags);
+	QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
+	/* claim the write channel before starting I/O */
+	wait_event(card->wait_q,
+		   atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+	QETH_DBF_TEXT(trace, 6, "noirqpnd");
+	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
+	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
+			      (addr_t) iob, 0, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+	if (rc){
+		PRINT_WARN("qeth_send_control_data: "
+			   "ccw_device_start rc = %i\n", rc);
+		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
+		spin_lock_irqsave(&card->lock, flags);
+		list_del_init(&reply->list);
+		qeth_put_reply(reply);
+		spin_unlock_irqrestore(&card->lock, flags);
+		qeth_release_buffer(iob->channel, iob);
+		atomic_set(&card->write.irq_pending, 0);
+		wake_up(&card->wait_q);
+		return rc;
+	}
+	add_timer(&timer);
+	wait_event(reply->wait_q, reply->received);
+	del_timer_sync(&timer);
+	rc = reply->rc;
+	qeth_put_reply(reply);
+	return rc;
+}
+
+/*
+ * Wrap an IPA command into the MPC PDU template (protocol type per
+ * layer2/layer3 mode, ULP connection token as destination address) and
+ * send it synchronously via qeth_send_control_data().
+ */
+static int
+qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+		  int (*reply_cb)
+		  (struct qeth_card *,struct qeth_reply*, unsigned long),
+		  void *reply_param)
+{
+	int rc;
+	char prot_type;
+
+	QETH_DBF_TEXT(trace,4,"sendipa");
+
+	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+
+	if (card->options.layer2)
+		prot_type = QETH_PROT_LAYER2;
+	else
+		prot_type = QETH_PROT_TCPIP;
+
+	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
+	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+
+	rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
+				    reply_cb, reply_param);
+	return rc;
+}
+
+
+/* Store the CM filter token returned in the adapter's CM_ENABLE reply. */
+static int
+qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
+		  unsigned long data)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(setup, 2, "cmenblcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.cm_filter_r,
+	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+/*
+ * Issue CM_ENABLE carrying our issuer and CM filter tokens; the reply
+ * callback captures the adapter's filter token.
+ */
+static int
+qeth_cm_enable(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(setup,2,"cmenable");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
+	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
+	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
+	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
+
+	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
+				    qeth_cm_enable_cb, NULL);
+	return rc;
+}
+
+/* Store the CM connection token from the adapter's CM_SETUP reply. */
+static int
+qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
+		 unsigned long data)
+{
+
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(setup, 2, "cmsetpcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.cm_connection_r,
+	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+/*
+ * Issue CM_SETUP carrying the issuer token, our CM connection token and
+ * the adapter's CM filter token; the reply callback captures the
+ * adapter's connection token.
+ */
+static int
+qeth_cm_setup(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(setup,2,"cmsetup");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
+	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
+	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
+	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
+	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
+	rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
+				    qeth_cm_setup_cb, NULL);
+	return rc;
+
+}
+
+/**
+ * Handle the ULP_ENABLE reply: save the ULP filter token, derive the MTU
+ * (either from the frame size the adapter reports or from card-type
+ * defaults), size the inbound buffers accordingly and record the link
+ * type when the DIF info block is long enough to carry one.  An invalid
+ * frame size is flagged via iob->rc = -EINVAL.
+ */
+static int
+qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
+		   unsigned long data)
+{
+
+	__u16 mtu, framesize;
+	__u16 len;
+	__u8 link_type;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(setup, 2, "ulpenacb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.ulp_filter_r,
+	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	if (qeth_get_mtu_out_of_mpc(card->info.type)) {
+		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
+		mtu = qeth_get_mtu_outof_framesize(framesize);
+		if (!mtu) {
+			iob->rc = -EINVAL;
+			QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
+			return 0;
+		}
+		card->info.max_mtu = mtu;
+		card->info.initial_mtu = mtu;
+		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
+	} else {
+		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
+		card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
+		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+	}
+
+	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
+	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
+		memcpy(&link_type,
+		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
+		card->info.link_type = link_type;
+	} else
+		card->info.link_type = 0;
+	QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+/*
+ * Issue ULP_ENABLE with port number, protocol type (layer2/3), the CM
+ * connection token, our ULP filter token and the configured portname.
+ */
+static int
+qeth_ulp_enable(struct qeth_card *card)
+{
+	int rc;
+	char prot_type;
+	struct qeth_cmd_buffer *iob;
+
+	/*FIXME: trace view callbacks*/
+	QETH_DBF_TEXT(setup,2,"ulpenabl");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
+
+	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
+		(__u8) card->info.portno;
+	if (card->options.layer2)
+		prot_type = QETH_PROT_LAYER2;
+	else
+		prot_type = QETH_PROT_TCPIP;
+
+	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1);
+	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
+	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
+	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
+	       card->info.portname, 9);
+	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
+				    qeth_ulp_enable_cb, NULL);
+	return rc;
+
+}
+
+/*
+ * Exact duplicate of raw_devno_from_bus_id() above -- parses the last
+ * four hex digits of a ccw bus id.  Candidate for consolidation.
+ */
+static inline __u16
+__raw_devno_from_bus_id(char *id)
+{
+	id += (strlen(id) - 4);
+	return (__u16) simple_strtoul(id, &id, 16);
+}
+
+/* Store the ULP connection token from the adapter's ULP_SETUP reply. */
+static int
+qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
+		  unsigned long data)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(setup, 2, "ulpstpcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.ulp_connection_r,
+	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+/*
+ * Issue ULP_SETUP carrying the CM connection token, our ULP connection
+ * token, the adapter's ULP filter token, plus the qdio device number
+ * (CUA) and the real device address (cula/unit_addr2).
+ */
+static int
+qeth_ulp_setup(struct qeth_card *card)
+{
+	int rc;
+	__u16 temp;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(setup,2,"ulpsetup");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
+
+	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
+	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
+	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
+	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
+
+	temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
+	memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
+	temp = (card->info.cula << 8) + card->info.unit_addr2;
+	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
+	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
+				    qeth_ulp_setup_cb, NULL);
+	return rc;
+}
+
+/*
+ * Returns 1 (and traces the SBAL element 14/15 flags) if qdio or SIGA
+ * reported an error for this inbound buffer, else 0.
+ */
+static inline int
+qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
+			     unsigned int qdio_error,
+			     unsigned int siga_error)
+{
+	int rc = 0;
+
+	if (qdio_error || siga_error) {
+		QETH_DBF_TEXT(trace, 2, "qdinerr");
+		QETH_DBF_TEXT(qerr, 2, "qdinerr");
+		QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
+			       buf->buffer->element[15].flags & 0xff);
+		QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
+			       buf->buffer->element[14].flags & 0xff);
+		QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
+		QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
+		rc = 1;
+	}
+	return rc;
+}
+
+/*
+ * Allocate an skb of @length bytes; with VLAN support built in, extra
+ * VLAN_HLEN headroom is reserved so a tag can later be pushed without a
+ * reallocation.  Returns NULL on allocation failure.
+ */
+static inline struct sk_buff *
+qeth_get_skb(unsigned int length)
+{
+	struct sk_buff* skb;
+#ifdef CONFIG_QETH_VLAN
+	if ((skb = dev_alloc_skb(length + VLAN_HLEN)))
+		skb_reserve(skb, VLAN_HLEN);
+#else
+	skb = dev_alloc_skb(length);
+#endif
+	return skb;
+}
+
+/**
+ * Extract the next packet from an inbound qdio buffer, advancing
+ * *__element/*__offset across SBAL elements.  The qeth_hdr must lie
+ * fully inside one element; the payload may span several.  Allocates an
+ * skb (with fake-LL headroom pre-pulled when configured) and copies the
+ * payload into it.  Returns NULL at end of buffer, for a zero-length
+ * packet, on a truncated frame (rx_errors++) or on allocation failure
+ * (rx_dropped++).
+ */
+static inline struct sk_buff *
+qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
+		  struct qdio_buffer_element **__element, int *__offset,
+		  struct qeth_hdr **hdr)
+{
+	struct qdio_buffer_element *element = *__element;
+	int offset = *__offset;
+	struct sk_buff *skb = NULL;
+	int skb_len;
+	void *data_ptr;
+	int data_len;
+
+	QETH_DBF_TEXT(trace,6,"nextskb");
+	/* qeth_hdr must not cross element boundaries */
+	if (element->length < offset + sizeof(struct qeth_hdr)){
+		if (qeth_is_last_sbale(element))
+			return NULL;
+		element++;
+		offset = 0;
+		if (element->length < sizeof(struct qeth_hdr))
+			return NULL;
+	}
+	*hdr = element->addr + offset;
+
+	offset += sizeof(struct qeth_hdr);
+	if (card->options.layer2)
+		skb_len = (*hdr)->hdr.l2.pkt_length;
+	else
+		skb_len = (*hdr)->hdr.l3.length;
+
+	if (!skb_len)
+		return NULL;
+	if (card->options.fake_ll){
+		if (!(skb = qeth_get_skb(skb_len + QETH_FAKE_LL_LEN)))
+			goto no_mem;
+		/* reserve room for the fake header in front of the data */
+		skb_pull(skb, QETH_FAKE_LL_LEN);
+	} else if (!(skb = qeth_get_skb(skb_len)))
+		goto no_mem;
+	data_ptr = element->addr + offset;
+	while (skb_len) {
+		data_len = min(skb_len, (int)(element->length - offset));
+		if (data_len)
+			memcpy(skb_put(skb, data_len), data_ptr, data_len);
+		skb_len -= data_len;
+		if (skb_len){
+			if (qeth_is_last_sbale(element)){
+				/* frame claims more data than the buffer holds */
+				QETH_DBF_TEXT(trace,4,"unexeob");
+				QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
+				QETH_DBF_TEXT(qerr,2,"unexeob");
+				QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
+				QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
+				dev_kfree_skb_any(skb);
+				card->stats.rx_errors++;
+				return NULL;
+			}
+			element++;
+			offset = 0;
+			data_ptr = element->addr;
+		} else {
+			offset += data_len;
+		}
+	}
+	*__element = element;
+	*__offset = offset;
+	return skb;
+no_mem:
+	if (net_ratelimit()){
+		PRINT_WARN("No memory for packet received on %s.\n",
+			   QETH_CARD_IFNAME(card));
+		QETH_DBF_TEXT(trace,2,"noskbmem");
+		QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
+	}
+	card->stats.rx_dropped++;
+	return NULL;
+}
+
+/*
+ * Determine skb->pkt_type and the ethernet protocol for a received frame
+ * (token-ring links are handed to tr_type_trans()).  Mirrors
+ * eth_type_trans(): values >= 1536 are a protocol id, a leading 0xFFFF
+ * marks raw 802.3, anything else is 802.2.
+ */
+static inline unsigned short
+qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	struct qeth_card *card;
+	struct ethhdr *eth;
+
+	QETH_DBF_TEXT(trace,6,"typtrans");
+
+	card = (struct qeth_card *)dev->priv;
+#ifdef CONFIG_TR
+	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
+	 	return tr_type_trans(skb,dev);
+#endif /* CONFIG_TR */
+	skb->mac.raw = skb->data;
+	skb_pull(skb, ETH_HLEN );
+	eth = eth_hdr(skb);
+
+	if (*eth->h_dest & 1) {
+		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
+			skb->pkt_type = PACKET_BROADCAST;
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+	} else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+		skb->pkt_type = PACKET_OTHERHOST;
+
+	if (ntohs(eth->h_proto) >= 1536)
+		return eth->h_proto;
+	if (*(unsigned short *) (skb->data) == 0xFFFF)
+		return htons(ETH_P_802_3);
+	return htons(ETH_P_802_2);
+}
+
+/**
+ * Synthesize a fake ethernet header in the skb headroom for layer-3
+ * ("fake_ll") mode: the destination MAC is derived from the packet type
+ * (per-protocol multicast mapping, broadcast, or our own address), the
+ * source MAC comes from the qeth header when QETH_HDR_EXT_SRC_MAC_ADDR
+ * is set (zeroed otherwise), and the protocol field mirrors the skb.
+ */
+static inline void
+qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
+			 struct qeth_hdr *hdr)
+{
+	struct ethhdr *fake_hdr;
+	struct iphdr *ip_hdr;
+
+	QETH_DBF_TEXT(trace,5,"skbfake");
+	skb->mac.raw = skb->data - QETH_FAKE_LL_LEN;
+	/* this is a fake ethernet header */
+	fake_hdr = (struct ethhdr *) skb->mac.raw;
+
+	/* the destination MAC address */
+	switch (skb->pkt_type){
+	case PACKET_MULTICAST:
+		switch (skb->protocol){
+#ifdef CONFIG_QETH_IPV6
+		case __constant_htons(ETH_P_IPV6):
+			ndisc_mc_map((struct in6_addr *)
+				     skb->data + QETH_FAKE_LL_V6_ADDR_POS,
+				     fake_hdr->h_dest, card->dev, 0);
+			break;
+#endif /* CONFIG_QETH_IPV6 */
+		case __constant_htons(ETH_P_IP):
+			ip_hdr = (struct iphdr *)skb->data;
+			if (card->dev->type == ARPHRD_IEEE802_TR)
+				ip_tr_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
+			else
+				ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
+			break;
+		default:
+			memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
+		}
+		break;
+	case PACKET_BROADCAST:
+		memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
+		break;
+	default:
+		memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
+	}
+	/* the source MAC address */
+	if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
+		memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
+	else
+		memset(fake_hdr->h_source, 0, ETH_ALEN);
+	/* the protocol */
+	fake_hdr->h_proto = skb->protocol;
+}
+
+/*
+ * If the qeth header flags a VLAN frame, push a tag in front of the
+ * packet (taken from vlan_id, or from dest_addr[12] for an included tag)
+ * and switch skb->protocol to 802.1Q.  Compiled out when the driver is
+ * built without CONFIG_QETH_VLAN.
+ */
+static inline void
+qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
+		      struct qeth_hdr *hdr)
+{
+#ifdef CONFIG_QETH_VLAN
+	u16 *vlan_tag;
+
+	if (hdr->hdr.l3.ext_flags &
+	    (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
+		vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
+		*vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
+			hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+		*(vlan_tag + 1) = skb->protocol;
+		skb->protocol = __constant_htons(ETH_P_8021Q);
+	}
+#endif /* CONFIG_QETH_VLAN */
+}
+
+/*
+ * Post-process a layer-2 frame: set packet type and protocol, apply the
+ * configured checksumming mode and strip a VLAN header when present.
+ * Returns the VLAN id (0 if untagged or VLAN support is compiled out).
+ */
+static inline __u16
+qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+			struct qeth_hdr *hdr)
+{
+	unsigned short vlan_id = 0;
+#ifdef CONFIG_QETH_VLAN
+	struct vlan_hdr *vhdr;
+#endif
+
+	skb->pkt_type = PACKET_HOST;
+	skb->protocol = qeth_type_trans(skb, skb->dev);
+	if (card->options.checksum_type == NO_CHECKSUMMING)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	else
+		skb->ip_summed = CHECKSUM_NONE;
+#ifdef CONFIG_QETH_VLAN
+	if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
+		vhdr = (struct vlan_hdr *) skb->data;
+		skb->protocol =
+			__constant_htons(vhdr->h_vlan_encapsulated_proto);
+		vlan_id = hdr->hdr.l2.vlan_id;
+		skb_pull(skb, VLAN_HLEN);
+	}
+#endif
+	return vlan_id;
+}
+
+/**
+ * Post-process a layer-3 frame: derive skb->protocol from the IPv6 flag
+ * (passthrough frames go through qeth_type_trans() instead), derive the
+ * packet type from the cast bits, handle VLAN tags and fake-LL header
+ * rebuilding, and set ip_summed -- with HW_CHECKSUMMING the checksum is
+ * only trusted when the adapter verified both the IP header and the
+ * transport checksum.
+ */
+static inline void
+qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+		 struct qeth_hdr *hdr)
+{
+#ifdef CONFIG_QETH_IPV6
+	if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
+		skb->pkt_type = PACKET_HOST;
+		skb->protocol = qeth_type_trans(skb, card->dev);
+		return;
+	}
+#endif /* CONFIG_QETH_IPV6 */
+	skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
+			      ETH_P_IP);
+	switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){
+	case QETH_CAST_UNICAST:
+		skb->pkt_type = PACKET_HOST;
+		break;
+	case QETH_CAST_MULTICAST:
+		skb->pkt_type = PACKET_MULTICAST;
+		card->stats.multicast++;
+		break;
+	case QETH_CAST_BROADCAST:
+		skb->pkt_type = PACKET_BROADCAST;
+		card->stats.multicast++;
+		break;
+	case QETH_CAST_ANYCAST:
+	case QETH_CAST_NOCAST:
+	default:
+		skb->pkt_type = PACKET_HOST;
+	}
+	qeth_rebuild_skb_vlan(card, skb, hdr);
+	if (card->options.fake_ll)
+		qeth_rebuild_skb_fake_ll(card, skb, hdr);
+	else
+		skb->mac.raw = skb->data;
+	skb->ip_summed = card->options.checksum_type;
+	if (card->options.checksum_type == HW_CHECKSUMMING){
+		if ( (hdr->hdr.l3.ext_flags &
+		      (QETH_HDR_EXT_CSUM_HDR_REQ |
+		       QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
+		     (QETH_HDR_EXT_CSUM_HDR_REQ |
+		      QETH_HDR_EXT_CSUM_TRANSP_REQ) )
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = SW_CHECKSUMMING;
+	}
+}
+
+/**
+ * Hand every packet in an inbound qdio buffer to the network stack:
+ * rebuild each skb according to layer-2/3 mode, drop it if the interface
+ * is down, pass tagged frames through VLAN hw-accel, and update the rx
+ * statistics.
+ */
+static inline void
+qeth_process_inbound_buffer(struct qeth_card *card,
+			    struct qeth_qdio_buffer *buf, int index)
+{
+	struct qdio_buffer_element *element;
+	struct sk_buff *skb;
+	struct qeth_hdr *hdr;
+	int offset;
+	int rxrc;
+	__u16 vlan_tag = 0;
+
+	/* get first element of current buffer */
+	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
+	offset = 0;
+#ifdef CONFIG_QETH_PERF_STATS
+	card->perf_stats.bufs_rec++;
+#endif
+	while((skb = qeth_get_next_skb(card, buf->buffer, &element,
+				       &offset, &hdr))) {
+		skb->dev = card->dev;
+		if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
+			vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
+		else
+			qeth_rebuild_skb(card, skb, hdr);
+		/* is device UP ? */
+		if (!(card->dev->flags & IFF_UP)){
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+#ifdef CONFIG_QETH_VLAN
+		if (vlan_tag)
+			vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
+		else
+#endif
+		rxrc = netif_rx(skb);
+		card->dev->last_rx = jiffies;
+		card->stats.rx_packets++;
+		card->stats.rx_bytes += skb->len;
+	}
+}
+
+/*
+ * Detach and return the first free buffer-pool entry, or NULL when the
+ * pool is empty.
+ */
+static inline struct qeth_buffer_pool_entry *
+qeth_get_buffer_pool_entry(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *entry;
+
+	QETH_DBF_TEXT(trace, 6, "gtbfplen");
+	if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
+		entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
+				struct qeth_buffer_pool_entry, list);
+		list_del_init(&entry->list);
+		return entry;
+	}
+	return NULL;
+}
+
+/*
+ * Attach a pool entry to an inbound qdio buffer and set up its SBAL
+ * elements: one page each, with the last one flagged LAST_ENTRY; the
+ * buffer ends up in QETH_QDIO_BUF_EMPTY state.
+ */
+static inline void
+qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
+{
+	struct qeth_buffer_pool_entry *pool_entry;
+	int i;
+
+	pool_entry = qeth_get_buffer_pool_entry(card);
+	/*
+	 * since the buffer is accessed only from the input_tasklet
+	 * there shouldn't be a need to synchronize; also, since we use
+	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run  out off
+	 * buffers
+	 */
+	BUG_ON(!pool_entry);
+
+	buf->pool_entry = pool_entry;
+	for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
+		buf->buffer->element[i].length = PAGE_SIZE;
+		buf->buffer->element[i].addr =  pool_entry->elements[i];
+		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
+			buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
+		else
+			buf->buffer->element[i].flags = 0;
+	}
+	buf->state = QETH_QDIO_BUF_EMPTY;
+}
+
+/*
+ * Return an outbound buffer to the EMPTY state: drop the PCI-request
+ * accounting (flag 0x40 in element 0), release all queued skbs and
+ * EDDP contexts, and zero every buffer element.
+ */
+static inline void
+qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf)
+{
+ int i;
+ struct sk_buff *skb;
+
+ /* is PCI flag set on buffer? */
+ if (buf->buffer->element[0].flags & 0x40)
+ atomic_dec(&queue->set_pci_flags_count);
+
+ /* drop the extra reference taken when the skb was queued */
+ while ((skb = skb_dequeue(&buf->skb_list))){
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ }
+ qeth_eddp_buf_release_contexts(buf);
+ for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
+ buf->buffer->element[i].length = 0;
+ buf->buffer->element[i].addr = NULL;
+ buf->buffer->element[i].flags = 0;
+ }
+ buf->next_element_to_fill = 0;
+ atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
+}
+
+/*
+ * Give processed inbound buffers back to the hardware.  'count' is
+ * the number of buffers processed since the last requeue, computed
+ * modulo the ring size from 'index' and next_buf_to_init.  To avoid
+ * one SIGA per buffer we only requeue once at least
+ * QETH_IN_BUF_REQUEUE_THRESHOLD buffers have accumulated.
+ */
+static inline void
+qeth_queue_input_buffer(struct qeth_card *card, int index)
+{
+ struct qeth_qdio_q *queue = card->qdio.in_q;
+ int count;
+ int i;
+ int rc;
+
+ QETH_DBF_TEXT(trace,6,"queinbuf");
+ /* buffers processed since the last do_QDIO (ring-wrap aware) */
+ count = (index < queue->next_buf_to_init)?
+ card->qdio.in_buf_pool.buf_count -
+ (queue->next_buf_to_init - index) :
+ card->qdio.in_buf_pool.buf_count -
+ (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
+ /* only requeue at a certain threshold to avoid SIGAs */
+ if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
+ for (i = queue->next_buf_to_init;
+ i < queue->next_buf_to_init + count; ++i)
+ qeth_init_input_buffer(card,
+ &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
+ /*
+ * according to old code it should be avoided to requeue all
+ * 128 buffers in order to benefit from PCI avoidance.
+ * this function keeps at least one buffer (the buffer at
+ * 'index') un-requeued -> this buffer is the first buffer that
+ * will be requeued the next time
+ */
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.inbound_do_qdio_cnt++;
+ card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
+#endif
+ rc = do_QDIO(CARD_DDEV(card),
+ QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
+ 0, queue->next_buf_to_init, count, NULL);
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
+ card->perf_stats.inbound_do_qdio_start_time;
+#endif
+ if (rc){
+ PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
+ "return %i (device %s).\n",
+ rc, CARD_DDEV_ID(card));
+ QETH_DBF_TEXT(trace,2,"qinberr");
+ QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
+ }
+ queue->next_buf_to_init = (queue->next_buf_to_init + count) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ }
+}
+
+/* Return a buffer pool entry to the tail of the card's working pool. */
+static inline void
+qeth_put_buffer_pool_entry(struct qeth_card *card,
+ struct qeth_buffer_pool_entry *entry)
+{
+ QETH_DBF_TEXT(trace, 6, "ptbfplen");
+ list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
+}
+
+/*
+ * QDIO inbound interrupt handler: processes 'count' inbound buffers
+ * starting at 'first_element', returns their pool entries to the
+ * working pool and requeues the buffers to the hardware.  On an
+ * activate-check condition the card is scheduled for recovery
+ * instead.
+ */
+static void
+qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
+ unsigned int qdio_err, unsigned int siga_err,
+ unsigned int queue, int first_element, int count,
+ unsigned long card_ptr)
+{
+ struct net_device *net_dev;
+ struct qeth_card *card;
+ struct qeth_qdio_buffer *buffer;
+ int index;
+ int i;
+
+ QETH_DBF_TEXT(trace, 6, "qdinput");
+ card = (struct qeth_card *) card_ptr;
+ net_dev = card->dev;
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.inbound_cnt++;
+ card->perf_stats.inbound_start_time = qeth_get_micros();
+#endif
+ if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
+ if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
+ QETH_DBF_TEXT(trace, 1,"qdinchk");
+ QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
+ QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
+ QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
+ qeth_schedule_recovery(card);
+ return;
+ }
+ }
+ for (i = first_element; i < (first_element + count); ++i) {
+ index = i % QDIO_MAX_BUFFERS_PER_Q;
+ buffer = &card->qdio.in_q->bufs[index];
+ /* NOTE(review): '==' here differs from the '&' test above;
+ * presumably only a plain LOOK_FOR_ERROR status carries
+ * per-buffer errors -- confirm against the QDIO layer */
+ if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) &&
+ qeth_check_for_inbound_error(buffer, qdio_err, siga_err)))
+ qeth_process_inbound_buffer(card, buffer, index);
+ /* clear buffer and give back to hardware */
+ qeth_put_buffer_pool_entry(card, buffer->pool_entry);
+ qeth_queue_input_buffer(card, index);
+ }
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.inbound_time += qeth_get_micros() -
+ card->perf_stats.inbound_start_time;
+#endif
+}
+
+/*
+ * Classify an outbound completion error.  'cc' is the SIGA condition
+ * code (low two bits of siga_err), sbalf15 the flag byte of buffer
+ * element 15.  Returns one of the QETH_SEND_ERROR_* codes; KICK_IT
+ * makes the caller trigger a card recovery.
+ */
+static inline int
+qeth_handle_send_error(struct qeth_card *card,
+ struct qeth_qdio_out_buffer *buffer,
+ int qdio_err, int siga_err)
+{
+ int sbalf15 = buffer->buffer->element[15].flags & 0xff;
+ int cc = siga_err & 3;
+
+ QETH_DBF_TEXT(trace, 6, "hdsnderr");
+ switch (cc) {
+ case 0:
+ if (qdio_err){
+ QETH_DBF_TEXT(trace, 1,"lnkfail");
+ QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
+ QETH_DBF_TEXT_(trace,1,"%04x %02x",
+ (u16)qdio_err, (u8)sbalf15);
+ return QETH_SEND_ERROR_LINK_FAILURE;
+ }
+ return QETH_SEND_ERROR_NONE;
+ case 2:
+ if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
+ QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
+ QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
+ return QETH_SEND_ERROR_KICK_IT;
+ }
+ /* sbalf15 values 15..31 indicate a retryable condition */
+ if ((sbalf15 >= 15) && (sbalf15 <= 31))
+ return QETH_SEND_ERROR_RETRY;
+ return QETH_SEND_ERROR_LINK_FAILURE;
+ /* look at qdio_error and sbalf 15 */
+ case 1:
+ QETH_DBF_TEXT(trace, 1, "SIGAcc1");
+ QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
+ return QETH_SEND_ERROR_LINK_FAILURE;
+ case 3:
+ QETH_DBF_TEXT(trace, 1, "SIGAcc3");
+ QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
+ return QETH_SEND_ERROR_KICK_IT;
+ }
+ return QETH_SEND_ERROR_LINK_FAILURE;
+}
+
+/*
+ * Hand 'count' primed buffers starting at 'index' to the hardware via
+ * do_QDIO().  For non-IQD cards a PCI request flag (0x40 in element 0)
+ * may be set on a buffer to guarantee a future completion interrupt.
+ * A do_QDIO() failure is fatal here and schedules a card recovery.
+ */
+void
+qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
+ int index, int count)
+{
+ struct qeth_qdio_out_buffer *buf;
+ int rc;
+ int i;
+
+ QETH_DBF_TEXT(trace, 6, "flushbuf");
+
+ for (i = index; i < index + count; ++i) {
+ buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+ buf->buffer->element[buf->next_element_to_fill - 1].flags |=
+ SBAL_FLAGS_LAST_ENTRY;
+
+ /* HiperSockets (IQD) do not use PCI requests */
+ if (queue->card->info.type == QETH_CARD_TYPE_IQD)
+ continue;
+
+ if (!queue->do_pack){
+ if ((atomic_read(&queue->used_buffers) >=
+ (QETH_HIGH_WATERMARK_PACK -
+ QETH_WATERMARK_PACK_FUZZ)) &&
+ !atomic_read(&queue->set_pci_flags_count)){
+ /* it's likely that we'll go to packing
+ * mode soon */
+ atomic_inc(&queue->set_pci_flags_count);
+ buf->buffer->element[0].flags |= 0x40;
+ }
+ } else {
+ if (!atomic_read(&queue->set_pci_flags_count)){
+ /*
+ * there's no outstanding PCI any more, so we
+ * have to request a PCI to be sure that the
+ * PCI will wake at some time in the future;
+ * then we can flush packed buffers that might
+ * still be hanging around, which can happen if
+ * no further send was requested by the stack
+ */
+ atomic_inc(&queue->set_pci_flags_count);
+ buf->buffer->element[0].flags |= 0x40;
+ }
+ }
+ }
+
+ queue->card->dev->trans_start = jiffies;
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.outbound_do_qdio_cnt++;
+ queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
+#endif
+ if (under_int)
+ rc = do_QDIO(CARD_DDEV(queue->card),
+ QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
+ queue->queue_no, index, count, NULL);
+ else
+ rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
+ queue->queue_no, index, count, NULL);
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
+ queue->card->perf_stats.outbound_do_qdio_start_time;
+#endif
+ if (rc){
+ QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
+ "returned error (%i) on device %s.",
+ rc, CARD_DDEV_ID(queue->card));
+ QETH_DBF_TEXT(trace, 2, "flushbuf");
+ QETH_DBF_TEXT_(trace, 2, " err%d", rc);
+ queue->card->stats.tx_errors += count;
+ /* this must not happen under normal circumstances. if it
+ * happens something is really wrong -> recover */
+ qeth_schedule_recovery(queue->card);
+ return;
+ }
+ atomic_add(count, &queue->used_buffers);
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.bufs_sent += count;
+#endif
+}
+
+/*
+ * Switch to packing state if the number of used buffers on a queue
+ * reaches QETH_HIGH_WATERMARK_PACK.
+ */
+static inline void
+qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
+{
+ if (!queue->do_pack) {
+ if (atomic_read(&queue->used_buffers)
+ >= QETH_HIGH_WATERMARK_PACK){
+ /* switch non-PACKING -> PACKING */
+ QETH_DBF_TEXT(trace, 6, "np->pack");
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.sc_dp_p++;
+#endif
+ queue->do_pack = 1;
+ }
+ }
+}
+
+/*
+ * Switches from packing to non-packing mode. If there is a packing
+ * buffer on the queue this buffer will be prepared to be flushed.
+ * In that case 1 is returned to inform the caller. If no buffer
+ * has to be flushed, zero is returned.
+ */
+static inline int
+qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
+{
+ struct qeth_qdio_out_buffer *buffer;
+ int flush_count = 0;
+
+ if (queue->do_pack) {
+ if (atomic_read(&queue->used_buffers)
+ <= QETH_LOW_WATERMARK_PACK) {
+ /* switch PACKING -> non-PACKING */
+ QETH_DBF_TEXT(trace, 6, "pack->np");
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.sc_p_dp++;
+#endif
+ queue->do_pack = 0;
+ /* flush packing buffers */
+ buffer = &queue->bufs[queue->next_buf_to_fill];
+ /* a partially filled packing buffer: prime it; the
+ * caller must flush it via qeth_flush_buffers() */
+ if ((atomic_read(&buffer->state) ==
+ QETH_QDIO_BUF_EMPTY) &&
+ (buffer->next_element_to_fill > 0)) {
+ atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
+ flush_count++;
+ queue->next_buf_to_fill =
+ (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ }
+ }
+ }
+ return flush_count;
+}
+
+/*
+ * Prepare a half-filled packing buffer for flushing when no PCI
+ * request is outstanding on the queue.  Returns 1 if a buffer was
+ * primed (the caller must flush it), 0 otherwise.
+ */
+static inline int
+qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
+{
+ struct qeth_qdio_out_buffer *buf;
+
+ buf = &queue->bufs[queue->next_buf_to_fill];
+ if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY)
+ return 0;
+ if (buf->next_element_to_fill <= 0)
+ return 0;
+ /* a partially filled packing buffer -> mark it for flushing */
+ atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+ queue->next_buf_to_fill =
+ (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+ return 1;
+}
+
+/*
+ * Outbound-queue housekeeping after completions: leave packing mode
+ * and/or flush a lingering packing buffer when no PCI request is
+ * outstanding.  Only acts if it can grab the queue lock (atomic_swap
+ * on queue->state); otherwise do_send_packet is active and will do
+ * the work itself.
+ */
+static inline void
+qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
+{
+ int index;
+ int flush_cnt = 0;
+ int q_was_packing = 0;
+
+ /*
+ * check if we have to switch to non-packing mode or if
+ * we have to get a pci flag out on the queue
+ */
+ if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
+ !atomic_read(&queue->set_pci_flags_count)){
+ if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
+ QETH_OUT_Q_UNLOCKED) {
+ /*
+ * If we get in here, there was no action in
+ * do_send_packet. So, we check if there is a
+ * packing buffer to be flushed here.
+ */
+ netif_stop_queue(queue->card->dev);
+ index = queue->next_buf_to_fill;
+ q_was_packing = queue->do_pack;
+ flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
+ if (!flush_cnt &&
+ !atomic_read(&queue->set_pci_flags_count))
+ flush_cnt +=
+ qeth_flush_buffers_on_no_pci(queue);
+#ifdef CONFIG_QETH_PERF_STATS
+ if (q_was_packing)
+ queue->card->perf_stats.bufs_sent_pack +=
+ flush_cnt;
+#endif
+ if (flush_cnt)
+ qeth_flush_buffers(queue, 1, index, flush_cnt);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ }
+ }
+}
+
+/*
+ * QDIO outbound interrupt handler: classifies per-buffer send errors
+ * (recovering the card on KICK_IT), releases the completed buffers
+ * and restarts the tx queue.
+ */
+static void
+qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
+ unsigned int qdio_error, unsigned int siga_error,
+ unsigned int __queue, int first_element, int count,
+ unsigned long card_ptr)
+{
+ struct qeth_card *card = (struct qeth_card *) card_ptr;
+ struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
+ struct qeth_qdio_out_buffer *buffer;
+ int i;
+
+ QETH_DBF_TEXT(trace, 6, "qdouhdl");
+ if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
+ if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
+ QETH_DBF_SPRINTF(trace, 2, "On device %s: "
+ "received active check "
+ "condition (0x%08x).",
+ CARD_BUS_ID(card), status);
+ QETH_DBF_TEXT(trace, 2, "chkcond");
+ QETH_DBF_TEXT_(trace, 2, "%08x", status);
+ netif_stop_queue(card->dev);
+ qeth_schedule_recovery(card);
+ return;
+ }
+ }
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_handler_cnt++;
+ card->perf_stats.outbound_handler_start_time = qeth_get_micros();
+#endif
+ for(i = first_element; i < (first_element + count); ++i){
+ buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+ /*we only handle the KICK_IT error by doing a recovery */
+ if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
+ == QETH_SEND_ERROR_KICK_IT){
+ netif_stop_queue(card->dev);
+ qeth_schedule_recovery(card);
+ return;
+ }
+ qeth_clear_output_buffer(queue, buffer);
+ }
+ atomic_sub(count, &queue->used_buffers);
+ /* check if we need to do something on this outbound queue */
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ qeth_check_outbound_queue(queue);
+
+ netif_wake_queue(queue->card->dev);
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_handler_time += qeth_get_micros() -
+ card->perf_stats.outbound_handler_start_time;
+#endif
+}
+
+/*
+ * Fill bytes 0-15 of the QIB parameter field: the tag "PCIT" (in
+ * EBCDIC) followed by the two PCI thresholds and the PCI timer value.
+ */
+static void
+qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
+{
+ static const char tag[4] = { 'P', 'C', 'I', 'T' };
+ int i;
+
+ for (i = 0; i < 4; i++)
+ param_field[i] = _ascebc[(int) tag[i]];
+ *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
+ *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
+ *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
+}
+
+/*
+ * Fill bytes 16-31 of the QIB parameter field: the tag "BLKT" (in
+ * EBCDIC) followed by the three blocking-threshold (blkt) values.
+ */
+static void
+qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
+{
+ static const char tag[4] = { 'B', 'L', 'K', 'T' };
+ int i;
+
+ for (i = 0; i < 4; i++)
+ param_field[16 + i] = _ascebc[(int) tag[i]];
+ *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
+ *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
+ *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
+}
+
+/*
+ * Populate the working pool (in_buf_pool) with every entry of the
+ * init pool.
+ */
+static void
+qeth_initialize_working_pool_list(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *e;
+
+ QETH_DBF_TEXT(trace,5,"inwrklst");
+
+ list_for_each_entry(e, &card->qdio.init_pool.entry_list, init_list)
+ qeth_put_buffer_pool_entry(card, e);
+}
+
+/*
+ * Unlink every entry from the working pool list.  The entries remain
+ * owned by the init pool and are not freed here.
+ */
+static void
+qeth_clear_working_pool_list(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *e, *n;
+
+ QETH_DBF_TEXT(trace,5,"clwrklst");
+ list_for_each_entry_safe(e, n,
+ &card->qdio.in_buf_pool.entry_list, list)
+ list_del(&e->list);
+}
+
+/*
+ * Free the init pool: every page of every entry, then the entries
+ * themselves.  Counterpart of qeth_alloc_buffer_pool().
+ */
+static void
+qeth_free_buffer_pool(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *pool_entry, *tmp;
+ int i=0;
+ QETH_DBF_TEXT(trace,5,"freepool");
+ list_for_each_entry_safe(pool_entry, tmp,
+ &card->qdio.init_pool.entry_list, init_list){
+ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
+ free_page((unsigned long)pool_entry->elements[i]);
+ list_del(&pool_entry->init_list);
+ kfree(pool_entry);
+ }
+}
+
+/*
+ * Pre-allocate init_pool.buf_count pool entries, each holding one
+ * page per buffer element.  On any allocation failure everything
+ * allocated so far (including earlier complete entries) is released
+ * and -ENOMEM is returned.
+ */
+static int
+qeth_alloc_buffer_pool(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *pool_entry;
+ void *ptr;
+ int i, j;
+
+ QETH_DBF_TEXT(trace,5,"alocpool");
+ for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
+ pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
+ if (!pool_entry){
+ qeth_free_buffer_pool(card);
+ return -ENOMEM;
+ }
+ for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
+ ptr = (void *) __get_free_page(GFP_KERNEL);
+ if (!ptr) {
+ /* free the pages of this half-built entry,
+ * then everything already on the list */
+ while (j > 0)
+ free_page((unsigned long)
+ pool_entry->elements[--j]);
+ kfree(pool_entry);
+ qeth_free_buffer_pool(card);
+ return -ENOMEM;
+ }
+ pool_entry->elements[j] = ptr;
+ }
+ list_add(&pool_entry->init_list,
+ &card->qdio.init_pool.entry_list);
+ }
+ return 0;
+}
+
+/*
+ * Resize the inbound buffer pool to 'bufcnt' entries.  Only permitted
+ * while the card is DOWN or in RECOVER state; returns -EPERM
+ * otherwise.
+ */
+int
+qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+{
+ QETH_DBF_TEXT(trace, 2, "realcbp");
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
+ qeth_clear_working_pool_list(card);
+ qeth_free_buffer_pool(card);
+ card->qdio.in_buf_pool.buf_count = bufcnt;
+ card->qdio.init_pool.buf_count = bufcnt;
+ return qeth_alloc_buffer_pool(card);
+}
+
+/*
+ * Allocate the inbound queue, the inbound buffer pool and all
+ * outbound queues for a card.  On failure everything allocated so far
+ * is released again and -ENOMEM is returned.
+ *
+ * Fix: the original code leaked the inbound queue when the out_qs
+ * array allocation failed, and leaked both the inbound queue and the
+ * buffer pool when allocation of an individual outbound queue failed.
+ */
+static int
+qeth_alloc_qdio_buffers(struct qeth_card *card)
+{
+ int i, j;
+
+ QETH_DBF_TEXT(setup, 2, "allcqdbf");
+
+ if (card->qdio.state == QETH_QDIO_ALLOCATED)
+ return 0;
+
+ card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
+ if (!card->qdio.in_q)
+ return - ENOMEM;
+ QETH_DBF_TEXT(setup, 2, "inq");
+ QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
+ memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
+ /* give inbound qeth_qdio_buffers their qdio_buffers */
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+ card->qdio.in_q->bufs[i].buffer =
+ &card->qdio.in_q->qdio_bufs[i];
+ /* inbound buffer pool */
+ if (qeth_alloc_buffer_pool(card)){
+ kfree(card->qdio.in_q);
+ return -ENOMEM;
+ }
+ /* outbound */
+ card->qdio.out_qs =
+ kmalloc(card->qdio.no_out_queues *
+ sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
+ if (!card->qdio.out_qs){
+ qeth_free_buffer_pool(card);
+ kfree(card->qdio.in_q); /* was leaked before */
+ return -ENOMEM;
+ }
+ for (i = 0; i < card->qdio.no_out_queues; ++i){
+ card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
+ GFP_KERNEL);
+ if (!card->qdio.out_qs[i]){
+ while (i > 0)
+ kfree(card->qdio.out_qs[--i]);
+ kfree(card->qdio.out_qs);
+ qeth_free_buffer_pool(card); /* was leaked before */
+ kfree(card->qdio.in_q); /* was leaked before */
+ return -ENOMEM;
+ }
+ QETH_DBF_TEXT_(setup, 2, "outq %i", i);
+ QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
+ memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
+ card->qdio.out_qs[i]->queue_no = i;
+ /* give outbound qeth_qdio_buffers their qdio_buffers */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
+ card->qdio.out_qs[i]->bufs[j].buffer =
+ &card->qdio.out_qs[i]->qdio_bufs[j];
+ skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
+ skb_list);
+ INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
+ }
+ }
+ card->qdio.state = QETH_QDIO_ALLOCATED;
+ return 0;
+}
+
+/*
+ * Counterpart of qeth_alloc_qdio_buffers(): releases the inbound
+ * queue, the buffer pool and all outbound queues (including any skbs
+ * still held in outbound buffers).  No-op when nothing was allocated.
+ */
+static void
+qeth_free_qdio_buffers(struct qeth_card *card)
+{
+ int i, j;
+
+ QETH_DBF_TEXT(trace, 2, "freeqdbf");
+ if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
+ return;
+ kfree(card->qdio.in_q);
+ /* inbound buffer pool */
+ qeth_free_buffer_pool(card);
+ /* free outbound qdio_qs */
+ for (i = 0; i < card->qdio.no_out_queues; ++i){
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+ qeth_clear_output_buffer(card->qdio.out_qs[i],
+ &card->qdio.out_qs[i]->bufs[j]);
+ kfree(card->qdio.out_qs[i]);
+ }
+ kfree(card->qdio.out_qs);
+ card->qdio.state = QETH_QDIO_UNINITIALIZED;
+}
+
+/*
+ * Drop all skbs still held in outbound buffers without freeing the
+ * queue structures themselves.
+ */
+static void
+qeth_clear_qdio_buffers(struct qeth_card *card)
+{
+ int i, j;
+
+ QETH_DBF_TEXT(trace, 2, "clearqdbf");
+ /* clear outbound buffers to free skbs */
+ for (i = 0; i < card->qdio.no_out_queues; ++i)
+ if (card->qdio.out_qs[i]){
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+ qeth_clear_output_buffer(card->qdio.out_qs[i],
+ &card->qdio.out_qs[i]->bufs[j]);
+ }
+}
+
+/*
+ * Set the QDIO bookkeeping of a card to compile-time defaults
+ * (buffer sizes/counts, priority queueing, default queue) and empty
+ * pool lists.
+ */
+static void
+qeth_init_qdio_info(struct qeth_card *card)
+{
+ QETH_DBF_TEXT(setup, 4, "intqdinf");
+ card->qdio.state = QETH_QDIO_UNINITIALIZED;
+ /* inbound */
+ card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+ card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
+ card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
+ INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
+ INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
+ /* outbound */
+ card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+}
+
+/*
+ * (Re)initialize all QDIO queues: hand buf_count-1 inbound buffers to
+ * the hardware (one buffer is kept back, see the requeue logic in
+ * qeth_queue_input_buffer) and reset every outbound queue to
+ * empty/unpacked/unlocked.
+ */
+static int
+qeth_init_qdio_queues(struct qeth_card *card)
+{
+ int i, j;
+ int rc;
+
+ QETH_DBF_TEXT(setup, 2, "initqdqs");
+
+ /* inbound queue */
+ memset(card->qdio.in_q->qdio_bufs, 0,
+ QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+ qeth_initialize_working_pool_list(card);
+ /*give only as many buffers to hardware as we have buffer pool entries*/
+ for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
+ qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+ card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
+ card->qdio.in_buf_pool.buf_count - 1, NULL);
+ if (rc) {
+ QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
+ return rc;
+ }
+ rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
+ if (rc) {
+ QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
+ return rc;
+ }
+ /* outbound queue */
+ for (i = 0; i < card->qdio.no_out_queues; ++i){
+ memset(card->qdio.out_qs[i]->qdio_bufs, 0,
+ QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
+ qeth_clear_output_buffer(card->qdio.out_qs[i],
+ &card->qdio.out_qs[i]->bufs[j]);
+ }
+ card->qdio.out_qs[i]->card = card;
+ card->qdio.out_qs[i]->next_buf_to_fill = 0;
+ card->qdio.out_qs[i]->do_pack = 0;
+ atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
+ atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
+ atomic_set(&card->qdio.out_qs[i]->state,
+ QETH_OUT_Q_UNLOCKED);
+ }
+ return 0;
+}
+
+/*
+ * Build the qdio_initialize descriptor -- QIB parameter field plus
+ * SBAL address arrays for the inbound queue and all outbound queues --
+ * and call qdio_initialize().  The temporary arrays are freed again in
+ * all cases; on success the card advances to QETH_QDIO_ESTABLISHED.
+ */
+static int
+qeth_qdio_establish(struct qeth_card *card)
+{
+ struct qdio_initialize init_data;
+ char *qib_param_field;
+ struct qdio_buffer **in_sbal_ptrs;
+ struct qdio_buffer **out_sbal_ptrs;
+ int i, j, k;
+ int rc;
+
+ QETH_DBF_TEXT(setup, 2, "qdioest");
+
+ qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
+ GFP_KERNEL);
+ if (!qib_param_field)
+ return -ENOMEM;
+
+ memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
+
+ qeth_create_qib_param_field(card, qib_param_field);
+ qeth_create_qib_param_field_blkt(card, qib_param_field);
+
+ in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
+ GFP_KERNEL);
+ if (!in_sbal_ptrs) {
+ kfree(qib_param_field);
+ return -ENOMEM;
+ }
+ /* the qdio layer expects absolute (physical) SBAL addresses */
+ for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+ in_sbal_ptrs[i] = (struct qdio_buffer *)
+ virt_to_phys(card->qdio.in_q->bufs[i].buffer);
+
+ out_sbal_ptrs =
+ kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
+ sizeof(void *), GFP_KERNEL);
+ if (!out_sbal_ptrs) {
+ kfree(in_sbal_ptrs);
+ kfree(qib_param_field);
+ return -ENOMEM;
+ }
+ for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
+ for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
+ out_sbal_ptrs[k] = (struct qdio_buffer *)
+ virt_to_phys(card->qdio.out_qs[i]->
+ bufs[j].buffer);
+ }
+
+ memset(&init_data, 0, sizeof(struct qdio_initialize));
+ init_data.cdev = CARD_DDEV(card);
+ init_data.q_format = qeth_get_qdio_q_format(card);
+ init_data.qib_param_field_format = 0;
+ init_data.qib_param_field = qib_param_field;
+ init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
+ init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
+ init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
+ init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
+ init_data.no_input_qs = 1;
+ init_data.no_output_qs = card->qdio.no_out_queues;
+ init_data.input_handler = (qdio_handler_t *)
+ qeth_qdio_input_handler;
+ init_data.output_handler = (qdio_handler_t *)
+ qeth_qdio_output_handler;
+ init_data.int_parm = (unsigned long) card;
+ init_data.flags = QDIO_INBOUND_0COPY_SBALS |
+ QDIO_OUTBOUND_0COPY_SBALS |
+ QDIO_USE_OUTBOUND_PCIS;
+ init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
+ init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+
+ if (!(rc = qdio_initialize(&init_data)))
+ card->qdio.state = QETH_QDIO_ESTABLISHED;
+
+ kfree(out_sbal_ptrs);
+ kfree(in_sbal_ptrs);
+ kfree(qib_param_field);
+ return rc;
+}
+
+/* Activate the previously established QDIO queues of the card. */
+static int
+qeth_qdio_activate(struct qeth_card *card)
+{
+ QETH_DBF_TEXT(setup,3,"qdioact");
+ return qdio_activate(CARD_DDEV(card), 0);
+}
+
+/*
+ * Issue a clear on the channel's ccw device and wait up to
+ * QETH_TIMEOUT for the interrupt handler to move it to
+ * CH_STATE_STOPPED; on success the channel ends up in CH_STATE_DOWN.
+ * Returns -ETIME on timeout, -ERESTARTSYS if interrupted.
+ */
+static int
+qeth_clear_channel(struct qeth_channel *channel)
+{
+ unsigned long flags;
+ struct qeth_card *card;
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"clearch");
+ card = CARD_FROM_CDEV(channel->ccwdev);
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+ if (rc)
+ return rc;
+ rc = wait_event_interruptible_timeout(card->wait_q,
+ channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
+ if (rc == -ERESTARTSYS)
+ return rc;
+ if (channel->state != CH_STATE_STOPPED)
+ return -ETIME;
+ channel->state = CH_STATE_DOWN;
+ return 0;
+}
+
+/*
+ * Issue a halt on the channel's ccw device and wait up to
+ * QETH_TIMEOUT for the interrupt handler to move it to
+ * CH_STATE_HALTED.  Returns -ETIME on timeout, -ERESTARTSYS if
+ * interrupted.
+ */
+static int
+qeth_halt_channel(struct qeth_channel *channel)
+{
+ unsigned long flags;
+ struct qeth_card *card;
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"haltch");
+ card = CARD_FROM_CDEV(channel->ccwdev);
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+ if (rc)
+ return rc;
+ rc = wait_event_interruptible_timeout(card->wait_q,
+ channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
+ if (rc == -ERESTARTSYS)
+ return rc;
+ if (channel->state != CH_STATE_HALTED)
+ return -ETIME;
+ return 0;
+}
+
+/*
+ * Halt the read, write and data channel in turn; stops at (and
+ * returns) the first error.
+ */
+static int
+qeth_halt_channels(struct qeth_card *card)
+{
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"haltchs");
+ rc = qeth_halt_channel(&card->read);
+ if (rc)
+ return rc;
+ rc = qeth_halt_channel(&card->write);
+ if (rc)
+ return rc;
+ return qeth_halt_channel(&card->data);
+}
+/*
+ * Clear the read, write and data channel in turn; stops at (and
+ * returns) the first error.
+ */
+static int
+qeth_clear_channels(struct qeth_card *card)
+{
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"clearchs");
+ rc = qeth_clear_channel(&card->read);
+ if (rc)
+ return rc;
+ rc = qeth_clear_channel(&card->write);
+ if (rc)
+ return rc;
+ return qeth_clear_channel(&card->data);
+}
+
+/*
+ * Optionally halt all channels of the card, then clear them.  A halt
+ * failure is returned immediately without attempting the clear.
+ */
+static int
+qeth_clear_halt_card(struct qeth_card *card, int halt)
+{
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"clhacrd");
+ QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
+
+ if (halt) {
+ rc = qeth_halt_channels(card);
+ if (rc)
+ return rc;
+ }
+ return qeth_clear_channels(card);
+}
+
+/*
+ * Shut down the QDIO queues (if established) and clear -- optionally
+ * halt-then-clear -- all three channels.  Leaves the card in
+ * CARD_STATE_DOWN; errors are logged but the shutdown continues.
+ */
+static int
+qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
+{
+ int rc = 0;
+
+ QETH_DBF_TEXT(trace,3,"qdioclr");
+ if (card->qdio.state == QETH_QDIO_ESTABLISHED){
+ if ((rc = qdio_cleanup(CARD_DDEV(card),
+ (card->info.type == QETH_CARD_TYPE_IQD) ?
+ QDIO_FLAG_CLEANUP_USING_HALT :
+ QDIO_FLAG_CLEANUP_USING_CLEAR)))
+ QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
+ card->qdio.state = QETH_QDIO_ALLOCATED;
+ }
+ if ((rc = qeth_clear_halt_card(card, use_halt)))
+ QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
+ card->state = CARD_STATE_DOWN;
+ return rc;
+}
+
+/*
+ * Send the DM_ACT control command, carrying the CM and ULP connection
+ * tokens negotiated earlier, to activate data transfer on the card.
+ */
+static int
+qeth_dm_act(struct qeth_card *card)
+{
+ int rc;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_DBF_TEXT(setup,2,"dmact");
+
+ iob = qeth_wait_for_buffer(&card->write);
+ memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
+
+ memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
+ &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+ memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
+ &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+ rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
+ return rc;
+}
+
+/*
+ * Run the MPC initialization sequence once the channels are up:
+ * issue the first read, CM enable/setup, ULP enable/setup, then
+ * allocate and establish the QDIO queues, activate them and send
+ * DM_ACT.  On failures in the QDIO stages the card is cleared again.
+ *
+ * Fix: the debug tag "5err" was used for both the qeth_ulp_setup and
+ * the qeth_alloc_qdio_buffers failure, making traces ambiguous; the
+ * tags are now unique and the later ones renumbered accordingly.
+ */
+static int
+qeth_mpc_initialize(struct qeth_card *card)
+{
+ int rc;
+
+ QETH_DBF_TEXT(setup,2,"mpcinit");
+
+ if ((rc = qeth_issue_next_read(card))){
+ QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
+ return rc;
+ }
+ if ((rc = qeth_cm_enable(card))){
+ QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
+ return rc;
+ }
+ if ((rc = qeth_cm_setup(card))){
+ QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+ return rc;
+ }
+ if ((rc = qeth_ulp_enable(card))){
+ QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
+ return rc;
+ }
+ if ((rc = qeth_ulp_setup(card))){
+ QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
+ return rc;
+ }
+ if ((rc = qeth_alloc_qdio_buffers(card))){
+ QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
+ return rc;
+ }
+ if ((rc = qeth_qdio_establish(card))){
+ QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
+ qeth_free_qdio_buffers(card);
+ goto out_qdio;
+ }
+ if ((rc = qeth_qdio_activate(card))){
+ QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
+ goto out_qdio;
+ }
+ if ((rc = qeth_dm_act(card))){
+ QETH_DBF_TEXT_(setup, 2, "9err%d", rc);
+ goto out_qdio;
+ }
+
+ return 0;
+out_qdio:
+ /* OSA-E cards are halted before clearing */
+ qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE);
+ return rc;
+}
+
+/*
+ * Allocate a net_device matching the card and link type: a token-ring
+ * device for TR/HSTR links (only with CONFIG_TR), an "hsi%d" device
+ * for HiperSockets (IQD), and an ethernet device otherwise.  Returns
+ * NULL on allocation failure.
+ */
+static struct net_device *
+qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
+{
+ struct net_device *dev = NULL;
+
+ switch (type) {
+ case QETH_CARD_TYPE_OSAE:
+ switch (linktype) {
+ case QETH_LINK_TYPE_LANE_TR:
+ case QETH_LINK_TYPE_HSTR:
+#ifdef CONFIG_TR
+ dev = alloc_trdev(0);
+#endif /* CONFIG_TR */
+ break;
+ default:
+ dev = alloc_etherdev(0);
+ }
+ break;
+ case QETH_CARD_TYPE_IQD:
+ dev = alloc_netdev(0, "hsi%d", ether_setup);
+ break;
+ default:
+ dev = alloc_etherdev(0);
+ }
+ return dev;
+}
+
+/*
+ * hard_header fake function; used in case fake_ll is set.
+ * Pushes a dummy ethernet header ("FAKELL" as destination, the
+ * device's own address as source) so that link-layer consumers such
+ * as packet sniffers see a complete frame.  For ETH_P_802_3 the
+ * length is stored in h_proto, per 802.3 framing.  Returns the
+ * number of bytes pushed.
+ */
+static int
+qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len)
+{
+ struct ethhdr *hdr;
+
+ hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN);
+ memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
+ if (type != ETH_P_802_3)
+ hdr->h_proto = htons(type);
+ else
+ hdr->h_proto = htons(len);
+ return QETH_FAKE_LL_LEN;
+}
+
+static inline int
+qeth_send_packet(struct qeth_card *, struct sk_buff *);
+
+/*
+ * net_device hard_start_xmit callback.  Drops the packet -- but still
+ * returns NETDEV_TX_OK so ksoftirqd does not spin at 100% -- when the
+ * card is not UP or the LAN is offline; otherwise hands it to
+ * qeth_send_packet().  -EBUSY from qeth_send_packet leaves the tx
+ * queue stopped and returns NETDEV_TX_BUSY so the stack requeues the
+ * skb.
+ */
+static int
+qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int rc;
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(trace, 6, "hrdstxmi");
+ card = (struct qeth_card *)dev->priv;
+ if (skb==NULL) {
+ card->stats.tx_dropped++;
+ card->stats.tx_errors++;
+ /* return OK; otherwise ksoftirqd goes to 100% */
+ return NETDEV_TX_OK;
+ }
+ if ((card->state != CARD_STATE_UP) || !card->lan_online) {
+ card->stats.tx_dropped++;
+ card->stats.tx_errors++;
+ card->stats.tx_carrier_errors++;
+ dev_kfree_skb_any(skb);
+ /* return OK; otherwise ksoftirqd goes to 100% */
+ return NETDEV_TX_OK;
+ }
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_cnt++;
+ card->perf_stats.outbound_start_time = qeth_get_micros();
+#endif
+ netif_stop_queue(dev);
+ if ((rc = qeth_send_packet(card, skb))) {
+ if (rc == -EBUSY) {
+ /* queue stays stopped; completion handler wakes it */
+ return NETDEV_TX_BUSY;
+ } else {
+ card->stats.tx_errors++;
+ card->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ /*set to OK; otherwise ksoftirqd goes to 100% */
+ rc = NETDEV_TX_OK;
+ }
+ }
+ netif_wake_queue(dev);
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_time += qeth_get_micros() -
+ card->perf_stats.outbound_start_time;
+#endif
+ return rc;
+}
+
+/*
+ * Check whether 'dev' is one of the VLAN devices registered on this
+ * card's vlan group.  Returns QETH_VLAN_CARD if so, 0 otherwise
+ * (always 0 without CONFIG_QETH_VLAN).
+ */
+static int
+qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
+{
+ int rc = 0;
+#ifdef CONFIG_QETH_VLAN
+ struct vlan_group *vg = card->vlangrp;
+ int i;
+
+ if (vg == NULL)
+ return 0;
+
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ if (vg->vlan_devices[i] != dev)
+ continue;
+ rc = QETH_VLAN_CARD;
+ break;
+ }
+#endif
+ return rc;
+}
+
+/*
+ * Classify a net_device against the global qeth card list: returns
+ * QETH_REAL_CARD if it is a qeth device itself, QETH_VLAN_CARD if it
+ * is a VLAN device on top of one, 0 if unrelated.
+ */
+static int
+qeth_verify_dev(struct net_device *dev)
+{
+ struct qeth_card *card;
+ unsigned long flags;
+ int rc = 0;
+
+ read_lock_irqsave(&qeth_card_list.rwlock, flags);
+ list_for_each_entry(card, &qeth_card_list.list, list){
+ if (card->dev == dev){
+ rc = QETH_REAL_CARD;
+ break;
+ }
+ rc = qeth_verify_vlan_dev(dev, card);
+ if (rc)
+ break;
+ }
+ read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
+
+ return rc;
+}
+
+/*
+ * Map a net_device (real qeth device or VLAN device on top of one)
+ * back to its qeth_card.  Returns NULL for unrelated devices.
+ */
+static struct qeth_card *
+qeth_get_card_from_dev(struct net_device *dev)
+{
+ struct qeth_card *card = NULL;
+ int rc;
+
+ rc = qeth_verify_dev(dev);
+ if (rc == QETH_REAL_CARD)
+ card = (struct qeth_card *)dev->priv;
+ else if (rc == QETH_VLAN_CARD)
+ card = (struct qeth_card *)
+ VLAN_DEV_INFO(dev)->real_dev->priv;
+
+ QETH_DBF_TEXT_(trace, 4, "%d", rc);
+ return card ;
+}
+
+static void
+qeth_tx_timeout(struct net_device *dev)
+{
+ struct qeth_card *card;
+
+ card = (struct qeth_card *) dev->priv;
+ card->stats.tx_errors++;
+ qeth_schedule_recovery(card);
+}
+
/*
 * net_device open callback: bring the interface up.  Only permitted
 * once the card has reached SOFTSETUP state; layer2 mode additionally
 * requires that a MAC address has been registered.
 */
static int
qeth_open(struct net_device *dev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 4, "qethopen");

	card = (struct qeth_card *) dev->priv;

	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	/* layer2 cannot transmit before a MAC address is registered */
	if ( (card->options.layer2) &&
	     (!card->info.layer2_mac_registered)) {
		QETH_DBF_TEXT(trace,4,"nomacadr");
		return -EPERM;
	}
	card->dev->flags |= IFF_UP;
	netif_start_queue(dev);
	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;

	/* reflect missing LAN connectivity in the carrier state */
	if (!card->lan_online){
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
	return 0;
}
+
+static int
+qeth_stop(struct net_device *dev)
+{
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(trace, 4, "qethstop");
+
+ card = (struct qeth_card *) dev->priv;
+
+ netif_stop_queue(dev);
+ card->dev->flags &= ~IFF_UP;
+ if (card->state == CARD_STATE_UP)
+ card->state = CARD_STATE_SOFTSETUP;
+ return 0;
+}
+
+static inline int
+qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+{
+ int cast_type = RTN_UNSPEC;
+
+ if (skb->dst && skb->dst->neighbour){
+ cast_type = skb->dst->neighbour->type;
+ if ((cast_type == RTN_BROADCAST) ||
+ (cast_type == RTN_MULTICAST) ||
+ (cast_type == RTN_ANYCAST))
+ return cast_type;
+ else
+ return RTN_UNSPEC;
+ }
+ /* try something else */
+ if (skb->protocol == ETH_P_IPV6)
+ return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
+ else if (skb->protocol == ETH_P_IP)
+ return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
+ /* ... */
+ if (!memcmp(skb->data, skb->dev->broadcast, 6))
+ return RTN_BROADCAST;
+ else {
+ u16 hdr_mac;
+
+ hdr_mac = *((u16 *)skb->data);
+ /* tr multicast? */
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_HSTR:
+ case QETH_LINK_TYPE_LANE_TR:
+ if ((hdr_mac == QETH_TR_MAC_NC) ||
+ (hdr_mac == QETH_TR_MAC_C))
+ return RTN_MULTICAST;
+ /* eth or so multicast? */
+ default:
+ if ((hdr_mac == QETH_ETH_MAC_V4) ||
+ (hdr_mac == QETH_ETH_MAC_V6))
+ return RTN_MULTICAST;
+ }
+ }
+ return cast_type;
+}
+
/*
 * Select the outbound queue index for a packet.  With four output
 * queues, multicast/broadcast may be directed to a dedicated queue and
 * IPv4 packets can be prioritized by TOS bits or IP precedence;
 * everything else uses the card's default output queue.
 */
static inline int
qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	/* non-IP traffic on OSA Express always uses the default queue */
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
	case 4:
		/* pin multicast/broadcast to a configured queue */
		if (cast_type && card->info.is_multicast_different)
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			/* TOS queueing: map TOS bits to queue 3 .. 0 */
			if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
				if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
					return 0;
			}
			/* precedence queueing: higher precedence (top two
			 * TOS bits) selects a lower queue number */
			if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
				return 3 - (skb->nh.iph->tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
			/* TODO: IPv6!!! */
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
+
+static inline int
+qeth_get_ip_version(struct sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case ETH_P_IPV6:
+ return 6;
+ case ETH_P_IP:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline int
+qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
+ struct qeth_hdr **hdr, int ipv)
+{
+ int rc = 0;
+#ifdef CONFIG_QETH_VLAN
+ u16 *tag;
+#endif
+
+ QETH_DBF_TEXT(trace, 6, "prepskb");
+
+ rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
+ if (rc)
+ return rc;
+#ifdef CONFIG_QETH_VLAN
+ if (card->vlangrp && vlan_tx_tag_present(*skb) &&
+ ((ipv == 6) || card->options.layer2) ) {
+ /*
+ * Move the mac addresses (6 bytes src, 6 bytes dest)
+ * to the beginning of the new header. We are using three
+ * memcpys instead of one memmove to save cycles.
+ */
+ skb_push(*skb, VLAN_HLEN);
+ memcpy((*skb)->data, (*skb)->data + 4, 4);
+ memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
+ memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
+ tag = (u16 *)((*skb)->data + 12);
+ /*
+ * first two bytes = ETH_P_8021Q (0x8100)
+ * second two bytes = VLANID
+ */
+ *tag = __constant_htons(ETH_P_8021Q);
+ *(tag + 1) = htons(vlan_tx_tag_get(*skb));
+ }
+#endif
+ *hdr = (struct qeth_hdr *)
+ qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
+ if (hdr == NULL)
+ return -EINVAL;
+ return 0;
+}
+
+static inline u8
+qeth_get_qeth_hdr_flags4(int cast_type)
+{
+ if (cast_type == RTN_MULTICAST)
+ return QETH_CAST_MULTICAST;
+ if (cast_type == RTN_BROADCAST)
+ return QETH_CAST_BROADCAST;
+ return QETH_CAST_UNICAST;
+}
+
+static inline u8
+qeth_get_qeth_hdr_flags6(int cast_type)
+{
+ u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
+ if (cast_type == RTN_MULTICAST)
+ return ct | QETH_CAST_MULTICAST;
+ if (cast_type == RTN_ANYCAST)
+ return ct | QETH_CAST_ANYCAST;
+ if (cast_type == RTN_BROADCAST)
+ return ct | QETH_CAST_BROADCAST;
+ return ct | QETH_CAST_UNICAST;
+}
+
/*
 * Classify a layer2 frame by its destination address and set the
 * corresponding cast flag in the qeth header.  The flag is placed in
 * byte 3 of the 4-byte flags area, hence the "<< 8" on the 32-bit
 * access.
 */
static inline void
qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
			struct sk_buff *skb)
{
	__u16 hdr_mac;

	/* destination MAC starts after the qeth header */
	if (!memcmp(skb->data+QETH_HEADER_SIZE,
		    skb->dev->broadcast,6)) { /* broadcast? */
		*(__u32 *)hdr->hdr.l2.flags |=
			 QETH_LAYER2_FLAG_BROADCAST << 8;
		return;
	}
	/* compare the leading 16 bits of the destination address
	 * against the known multicast patterns */
	hdr_mac=*((__u16*)skb->data);
	/* tr multicast? */
	switch (card->info.link_type) {
	case QETH_LINK_TYPE_HSTR:
	case QETH_LINK_TYPE_LANE_TR:
		if ((hdr_mac == QETH_TR_MAC_NC) ||
		    (hdr_mac == QETH_TR_MAC_C) )
			*(__u32 *)hdr->hdr.l2.flags |=
				QETH_LAYER2_FLAG_MULTICAST << 8;
		else
			*(__u32 *)hdr->hdr.l2.flags |=
				QETH_LAYER2_FLAG_UNICAST << 8;
		break;
	/* eth or so multicast? */
	default:
		if ( (hdr_mac==QETH_ETH_MAC_V4) ||
		     (hdr_mac==QETH_ETH_MAC_V6) )
			*(__u32 *)hdr->hdr.l2.flags |=
				QETH_LAYER2_FLAG_MULTICAST << 8;
		else
			*(__u32 *)hdr->hdr.l2.flags |=
				QETH_LAYER2_FLAG_UNICAST << 8;
	}
}
+
/*
 * Build the layer2 qeth header for an outgoing frame: header type,
 * cast flags, payload length and - if present - the VLAN tag.
 */
static inline void
qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
			struct sk_buff *skb, int cast_type)
{
	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;

	/* set byte 0 to "0x02" and byte 3 to casting flags */
	if (cast_type==RTN_MULTICAST)
		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
	else if (cast_type==RTN_BROADCAST)
		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
	else
		/* cast type undetermined: classify by destination MAC */
		qeth_layer2_get_packet_type(card, hdr, skb);

	hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
#ifdef CONFIG_QETH_VLAN
	/* VSWITCH relies on the VLAN
	 * information to be present in
	 * the QDIO header */
	if ((card->vlangrp != NULL) &&
	    vlan_tx_tag_present(skb)) {
		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
		hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
	}
#endif
}
+
/*
 * Build the qeth header for an outgoing packet.  Dispatches to the
 * layer2 variant when the card runs in layer2 mode; otherwise fills
 * the layer3 header including cast flags, VLAN information and the
 * next-hop/destination address for IPv4, IPv6 or passthrough frames.
 */
void
qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
		struct sk_buff *skb, int ipv, int cast_type)
{
	QETH_DBF_TEXT(trace, 6, "fillhdr");

	memset(hdr, 0, sizeof(struct qeth_hdr));
	if (card->options.layer2) {
		qeth_layer2_fill_header(card, hdr, skb, cast_type);
		return;
	}
	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
	hdr->hdr.l3.ext_flags = 0;
#ifdef CONFIG_QETH_VLAN
	/*
	 * before we're going to overwrite this location with next hop ip.
	 * v6 uses passthrough, v4 sets the tag in the QDIO header.
	 */
	if (card->vlangrp && vlan_tx_tag_present(skb)) {
		hdr->hdr.l3.ext_flags = (ipv == 4) ?
			QETH_HDR_EXT_VLAN_FRAME :
			QETH_HDR_EXT_INCLUDE_VLAN_TAG;
		hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
	}
#endif /* CONFIG_QETH_VLAN */
	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
	if (ipv == 4) { /* IPv4 */
		hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
		memset(hdr->hdr.l3.dest_addr, 0, 12);
		/* IPv4 address goes into the last 4 of the 16 dest bytes;
		 * prefer the neighbour's next-hop address */
		if ((skb->dst) && (skb->dst->neighbour)) {
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
				*((u32 *) skb->dst->neighbour->primary_key);
		} else {
			/* fill in destination address used in ip header */
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr;
		}
	} else if (ipv == 6) { /* IPv6 or passthru */
		hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
		if ((skb->dst) && (skb->dst->neighbour)) {
			memcpy(hdr->hdr.l3.dest_addr,
			       skb->dst->neighbour->primary_key, 16);
		} else {
			/* fill in destination address used in ip header */
			memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16);
		}
	} else { /* passthrough */
		if (!memcmp(skb->data + sizeof(struct qeth_hdr),
			    skb->dev->broadcast, 6)) {   /* broadcast? */
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST | QETH_HDR_PASSTHRU;
		} else {
			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
		}
	}
}
+
/*
 * Fill SBAL elements from a fragmented (nonlinear) skb.  The elements
 * are filled back to front: starting at the slot for the last page
 * fragment and ending with the linear skb->data part, so that the
 * FIRST/MIDDLE/LAST fragment flags can be assigned while walking.
 * Advances *next_element_to_fill by nr_frags + 1.
 */
static inline void
__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
			int *next_element_to_fill)
{
	int length = skb->len;
	struct skb_frag_struct *frag;
	int fragno;
	unsigned long addr;
	int element;
	int first_lap = 1;

	fragno = skb_shinfo(skb)->nr_frags; /* start with last frag */
	element = *next_element_to_fill + fragno;
	while (length > 0) {
		if (fragno > 0) {
			frag = &skb_shinfo(skb)->frags[fragno - 1];
			/* physical address of the fragment's data */
			addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
				frag->page_offset;
			buffer->element[element].addr = (char *)addr;
			buffer->element[element].length = frag->size;
			length -= frag->size;
			/* first_lap == walking the last fragment */
			if (first_lap)
				buffer->element[element].flags =
				    SBAL_FLAGS_LAST_FRAG;
			else
				buffer->element[element].flags =
				    SBAL_FLAGS_MIDDLE_FRAG;
		} else {
			/* remaining length is the linear part (skb->data) */
			buffer->element[element].addr = skb->data;
			buffer->element[element].length = length;
			length = 0;
			buffer->element[element].flags =
				SBAL_FLAGS_FIRST_FRAG;
		}
		element--;
		fragno--;
		first_lap = 0;
	}
	*next_element_to_fill += skb_shinfo(skb)->nr_frags + 1;
}
+
/*
 * Fill SBAL elements from a linear skb, splitting the data at page
 * boundaries (one element never crosses a page).  Sets the fragment
 * chaining flags: a single-element skb gets flags 0, otherwise
 * FIRST/MIDDLE/LAST as appropriate.  Updates *next_element_to_fill.
 */
static inline void
__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
		   int *next_element_to_fill)
{
	int length = skb->len;
	int length_here;
	int element;
	char *data;
	int first_lap = 1;

	element = *next_element_to_fill;
	data = skb->data;
	while (length > 0) {
		/* length_here is the remaining amount of data in this page */
		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
		if (length < length_here)
			length_here = length;
		buffer->element[element].addr = data;
		buffer->element[element].length = length_here;
		length -= length_here;
		if (!length){
			/* last chunk: no flag if it is also the first one */
			if (first_lap)
				buffer->element[element].flags = 0;
			else
				buffer->element[element].flags =
				    SBAL_FLAGS_LAST_FRAG;
		} else {
			if (first_lap)
				buffer->element[element].flags =
				    SBAL_FLAGS_FIRST_FRAG;
			else
				buffer->element[element].flags =
				    SBAL_FLAGS_MIDDLE_FRAG;
		}
		data += length_here;
		element++;
		first_lap = 0;
	}
	*next_element_to_fill = element;
}
+
/*
 * Add an skb to an output buffer: take a reference, queue it on the
 * buffer's skb list and fill the SBAL elements (linear or fragmented
 * path).  Returns the number of buffers to flush (1 if the buffer is
 * now PRIMED, 0 if a packing buffer still has room).
 */
static inline int
qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		 struct qeth_qdio_out_buffer *buf,
		 struct sk_buff *skb)
{
	struct qdio_buffer *buffer;
	int flush_cnt = 0;

	QETH_DBF_TEXT(trace, 6, "qdfillbf");
	buffer = buf->buffer;
	/* the buffer keeps a reference until transmission completes */
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);
	if (skb_shinfo(skb)->nr_frags == 0)
		__qeth_fill_buffer(skb, buffer,
				   (int *)&buf->next_element_to_fill);
	else
		__qeth_fill_buffer_frag(skb, buffer,
					(int *)&buf->next_element_to_fill);

	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
#ifdef CONFIG_QETH_PERF_STATS
		queue->card->perf_stats.skbs_sent_pack++;
#endif
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer if full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}
+
/*
 * Fast, non-packing send path (used for IQD devices): grab the queue
 * lock via compare-and-swap, fill the next empty buffer with the skb
 * (or EDDP context) and flush it immediately.  Returns 0 on success
 * or -EBUSY if no empty buffer is available.
 */
static inline int
qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			 struct sk_buff *skb, struct qeth_hdr *hdr,
			 int elements_needed,
			 struct qeth_eddp_context *ctx)
{
	struct qeth_qdio_out_buffer *buffer;
	int buffers_needed = 0;
	int flush_cnt = 0;
	int index;

	QETH_DBF_TEXT(trace, 6, "dosndpfa");

	/* spin until we get the queue ... */
	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
				       QETH_OUT_Q_LOCKED,
				       &queue->state));
	/* ... now we've got the queue */
	index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		card->stats.tx_dropped++;
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}
	/* advance the fill index before releasing the lock; an EDDP
	 * context may consume several consecutive buffers */
	if (ctx == NULL)
		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
					  QDIO_MAX_BUFFERS_PER_Q;
	else {
		buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
		if (buffers_needed < 0) {
			card->stats.tx_dropped++;
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
			return -EBUSY;
		}
		queue->next_buf_to_fill =
			(queue->next_buf_to_fill + buffers_needed) %
			QDIO_MAX_BUFFERS_PER_Q;
	}
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	if (ctx == NULL) {
		qeth_fill_buffer(queue, buffer, skb);
		qeth_flush_buffers(queue, 0, index, 1);
	} else {
		flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
		WARN_ON(buffers_needed != flush_cnt);
		qeth_flush_buffers(queue, 0, index, flush_cnt);
	}
	return 0;
}
+
+static inline int
+qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ int elements_needed, struct qeth_eddp_context *ctx)
+{
+ struct qeth_qdio_out_buffer *buffer;
+ int start_index;
+ int flush_count = 0;
+ int do_pack = 0;
+ int tmp;
+ int rc = 0;
+
+ QETH_DBF_TEXT(trace, 6, "dosndpkt");
+
+ /* spin until we get the queue ... */
+ while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ &queue->state));
+ start_index = queue->next_buf_to_fill;
+ buffer = &queue->bufs[queue->next_buf_to_fill];
+ /*
+ * check if buffer is empty to make sure that we do not 'overtake'
+ * ourselves and try to fill a buffer that is already primed
+ */
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
+ card->stats.tx_dropped++;
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ return -EBUSY;
+ }
+ /* check if we need to switch packing state of this queue */
+ qeth_switch_to_packing_if_needed(queue);
+ if (queue->do_pack){
+ do_pack = 1;
+ if (ctx == NULL) {
+ /* does packet fit in current buffer? */
+ if((QETH_MAX_BUFFER_ELEMENTS(card) -
+ buffer->next_element_to_fill) < elements_needed){
+ /* ... no -> set state PRIMED */
+ atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
+ flush_count++;
+ queue->next_buf_to_fill =
+ (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ buffer = &queue->bufs[queue->next_buf_to_fill];
+ /* we did a step forward, so check buffer state
+ * again */
+ if (atomic_read(&buffer->state) !=
+ QETH_QDIO_BUF_EMPTY){
+ card->stats.tx_dropped++;
+ qeth_flush_buffers(queue, 0, start_index, flush_count);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ return -EBUSY;
+ }
+ }
+ } else {
+ /* check if we have enough elements (including following
+ * free buffers) to handle eddp context */
+ if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
+ printk("eddp tx_dropped 1\n");
+ card->stats.tx_dropped++;
+ rc = -EBUSY;
+ goto out;
+ }
+ }
+ }
+ if (ctx == NULL)
+ tmp = qeth_fill_buffer(queue, buffer, skb);
+ else {
+ tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
+ if (tmp < 0) {
+ printk("eddp tx_dropped 2\n");
+ card->stats.tx_dropped++;
+ rc = - EBUSY;
+ goto out;
+ }
+ }
+ queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ flush_count += tmp;
+out:
+ if (flush_count)
+ qeth_flush_buffers(queue, 0, start_index, flush_count);
+ /*
+ * queue->state will go from LOCKED -> UNLOCKED or from
+ * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
+ * (switch packing state or flush buffer to get another pci flag out).
+ * In that case we will enter this loop
+ */
+ while (atomic_dec_return(&queue->state)){
+ flush_count = 0;
+ start_index = queue->next_buf_to_fill;
+ /* check if we can go back to non-packing state */
+ flush_count += qeth_switch_to_nonpacking_if_needed(queue);
+ /*
+ * check if we need to flush a packing buffer to get a pci
+ * flag out on the queue
+ */
+ if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
+ flush_count += qeth_flush_buffers_on_no_pci(queue);
+ if (flush_count)
+ qeth_flush_buffers(queue, 0, start_index, flush_count);
+ }
+ /* at this point the queue is UNLOCKED again */
+#ifdef CONFIG_QETH_PERF_STATS
+ if (do_pack)
+ queue->card->perf_stats.bufs_sent_pack += flush_count;
+#endif /* CONFIG_QETH_PERF_STATS */
+
+ return rc;
+}
+
/*
 * Central transmit path: classify the packet, pick the outbound queue,
 * prepend the qeth header and hand the packet (plain, TSO or EDDP) to
 * the queue-specific send routine.  Updates tx statistics on success.
 */
static inline int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
{
	int ipv = 0;
	int cast_type;
	struct qeth_qdio_out_q *queue;
	struct qeth_hdr *hdr;
	int elements_needed = 0;
	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
	struct qeth_eddp_context *ctx = NULL;
	int rc;

	QETH_DBF_TEXT(trace, 6, "sendpkt");

	if (!card->options.layer2) {
		ipv = qeth_get_ip_version(skb);
		/* strip the fake link-level header from IP packets */
		if ((card->dev->hard_header == qeth_fake_header) && ipv) {
			/* NOTE(review): on unshare failure skb has just been
			 * set to NULL, so dev_kfree_skb_irq is called with
			 * NULL and the original buffer looks leaked - verify
			 * against qeth_pskb_unshare's semantics */
			if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) {
				card->stats.tx_dropped++;
				dev_kfree_skb_irq(skb);
				return 0;
			}
			skb_pull(skb, QETH_FAKE_LL_LEN);
		}
	}
	cast_type = qeth_get_cast_type(card, skb);
	/* drop broadcast frames if the card cannot broadcast */
	if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)){
		card->stats.tx_dropped++;
		card->stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];

	/* oversized segments use the configured large-send method */
	if (skb_shinfo(skb)->tso_size)
		large_send = card->options.large_send;

	if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
		QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
		return rc;
	}
	/*are we able to do TSO ? If so ,prepare and send it from here */
	if ((large_send == QETH_LARGE_SEND_TSO) &&
	    (cast_type == RTN_UNSPEC)) {
		rc = qeth_tso_send_packet(card, skb, queue,
					  ipv, cast_type);
		goto do_statistics;
	}

	qeth_fill_header(card, hdr, skb, ipv, cast_type);
	if (large_send == QETH_LARGE_SEND_EDDP) {
		ctx = qeth_eddp_create_context(card, skb, hdr);
		if (ctx == NULL) {
			PRINT_WARN("could not create eddp context\n");
			return -EINVAL;
		}
	} else {
		elements_needed = qeth_get_elements_no(card,(void*) hdr, skb);
		if (!elements_needed)
			return -EINVAL;
	}

	/* IQD devices do not pack -> use the fast path */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		rc = qeth_do_send_packet(card, queue, skb, hdr,
					 elements_needed, ctx);
	else
		rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
					      elements_needed, ctx);
do_statistics:
	if (!rc){
		card->stats.tx_packets++;
		card->stats.tx_bytes += skb->len;
#ifdef CONFIG_QETH_PERF_STATS
		if (skb_shinfo(skb)->tso_size) {
			card->perf_stats.large_send_bytes += skb->len;
			card->perf_stats.large_send_cnt++;
		}
		if (skb_shinfo(skb)->nr_frags > 0){
			card->perf_stats.sg_skbs_sent++;
			/* nr_frags + skb->data */
			card->perf_stats.sg_frags_sent +=
				skb_shinfo(skb)->nr_frags + 1;
		}
#endif /* CONFIG_QETH_PERF_STATS */
	}
	if (ctx != NULL) {
		/* drop creator's reference */
		qeth_eddp_put_context(ctx);
		/* free skb; it's not referenced by a buffer */
		if (rc == 0)
			dev_kfree_skb_any(skb);

	}
	return rc;
}
+
+static int
+qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
+{
+ struct qeth_card *card = (struct qeth_card *) dev->priv;
+ int rc = 0;
+
+ switch(regnum){
+ case MII_BMCR: /* Basic mode control register */
+ rc = BMCR_FULLDPLX;
+ if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
+ (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+ rc |= BMCR_SPEED100;
+ break;
+ case MII_BMSR: /* Basic mode status register */
+ rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
+ BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
+ BMSR_100BASE4;
+ break;
+ case MII_PHYSID1: /* PHYS ID 1 */
+ rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
+ dev->dev_addr[2];
+ rc = (rc >> 5) & 0xFFFF;
+ break;
+ case MII_PHYSID2: /* PHYS ID 2 */
+ rc = (dev->dev_addr[2] << 10) & 0xFFFF;
+ break;
+ case MII_ADVERTISE: /* Advertisement control reg */
+ rc = ADVERTISE_ALL;
+ break;
+ case MII_LPA: /* Link partner ability reg */
+ rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
+ LPA_100BASE4 | LPA_LPACK;
+ break;
+ case MII_EXPANSION: /* Expansion register */
+ break;
+ case MII_DCOUNTER: /* disconnect counter */
+ break;
+ case MII_FCSCOUNTER: /* false carrier counter */
+ break;
+ case MII_NWAYTEST: /* N-way auto-neg test register */
+ break;
+ case MII_RERRCOUNTER: /* rx error counter */
+ rc = card->stats.rx_errors;
+ break;
+ case MII_SREVISION: /* silicon revision */
+ break;
+ case MII_RESV1: /* reserved 1 */
+ break;
+ case MII_LBRERROR: /* loopback, rx, bypass error */
+ break;
+ case MII_PHYADDR: /* physical address */
+ break;
+ case MII_RESV2: /* reserved 2 */
+ break;
+ case MII_TPISTATUS: /* TPI status for 10mbps */
+ break;
+ case MII_NCONFIG: /* network interface config */
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
/*
 * Emulated MII register write.  The emulated PHY is read-only: writes
 * to any MII register (BMCR, BMSR, PHYSID1/2, ADVERTISE, LPA,
 * EXPANSION, the counter/test/reserved registers, NCONFIG, ...) are
 * silently ignored.
 */
static void
qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	/* intentionally a no-op */
}
+
+static inline const char *
+qeth_arp_get_error_cause(int *rc)
+{
+ switch (*rc) {
+ case QETH_IPA_ARP_RC_FAILED:
+ *rc = -EIO;
+ return "operation failed";
+ case QETH_IPA_ARP_RC_NOTSUPP:
+ *rc = -EOPNOTSUPP;
+ return "operation not supported";
+ case QETH_IPA_ARP_RC_OUT_OF_RANGE:
+ *rc = -EINVAL;
+ return "argument out of range";
+ case QETH_IPA_ARP_RC_Q_NOTSUPP:
+ *rc = -EOPNOTSUPP;
+ return "query operation not supported";
+ case QETH_IPA_ARP_RC_Q_NO_DATA:
+ *rc = -ENOENT;
+ return "no query data available";
+ default:
+ return "unknown error";
+ }
+}
+
+static int
+qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
+ __u16, long);
+
+static int
+qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
+{
+ int tmp;
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"arpstnoe");
+
+ /* TODO: really not supported by GuestLAN? */
+ if (card->info.guestlan)
+ return -EOPNOTSUPP;
+ if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
+ PRINT_WARN("ARP processing not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+ rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
+ no_entries);
+ if (rc) {
+ tmp = rc;
+ PRINT_WARN("Could not set number of ARP entries on %s: "
+ "%s (0x%x/%d)\n",
+ QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
+ tmp, tmp);
+ }
+ return rc;
+}
+
+static inline void
+qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
+ struct qeth_arp_query_data *qdata,
+ int entry_size, int uentry_size)
+{
+ char *entry_ptr;
+ char *uentry_ptr;
+ int i;
+
+ entry_ptr = (char *)&qdata->data;
+ uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
+ for (i = 0; i < qdata->no_entries; ++i){
+ /* strip off 32 bytes "media specific information" */
+ memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
+ entry_ptr += entry_size;
+ uentry_ptr += uentry_size;
+ }
+}
+
/*
 * Callback for the ARP cache query: appends the entries of each
 * (possibly multi-part) reply to the userspace staging buffer in
 * reply->param.  Returns 1 while more reply parts are expected,
 * 0 when the query is complete or aborted.
 */
static int
qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
		  unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_data *qdata;
	struct qeth_arp_query_info *qinfo;
	int entry_size;
	int uentry_size;
	int i;

	QETH_DBF_TEXT(trace,4,"arpquecb");

	qinfo = (struct qeth_arp_query_info *) reply->param;
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
		return 0;
	}
	if (cmd->data.setassparms.hdr.return_code) {
		/* propagate the assist's error into the main header */
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
		return 0;
	}
	qdata = &cmd->data.setassparms.data.query_arp;
	/* pick the on-wire and userspace entry sizes by reply format */
	switch(qdata->reply_bits){
	case 5:
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
		break;
	case 7:
		/* fall through to default */
	default:
		/* tr is the same as eth -> entry7 */
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
		break;
	}
	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) <
			qdata->no_entries * uentry_size){
		QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
		cmd->hdr.return_code = -ENOMEM;
		PRINT_WARN("query ARP user space buffer is too small for "
			   "the returned number of ARP entries. "
			   "Aborting query!\n");
		goto out_error;
	}
	QETH_DBF_TEXT_(trace, 4, "anore%i",
		       cmd->data.setassparms.hdr.number_of_replies);
	QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
	QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);

	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
		/* strip off "media specific information" */
		qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
					       uentry_size);
	} else
		/*copy entries to user buffer*/
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)&qdata->data, qdata->no_entries*uentry_size);

	qinfo->no_entries += qdata->no_entries;
	qinfo->udata_offset += (qdata->no_entries*uentry_size);
	/* check if all replies received ... */
	if (cmd->data.setassparms.hdr.seq_no <
	    cmd->data.setassparms.hdr.number_of_replies)
		return 1;
	/* done: store total entry count at the start of the buffer */
	memcpy(qinfo->udata, &qinfo->no_entries, 4);
	/* keep STRIP_ENTRIES flag so the user program can distinguish
	 * stripped entries from normal ones */
	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
	return 0;
out_error:
	i = 0;
	/* report zero entries back to userspace on abort */
	memcpy(qinfo->udata, &i, 4);
	return 0;
}
+
/*
 * Send an ARP assist command: prepend the IPA PDU header, patch in
 * the ULP connection token and hand the buffer to the control-data
 * sender with the given reply callback.
 */
static int
qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		      int len, int (*reply_cb)(struct qeth_card *,
					       struct qeth_reply *,
					       unsigned long),
		      void *reply_param)
{
	QETH_DBF_TEXT(trace,4,"sendarp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
+
/*
 * Send an SNMP command: like qeth_send_ipa_arp_cmd, but additionally
 * patches the variable total/PDU length fields of the IPA PDU header,
 * since SNMP requests are not fixed-size.
 */
static int
qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		       int len, int (*reply_cb)(struct qeth_card *,
						struct qeth_reply *,
						unsigned long),
		       void *reply_param)
{
	u16 s1, s2;

	QETH_DBF_TEXT(trace,4,"sendsnmp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	/* adjust PDU length fields in IPA_PDU_HEADER */
	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
	s2 = (u32) len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
+
+static struct qeth_cmd_buffer *
+qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
+ __u16, __u16, enum qeth_prot_versions);
+static int
+qeth_arp_query(struct qeth_card *card, char *udata)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_arp_query_info qinfo = {0, };
+ int tmp;
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"arpquery");
+
+ /*
+ * currently GuestLAN does only deliver all zeros on query arp,
+ * even though arp processing is supported (according to IPA supp.
+ * funcs flags); since all zeros is no valueable information,
+ * we say EOPNOTSUPP for all ARP functions
+ */
+ /*if (card->info.guestlan)
+ return -EOPNOTSUPP; */
+ if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
+ IPA_ARP_PROCESSING)) {
+ PRINT_WARN("ARP processing not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+ /* get size of userspace buffer and mask_bits -> 6 bytes */
+ if (copy_from_user(&qinfo, udata, 6))
+ return -EFAULT;
+ if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
+ return -ENOMEM;
+ memset(qinfo.udata, 0, qinfo.udata_len);
+ qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_QUERY_INFO,
+ sizeof(int),QETH_PROT_IPV4);
+
+ rc = qeth_send_ipa_arp_cmd(card, iob,
+ QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
+ qeth_arp_query_cb, (void *)&qinfo);
+ if (rc) {
+ tmp = rc;
+ PRINT_WARN("Error while querying ARP cache on %s: %s "
+ "(0x%x/%d)\n",
+ QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
+ tmp, tmp);
+ copy_to_user(udata, qinfo.udata, 4);
+ } else {
+ copy_to_user(udata, qinfo.udata, qinfo.udata_len);
+ }
+ kfree(qinfo.udata);
+ return rc;
+}
+
+/**
+ * SNMP command callback
+ */
+static int
+qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long sdata)
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_arp_query_info *qinfo;
+ struct qeth_snmp_cmd *snmp;
+ unsigned char *data;
+ __u16 data_len;
+
+ QETH_DBF_TEXT(trace,3,"snpcmdcb");
+
+ cmd = (struct qeth_ipa_cmd *) sdata;
+ data = (unsigned char *)((char *)cmd - reply->offset);
+ qinfo = (struct qeth_arp_query_info *) reply->param;
+ snmp = &cmd->data.setadapterparms.data.snmp;
+
+ if (cmd->hdr.return_code) {
+ QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
+ return 0;
+ }
+ if (cmd->data.setadapterparms.hdr.return_code) {
+ cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
+ QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
+ return 0;
+ }
+ data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
+ if (cmd->data.setadapterparms.hdr.seq_no == 1)
+ data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
+ else
+ data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
+
+ /* check if there is enough room in userspace */
+ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
+ QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
+ cmd->hdr.return_code = -ENOMEM;
+ return 0;
+ }
+ QETH_DBF_TEXT_(trace, 4, "snore%i",
+ cmd->data.setadapterparms.hdr.used_total);
+ QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
+ /*copy entries to user buffer*/
+ if (cmd->data.setadapterparms.hdr.seq_no == 1) {
+ memcpy(qinfo->udata + qinfo->udata_offset,
+ (char *)snmp,
+ data_len + offsetof(struct qeth_snmp_cmd,data));
+ qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
+ } else {
+ memcpy(qinfo->udata + qinfo->udata_offset,
+ (char *)&snmp->request, data_len);
+ }
+ qinfo->udata_offset += data_len;
+ /* check if all replies received ... */
+ QETH_DBF_TEXT_(trace, 4, "srtot%i",
+ cmd->data.setadapterparms.hdr.used_total);
+ QETH_DBF_TEXT_(trace, 4, "srseq%i",
+ cmd->data.setadapterparms.hdr.seq_no);
+ if (cmd->data.setadapterparms.hdr.seq_no <
+ cmd->data.setadapterparms.hdr.used_total)
+ return 1;
+ return 0;
+}
+
+static struct qeth_cmd_buffer *
+qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
+ enum qeth_prot_versions );
+
+static struct qeth_cmd_buffer *
+qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+
+ iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
+ QETH_PROT_IPV4);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+ cmd->data.setadapterparms.hdr.command_code = command;
+ cmd->data.setadapterparms.hdr.used_total = 1;
+ cmd->data.setadapterparms.hdr.seq_no = 1;
+
+ return iob;
+}
+
+/**
+ * function to send SNMP commands to OSA-E card
+ */
+static int
+qeth_snmp_command(struct qeth_card *card, char *udata)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_snmp_ureq *ureq;
+ int req_len;
+ struct qeth_arp_query_info qinfo = {0, };
+ int rc = 0;
+
+ QETH_DBF_TEXT(trace,3,"snmpcmd");
+
+ if (card->info.guestlan)
+ return -EOPNOTSUPP;
+
+ if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) &&
+ (!card->options.layer2) ) {
+ PRINT_WARN("SNMP Query MIBS not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+ /* skip 4 bytes (data_len struct member) to get req_len */
+ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+ return -EFAULT;
+ ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
+ if (!ureq) {
+ QETH_DBF_TEXT(trace, 2, "snmpnome");
+ return -ENOMEM;
+ }
+ if (copy_from_user(ureq, udata,
+ req_len+sizeof(struct qeth_snmp_ureq_hdr))){
+ kfree(ureq);
+ return -EFAULT;
+ }
+ qinfo.udata_len = ureq->hdr.data_len;
+ if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
+ kfree(ureq);
+ return -ENOMEM;
+ }
+ memset(qinfo.udata, 0, qinfo.udata_len);
+ qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
+
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
+ QETH_SNMP_SETADP_CMDLENGTH + req_len);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
+ rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
+ qeth_snmp_command_cb, (void *)&qinfo);
+ if (rc)
+ PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
+ QETH_CARD_IFNAME(card), rc);
+ else
+ copy_to_user(udata, qinfo.udata, qinfo.udata_len);
+
+ kfree(ureq);
+ kfree(qinfo.udata);
+ return rc;
+}
+
+static int
+qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
+ unsigned long);
+
+static int
+qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
+ __u16, long,
+ int (*reply_cb)
+ (struct qeth_card *, struct qeth_reply *, unsigned long),
+ void *reply_param);
+
+static int
+qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
+{
+ struct qeth_cmd_buffer *iob;
+ char buf[16];
+ int tmp;
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"arpadent");
+
+ /*
+ * currently GuestLAN does only deliver all zeros on query arp,
+ * even though arp processing is supported (according to IPA supp.
+ * funcs flags); since all zeros is no valueable information,
+ * we say EOPNOTSUPP for all ARP functions
+ */
+ if (card->info.guestlan)
+ return -EOPNOTSUPP;
+ if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
+ PRINT_WARN("ARP processing not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_ADD_ENTRY,
+ sizeof(struct qeth_arp_cache_entry),
+ QETH_PROT_IPV4);
+ rc = qeth_send_setassparms(card, iob,
+ sizeof(struct qeth_arp_cache_entry),
+ (unsigned long) entry,
+ qeth_default_setassparms_cb, NULL);
+ if (rc) {
+ tmp = rc;
+ qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
+ PRINT_WARN("Could not add ARP entry for address %s on %s: "
+ "%s (0x%x/%d)\n",
+ buf, QETH_CARD_IFNAME(card),
+ qeth_arp_get_error_cause(&rc), tmp, tmp);
+ }
+ return rc;
+}
+
+static int
+qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
+{
+ struct qeth_cmd_buffer *iob;
+ char buf[16] = {0, };
+ int tmp;
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"arprment");
+
+ /*
+ * currently GuestLAN does only deliver all zeros on query arp,
+ * even though arp processing is supported (according to IPA supp.
+ * funcs flags); since all zeros is no valueable information,
+ * we say EOPNOTSUPP for all ARP functions
+ */
+ if (card->info.guestlan)
+ return -EOPNOTSUPP;
+ if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
+ PRINT_WARN("ARP processing not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+ memcpy(buf, entry, 12);
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_REMOVE_ENTRY,
+ 12,
+ QETH_PROT_IPV4);
+ rc = qeth_send_setassparms(card, iob,
+ 12, (unsigned long)buf,
+ qeth_default_setassparms_cb, NULL);
+ if (rc) {
+ tmp = rc;
+ memset(buf, 0, 16);
+ qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
+ PRINT_WARN("Could not delete ARP entry for address %s on %s: "
+ "%s (0x%x/%d)\n",
+ buf, QETH_CARD_IFNAME(card),
+ qeth_arp_get_error_cause(&rc), tmp, tmp);
+ }
+ return rc;
+}
+
+static int
+qeth_arp_flush_cache(struct qeth_card *card)
+{
+ int rc;
+ int tmp;
+
+ QETH_DBF_TEXT(trace,3,"arpflush");
+
+ /*
+ * currently GuestLAN does only deliver all zeros on query arp,
+ * even though arp processing is supported (according to IPA supp.
+ * funcs flags); since all zeros is no valueable information,
+ * we say EOPNOTSUPP for all ARP functions
+ */
+ if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
+ return -EOPNOTSUPP;
+ if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
+ PRINT_WARN("ARP processing not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+ rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
+ if (rc){
+ tmp = rc;
+ PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
+ QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
+ tmp, tmp);
+ }
+ return rc;
+}
+
/*
 * Private ioctl entry point: handles the qeth-specific ARP/SNMP ioctls
 * plus the standard MII ioctls.  Returns 0 or a negative errno; the
 * SIOC_QETH_GET_CARD_TYPE case returns 1 for a real (non-guestlan)
 * OSA-E card and 0 otherwise.
 */
static int
qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = (struct qeth_card *)dev->priv;
	struct qeth_arp_cache_entry arp_entry;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	/* requests are served only while the card is up or soft-setup */
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;

	switch (cmd){
	case SIOC_QETH_ARP_SET_NO_ENTRIES:
		/* ARP manipulation needs CAP_NET_ADMIN and layer-3 mode */
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
		break;
	case SIOC_QETH_ARP_QUERY_INFO:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_ARP_ADD_ENTRY:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		/* entry is passed by value from userspace */
		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
				   sizeof(struct qeth_arp_cache_entry)))
			rc = -EFAULT;
		else
			rc = qeth_arp_add_entry(card, &arp_entry);
		break;
	case SIOC_QETH_ARP_REMOVE_ENTRY:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
				   sizeof(struct qeth_arp_cache_entry)))
			rc = -EFAULT;
		else
			rc = qeth_arp_remove_entry(card, &arp_entry);
		break;
	case SIOC_QETH_ARP_FLUSH_CACHE:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_flush_cache(card);
		break;
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		/* returns directly (1 = real OSA-E card), deliberately
		 * skipping the trace logging below */
		if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
		    !card->info.guestlan)
			return 1;
		return 0;
		break;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
							   mii_data->reg_num);
		break;
	case SIOCSMIIREG:
		rc = -EOPNOTSUPP;
		break;
		/* the code below is intentionally unreachable (see TODO):
		 * it becomes live once qeth_mdio_write is implemented */
	/* TODO: remove return if qeth_mdio_write does something */
		if (!capable(CAP_NET_ADMIN)){
			rc = -EPERM;
			break;
		}
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
					mii_data->val_in);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
	return rc;
}
+
+static struct net_device_stats *
+qeth_get_stats(struct net_device *dev)
+{
+ struct qeth_card *card;
+
+ card = (struct qeth_card *) (dev->priv);
+
+ QETH_DBF_TEXT(trace,5,"getstat");
+
+ return &card->stats;
+}
+
+static int
+qeth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct qeth_card *card;
+ char dbf_text[15];
+
+ card = (struct qeth_card *) (dev->priv);
+
+ QETH_DBF_TEXT(trace,4,"chgmtu");
+ sprintf(dbf_text, "%8x", new_mtu);
+ QETH_DBF_TEXT(trace,4,dbf_text);
+
+ if (new_mtu < 64)
+ return -EINVAL;
+ if (new_mtu > 65535)
+ return -EINVAL;
+ if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
+ (!qeth_mtu_is_valid(card, new_mtu)))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#ifdef CONFIG_QETH_VLAN
+static void
+qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct qeth_card *card;
+ unsigned long flags;
+
+ QETH_DBF_TEXT(trace,4,"vlanreg");
+
+ card = (struct qeth_card *) dev->priv;
+ spin_lock_irqsave(&card->vlanlock, flags);
+ card->vlangrp = grp;
+ spin_unlock_irqrestore(&card->vlanlock, flags);
+}
+
/*
 * Drop every skb queued on this outbound buffer that carries the given
 * VLAN tag; all other skbs are preserved in their original order by
 * parking them on tmp_list and re-queueing them afterwards.
 */
static inline void
qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
		      unsigned short vid)
{
	int i;
	struct sk_buff *skb;
	struct sk_buff_head tmp_list;

	skb_queue_head_init(&tmp_list);
	/* NOTE(review): the outer loop repeats the drain once per buffer
	 * element, but skb_list is already empty after the first pass —
	 * looks redundant; confirm before changing */
	for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
		while ((skb = skb_dequeue(&buf->skb_list))){
			if (vlan_tx_tag_present(skb) &&
			    (vlan_tx_tag_get(skb) == vid)) {
				/* drop the queue's reference, then free */
				atomic_dec(&skb->users);
				dev_kfree_skb(skb);
			} else
				skb_queue_tail(&tmp_list, skb);
		}
	}
	/* put the surviving skbs back on the buffer */
	while ((skb = skb_dequeue(&tmp_list)))
		skb_queue_tail(&buf->skb_list, skb);
}
+
+static void
+qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
+{
+ int i, j;
+
+ QETH_DBF_TEXT(trace, 4, "frvlskbs");
+ for (i = 0; i < card->qdio.no_out_queues; ++i){
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+ qeth_free_vlan_buffer(card, &card->qdio.
+ out_qs[i]->bufs[j], vid);
+ }
+}
+
+static void
+qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
+{
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ struct qeth_ipaddr *addr;
+
+ QETH_DBF_TEXT(trace, 4, "frvaddr4");
+ if (!card->vlangrp)
+ return;
+ rcu_read_lock();
+ in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]);
+ if (!in_dev)
+ goto out;
+ for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+ addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
+ if (addr){
+ addr->u.a4.addr = ifa->ifa_address;
+ addr->u.a4.mask = ifa->ifa_mask;
+ addr->type = QETH_IP_TYPE_NORMAL;
+ if (!qeth_delete_ip(card, addr))
+ kfree(addr);
+ }
+ }
+out:
+ rcu_read_unlock();
+}
+
static void
qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
{
#ifdef CONFIG_QETH_IPV6
	struct inet6_dev *in6_dev;
	struct inet6_ifaddr *ifa;
	struct qeth_ipaddr *addr;

	QETH_DBF_TEXT(trace, 4, "frvaddr6");
	if (!card->vlangrp)
		return;
	in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
	if (!in6_dev)
		return;
	/* queue a delete for every IPv6 address on the vlan device */
	for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
		addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
		if (!addr)
			continue;
		memcpy(&addr->u.a6.addr, &ifa->addr,
		       sizeof(struct in6_addr));
		addr->u.a6.pfxlen = ifa->prefix_len;
		addr->type = QETH_IP_TYPE_NORMAL;
		if (!qeth_delete_ip(card, addr))
			kfree(addr);
	}
	in6_dev_put(in6_dev);
#endif /* CONFIG_QETH_IPV6 */
}
+
+static void
+qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i,
+ enum qeth_ipa_cmds ipacmd)
+{
+ int rc;
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd);
+ iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setdelvlan.vlan_id = i;
+
+ rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+ if (rc) {
+ PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
+ "Continuing\n",i, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_TEXT_(trace, 2, "L2VL%4x", ipacmd);
+ QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
+ QETH_DBF_TEXT_(trace, 2, "err%d", rc);
+ }
+}
+
+static void
+qeth_layer2_process_vlans(struct qeth_card *card, int clear)
+{
+ unsigned short i;
+
+ QETH_DBF_TEXT(trace, 3, "L2prcvln");
+
+ if (!card->vlangrp)
+ return;
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ if (card->vlangrp->vlan_devices[i] == NULL)
+ continue;
+ if (clear)
+ qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
+ else
+ qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN);
+ }
+}
+
+/*add_vid is layer 2 used only ....*/
+static void
+qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
+
+ card = (struct qeth_card *) dev->priv;
+ if (!card->options.layer2)
+ return;
+ qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+}
+
+/*... kill_vid used for both modes*/
/*
 * Tear down a vlan id: drop queued skbs, remove the vlan device's IP
 * addresses, clear the group slot and (in layer-2 mode) deregister the
 * vlan id on the card.  The step order matters: skbs are freed before
 * the vlan lock is taken.
 */
static void
qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_card *card;
	unsigned long flags;

	QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);

	card = (struct qeth_card *) dev->priv;
	/* free all skbs for the vlan device */
	qeth_free_vlan_skbs(card, vid);
	spin_lock_irqsave(&card->vlanlock, flags);
	/* unregister IP addresses of vlan device */
	qeth_free_vlan_addresses4(card, vid);
	qeth_free_vlan_addresses6(card, vid);
	if (card->vlangrp)
		card->vlangrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&card->vlanlock, flags);
	/* in layer-2 mode the id is also deregistered on the card */
	if (card->options.layer2)
		qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
	/* rebuild the multicast list now that the vlan device is gone */
	qeth_set_multicast_list(card->dev);
}
+#endif
+
+/**
+ * set multicast address on card
+ */
+static void
+qeth_set_multicast_list(struct net_device *dev)
+{
+ struct qeth_card *card = (struct qeth_card *) dev->priv;
+
+ QETH_DBF_TEXT(trace,3,"setmulti");
+ qeth_delete_mc_addresses(card);
+ qeth_add_multicast_ipv4(card);
+#ifdef CONFIG_QETH_IPV6
+ qeth_add_multicast_ipv6(card);
+#endif
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
+}
+
/* net_device neigh_setup hook: nothing to adjust, always succeeds */
static int
qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
{
	return 0;
}
+
+static void
+qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
+{
+ if (dev->type == ARPHRD_IEEE802_TR)
+ ip_tr_mc_map(ipm, mac);
+ else
+ ip_eth_mc_map(ipm, mac);
+}
+
+static struct qeth_ipaddr *
+qeth_get_addr_buffer(enum qeth_prot_versions prot)
+{
+ struct qeth_ipaddr *addr;
+
+ addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
+ if (addr == NULL) {
+ PRINT_WARN("Not enough memory to add address\n");
+ return NULL;
+ }
+ memset(addr,0,sizeof(struct qeth_ipaddr));
+ addr->type = QETH_IP_TYPE_NORMAL;
+ addr->proto = prot;
+ return addr;
+}
+
+static void
+qeth_delete_mc_addresses(struct qeth_card *card)
+{
+ struct qeth_ipaddr *iptodo;
+ unsigned long flags;
+
+ QETH_DBF_TEXT(trace,4,"delmc");
+ iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
+ if (!iptodo) {
+ QETH_DBF_TEXT(trace, 2, "dmcnomem");
+ return;
+ }
+ iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
+ spin_lock_irqsave(&card->ip_lock, flags);
+ if (!__qeth_insert_ip_todo(card, iptodo, 0))
+ kfree(iptodo);
+ spin_unlock_irqrestore(&card->ip_lock, flags);
+}
+
+static inline void
+qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
+{
+ struct qeth_ipaddr *ipm;
+ struct ip_mc_list *im4;
+ char buf[MAX_ADDR_LEN];
+
+ QETH_DBF_TEXT(trace,4,"addmc");
+ for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
+ qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
+ ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
+ if (!ipm)
+ continue;
+ ipm->u.a4.addr = im4->multiaddr;
+ memcpy(ipm->mac,buf,OSA_ADDR_LEN);
+ ipm->is_multicast = 1;
+ if (!qeth_add_ip(card,ipm))
+ kfree(ipm);
+ }
+}
+
static inline void
qeth_add_vlan_mc(struct qeth_card *card)
{
#ifdef CONFIG_QETH_VLAN
	struct net_device *vdev;
	struct in_device *in_dev;
	struct vlan_group *vg;
	int i;

	QETH_DBF_TEXT(trace,4,"addmcvl");
	/* requires vlan capability (layer 2, or IPA_FULL_VLAN in layer 3)
	 * and a registered vlan group */
	if ( ((card->options.layer2 == 0) &&
	     (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
	     (card->vlangrp == NULL) )
		return ;

	vg = card->vlangrp;
	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
		vdev = vg->vlan_devices[i];
		if (vdev == NULL || !(vdev->flags & IFF_UP))
			continue;
		in_dev = in_dev_get(vdev);
		if (!in_dev)
			continue;
		read_lock(&in_dev->mc_list_lock);
		qeth_add_mc(card,in_dev);
		read_unlock(&in_dev->mc_list_lock);
		in_dev_put(in_dev);
	}
#endif
}
+
+static void
+qeth_add_multicast_ipv4(struct qeth_card *card)
+{
+ struct in_device *in4_dev;
+
+ QETH_DBF_TEXT(trace,4,"chkmcv4");
+ in4_dev = in_dev_get(card->dev);
+ if (in4_dev == NULL)
+ return;
+ read_lock(&in4_dev->mc_list_lock);
+ qeth_add_mc(card, in4_dev);
+ qeth_add_vlan_mc(card);
+ read_unlock(&in4_dev->mc_list_lock);
+ in_dev_put(in4_dev);
+}
+
+#ifdef CONFIG_QETH_IPV6
+static inline void
+qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
+{
+ struct qeth_ipaddr *ipm;
+ struct ifmcaddr6 *im6;
+ char buf[MAX_ADDR_LEN];
+
+ QETH_DBF_TEXT(trace,4,"addmc6");
+ for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
+ ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
+ ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
+ if (!ipm)
+ continue;
+ ipm->is_multicast = 1;
+ memcpy(ipm->mac,buf,OSA_ADDR_LEN);
+ memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
+ sizeof(struct in6_addr));
+ if (!qeth_add_ip(card,ipm))
+ kfree(ipm);
+ }
+}
+
static inline void
qeth_add_vlan_mc6(struct qeth_card *card)
{
#ifdef CONFIG_QETH_VLAN
	struct net_device *vdev;
	struct inet6_dev *in_dev;
	struct vlan_group *vg;
	int i;

	QETH_DBF_TEXT(trace,4,"admc6vl");
	/* requires vlan capability (layer 2, or IPA_FULL_VLAN in layer 3)
	 * and a registered vlan group */
	if ( ((card->options.layer2 == 0) &&
	     (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
	     (card->vlangrp == NULL))
		return ;

	vg = card->vlangrp;
	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
		vdev = vg->vlan_devices[i];
		if (vdev == NULL || !(vdev->flags & IFF_UP))
			continue;
		in_dev = in6_dev_get(vdev);
		if (!in_dev)
			continue;
		read_lock(&in_dev->lock);
		qeth_add_mc6(card,in_dev);
		read_unlock(&in_dev->lock);
		in6_dev_put(in_dev);
	}
#endif /* CONFIG_QETH_VLAN */
}
+
+static void
+qeth_add_multicast_ipv6(struct qeth_card *card)
+{
+ struct inet6_dev *in6_dev;
+
+ QETH_DBF_TEXT(trace,4,"chkmcv6");
+ if ((card->options.layer2 == 0) &&
+ (!qeth_is_supported(card, IPA_IPV6)) )
+ return ;
+
+ in6_dev = in6_dev_get(card->dev);
+ if (in6_dev == NULL)
+ return;
+ read_lock(&in6_dev->lock);
+ qeth_add_mc6(card, in6_dev);
+ qeth_add_vlan_mc6(card);
+ read_unlock(&in6_dev->lock);
+ in6_dev_put(in6_dev);
+}
+#endif /* CONFIG_QETH_IPV6 */
+
+static int
+qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+ enum qeth_ipa_cmds ipacmd,
+ int (*reply_cb) (struct qeth_card *,
+ struct qeth_reply*,
+ unsigned long))
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_DBF_TEXT(trace, 2, "L2sdmac");
+ iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
+ memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+ return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+}
+
+static int
+qeth_layer2_send_setgroupmac_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+ __u8 *mac;
+
+ QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
+ cmd = (struct qeth_ipa_cmd *) data;
+ mac = &cmd->data.setdelmac.mac[0];
+ /* MAC already registered, needed in couple/uncouple case */
+ if (cmd->hdr.return_code == 0x2005) {
+ PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
+ "already existing on %s \n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ QETH_CARD_IFNAME(card));
+ cmd->hdr.return_code = 0;
+ }
+ if (cmd->hdr.return_code)
+ PRINT_ERR("Could not set group MAC " \
+ "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ QETH_CARD_IFNAME(card),cmd->hdr.return_code);
+ return 0;
+}
+
+static int
+qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
+{
+ QETH_DBF_TEXT(trace, 2, "L2Sgmac");
+ return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
+ qeth_layer2_send_setgroupmac_cb);
+}
+
+static int
+qeth_layer2_send_delgroupmac_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+ __u8 *mac;
+
+ QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
+ cmd = (struct qeth_ipa_cmd *) data;
+ mac = &cmd->data.setdelmac.mac[0];
+ if (cmd->hdr.return_code)
+ PRINT_ERR("Could not delete group MAC " \
+ "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+ return 0;
+}
+
+static int
+qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
+{
+ QETH_DBF_TEXT(trace, 2, "L2Dgmac");
+ return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
+ qeth_layer2_send_delgroupmac_cb);
+}
+
+static int
+qeth_layer2_send_setmac_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+
+ QETH_DBF_TEXT(trace, 2, "L2Smaccb");
+ cmd = (struct qeth_ipa_cmd *) data;
+ if (cmd->hdr.return_code) {
+ QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
+ PRINT_WARN("Error in registering MAC address on " \
+ "device %s: x%x\n", CARD_BUS_ID(card),
+ cmd->hdr.return_code);
+ card->info.layer2_mac_registered = 0;
+ cmd->hdr.return_code = -EIO;
+ } else {
+ card->info.layer2_mac_registered = 1;
+ memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac,
+ OSA_ADDR_LEN);
+ PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
+ "successfully registered on device %s\n",
+ card->dev->dev_addr[0], card->dev->dev_addr[1],
+ card->dev->dev_addr[2], card->dev->dev_addr[3],
+ card->dev->dev_addr[4], card->dev->dev_addr[5],
+ card->dev->name);
+ }
+ return 0;
+}
+
+static int
+qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac)
+{
+ QETH_DBF_TEXT(trace, 2, "L2Setmac");
+ return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
+ qeth_layer2_send_setmac_cb);
+}
+
+static int
+qeth_layer2_send_delmac_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+
+ QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
+ cmd = (struct qeth_ipa_cmd *) data;
+ if (cmd->hdr.return_code) {
+ PRINT_WARN("Error in deregistering MAC address on " \
+ "device %s: x%x\n", CARD_BUS_ID(card),
+ cmd->hdr.return_code);
+ QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
+ cmd->hdr.return_code = -EIO;
+ return 0;
+ }
+ card->info.layer2_mac_registered = 0;
+
+ return 0;
+}
+static int
+qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
+{
+ QETH_DBF_TEXT(trace, 2, "L2Delmac");
+ if (!card->info.layer2_mac_registered)
+ return 0;
+ return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
+ qeth_layer2_send_delmac_cb);
+}
+
+static int
+qeth_layer2_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct qeth_card *card;
+ int rc = 0;
+
+ QETH_DBF_TEXT(trace, 3, "setmac");
+
+ if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
+ QETH_DBF_TEXT(trace, 3, "setmcINV");
+ return -EOPNOTSUPP;
+ }
+ card = (struct qeth_card *) dev->priv;
+
+ if (!card->options.layer2) {
+ PRINT_WARN("Setting MAC address on %s is not supported"
+ "in Layer 3 mode.\n", dev->name);
+ QETH_DBF_TEXT(trace, 3, "setmcLY3");
+ return -EOPNOTSUPP;
+ }
+ QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
+ QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
+ rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
+ if (!rc)
+ rc = qeth_layer2_send_setmac(card, addr->sa_data);
+ return rc;
+}
+
+static void
+qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
+ __u8 command, enum qeth_prot_versions prot)
+{
+ memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
+ cmd->hdr.command = command;
+ cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
+ cmd->hdr.seqno = card->seqno.ipa;
+ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
+ cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
+ if (card->options.layer2)
+ cmd->hdr.prim_version_no = 2;
+ else
+ cmd->hdr.prim_version_no = 1;
+ cmd->hdr.param_count = 1;
+ cmd->hdr.prot_version = prot;
+ cmd->hdr.ipa_supported = 0;
+ cmd->hdr.ipa_enabled = 0;
+}
+
+static struct qeth_cmd_buffer *
+qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
+ enum qeth_prot_versions prot)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+
+ iob = qeth_wait_for_buffer(&card->write);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+
+ return iob;
+}
+
+static int
+qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
+{
+ int rc;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+
+ QETH_DBF_TEXT(trace,4,"setdelmc");
+
+ iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
+ if (addr->proto == QETH_PROT_IPV6)
+ memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
+ sizeof(struct in6_addr));
+ else
+ memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
+
+ rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+ return rc;
+}
+static inline void
+qeth_fill_netmask(u8 *netmask, unsigned int len)
+{
+ int i,j;
+ for (i=0;i<16;i++) {
+ j=(len)-(i*8);
+ if (j >= 8)
+ netmask[i] = 0xff;
+ else if (j > 0)
+ netmask[i] = (u8)(0xFF00>>j);
+ else
+ netmask[i] = 0;
+ }
+}
+
+static int
+qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
+ int ipacmd, unsigned int flags)
+{
+ int rc;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ __u8 netmask[16];
+
+ QETH_DBF_TEXT(trace,4,"setdelip");
+ QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
+
+ iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ if (addr->proto == QETH_PROT_IPV6) {
+ memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
+ sizeof(struct in6_addr));
+ qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
+ memcpy(cmd->data.setdelip6.mask, netmask,
+ sizeof(struct in6_addr));
+ cmd->data.setdelip6.flags = flags;
+ } else {
+ memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
+ memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
+ cmd->data.setdelip4.flags = flags;
+ }
+
+ rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+ return rc;
+}
+
+static int
+qeth_layer2_register_addr_entry(struct qeth_card *card,
+ struct qeth_ipaddr *addr)
+{
+ if (!addr->is_multicast)
+ return 0;
+ QETH_DBF_TEXT(trace, 2, "setgmac");
+ QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
+ return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
+}
+
+static int
+qeth_layer2_deregister_addr_entry(struct qeth_card *card,
+ struct qeth_ipaddr *addr)
+{
+ if (!addr->is_multicast)
+ return 0;
+ QETH_DBF_TEXT(trace, 2, "delgmac");
+ QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
+ return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
+}
+
+static int
+qeth_layer3_register_addr_entry(struct qeth_card *card,
+ struct qeth_ipaddr *addr)
+{
+ char buf[50];
+ int rc;
+ int cnt = 3;
+
+ if (addr->proto == QETH_PROT_IPV4) {
+ QETH_DBF_TEXT(trace, 2,"setaddr4");
+ QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
+ } else if (addr->proto == QETH_PROT_IPV6) {
+ QETH_DBF_TEXT(trace, 2, "setaddr6");
+ QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
+ QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
+ } else {
+ QETH_DBF_TEXT(trace, 2, "setaddr?");
+ QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
+ }
+ do {
+ if (addr->is_multicast)
+ rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
+ else
+ rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
+ addr->set_flags);
+ if (rc)
+ QETH_DBF_TEXT(trace, 2, "failed");
+ } while ((--cnt > 0) && rc);
+ if (rc){
+ QETH_DBF_TEXT(trace, 2, "FAILED");
+ qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
+ PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
+ buf, rc, rc);
+ }
+ return rc;
+}
+
/*
 * Deregister a (multicast or unicast) IP address on the card in layer-3
 * mode.  Returns the rc of the IPA command.
 */
static int
qeth_layer3_deregister_addr_entry(struct qeth_card *card,
				  struct qeth_ipaddr *addr)
{
	//char buf[50];
	int rc;

	/* trace which address is being deregistered */
	if (addr->proto == QETH_PROT_IPV4) {
		QETH_DBF_TEXT(trace, 2,"deladdr4");
		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
	} else if (addr->proto == QETH_PROT_IPV6) {
		QETH_DBF_TEXT(trace, 2, "deladdr6");
		QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
		QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
	} else {
		QETH_DBF_TEXT(trace, 2, "deladdr?");
		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
	}
	/* multicast entries use DELIPM, unicast ones DELIP */
	if (addr->is_multicast)
		rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
	else
		rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
					addr->del_flags);
	if (rc) {
		QETH_DBF_TEXT(trace, 2, "failed");
		/* TODO: re-activate this warning as soon as we have a
		 * clean mirco code
		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
		PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
			   buf, rc);
		*/
	}
	return rc;
}
+
+static int
+qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
+{
+ if (card->options.layer2)
+ return qeth_layer2_register_addr_entry(card, addr);
+
+ return qeth_layer3_register_addr_entry(card, addr);
+}
+
+static int
+qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
+{
+ if (card->options.layer2)
+ return qeth_layer2_deregister_addr_entry(card, addr);
+
+ return qeth_layer3_deregister_addr_entry(card, addr);
+}
+
/* ethtool get_tx_csum: tx checksum offload is always reported off */
static u32
qeth_ethtool_get_tx_csum(struct net_device *dev)
{
	/* We may need to say that we support tx csum offload if
	 * we do EDDP or TSO. There are discussions going on to
	 * enforce rules in the stack and in ethtool that make
	 * SG and TSO depend on HW_CSUM. At the moment there are
	 * no such rules....
	 * If we say yes here, we have to checksum outbound packets
	 * any time. */
	return 0;
}
+
/* ethtool set_tx_csum: tx checksum offload cannot be enabled */
static int
qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
{
	return -EINVAL;
}
+
+static u32
+qeth_ethtool_get_rx_csum(struct net_device *dev)
+{
+ struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+ return (card->options.checksum_type == HW_CHECKSUMMING);
+}
+
+static int
+qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+ if (data)
+ card->options.checksum_type = HW_CHECKSUMMING;
+ else
+ card->options.checksum_type = SW_CHECKSUMMING;
+ return 0;
+}
+
+static u32
+qeth_ethtool_get_sg(struct net_device *dev)
+{
+ struct qeth_card *card = (struct qeth_card *)dev->priv;
+
+ return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
+ (dev->features & NETIF_F_SG));
+}
+
+static int
+qeth_ethtool_set_sg(struct net_device *dev, u32 data)
+{
+	struct qeth_card *card = (struct qeth_card *) dev->priv;
+
+	if (!data) {
+		dev->features &= ~NETIF_F_SG;
+		return 0;
+	}
+	/* SG may only be switched on together with large send. */
+	if (card->options.large_send == QETH_LARGE_SEND_NO) {
+		dev->features &= ~NETIF_F_SG;
+		return -EINVAL;
+	}
+	dev->features |= NETIF_F_SG;
+	return 0;
+}
+
+static u32
+qeth_ethtool_get_tso(struct net_device *dev)
+{
+	struct qeth_card *card = (struct qeth_card *) dev->priv;
+
+	/* TSO only counts as enabled while large send is in use. */
+	if (card->options.large_send == QETH_LARGE_SEND_NO)
+		return 0;
+	return (dev->features & NETIF_F_TSO) != 0;
+}
+
+static int
+qeth_ethtool_set_tso(struct net_device *dev, u32 data)
+{
+	struct qeth_card *card = (struct qeth_card *) dev->priv;
+
+	if (!data) {
+		dev->features &= ~NETIF_F_TSO;
+		return 0;
+	}
+	/* TSO may only be switched on together with large send. */
+	if (card->options.large_send == QETH_LARGE_SEND_NO) {
+		dev->features &= ~NETIF_F_TSO;
+		return -EINVAL;
+	}
+	dev->features |= NETIF_F_TSO;
+	return 0;
+}
+
+/* ethtool callbacks, installed on the net_device in qeth_netdev_init()
+ * via SET_ETHTOOL_OPS. */
+static struct ethtool_ops qeth_ethtool_ops = {
+	.get_tx_csum = qeth_ethtool_get_tx_csum,
+	.set_tx_csum = qeth_ethtool_set_tx_csum,
+	.get_rx_csum = qeth_ethtool_get_rx_csum,
+	.set_rx_csum = qeth_ethtool_set_rx_csum,
+	.get_sg      = qeth_ethtool_get_sg,
+	.set_sg      = qeth_ethtool_set_sg,
+	.get_tso     = qeth_ethtool_get_tso,
+	.set_tso     = qeth_ethtool_set_tso,
+};
+
+/* net_device->init callback: wire up driver entry points, header
+ * handling, addressing parameters and feature flags before the device
+ * is registered. */
+static int
+qeth_netdev_init(struct net_device *dev)
+{
+	struct qeth_card *card;
+
+	card = (struct qeth_card *) dev->priv;
+
+	QETH_DBF_TEXT(trace,3,"initdev");
+
+	dev->tx_timeout = &qeth_tx_timeout;
+	dev->watchdog_timeo = QETH_TX_TIMEOUT;
+	dev->open = qeth_open;
+	dev->stop = qeth_stop;
+	dev->hard_start_xmit = qeth_hard_start_xmit;
+	dev->do_ioctl = qeth_do_ioctl;
+	dev->get_stats = qeth_get_stats;
+	dev->change_mtu = qeth_change_mtu;
+	dev->neigh_setup = qeth_neigh_setup;
+	dev->set_multicast_list = qeth_set_multicast_list;
+#ifdef CONFIG_QETH_VLAN
+	dev->vlan_rx_register = qeth_vlan_rx_register;
+	dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
+	dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
+#endif
+	/* Restore the header op saved in qeth_hardsetup_card(). */
+	dev->hard_header = card->orig_hard_header;
+	if (qeth_get_netdev_flags(card) & IFF_NOARP) {
+		/* No ARP on this link: disable L2 header generation and
+		 * caching; fake_ll optionally substitutes a fake LL header. */
+		dev->rebuild_header = NULL;
+		dev->hard_header = NULL;
+		if (card->options.fake_ll)
+			dev->hard_header = qeth_fake_header;
+		dev->header_cache_update = NULL;
+		dev->hard_header_cache = NULL;
+	}
+#ifdef CONFIG_QETH_IPV6
+	/* IPv6 address autoconfiguration: expose the card-provided unique
+	 * id (when the card supplied one) as the device id. */
+	if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
+		card->dev->dev_id = card->info.unique_id & 0xffff;
+#endif
+	dev->hard_header_parse = NULL;
+	dev->set_mac_address = qeth_layer2_set_mac_address;
+	dev->flags |= qeth_get_netdev_flags(card);
+	if ((card->options.fake_broadcast) ||
+	    (card->info.broadcast_capable))
+		dev->flags |= IFF_BROADCAST;
+	/* Link-type header length plus any user-configured extra headroom. */
+	dev->hard_header_len =
+		qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
+	dev->addr_len = OSA_ADDR_LEN;
+	dev->mtu = card->info.initial_mtu;
+
+	SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
+
+	SET_MODULE_OWNER(dev);
+	return 0;
+}
+
+/* Derive the IDX function level from the card type (HiperSockets/IQD
+ * vs. OSA-Express) and the IP address takeover (IPAT) state. */
+static void
+qeth_init_func_level(struct qeth_card *card)
+{
+	int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD);
+
+	if (card->ipato.enabled)
+		card->info.func_level = is_iqd ?
+			QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT :
+			QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
+	else
+		card->info.func_level = is_iqd ?
+			QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT :
+			QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
+}
+
+/**
+ * hardsetup card: initialize MPC and QDIO stuff.
+ * Performs QDIO clear, unit-address query, IDX activation of the read
+ * and write channels and MPC initialization, retrying up to three times
+ * on transient failures.  Returns 0 on success or a negative errno.
+ */
+static int
+qeth_hardsetup_card(struct qeth_card *card)
+{
+	int retries = 3;
+	int rc;
+
+	QETH_DBF_TEXT(setup, 2, "hrdsetup");
+
+retry:
+	/* On a retry, bounce all three subchannels to start from a clean
+	 * slate before redoing the IDX activates. */
+	if (retries < 3){
+		PRINT_WARN("Retrying to do IDX activates.\n");
+		ccw_device_set_offline(CARD_DDEV(card));
+		ccw_device_set_offline(CARD_WDEV(card));
+		ccw_device_set_offline(CARD_RDEV(card));
+		ccw_device_set_online(CARD_RDEV(card));
+		ccw_device_set_online(CARD_WDEV(card));
+		ccw_device_set_online(CARD_DDEV(card));
+	}
+	rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE);
+	if (rc == -ERESTARTSYS) {
+		/* Interrupted by a signal: give up, do not retry. */
+		QETH_DBF_TEXT(setup, 2, "break1");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
+		if (--retries < 0)
+			goto out;
+		else
+			goto retry;
+	}
+	if ((rc = qeth_get_unitaddr(card))){
+		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
+		return rc;
+	}
+	qeth_init_tokens(card);
+	qeth_init_func_level(card);
+	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
+	if (rc == -ERESTARTSYS) {
+		QETH_DBF_TEXT(setup, 2, "break2");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+		if (--retries < 0)
+			goto out;
+		else
+			goto retry;
+	}
+	rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
+	if (rc == -ERESTARTSYS) {
+		QETH_DBF_TEXT(setup, 2, "break3");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
+		if (--retries < 0)
+			goto out;
+		else
+			goto retry;
+	}
+	if ((rc = qeth_mpc_initialize(card))){
+		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
+		goto out;
+	}
+	/* Recovery path: the network device already exists and will be
+	 * reused; just restore its original header op. */
+	if (card->dev) {
+		card->dev->hard_header = card->orig_hard_header;
+		return 0;
+	}
+	/* First set_online: allocate the netdev now. */
+	card->dev = qeth_get_netdevice(card->info.type,
+				       card->info.link_type);
+	if (!card->dev){
+		qeth_qdio_clear_card(card, card->info.type ==
+				     QETH_CARD_TYPE_OSAE);
+		rc = -ENODEV;
+		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
+		goto out;
+	}
+	card->dev->priv = card;
+	/* Save the default header op; IFF_NOARP setup may override it. */
+	card->orig_hard_header = card->dev->hard_header;
+	card->dev->type = qeth_get_arphdr_type(card->info.type,
+					       card->info.link_type);
+	card->dev->init = qeth_netdev_init;
+	return 0;
+out:
+	PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
+	return rc;
+}
+
+/* Default SETASSPARMS reply handler: fold the subcommand return code into
+ * the IPA header, record the per-protocol enabled-assist mask, and capture
+ * the checksum feature mask from an INBOUND_CHECKSUM start reply.
+ * NOTE(review): the dbf tag "defadpcb" duplicates the one used by
+ * qeth_default_setadapterparms_cb(), which makes traces ambiguous —
+ * possibly meant to be "defasscb"; confirm before changing. */
+static int
+qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
+			    unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,4,"defadpcb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0){
+		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+			card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+#ifdef CONFIG_QETH_IPV6
+		if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+			card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+#endif
+	}
+	/* A successful INBOUND_CHECKSUM start reports which checksum
+	 * features the hardware handles; remember the mask for enable. */
+	if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
+	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+		card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
+		QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
+	}
+	return 0;
+}
+
+/* Default SETADAPTERPARMS reply handler: fold the subcommand return code
+ * into the IPA header so callers see a single status. */
+static int
+qeth_default_setadapterparms_cb(struct qeth_card *card,
+				struct qeth_reply *reply,
+				unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+	QETH_DBF_TEXT(trace,4,"defadpcb");
+
+	if (!cmd->hdr.return_code)
+		cmd->hdr.return_code =
+			cmd->data.setadapterparms.hdr.return_code;
+	return 0;
+}
+
+/* Reply handler for QUERY_COMMANDS_SUPPORTED: remember the reported LAN
+ * type (if the low 7 bits carry one) and the mask of supported SETADP
+ * subcommands, then apply the default return-code folding. */
+static int
+qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
+			      unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,3,"quyadpcb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
+		card->info.link_type =
+			cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+	card->options.adp.supported_funcs =
+		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
+	return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+}
+
+/* Ask the adapter which SETADAPTERPARMS subcommands it supports; the
+ * reply is digested by qeth_query_setadapterparms_cb(). */
+static int
+qeth_query_setadapterparms(struct qeth_card *card)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(trace,3,"queryadp");
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
+				   sizeof(struct qeth_ipacmd_setadpparms));
+	return qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb,
+				 NULL);
+}
+
+/* Reply handler for ALTER_MAC_ADDRESS (READ_MAC): copy the MAC address
+ * reported by the adapter into the net_device, then apply the default
+ * return-code folding. */
+static int
+qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
+				   struct qeth_reply *reply,
+				   unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,4,"chgmaccb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	memcpy(card->dev->dev_addr,
+	       &cmd->data.setadapterparms.data.change_addr.addr,OSA_ADDR_LEN);
+	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+	return 0;
+}
+
+/* Read the adapter's MAC address via SETADP ALTER_MAC_ADDRESS with the
+ * CHANGE_ADDR_READ_MAC subfunction; the reply callback stores the result
+ * in card->dev->dev_addr. */
+static int
+qeth_setadpparms_change_macaddr(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,4,"chgmac");
+
+	iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
+				   sizeof(struct qeth_ipacmd_setadpparms));
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
+	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
+	/* Current dev_addr is sent along; the card replies with its MAC. */
+	memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
+	       card->dev->dev_addr, OSA_ADDR_LEN);
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
+			       NULL);
+	return rc;
+}
+
+/* Send a SETADAPTERPARMS subcommand that carries a single mode value. */
+static int
+qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,4,"adpmode");
+
+	iob = qeth_get_adapter_cmd(card, command,
+				   sizeof(struct qeth_ipacmd_setadpparms));
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setadapterparms.data.mode = mode;
+	return qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
+				 NULL);
+}
+
+/* Token-ring (HSTR) specific adapter setup: program broadcast mode and
+ * MAC-address (canonical/non-canonical) mode if the card supports the
+ * SETADP subcommands; otherwise warn when the configured options differ
+ * from the hardware defaults (ALLRINGS / NONCANONICAL). */
+static inline int
+qeth_setadapter_hstr(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(trace,4,"adphstr");
+
+	if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
+		rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
+					   card->options.broadcast_mode);
+		if (rc)
+			PRINT_WARN("couldn't set broadcast mode on "
+				   "device %s: x%x\n",
+				   CARD_BUS_ID(card), rc);
+		rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
+					   card->options.macaddr_mode);
+		if (rc)
+			PRINT_WARN("couldn't set macaddr mode on "
+				   "device %s: x%x\n", CARD_BUS_ID(card), rc);
+		/* Note: only the macaddr-mode rc is returned; a broadcast-
+		 * mode failure is reported via PRINT_WARN only. */
+		return rc;
+	}
+	if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
+		PRINT_WARN("set adapter parameters not available "
+			   "to set broadcast mode, using ALLRINGS "
+			   "on device %s:\n", CARD_BUS_ID(card));
+	if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
+		PRINT_WARN("set adapter parameters not available "
+			   "to set macaddr mode, using NONCANONICAL "
+			   "on device %s:\n", CARD_BUS_ID(card));
+	return 0;
+}
+
+/* Query and apply adapter parameters: discover supported SETADP
+ * subcommands, read the MAC address and, on token-ring links, configure
+ * broadcast/macaddr modes.  Missing SETADAPTERPARMS support is not an
+ * error.
+ * NOTE(review): when the MAC read fails on a non-token-ring link, its rc
+ * is returned even though the failure is only warned about — confirm
+ * whether a hard error is intended here. */
+static int
+qeth_setadapter_parms(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(setup, 2, "setadprm");
+
+	if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
+		PRINT_WARN("set adapter parameters not supported "
+			   "on device %s.\n",
+			   CARD_BUS_ID(card));
+		QETH_DBF_TEXT(setup, 2, " notsupp");
+		return 0;
+	}
+	rc = qeth_query_setadapterparms(card);
+	if (rc) {
+		PRINT_WARN("couldn't set adapter parameters on device %s: "
+			   "x%x\n", CARD_BUS_ID(card), rc);
+		return rc;
+	}
+	if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
+		rc = qeth_setadpparms_change_macaddr(card);
+		if (rc)
+			PRINT_WARN("couldn't get MAC address on "
+				   "device %s: x%x\n",
+				   CARD_BUS_ID(card), rc);
+	}
+
+	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
+		rc = qeth_setadapter_hstr(card);
+
+	return rc;
+}
+
+/* Layer-2 bring-up: read the adapter MAC address and register it with
+ * the card via SETMAC.
+ * NOTE(review): a SETMAC failure is only recorded in the dbf trace and
+ * the function still returns 0 — confirm this best-effort behavior is
+ * deliberate. */
+static int
+qeth_layer2_initialize(struct qeth_card *card)
+{
+	int rc = 0;
+
+
+	QETH_DBF_TEXT(setup, 2, "doL2init");
+	QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));
+
+	rc = qeth_setadpparms_change_macaddr(card);
+	if (rc) {
+		PRINT_WARN("couldn't get MAC address on "
+			   "device %s: x%x\n",
+			   CARD_BUS_ID(card), rc);
+		QETH_DBF_TEXT_(setup, 2,"1err%d",rc);
+		return rc;
+	}
+	QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN);
+
+	rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]);
+	if (rc)
+		QETH_DBF_TEXT_(setup, 2,"2err%d",rc);
+	return 0;
+}
+
+
+/* Issue a STARTLAN or STOPLAN IPA command for the given protocol. */
+static int
+qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
+		       enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+
+	iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
+	return qeth_send_ipa_cmd(card, iob, NULL, NULL);
+}
+
+/* Issue STARTLAN for the given protocol version. */
+static int
+qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
+{
+	QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
+
+	return qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
+}
+
+/* Issue STOPLAN (IPv4) to the card, used on shutdown paths. */
+static int
+qeth_send_stoplan(struct qeth_card *card)
+{
+	int rc = 0;
+
+	/*
+	 * TODO: according to the IPA format document page 14,
+	 * TCP/IP (we!) never issue a STOPLAN
+	 * is this right ?!?
+	 */
+	QETH_DBF_TEXT(trace, 2, "stoplan");
+
+	rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
+	return rc;
+}
+
+/* Reply handler for QIPASSIST: record the supported and enabled IP
+ * assist masks for the protocol the query was sent with. */
+static int
+qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
+			unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(setup, 2, "qipasscb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
+		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
+		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+	} else {
+#ifdef CONFIG_QETH_IPV6
+		/* Anything not IPv4 is treated as IPv6 here. */
+		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
+		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+#endif
+	}
+	QETH_DBF_TEXT(setup, 2, "suppenbl");
+	QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
+	QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
+	return 0;
+}
+
+/* Query supported/enabled IP assists for the given protocol.  Only
+ * meaningful in layer-3 mode; layer-2 configurations get -EPERM. */
+static int
+qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
+	if (card->options.layer2) {
+		QETH_DBF_TEXT(setup, 2, "noprmly2");
+		return -EPERM;
+	}
+
+	iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
+	return qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
+}
+
+/* Build a SETASSPARMS command buffer for the given assist/subcommand.
+ * hdr.length is the 8-byte setassparms header plus the payload length;
+ * the caller fills in the payload (see qeth_send_setassparms). */
+static struct qeth_cmd_buffer *
+qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
+			 __u16 cmd_code, __u16 len,
+			 enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,4,"getasscm");
+	iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
+
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setassparms.hdr.assist_no = ipa_func;
+	cmd->data.setassparms.hdr.length = 8 + len;
+	cmd->data.setassparms.hdr.command_code = cmd_code;
+	cmd->data.setassparms.hdr.return_code = 0;
+	cmd->data.setassparms.hdr.seq_no = 0;
+
+	return iob;
+}
+
+/* Fill the SETASSPARMS payload and send the command.  Payloads up to 32
+ * bits travel inline in flags_32bit; larger payloads are copied from the
+ * buffer 'data' points to ('len' bytes). */
+static int
+qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+		      __u16 len, long data,
+		      int (*reply_cb)
+		      (struct qeth_card *,struct qeth_reply *,unsigned long),
+		      void *reply_param)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,4,"sendassp");
+
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	if (len > sizeof(__u32))
+		memcpy(&cmd->data.setassparms.data, (void *) data, len);
+	else
+		cmd->data.setassparms.data.flags_32bit = (__u32) data;
+
+	return qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
+}
+
+#ifdef CONFIG_QETH_IPV6
+/* Send a zero-length SETASSPARMS subcommand for IPv6. */
+static int
+qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
+				  enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
+
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(trace,4,"simassp6");
+	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
+				       0, QETH_PROT_IPV6);
+	return qeth_send_setassparms(card, iob, 0, 0,
+				     qeth_default_setassparms_cb, NULL);
+}
+#endif
+
+/* Send a SETASSPARMS subcommand for IPv4, inlining up to 32 bits of
+ * data ('data' == 0 means no payload at all). */
+static int
+qeth_send_simple_setassparms(struct qeth_card *card,
+			     enum qeth_ipa_funcs ipa_func,
+			     __u16 cmd_code, long data)
+{
+	struct qeth_cmd_buffer *iob;
+	int length = data ? sizeof(__u32) : 0;
+
+	QETH_DBF_TEXT(trace,4,"simassp4");
+	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
+				       length, QETH_PROT_IPV4);
+	return qeth_send_setassparms(card, iob, length, data,
+				     qeth_default_setassparms_cb, NULL);
+}
+
+/* Best-effort start of the ARP processing assist; missing support is
+ * not an error. */
+static inline int
+qeth_start_ipa_arp_processing(struct qeth_card *card)
+{
+	int ret = 0;
+
+	QETH_DBF_TEXT(trace,3,"ipaarp");
+
+	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
+		PRINT_WARN("ARP processing not supported "
+			   "on %s!\n", QETH_CARD_IFNAME(card));
+	} else {
+		ret = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
+						   IPA_CMD_ASS_START, 0);
+		if (ret)
+			PRINT_WARN("Could not start ARP processing "
+				   "assist on %s: 0x%x\n",
+				   QETH_CARD_IFNAME(card), ret);
+	}
+	return ret;
+}
+
+/* Start the hardware IP fragmentation assist if the card supports it. */
+static int
+qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
+{
+	int ret;
+
+	QETH_DBF_TEXT(trace,3,"ipaipfrg");
+
+	if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
+		PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
+			   QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	ret = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
+					   IPA_CMD_ASS_START, 0);
+	if (!ret)
+		PRINT_INFO("Hardware IP fragmentation enabled \n");
+	else
+		PRINT_WARN("Could not start Hardware IP fragmentation "
+			   "assist on %s: 0x%x\n",
+			   QETH_CARD_IFNAME(card), ret);
+	return ret;
+}
+
+/* Start the inbound source-MAC assist; only relevant when fake
+ * link-layer headers are in use. */
+static int
+qeth_start_ipa_source_mac(struct qeth_card *card)
+{
+	int ret;
+
+	QETH_DBF_TEXT(trace,3,"stsrcmac");
+
+	if (!card->options.fake_ll)
+		return -EOPNOTSUPP;
+
+	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
+		PRINT_INFO("Inbound source address not "
+			   "supported on %s\n", QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	ret = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
+					   IPA_CMD_ASS_START, 0);
+	if (ret)
+		PRINT_WARN("Could not start inbound source "
+			   "assist on %s: 0x%x\n",
+			   QETH_CARD_IFNAME(card), ret);
+	return ret;
+}
+
+/* Start the VLAN priority assist and, on success, advertise hardware
+ * VLAN features on the net_device.  No-op when CONFIG_QETH_VLAN is off. */
+static int
+qeth_start_ipa_vlan(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(trace,3,"strtvlan");
+
+#ifdef CONFIG_QETH_VLAN
+	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
+		PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
+					  IPA_CMD_ASS_START,0);
+	if (rc) {
+		PRINT_WARN("Could not start vlan "
+			   "assist on %s: 0x%x\n",
+			   QETH_CARD_IFNAME(card), rc);
+	} else {
+		PRINT_INFO("VLAN enabled \n");
+		card->dev->features |=
+			NETIF_F_HW_VLAN_FILTER |
+			NETIF_F_HW_VLAN_TX |
+			NETIF_F_HW_VLAN_RX;
+	}
+#endif /* QETH_VLAN */
+	return rc;
+}
+
+/* Start the multicast assist and flag the device IFF_MULTICAST on
+ * success. */
+static int
+qeth_start_ipa_multicast(struct qeth_card *card)
+{
+	int ret;
+
+	QETH_DBF_TEXT(trace,3,"stmcast");
+
+	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
+		PRINT_WARN("Multicast not supported on %s\n",
+			   QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	ret = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
+					   IPA_CMD_ASS_START,0);
+	if (!ret) {
+		PRINT_INFO("Multicast enabled\n");
+		card->dev->flags |= IFF_MULTICAST;
+	} else
+		PRINT_WARN("Could not start multicast "
+			   "assist on %s: rc=%i\n",
+			   QETH_CARD_IFNAME(card), ret);
+	return ret;
+}
+
+#ifdef CONFIG_QETH_IPV6
+/* IPv6 soft setup: STARTLAN for IPv6, query the IPv6 assists, then start
+ * the IPv6 assist on both the IPv4 and IPv6 command paths and enable
+ * passthrough.  The TX queue is stopped across the STARTLAN. */
+static int
+qeth_softsetup_ipv6(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(trace,3,"softipv6");
+
+	netif_stop_queue(card->dev);
+	rc = qeth_send_startlan(card, QETH_PROT_IPV6);
+	if (rc) {
+		PRINT_ERR("IPv6 startlan failed on %s\n",
+			  QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	netif_wake_queue(card->dev);
+	rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
+	if (rc) {
+		PRINT_ERR("IPv6 query ipassist failed on %s\n",
+			  QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	/* Start the IPv6 assist via the IPv4 command path (data value 3). */
+	rc = qeth_send_simple_setassparms(card, IPA_IPV6,
+					  IPA_CMD_ASS_START, 3);
+	if (rc) {
+		PRINT_WARN("IPv6 start assist (version 4) failed "
+			   "on %s: 0x%x\n",
+			   QETH_CARD_IFNAME(card), rc);
+		return rc;
+	}
+	rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
+					       IPA_CMD_ASS_START);
+	if (rc) {
+		PRINT_WARN("IPV6 start assist (version 6) failed "
+			   "on %s: 0x%x\n",
+			   QETH_CARD_IFNAME(card), rc);
+		return rc;
+	}
+	rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
+					       IPA_CMD_ASS_START);
+	if (rc) {
+		PRINT_WARN("Could not enable passthrough "
+			   "on %s: 0x%x\n",
+			   QETH_CARD_IFNAME(card), rc);
+		return rc;
+	}
+	PRINT_INFO("IPV6 enabled \n");
+	return 0;
+}
+
+#endif
+
+/* Kick off IPv6 support if the card advertises the IPv6 assist; missing
+ * support is not an error.  Compiles to a no-op without CONFIG_QETH_IPV6. */
+static int
+qeth_start_ipa_ipv6(struct qeth_card *card)
+{
+	int rc = 0;
+#ifdef CONFIG_QETH_IPV6
+	QETH_DBF_TEXT(trace,3,"strtipv6");
+
+	if (!qeth_is_supported(card, IPA_IPV6)) {
+		PRINT_WARN("IPv6 not supported on %s\n",
+			   QETH_CARD_IFNAME(card));
+		return 0;
+	}
+	rc = qeth_softsetup_ipv6(card);
+#endif
+	return rc ;
+}
+
+/* Enable broadcast filtering.  broadcast_capable is stepped through the
+ * stages the card actually reached: 0 -> WITH_ECHO (after configure) ->
+ * WITHOUT_ECHO (after echo filtering is enabled); IFF_BROADCAST is set
+ * on the device iff any stage succeeded. */
+static int
+qeth_start_ipa_broadcast(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(trace,3,"stbrdcst");
+	card->info.broadcast_capable = 0;
+	if (!qeth_is_supported(card, IPA_FILTERING)) {
+		PRINT_WARN("Broadcast not supported on %s\n",
+			   QETH_CARD_IFNAME(card));
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		PRINT_WARN("Could not enable broadcasting filtering "
+			   "on %s: 0x%x\n",
+			   QETH_CARD_IFNAME(card), rc);
+		goto out;
+	}
+
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_CONFIGURE, 1);
+	if (rc) {
+		PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
+			   QETH_CARD_IFNAME(card), rc);
+		goto out;
+	}
+	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
+	PRINT_INFO("Broadcast enabled \n");
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_ENABLE, 1);
+	if (rc) {
+		PRINT_WARN("Could not set up broadcast echo filtering on "
+			   "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
+		goto out;
+	}
+	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
+out:
+	if (card->info.broadcast_capable)
+		card->dev->flags |= IFF_BROADCAST;
+	else
+		card->dev->flags &= ~IFF_BROADCAST;
+	return rc;
+}
+
+/* Start the inbound checksum assist, then enable it with the checksum
+ * feature mask learned from the start reply (card->info.csum_mask). */
+static int
+qeth_send_checksum_command(struct qeth_card *card)
+{
+	int rc;
+
+	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
+			   "0x%x,\ncontinuing using Inbound SW Checksumming\n",
+			   QETH_CARD_IFNAME(card), rc);
+		return rc;
+	}
+	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
+					  IPA_CMD_ASS_ENABLE,
+					  card->info.csum_mask);
+	if (rc)
+		PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
+			   "0x%x,\ncontinuing using Inbound SW Checksumming\n",
+			   QETH_CARD_IFNAME(card), rc);
+	return rc;
+}
+
+/* Arrange inbound checksumming according to the configured mode; falls
+ * back to SW checksumming when the HW assist is unavailable. */
+static int
+qeth_start_ipa_checksum(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(trace,3,"strtcsum");
+
+	switch (card->options.checksum_type) {
+	case NO_CHECKSUMMING:
+		PRINT_WARN("Using no checksumming on %s.\n",
+			   QETH_CARD_IFNAME(card));
+		return 0;
+	case SW_CHECKSUMMING:
+		PRINT_WARN("Using SW checksumming on %s.\n",
+			   QETH_CARD_IFNAME(card));
+		return 0;
+	default:
+		break;
+	}
+	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
+		PRINT_WARN("Inbound HW Checksumming not "
+			   "supported on %s,\ncontinuing "
+			   "using Inbound SW Checksumming\n",
+			   QETH_CARD_IFNAME(card));
+		card->options.checksum_type = SW_CHECKSUMMING;
+		return 0;
+	}
+	rc = qeth_send_checksum_command(card);
+	if (!rc)
+		PRINT_INFO("HW Checksumming (inbound) enabled \n");
+	return rc;
+}
+
+/* Start the outbound TSO assist.  On failure while large_send=TSO was
+ * configured, fall back to no large send and strip TSO/SG from the
+ * device features. */
+static int
+qeth_start_ipa_tso(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(trace,3,"sttso");
+
+	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+		PRINT_WARN("Outbound TSO not supported on %s\n",
+			   QETH_CARD_IFNAME(card));
+		rc = -EOPNOTSUPP;
+	} else {
+		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+						  IPA_CMD_ASS_START,0);
+		if (rc)
+			PRINT_WARN("Could not start outbound TSO "
+				   "assist on %s: rc=%i\n",
+				   QETH_CARD_IFNAME(card), rc);
+		else
+			PRINT_INFO("Outbound TSO enabled\n");
+	}
+	if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
+		card->options.large_send = QETH_LARGE_SEND_NO;
+		card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
+	}
+	return rc;
+}
+
+/* Start every IP assist, best-effort: each starter logs its own failure
+ * and setup continues regardless ("go on"). */
+static int
+qeth_start_ipassists(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(trace,3,"strtipas");
+	qeth_start_ipa_arp_processing(card);	/* go on*/
+	qeth_start_ipa_ip_fragmentation(card);	/* go on*/
+	qeth_start_ipa_source_mac(card);	/* go on*/
+	qeth_start_ipa_vlan(card);		/* go on*/
+	qeth_start_ipa_multicast(card);		/* go on*/
+	qeth_start_ipa_ipv6(card);		/* go on*/
+	qeth_start_ipa_broadcast(card);		/* go on*/
+	qeth_start_ipa_checksum(card);		/* go on*/
+	qeth_start_ipa_tso(card);		/* go on*/
+	return 0;
+}
+
+/* Send a SETRTG command configuring the router role for 'prot'. */
+static int
+qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
+		     enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,4,"setroutg");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setrtg.type = (type);
+	return qeth_send_ipa_cmd(card, iob, NULL, NULL);
+}
+
+/* Validate the requested router role against the card type and, for the
+ * OSA multicast-router case, assist support; unsupported roles are
+ * demoted to NO_ROUTER with a warning. */
+static void
+qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
+			  enum qeth_prot_versions prot)
+{
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
+		/* HiperSockets: connector and multicast roles are valid. */
+		switch (*type) {
+		case NO_ROUTER:
+		case PRIMARY_CONNECTOR:
+		case SECONDARY_CONNECTOR:
+		case MULTICAST_ROUTER:
+			return;
+		default:
+			goto out_inval;
+		}
+	} else {
+		switch (*type) {
+		case NO_ROUTER:
+		case PRIMARY_ROUTER:
+		case SECONDARY_ROUTER:
+			return;
+		case MULTICAST_ROUTER:
+			if (qeth_is_ipafunc_supported(card, prot,
+						      IPA_OSA_MC_ROUTER))
+				return;
+			/* fallthrough: multicast routing not supported */
+		default:
+			goto out_inval;
+		}
+	}
+out_inval:
+	PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
+		   "Router status set to 'no router'.\n",
+		   ((*type == PRIMARY_ROUTER)? "primary router" :
+		    (*type == SECONDARY_ROUTER)? "secondary router" :
+		    (*type == PRIMARY_CONNECTOR)? "primary connector" :
+		    (*type == SECONDARY_CONNECTOR)? "secondary connector" :
+		    (*type == MULTICAST_ROUTER)? "multicast router" :
+		    "unknown"),
+		   card->dev->name);
+	*type = NO_ROUTER;
+}
+
+/* Validate and program the IPv4 router role; on failure the option is
+ * reset to NO_ROUTER and the error is returned. */
+int
+qeth_setrouting_v4(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(trace,3,"setrtg4");
+
+	qeth_correct_routing_type(card, &card->options.route4.type,
+				  QETH_PROT_IPV4);
+
+	rc = qeth_send_setrouting(card, card->options.route4.type,
+				  QETH_PROT_IPV4);
+	if (rc) {
+		card->options.route4.type = NO_ROUTER;
+		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
+			   "Type set to 'no router'.\n",
+			   rc, QETH_CARD_IFNAME(card));
+	}
+	return rc;
+}
+
+/* Validate and program the IPv6 router role.  Skipped entirely for
+ * NO_ROUTER, or for an OSA multicast-router request the card cannot
+ * serve; a no-op without CONFIG_QETH_IPV6. */
+int
+qeth_setrouting_v6(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(trace,3,"setrtg6");
+#ifdef CONFIG_QETH_IPV6
+
+	qeth_correct_routing_type(card, &card->options.route6.type,
+				  QETH_PROT_IPV6);
+
+	if ((card->options.route6.type == NO_ROUTER) ||
+	    ((card->info.type == QETH_CARD_TYPE_OSAE) &&
+	     (card->options.route6.type == MULTICAST_ROUTER) &&
+	     !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
+		return 0;
+	rc = qeth_send_setrouting(card, card->options.route6.type,
+				  QETH_PROT_IPV6);
+	if (rc) {
+		card->options.route6.type = NO_ROUTER;
+		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
+			   "Type set to 'no router'.\n",
+			   rc, QETH_CARD_IFNAME(card));
+	}
+#endif
+	return rc;
+}
+
+/* Apply the configured large-send mode to the device feature flags,
+ * stopping the TX queue around the change.  A TSO request the card
+ * cannot honor is downgraded to QETH_LARGE_SEND_NO with -EOPNOTSUPP. */
+int
+qeth_set_large_send(struct qeth_card *card)
+{
+	int rc = 0;
+
+	/* Nothing to do before the netdev exists. */
+	if (card->dev == NULL)
+		return 0;
+
+	netif_stop_queue(card->dev);
+	switch (card->options.large_send) {
+	case QETH_LARGE_SEND_EDDP:
+		card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+		break;
+	case QETH_LARGE_SEND_TSO:
+		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
+			card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+		} else {
+			PRINT_WARN("TSO not supported on %s. "
+				   "large_send set to 'no'.\n",
+				   card->dev->name);
+			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+			card->options.large_send = QETH_LARGE_SEND_NO;
+			rc = -EOPNOTSUPP;
+		}
+		break;
+	default: /* includes QETH_LARGE_SEND_NO */
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+		break;
+	}
+
+	netif_wake_queue(card->dev);
+	return rc;
+}
+
+/*
+ * softsetup card: init IPA stuff.
+ * Issues STARTLAN, then performs either the layer-2 bring-up (MAC +
+ * VLAN registration) or the layer-3 bring-up (adapter parms, IP
+ * assists, routing).  Individual failures after STARTLAN are logged in
+ * the dbf trace but do not abort setup; the function returns 0 then.
+ */
+static int
+qeth_softsetup_card(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(setup, 2, "softsetp");
+
+	if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
+		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
+		/* 0xe080: the LAN itself is offline; continue setup and
+		 * let link-up recovery handle it later. */
+		if (rc == 0xe080){
+			/* Fixed message typo: "if offline" -> "is offline". */
+			PRINT_WARN("LAN on card %s is offline! "
+				   "Continuing softsetup.\n",
+				   CARD_BUS_ID(card));
+			card->lan_online = 0;
+		} else
+			return rc;
+	} else
+		card->lan_online = 1;
+	if (card->options.layer2) {
+		card->dev->features |=
+			NETIF_F_HW_VLAN_FILTER |
+			NETIF_F_HW_VLAN_TX |
+			NETIF_F_HW_VLAN_RX;
+		card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST;
+		card->info.broadcast_capable=1;
+		if ((rc = qeth_layer2_initialize(card))) {
+			QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
+			return rc;
+		}
+#ifdef CONFIG_QETH_VLAN
+		qeth_layer2_process_vlans(card, 0);
+#endif
+		goto out;
+	}
+	if ((card->options.large_send == QETH_LARGE_SEND_EDDP) ||
+	    (card->options.large_send == QETH_LARGE_SEND_TSO))
+		card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+	else
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+
+	if ((rc = qeth_setadapter_parms(card)))
+		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
+	if ((rc = qeth_start_ipassists(card)))
+		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+	if ((rc = qeth_setrouting_v4(card)))
+		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
+	if ((rc = qeth_setrouting_v6(card)))
+		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
+out:
+	netif_stop_queue(card->dev);
+	return 0;
+}
+
+#ifdef CONFIG_QETH_IPV6
+/* Reply handler for CREATE_ADDR: store the 16-bit unique id the card
+ * returns (bytes 6..7 of the reply field); on failure fall back to a
+ * locally generated default id and warn about possible IPv6 address
+ * collisions across LPARs. */
+static int
+qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
+		      unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0)
+		card->info.unique_id = *((__u16 *)
+				&cmd->data.create_destroy_addr.unique_id[6]);
+	else {
+		card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
+				       UNIQUE_ID_NOT_BY_CARD;
+		PRINT_WARN("couldn't get a unique id from the card on device "
+			   "%s (result=x%x), using default id. ipv6 "
+			   "autoconfig on other lpars may lead to duplicate "
+			   "ip addresses. please use manually "
+			   "configured ones.\n",
+			   CARD_BUS_ID(card), cmd->hdr.return_code);
+	}
+	return 0;
+}
+#endif
+
+/* Hand the IPv6 interface identifier ("unique id") back to the card via
+ * DESTROY_ADDR so it can be reassigned.  Ids we generated ourselves
+ * (UNIQUE_ID_NOT_BY_CARD) are not returned; without CONFIG_QETH_IPV6
+ * only the default id is re-established locally. */
+static int
+qeth_put_unique_id(struct qeth_card *card)
+{
+
+	int rc = 0;
+#ifdef CONFIG_QETH_IPV6
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,2,"puniqeid");
+
+	if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
+	    	UNIQUE_ID_NOT_BY_CARD)
+		return -1;
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
+				     QETH_PROT_IPV6);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	/* Layout mirrors the CREATE_ADDR reply: MAC in bytes 0..5, the
+	 * 16-bit unique id in bytes 6..7. */
+	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
+		card->info.unique_id;
+	memcpy(&cmd->data.create_destroy_addr.unique_id[0],
+	       card->dev->dev_addr, OSA_ADDR_LEN);
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+#else
+	card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
+			       UNIQUE_ID_NOT_BY_CARD;
+#endif
+	return rc;
+}
+
+/**
+ * Clear IP List.
+ * Frees the whole to-do list and drains the active IP list.  With
+ * 'clean' set, each active address is also deregistered from the card
+ * (ip_lock is dropped around that call since it performs card I/O).
+ * With 'recover' set, non-multicast addresses are moved back onto the
+ * to-do list for later re-registration instead of being freed.
+ */
+static void
+qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
+{
+	struct qeth_ipaddr *addr, *tmp;
+	unsigned long flags;
+
+	QETH_DBF_TEXT(trace,4,"clearip");
+	spin_lock_irqsave(&card->ip_lock, flags);
+	/* clear todo list */
+	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
+		list_del(&addr->entry);
+		kfree(addr);
+	}
+
+	while (!list_empty(&card->ip_list)) {
+		addr = list_entry(card->ip_list.next,
+				  struct qeth_ipaddr, entry);
+		list_del_init(&addr->entry);
+		if (clean) {
+			/* Deregistration talks to the card; must not hold
+			 * the spinlock across it. */
+			spin_unlock_irqrestore(&card->ip_lock, flags);
+			qeth_deregister_addr_entry(card, addr);
+			spin_lock_irqsave(&card->ip_lock, flags);
+		}
+		if (!recover || addr->is_multicast) {
+			kfree(addr);
+			continue;
+		}
+		list_add_tail(&addr->entry, card->ip_tbd_list);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+}
+
+/* Publish a new mask of threads allowed to run; with clear_start_mask,
+ * also drop pending start requests outside the mask.  Waiters on
+ * wait_q are woken to re-evaluate. */
+static void
+qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
+			 int clear_start_mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	card->thread_allowed_mask = threads;
+	if (clear_start_mask)
+		card->thread_start_mask &= threads;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+
+/* Nonzero iff any of the given thread bits are currently running. */
+static inline int
+qeth_threads_running(struct qeth_card *card, unsigned long threads)
+{
+	unsigned long flags;
+	int running;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	running = (card->thread_running_mask & threads);
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return running;
+}
+
/*
 * Sleep interruptibly until none of the thread bits in @threads is
 * running anymore.  Returns 0 on success or -ERESTARTSYS when the
 * wait was interrupted by a signal.
 */
static int
qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
	return wait_event_interruptible(card->wait_q,
			qeth_threads_running(card, threads) == 0);
}
+
/*
 * Shut the card down by walking its state machine backwards:
 * UP -> SOFTSETUP -> HARDSETUP -> DOWN.  Each if-block handles one
 * transition and falls through to the next, so a card in any
 * intermediate state is torn down from there.  A "soft" stop
 * (use_hard_stop == 0) additionally tells the adapter to remove the
 * MAC and stop the LAN before the channels are cleared.
 * Returns 0 or the last error seen from a teardown step.
 */
static int
qeth_stop_card(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(setup ,2,"stopcard");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));

	/* forbid all background threads, then wait for everything but
	 * the recovery thread (we may be running inside it) to finish */
	qeth_set_allowed_threads(card, 0, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
		return -ERESTARTSYS;
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		rtnl_lock();
		dev_close(card->dev);
		rtnl_unlock();
		if (!card->use_hard_stop) {
			__u8 *mac = &card->dev->dev_addr[0];
			rc = qeth_layer2_send_delmac(card, mac);
			QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
			if ((rc = qeth_send_stoplan(card)))
				QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
#ifdef CONFIG_QETH_VLAN
		if (card->options.layer2)
			qeth_layer2_process_vlans(card, 1);
#endif
		/* on a soft stop also deregister the addresses (clean),
		 * keeping them queued for a possible recovery */
		qeth_clear_ip_list(card, !card->use_hard_stop, 1);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		if ((!card->use_hard_stop) &&
		    (!card->options.layer2))
			if ((rc = qeth_put_unique_id(card)))
				QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	/* hard-stop request is consumed by this teardown */
	card->use_hard_stop = 0;
	return rc;
}
+
+
/*
 * Ask the adapter for a unique id via an IPv6 CREATE_ADDR IPA command
 * (result is picked up in qeth_get_unique_id_cb).  Without IPv6
 * support - either not compiled in or not offered by the card - a
 * locally generated fallback id is stored instead.
 * Returns 0 or the error from sending the IPA command.
 */
static int
qeth_get_unique_id(struct qeth_card *card)
{
	int rc = 0;
#ifdef CONFIG_QETH_IPV6
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(setup, 2, "guniqeid");

	if (!qeth_is_supported(card,IPA_IPV6)) {
		card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
					UNIQUE_ID_NOT_BY_CARD;
		return 0;
	}

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	/* seed the request with the current unique id */
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
			card->info.unique_id;

	rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
#else
	card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
				UNIQUE_ID_NOT_BY_CARD;
#endif
	return rc;
}
+static void
+qeth_print_status_with_portname(struct qeth_card *card)
+{
+ char dbf_text[15];
+ int i;
+
+ sprintf(dbf_text, "%s", card->info.portname + 1);
+ for (i = 0; i < 8; i++)
+ dbf_text[i] =
+ (char) _ebcasc[(__u8) dbf_text[i]];
+ dbf_text[8] = 0;
+ printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
+ "with link type %s (portname: %s)\n",
+ CARD_RDEV_ID(card),
+ CARD_WDEV_ID(card),
+ CARD_DDEV_ID(card),
+ qeth_get_cardname(card),
+ (card->info.mcl_level[0]) ? " (level: " : "",
+ (card->info.mcl_level[0]) ? card->info.mcl_level : "",
+ (card->info.mcl_level[0]) ? ")" : "",
+ qeth_get_cardname_short(card),
+ dbf_text);
+
+}
+
+static void
+qeth_print_status_no_portname(struct qeth_card *card)
+{
+ if (card->info.portname[0])
+ printk("qeth: Device %s/%s/%s is a%s "
+ "card%s%s%s\nwith link type %s "
+ "(no portname needed by interface).\n",
+ CARD_RDEV_ID(card),
+ CARD_WDEV_ID(card),
+ CARD_DDEV_ID(card),
+ qeth_get_cardname(card),
+ (card->info.mcl_level[0]) ? " (level: " : "",
+ (card->info.mcl_level[0]) ? card->info.mcl_level : "",
+ (card->info.mcl_level[0]) ? ")" : "",
+ qeth_get_cardname_short(card));
+ else
+ printk("qeth: Device %s/%s/%s is a%s "
+ "card%s%s%s\nwith link type %s.\n",
+ CARD_RDEV_ID(card),
+ CARD_WDEV_ID(card),
+ CARD_DDEV_ID(card),
+ qeth_get_cardname(card),
+ (card->info.mcl_level[0]) ? " (level: " : "",
+ (card->info.mcl_level[0]) ? card->info.mcl_level : "",
+ (card->info.mcl_level[0]) ? ")" : "",
+ qeth_get_cardname_short(card));
+}
+
+static void
+qeth_print_status_message(struct qeth_card *card)
+{
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSAE:
+ /* VM will use a non-zero first character
+ * to indicate a HiperSockets like reporting
+ * of the level OSA sets the first character to zero
+ * */
+ if (!card->info.mcl_level[0]) {
+ sprintf(card->info.mcl_level,"%02x%02x",
+ card->info.mcl_level[2],
+ card->info.mcl_level[3]);
+
+ card->info.mcl_level[QETH_MCL_LENGTH] = 0;
+ break;
+ }
+ /* fallthrough */
+ case QETH_CARD_TYPE_IQD:
+ card->info.mcl_level[0] = (char) _ebcasc[(__u8)
+ card->info.mcl_level[0]];
+ card->info.mcl_level[1] = (char) _ebcasc[(__u8)
+ card->info.mcl_level[1]];
+ card->info.mcl_level[2] = (char) _ebcasc[(__u8)
+ card->info.mcl_level[2]];
+ card->info.mcl_level[3] = (char) _ebcasc[(__u8)
+ card->info.mcl_level[3]];
+ card->info.mcl_level[QETH_MCL_LENGTH] = 0;
+ break;
+ default:
+ memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
+ }
+ if (card->info.portname_required)
+ qeth_print_status_with_portname(card);
+ else
+ qeth_print_status_no_portname(card);
+}
+
+static int
+qeth_register_netdev(struct qeth_card *card)
+{
+ QETH_DBF_TEXT(setup, 3, "regnetd");
+ if (card->dev->reg_state != NETREG_UNINITIALIZED) {
+ qeth_netdev_init(card->dev);
+ return 0;
+ }
+ /* sysfs magic */
+ SET_NETDEV_DEV(card->dev, &card->gdev->dev);
+ return register_netdev(card->dev);
+}
+
/*
 * Re-open the interface after a recovery / set_online of a card that
 * was previously in RECOVER state.
 */
static void
qeth_start_again(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup ,2, "startag");

	rtnl_lock();
	dev_open(card->dev);
	rtnl_unlock();
	/* this also sets saved unicast addresses */
	qeth_set_multicast_list(card->dev);
}
+
+
/* Layer 2 specific stuff */

/*
 * Reset an option that is unusable in layer-2 mode and complain.
 * Wrapped in do { } while (0) so the macros expand to a single
 * statement and are safe inside unbraced if/else bodies.  ("read "
 * "channel" keeps the space that the original concatenation lost.)
 */
#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
	do { \
		if (card->options.option == value) { \
			PRINT_ERR("%s not supported with layer 2 " \
				  "functionality, ignoring option on read " \
				  "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
			card->options.option = reset_value; \
		} \
	} while (0)

#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
	do { \
		if (card->options.option != value) { \
			PRINT_ERR("%s not supported with layer 2 " \
				  "functionality, ignoring option on read " \
				  "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
			card->options.option = reset_value; \
		} \
	} while (0)
+
+
/*
 * In layer-2 mode several layer-3 options are meaningless; warn about
 * and reset any that the user set anyway, so the option set stays
 * internally consistent before the card is brought up.
 */
static void qeth_make_parameters_consistent(struct qeth_card *card)
{

	if (card->options.layer2) {
		if (card->info.type == QETH_CARD_TYPE_IQD) {
			PRINT_ERR("Device %s does not support " \
				  "layer 2 functionality. "  \
				  "Ignoring layer2 option.\n",CARD_BUS_ID(card));
		}
		IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
				 "Routing options are");
#ifdef CONFIG_QETH_IPV6
		IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
				 "Routing options are");
#endif
		IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
				QETH_CHECKSUM_DEFAULT,
				"Checksumming options are");
		IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
				 QETH_TR_BROADCAST_ALLRINGS,
				 "Broadcast mode options are");
		IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
				 QETH_TR_MACADDR_NONCANONICAL,
				 "Canonical MAC addr options are");
		IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
				 "Broadcast faking options are");
		IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
				 DEFAULT_ADD_HHLEN,"Option add_hhlen is");
		IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
	}
}
+
+
/*
 * ccwgroup set_online callback: bring all three subchannels online,
 * run hard- and soft-setup, register the netdev and initialize the
 * qdio queues.  On any failure the card is stopped hard, the
 * subchannels are set offline again and -ENODEV is returned; a card
 * that was in RECOVER state stays in RECOVER state.
 */
static int
qeth_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = gdev->dev.driver_data;
	int rc = 0;
	enum qeth_card_states recover_flag;

	BUG_ON(!card);
	QETH_DBF_TEXT(setup ,2, "setonlin");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));

	/* only the recovery thread may run while we set up */
	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
		PRINT_WARN("set_online of card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}

	recover_flag = card->state;
	if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
	    (rc = ccw_device_set_online(CARD_WDEV(card))) ||
	    (rc = ccw_device_set_online(CARD_DDEV(card)))){
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		return -EIO;
	}

	if (card->options.layer2)
		qeth_make_parameters_consistent(card);

	if ((rc = qeth_hardsetup_card(card))){
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_HARDSETUP;

	if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4)))
		rc = qeth_get_unique_id(card);

	/* a missing unique id is fatal only in layer-3 mode */
	if (rc && card->options.layer2 == 0) {
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
		goto out_remove;
	}
	qeth_print_status_message(card);
	if ((rc = qeth_register_netdev(card))){
		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
		goto out_remove;
	}
	if ((rc = qeth_softsetup_card(card))){
		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;

	if ((rc = qeth_init_qdio_queues(card))){
		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
		goto out_remove;
	}
/*maybe it was set offline without ifconfig down
 * we can also use this state for recovery purposes*/
	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (recover_flag == CARD_STATE_RECOVER)
		qeth_start_again(card);
	qeth_notify_processes();
	return 0;
out_remove:
	card->use_hard_stop = 1;
	qeth_stop_card(card);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	if (recover_flag == CARD_STATE_RECOVER)
		card->state = CARD_STATE_RECOVER;
	else
		card->state = CARD_STATE_DOWN;
	return -ENODEV;
}
+
+static struct ccw_device_id qeth_ids[] = {
+ {CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
+ {CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
+ {},
+};
+MODULE_DEVICE_TABLE(ccw, qeth_ids);
+
/* sysfs parent node for all qeth devices */
struct device *qeth_root_dev = NULL;

/* group driver: one group device bundles the read/write/data
 * subchannels of a card */
struct ccwgroup_driver qeth_ccwgroup_driver = {
	.owner = THIS_MODULE,
	.name = "qeth",
	.driver_id = 0xD8C5E3C8,
	.probe = qeth_probe_device,
	.remove = qeth_remove_device,
	.set_online = qeth_set_online,
	.set_offline = qeth_set_offline,
};

/* per-subchannel driver; probe/remove are handled by the group layer */
struct ccw_driver qeth_ccw_driver = {
	.name = "qeth",
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
+
+
/*
 * Tear down all s390 debug-feature areas.  Each pointer is checked so
 * this is safe to call from the partial-failure path of
 * qeth_register_dbf_views() as well.
 */
static void
qeth_unregister_dbf_views(void)
{
	if (qeth_dbf_setup)
		debug_unregister(qeth_dbf_setup);
	if (qeth_dbf_qerr)
		debug_unregister(qeth_dbf_qerr);
	if (qeth_dbf_sense)
		debug_unregister(qeth_dbf_sense);
	if (qeth_dbf_misc)
		debug_unregister(qeth_dbf_misc);
	if (qeth_dbf_data)
		debug_unregister(qeth_dbf_data);
	if (qeth_dbf_control)
		debug_unregister(qeth_dbf_control);
	if (qeth_dbf_trace)
		debug_unregister(qeth_dbf_trace);
}
/*
 * Create all s390 debug-feature areas and attach a hex/ascii view to
 * each.  All-or-nothing: if any registration fails, everything that
 * was registered so far is torn down and -ENOMEM is returned.
 */
static int
qeth_register_dbf_views(void)
{
	qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
					QETH_DBF_SETUP_INDEX,
					QETH_DBF_SETUP_NR_AREAS,
					QETH_DBF_SETUP_LEN);
	qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
				       QETH_DBF_MISC_INDEX,
				       QETH_DBF_MISC_NR_AREAS,
				       QETH_DBF_MISC_LEN);
	qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
				       QETH_DBF_DATA_INDEX,
				       QETH_DBF_DATA_NR_AREAS,
				       QETH_DBF_DATA_LEN);
	qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
					  QETH_DBF_CONTROL_INDEX,
					  QETH_DBF_CONTROL_NR_AREAS,
					  QETH_DBF_CONTROL_LEN);
	qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
					QETH_DBF_SENSE_INDEX,
					QETH_DBF_SENSE_NR_AREAS,
					QETH_DBF_SENSE_LEN);
	qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
				       QETH_DBF_QERR_INDEX,
				       QETH_DBF_QERR_NR_AREAS,
				       QETH_DBF_QERR_LEN);
	qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
					QETH_DBF_TRACE_INDEX,
					QETH_DBF_TRACE_NR_AREAS,
					QETH_DBF_TRACE_LEN);

	if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL)    ||
	    (qeth_dbf_data == NULL)  || (qeth_dbf_control == NULL) ||
	    (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL)    ||
	    (qeth_dbf_trace == NULL)) {
		qeth_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);

	debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);

	debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);

	debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);

	debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);

	debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);

	debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);

	return 0;
}
+
#ifdef CONFIG_QETH_IPV6
extern struct neigh_table arp_tbl;
/* runtime copy of the template below; NULL until qeth_ipv6_init() */
static struct neigh_ops *arp_direct_ops;
/* the kernel's original ARP constructor, saved so it can be restored
 * on module exit and used as fallback for non-qeth devices */
static int (*qeth_old_arp_constructor) (struct neighbour *);

/* neigh_ops that bypass ARP resolution: every output path goes
 * straight to dev_queue_xmit */
static struct neigh_ops arp_direct_ops_template = {
	.family = AF_INET,
	.destructor = NULL,
	.solicit = NULL,
	.error_report = NULL,
	.output = dev_queue_xmit,
	.connected_output = dev_queue_xmit,
	.hh_output = dev_queue_xmit,
	.queue_xmit = dev_queue_xmit
};
+
/*
 * Replacement neighbour constructor installed into arp_tbl: for qeth
 * layer-3 devices (which do their own address resolution) mark the
 * neighbour NOARP and wire all output through arp_direct_ops.  For
 * any other device - or a layer-2 / fake-header qeth device - fall
 * back to the kernel's original ARP constructor.
 */
static int
qeth_arp_constructor(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;
	struct in_device *in_dev;
	struct neigh_parms *parms;
	struct qeth_card *card;

	card = qeth_get_card_from_dev(dev);
	if (card == NULL)
		goto out;
	if((card->options.layer2) ||
	   (card->dev->hard_header == qeth_fake_header))
		goto out;

	rcu_read_lock();
	in_dev = rcu_dereference(__in_dev_get(dev));
	if (in_dev == NULL) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* take the device's ARP parameters instead of the default ones */
	parms = in_dev->arp_parms;
	__neigh_parms_put(neigh->parms);
	neigh->parms = neigh_parms_clone(parms);
	rcu_read_unlock();

	neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
	neigh->nud_state = NUD_NOARP;
	neigh->ops = arp_direct_ops;
	neigh->output = neigh->ops->queue_xmit;
	return 0;
out:
	return qeth_old_arp_constructor(neigh);
}
+#endif /*CONFIG_QETH_IPV6*/
+
+/*
+ * IP address takeover related functions
+ */
+static void
+qeth_clear_ipato_list(struct qeth_card *card)
+{
+ struct qeth_ipato_entry *ipatoe, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->ip_lock, flags);
+ list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
+ list_del(&ipatoe->entry);
+ kfree(ipatoe);
+ }
+ spin_unlock_irqrestore(&card->ip_lock, flags);
+}
+
+int
+qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
+{
+ struct qeth_ipato_entry *ipatoe;
+ unsigned long flags;
+ int rc = 0;
+
+ QETH_DBF_TEXT(trace, 2, "addipato");
+ spin_lock_irqsave(&card->ip_lock, flags);
+ list_for_each_entry(ipatoe, &card->ipato.entries, entry){
+ if (ipatoe->proto != new->proto)
+ continue;
+ if (!memcmp(ipatoe->addr, new->addr,
+ (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
+ (ipatoe->mask_bits == new->mask_bits)){
+ PRINT_WARN("ipato entry already exists!\n");
+ rc = -EEXIST;
+ break;
+ }
+ }
+ if (!rc) {
+ list_add_tail(&new->entry, &card->ipato.entries);
+ }
+ spin_unlock_irqrestore(&card->ip_lock, flags);
+ return rc;
+}
+
+void
+qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
+ u8 *addr, int mask_bits)
+{
+ struct qeth_ipato_entry *ipatoe, *tmp;
+ unsigned long flags;
+
+ QETH_DBF_TEXT(trace, 2, "delipato");
+ spin_lock_irqsave(&card->ip_lock, flags);
+ list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
+ if (ipatoe->proto != proto)
+ continue;
+ if (!memcmp(ipatoe->addr, addr,
+ (proto == QETH_PROT_IPV4)? 4:16) &&
+ (ipatoe->mask_bits == mask_bits)){
+ list_del(&ipatoe->entry);
+ kfree(ipatoe);
+ }
+ }
+ spin_unlock_irqrestore(&card->ip_lock, flags);
+}
+
+static inline void
+qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
+{
+ int i, j;
+ u8 octet;
+
+ for (i = 0; i < len; ++i){
+ octet = addr[i];
+ for (j = 7; j >= 0; --j){
+ bits[i*8 + j] = octet & 1;
+ octet >>= 1;
+ }
+ }
+}
+
/*
 * Decide whether @addr falls under any configured takeover prefix.
 * Addresses are expanded to one-byte-per-bit form so a prefix match
 * is a bounded memcmp.  The result can be inverted per protocol via
 * card->ipato.invert4/invert6.  Returns non-zero when covered.
 */
static int
qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return 0;

	qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				  (addr->proto == QETH_PROT_IPV4)? 4:16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry){
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					  (ipatoe->proto==QETH_PROT_IPV4) ?
					  4:16);
		/* compare at most the full address width */
		if (addr->proto == QETH_PROT_IPV4)
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(32, ipatoe->mask_bits));
		else
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(128, ipatoe->mask_bits));
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}
+
+/*
+ * VIPA related functions
+ */
/*
 * Add a virtual IP address (VIPA) to the card and kick the SET_IP
 * thread to register it.  Returns 0, -ENOMEM if no address buffer
 * could be allocated, or -EEXIST if the address is already known.
 */
int
qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
	      const u8 *addr)
{
	struct qeth_ipaddr *ipaddr;
	unsigned long flags;
	int rc = 0;

	ipaddr = qeth_get_addr_buffer(proto);
	if (ipaddr){
		if (proto == QETH_PROT_IPV4){
			QETH_DBF_TEXT(trace, 2, "addvipa4");
			memcpy(&ipaddr->u.a4.addr, addr, 4);
			ipaddr->u.a4.mask = 0;
#ifdef CONFIG_QETH_IPV6
		} else if (proto == QETH_PROT_IPV6){
			QETH_DBF_TEXT(trace, 2, "addvipa6");
			memcpy(&ipaddr->u.a6.addr, addr, 16);
			ipaddr->u.a6.pfxlen = 0;
#endif
		}
		ipaddr->type = QETH_IP_TYPE_VIPA;
		ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
		ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
	} else
		return -ENOMEM;
	spin_lock_irqsave(&card->ip_lock, flags);
	if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
	    __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
		rc = -EEXIST;
	spin_unlock_irqrestore(&card->ip_lock, flags);
	if (rc){
		PRINT_WARN("Cannot add VIPA. Address already exists!\n");
		return rc;
	}
	/* non-zero return means qeth_add_ip took ownership of ipaddr */
	if (!qeth_add_ip(card, ipaddr))
		kfree(ipaddr);
	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
	return rc;
}
+
/*
 * Queue deletion of a virtual IP address and kick the SET_IP thread.
 * Failure to allocate the scratch address buffer is silently ignored
 * (best effort).
 */
void
qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
	      const u8 *addr)
{
	struct qeth_ipaddr *ipaddr;

	ipaddr = qeth_get_addr_buffer(proto);
	if (ipaddr){
		if (proto == QETH_PROT_IPV4){
			QETH_DBF_TEXT(trace, 2, "delvipa4");
			memcpy(&ipaddr->u.a4.addr, addr, 4);
			ipaddr->u.a4.mask = 0;
#ifdef CONFIG_QETH_IPV6
		} else if (proto == QETH_PROT_IPV6){
			QETH_DBF_TEXT(trace, 2, "delvipa6");
			memcpy(&ipaddr->u.a6.addr, addr, 16);
			ipaddr->u.a6.pfxlen = 0;
#endif
		}
		ipaddr->type = QETH_IP_TYPE_VIPA;
	} else
		return;
	/* non-zero return means qeth_delete_ip took ownership */
	if (!qeth_delete_ip(card, ipaddr))
		kfree(ipaddr);
	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
+
+/*
+ * proxy ARP related functions
+ */
+int
+qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr)
+{
+ struct qeth_ipaddr *ipaddr;
+ unsigned long flags;
+ int rc = 0;
+
+ ipaddr = qeth_get_addr_buffer(proto);
+ if (ipaddr){
+ if (proto == QETH_PROT_IPV4){
+ QETH_DBF_TEXT(trace, 2, "addrxip4");
+ memcpy(&ipaddr->u.a4.addr, addr, 4);
+ ipaddr->u.a4.mask = 0;
+#ifdef CONFIG_QETH_IPV6
+ } else if (proto == QETH_PROT_IPV6){
+ QETH_DBF_TEXT(trace, 2, "addrxip6");
+ memcpy(&ipaddr->u.a6.addr, addr, 16);
+ ipaddr->u.a6.pfxlen = 0;
+#endif
+ }
+ ipaddr->type = QETH_IP_TYPE_RXIP;
+ ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
+ ipaddr->del_flags = 0;
+ } else
+ return -ENOMEM;
+ spin_lock_irqsave(&card->ip_lock, flags);
+ if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
+ __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
+ rc = -EEXIST;
+ spin_unlock_irqrestore(&card->ip_lock, flags);
+ if (rc){
+ PRINT_WARN("Cannot add RXIP. Address already exists!\n");
+ return rc;
+ }
+ if (!qeth_add_ip(card, ipaddr))
+ kfree(ipaddr);
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
+ return 0;
+}
+
+void
+qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr)
+{
+ struct qeth_ipaddr *ipaddr;
+
+ ipaddr = qeth_get_addr_buffer(proto);
+ if (ipaddr){
+ if (proto == QETH_PROT_IPV4){
+ QETH_DBF_TEXT(trace, 2, "addrxip4");
+ memcpy(&ipaddr->u.a4.addr, addr, 4);
+ ipaddr->u.a4.mask = 0;
+#ifdef CONFIG_QETH_IPV6
+ } else if (proto == QETH_PROT_IPV6){
+ QETH_DBF_TEXT(trace, 2, "addrxip6");
+ memcpy(&ipaddr->u.a6.addr, addr, 16);
+ ipaddr->u.a6.pfxlen = 0;
+#endif
+ }
+ ipaddr->type = QETH_IP_TYPE_RXIP;
+ } else
+ return;
+ if (!qeth_delete_ip(card, ipaddr))
+ kfree(ipaddr);
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
+}
+
+/**
+ * IP event handler
+ */
+static int
+qeth_ip_event(struct notifier_block *this,
+ unsigned long event,void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
+ struct qeth_ipaddr *addr;
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(trace,3,"ipevent");
+ card = qeth_get_card_from_dev(dev);
+ if (!card)
+ return NOTIFY_DONE;
+ if (card->options.layer2)
+ return NOTIFY_DONE;
+
+ addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
+ if (addr != NULL) {
+ addr->u.a4.addr = ifa->ifa_address;
+ addr->u.a4.mask = ifa->ifa_mask;
+ addr->type = QETH_IP_TYPE_NORMAL;
+ } else
+ goto out;
+
+ switch(event) {
+ case NETDEV_UP:
+ if (!qeth_add_ip(card, addr))
+ kfree(addr);
+ break;
+ case NETDEV_DOWN:
+ if (!qeth_delete_ip(card, addr))
+ kfree(addr);
+ break;
+ default:
+ break;
+ }
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qeth_ip_notifier = {
+ qeth_ip_event,
+ 0
+};
+
+#ifdef CONFIG_QETH_IPV6
+/**
+ * IPv6 event handler
+ */
+static int
+qeth_ip6_event(struct notifier_block *this,
+ unsigned long event,void *ptr)
+{
+
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+ struct net_device *dev = (struct net_device *)ifa->idev->dev;
+ struct qeth_ipaddr *addr;
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(trace,3,"ip6event");
+
+ card = qeth_get_card_from_dev(dev);
+ if (!card)
+ return NOTIFY_DONE;
+ if (!qeth_is_supported(card, IPA_IPV6))
+ return NOTIFY_DONE;
+
+ addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
+ if (addr != NULL) {
+ memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
+ addr->u.a6.pfxlen = ifa->prefix_len;
+ addr->type = QETH_IP_TYPE_NORMAL;
+ } else
+ goto out;
+
+ switch(event) {
+ case NETDEV_UP:
+ if (!qeth_add_ip(card, addr))
+ kfree(addr);
+ break;
+ case NETDEV_DOWN:
+ if (!qeth_delete_ip(card, addr))
+ kfree(addr);
+ break;
+ default:
+ break;
+ }
+ if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qeth_ip6_notifier = {
+ qeth_ip6_event,
+ 0
+};
+#endif
+
/*
 * Reboot notifier: quiesce every qeth card so the machine can reboot
 * cleanly - drop all IP bookkeeping (without deregistering, the
 * hardware is going away anyway) and clear the qdio queues.
 */
static int
qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{

	struct device *entry;
	struct qeth_card *card;

	/* walk the driver's device list under the bus rwsem */
	down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
	list_for_each_entry(entry, &qeth_ccwgroup_driver.driver.devices,
			    driver_list) {
		card = (struct qeth_card *) entry->driver_data;
		qeth_clear_ip_list(card, 0, 0);
		qeth_qdio_clear_card(card, 0);
	}
	up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
	return NOTIFY_DONE;
}
+
+
+static struct notifier_block qeth_reboot_notifier = {
+ qeth_reboot_event,
+ 0
+};
+
/*
 * Register reboot, IPv4 and (optionally) IPv6 address notifiers.
 * On failure everything registered so far is unwound and the error
 * is returned.
 */
static int
qeth_register_notifiers(void)
{
        int r;

	QETH_DBF_TEXT(trace,5,"regnotif");
	if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
		return r;
	if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
		goto out_reboot;
#ifdef CONFIG_QETH_IPV6
	if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
		goto out_ipv4;
#endif
	return 0;

#ifdef CONFIG_QETH_IPV6
out_ipv4:
	unregister_inetaddr_notifier(&qeth_ip_notifier);
#endif
out_reboot:
	unregister_reboot_notifier(&qeth_reboot_notifier);
	return r;
}
+
+/**
+ * unregister all event notifiers
+ */
/*
 * Unregister all event notifiers.  Each unregister returns 0 on
 * success; a non-zero result means the notifier was never registered,
 * which would be a driver bug - hence BUG_ON.
 */
static void
qeth_unregister_notifiers(void)
{

	QETH_DBF_TEXT(trace,5,"unregnot");
	BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
	BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
#ifdef CONFIG_QETH_IPV6
	BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
#endif /* QETH_IPV6 */

}
+
+#ifdef CONFIG_QETH_IPV6
+static int
+qeth_ipv6_init(void)
+{
+ qeth_old_arp_constructor = arp_tbl.constructor;
+ write_lock(&arp_tbl.lock);
+ arp_tbl.constructor = qeth_arp_constructor;
+ write_unlock(&arp_tbl.lock);
+
+ arp_direct_ops = (struct neigh_ops*)
+ kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
+ if (!arp_direct_ops)
+ return -ENOMEM;
+
+ memcpy(arp_direct_ops, &arp_direct_ops_template,
+ sizeof(struct neigh_ops));
+
+ return 0;
+}
+
/*
 * Restore the kernel's original ARP constructor and release the
 * direct-ops copy installed by qeth_ipv6_init().
 */
static void
qeth_ipv6_uninit(void)
{
	write_lock(&arp_tbl.lock);
	arp_tbl.constructor = qeth_old_arp_constructor;
	write_unlock(&arp_tbl.lock);
	kfree(arp_direct_ops);
}
+#endif /* CONFIG_QETH_IPV6 */
+
/*
 * Tear down everything qeth_sysfs_register() set up, in reverse order.
 * NOTE(review): this is also invoked on qeth_sysfs_register()'s
 * partial-failure path, so it may unregister entities that were never
 * registered - verify the involved APIs tolerate that.
 */
static void
qeth_sysfs_unregister(void)
{
	qeth_remove_driver_attributes();
	ccw_driver_unregister(&qeth_ccw_driver);
	ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
	s390_root_dev_unregister(qeth_root_dev);
}
+/**
+ * register qeth at sysfs
+ */
+static int
+qeth_sysfs_register(void)
+{
+ int rc=0;
+
+ rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
+ if (rc)
+ return rc;
+ rc = ccw_driver_register(&qeth_ccw_driver);
+ if (rc)
+ return rc;
+ rc = qeth_create_driver_attributes();
+ if (rc)
+ return rc;
+ qeth_root_dev = s390_root_dev_register("qeth");
+ if (IS_ERR(qeth_root_dev)) {
+ rc = PTR_ERR(qeth_root_dev);
+ return rc;
+ }
+ return 0;
+}
+
+/***
+ * init function
+ */
+static int __init
+qeth_init(void)
+{
+ int rc=0;
+
+ qeth_eyecatcher();
+ PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
+ version, VERSION_QETH_C, VERSION_QETH_H,
+ VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
+ VERSION_QETH_FS_H, VERSION_QETH_PROC_C,
+ VERSION_QETH_SYS_C, QETH_VERSION_IPV6,
+ QETH_VERSION_VLAN);
+
+ INIT_LIST_HEAD(&qeth_card_list.list);
+ INIT_LIST_HEAD(&qeth_notify_list);
+ spin_lock_init(&qeth_notify_lock);
+ rwlock_init(&qeth_card_list.rwlock);
+
+ if (qeth_register_dbf_views())
+ goto out_err;
+ if (qeth_sysfs_register())
+ goto out_sysfs;
+
+#ifdef CONFIG_QETH_IPV6
+ if (qeth_ipv6_init()) {
+ PRINT_ERR("Out of memory during ipv6 init.\n");
+ goto out_sysfs;
+ }
+#endif /* QETH_IPV6 */
+ if (qeth_register_notifiers())
+ goto out_ipv6;
+ if (qeth_create_procfs_entries())
+ goto out_notifiers;
+
+ return rc;
+
+out_notifiers:
+ qeth_unregister_notifiers();
+out_ipv6:
+#ifdef CONFIG_QETH_IPV6
+ qeth_ipv6_uninit();
+#endif /* QETH_IPV6 */
+out_sysfs:
+ qeth_sysfs_unregister();
+ qeth_unregister_dbf_views();
+out_err:
+ PRINT_ERR("Initialization failed");
+ return rc;
+}
+
static void
__exit qeth_exit(void)
{
	struct qeth_card *card, *tmp;
	unsigned long flags;

	QETH_DBF_TEXT(trace,1, "cleanup.");

	/*
	 * We would not need to clean up our devices here, because the
	 * common device layer calls qeth_remove_device for each device
	 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
	 * But we do cleanup here so we can do a "soft" shutdown of our cards.
	 * qeth_remove_device called by the common device layer would otherwise
	 * do a "hard" shutdown (card->use_hard_stop is set to one in
	 * qeth_remove_device).
	 */
again:
	/* the list lock cannot be held across set_offline/remove, so
	 * drop it and restart the walk after each removed card */
	read_lock_irqsave(&qeth_card_list.rwlock, flags);
	list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
		read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
		qeth_set_offline(card->gdev);
		qeth_remove_device(card->gdev);
		goto again;
	}
	read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
#ifdef CONFIG_QETH_IPV6
	qeth_ipv6_uninit();
#endif
	qeth_unregister_notifiers();
	qeth_remove_procfs_entries();
	qeth_sysfs_unregister();
	qeth_unregister_dbf_views();
	printk("qeth: removed\n");
}
+
/* module entry/exit points and metadata */
EXPORT_SYMBOL(qeth_eyecatcher);
module_init(qeth_init);
module_exit(qeth_exit);
MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
		   "Copyright 2000,2003 IBM Corporation\n");

MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
new file mode 100644
index 000000000000..f685ecc7da99
--- /dev/null
+++ b/drivers/s390/net/qeth_mpc.c
@@ -0,0 +1,168 @@
+/*
+ * linux/drivers/s390/net/qeth_mpc.c
+ *
+ * Linux on zSeries OSA Express and HiperSockets support
+ *
+ * Copyright 2000,2003 IBM Corporation
+ * Author(s): Frank Pavlic <pavlic@de.ibm.com>
+ * Thomas Spatzier <tspat@de.ibm.com>
+ *
+ */
+#include <asm/cio.h>
+#include "qeth_mpc.h"
+
+const char *VERSION_QETH_MPC_C = "$Revision: 1.11 $";
+
/* Raw template for the IDX ACTIVATE command on the read channel.
 * Field layout is adapter-defined; users patch tokens/ids in before
 * sending -- TODO confirm against the qeth_idx_activate_* callers. */
unsigned char IDX_ACTIVATE_READ[]={
	0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
	0x19,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00, 0x00,0x00,0xc8,0xc1,
	0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
	0x00,0x00
};
+
/* Raw template for the IDX ACTIVATE command on the write channel;
 * differs from the read variant only in a few header bytes. */
unsigned char IDX_ACTIVATE_WRITE[]={
	0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
	0x15,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
	0xff,0xff,0x00,0x00, 0x00,0x00,0xc8,0xc1,
	0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
	0x00,0x00
};
+
/* Raw template for the CM (connection management) ENABLE request. */
unsigned char CM_ENABLE[]={
	0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x01,
	0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x63,
	0x10,0x00,0x00,0x01,
	0x00,0x00,0x00,0x00,
	0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x23,
	0x00,0x00,0x23,0x05, 0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
	0x01,0x00,0x00,0x23, 0x00,0x00,0x00,0x40,
	0x00,0x0c,0x41,0x02, 0x00,0x17,0x00,0x00,
	0x00,0x00,0x00,0x00,
	0x00,0x0b,0x04,0x01,
	0x7e,0x04,0x05,0x00, 0x01,0x01,0x0f,
	0x00,
	0x0c,0x04,0x02,0xff, 0xff,0xff,0xff,0xff,
	0xff,0xff,0xff
};
+
/* Raw template for the CM SETUP request. */
unsigned char CM_SETUP[]={
	0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x02,
	0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x64,
	0x10,0x00,0x00,0x01,
	0x00,0x00,0x00,0x00,
	0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x24,
	0x00,0x00,0x24,0x05, 0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
	0x01,0x00,0x00,0x24, 0x00,0x00,0x00,0x40,
	0x00,0x0c,0x41,0x04, 0x00,0x18,0x00,0x00,
	0x00,0x00,0x00,0x00,
	0x00,0x09,0x04,0x04,
	0x05,0x00,0x01,0x01, 0x11,
	0x00,0x09,0x04,
	0x05,0x05,0x00,0x00, 0x00,0x00,
	0x00,0x06,
	0x04,0x06,0xc8,0x00
};
+
/* Raw template for the ULP (upper layer protocol) ENABLE request. */
unsigned char ULP_ENABLE[]={
	0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x03,
	0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6b,
	0x10,0x00,0x00,0x01,
	0x00,0x00,0x00,0x00,
	0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x01,
	0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x2b,
	0x00,0x00,0x2b,0x05, 0x20,0x01,0x00,0x00,
	0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
	0x01,0x00,0x00,0x2b, 0x00,0x00,0x00,0x40,
	0x00,0x0c,0x41,0x02, 0x00,0x1f,0x00,0x00,
	0x00,0x00,0x00,0x00,
	0x00,0x0b,0x04,0x01,
	0x03,0x04,0x05,0x00, 0x01,0x01,0x12,
	0x00,
	0x14,0x04,0x0a,0x00, 0x20,0x00,0x00,0xff,
	0xff,0x00,0x08,0xc8, 0xe8,0xc4,0xf1,0xc7,
	0xf1,0x00,0x00
};
+
/* Raw template for the ULP SETUP request. */
unsigned char ULP_SETUP[]={
	0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x04,
	0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6c,
	0x10,0x00,0x00,0x01,
	0x00,0x00,0x00,0x00,
	0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x02,
	0x00,0x00,0x00,0x01, 0x00,0x24,0x00,0x2c,
	0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
	0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
	0x01,0x00,0x00,0x2c, 0x00,0x00,0x00,0x40,
	0x00,0x0c,0x41,0x04, 0x00,0x20,0x00,0x00,
	0x00,0x00,0x00,0x00,
	0x00,0x09,0x04,0x04,
	0x05,0x00,0x01,0x01, 0x14,
	0x00,0x09,0x04,
	0x05,0x05,0x30,0x01, 0x00,0x00,
	0x00,0x06,
	0x04,0x06,0x40,0x00,
	0x00,0x08,0x04,0x0b,
	0x00,0x00,0x00,0x00
};
+
/* Raw template for the DM ACT (activate) request. */
unsigned char DM_ACT[]={
	0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x05,
	0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x55,
	0x10,0x00,0x00,0x01,
	0x00,0x00,0x00,0x00,
	0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x03,
	0x00,0x00,0x00,0x02, 0x00,0x24,0x00,0x15,
	0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
	0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
	0x01,0x00,0x00,0x15, 0x00,0x00,0x00,0x40,
	0x00,0x0c,0x43,0x60, 0x00,0x09,0x00,0x00,
	0x00,0x00,0x00,0x00,
	0x00,0x09,0x04,0x04,
	0x05,0x40,0x01,0x01, 0x00
};
+
+unsigned char IPA_PDU_HEADER[]={
+ 0x00,0xe0,0x00,0x00, 0x77,0x77,0x77,0x77,
+ 0x00,0x00,0x00,0x14, 0x00,0x00,
+ (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))/256,
+ (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))%256,
+ 0x10,0x00,0x00,0x01, 0x00,0x00,0x00,0x00,
+ 0xc1,0x03,0x00,0x01, 0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00, 0x00,0x24,
+ sizeof(struct qeth_ipa_cmd)/256,
+ sizeof(struct qeth_ipa_cmd)%256,
+ 0x00,
+ sizeof(struct qeth_ipa_cmd)/256,
+ sizeof(struct qeth_ipa_cmd),0x05, 0x77,0x77,0x77,0x77,
+ 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
+ 0x01,0x00,
+ sizeof(struct qeth_ipa_cmd)/256,
+ sizeof(struct qeth_ipa_cmd)%256,
+ 0x00,0x00,0x00,0x40,
+};
+
/* Channel-program templates: first byte is the CCW opcode (0x01 =
 * write, 0x02 = read) with the SLI flag; the remaining count and
 * data-address bytes are zero in the template. */
unsigned char WRITE_CCW[]={
	0x01,CCW_FLAG_SLI,0,0,
	0,0,0,0
};

unsigned char READ_CCW[]={
	0x02,CCW_FLAG_SLI,0,0,
	0,0,0,0
};
+
+
+
+
+
+
+
+
+
+
+
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h
new file mode 100644
index 000000000000..3d916b5c5d09
--- /dev/null
+++ b/drivers/s390/net/qeth_mpc.h
@@ -0,0 +1,538 @@
+/*
+ * linux/drivers/s390/net/qeth_mpc.h
+ *
+ * Linux on zSeries OSA Express and HiperSockets support
+ *
+ * Copyright 2000,2003 IBM Corporation
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Thomas Spatzier <tspat@de.ibm.com>
+ * Frank Pavlic <pavlic@de.ibm.com>
+ *
+ */
+#ifndef __QETH_MPC_H__
+#define __QETH_MPC_H__
+
+#include <asm/qeth.h>
+
+#define VERSION_QETH_MPC_H "$Revision: 1.43 $"
+
+extern const char *VERSION_QETH_MPC_C;
+
+#define IPA_PDU_HEADER_SIZE 0x40
+#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer+0x0e)
+#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer+0x26)
+#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer+0x2a)
+#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer+0x3a)
+
+extern unsigned char IPA_PDU_HEADER[];
+#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer+0x2c)
+
+#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
+
+#define QETH_SEQ_NO_LENGTH 4
+#define QETH_MPC_TOKEN_LENGTH 4
+#define QETH_MCL_LENGTH 4
+#define OSA_ADDR_LEN 6
+
+#define QETH_TIMEOUT (10 * HZ)
+#define QETH_IPA_TIMEOUT (45 * HZ)
+#define QETH_IDX_COMMAND_SEQNO 0xffff0000
+#define SR_INFO_LEN 16
+
+#define QETH_CLEAR_CHANNEL_PARM -10
+#define QETH_HALT_CHANNEL_PARM -11
+
+/*****************************************************************************/
+/* IP Assist related definitions */
+/*****************************************************************************/
+#define IPA_CMD_INITIATOR_HOST 0x00
+#define IPA_CMD_INITIATOR_HYDRA 0x01
+#define IPA_CMD_PRIM_VERSION_NO 0x01
+
+enum qeth_card_types {
+ QETH_CARD_TYPE_UNKNOWN = 0,
+ QETH_CARD_TYPE_OSAE = 10,
+ QETH_CARD_TYPE_IQD = 1234,
+};
+
+#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
+/* only the first two bytes are looked at in qeth_get_cardname_short */
+enum qeth_link_types {
+ QETH_LINK_TYPE_FAST_ETH = 0x01,
+ QETH_LINK_TYPE_HSTR = 0x02,
+ QETH_LINK_TYPE_GBIT_ETH = 0x03,
+ QETH_LINK_TYPE_10GBIT_ETH = 0x10,
+ QETH_LINK_TYPE_LANE_ETH100 = 0x81,
+ QETH_LINK_TYPE_LANE_TR = 0x82,
+ QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
+ QETH_LINK_TYPE_LANE = 0x88,
+ QETH_LINK_TYPE_ATM_NATIVE = 0x90,
+};
+
+enum qeth_tr_macaddr_modes {
+ QETH_TR_MACADDR_NONCANONICAL = 0,
+ QETH_TR_MACADDR_CANONICAL = 1,
+};
+
+enum qeth_tr_broadcast_modes {
+ QETH_TR_BROADCAST_ALLRINGS = 0,
+ QETH_TR_BROADCAST_LOCAL = 1,
+};
+
+/* these values match CHECKSUM_* in include/linux/skbuff.h */
+enum qeth_checksum_types {
+ SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */
+ HW_CHECKSUMMING = 1,
+ NO_CHECKSUMMING = 2,
+};
+#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING
+
+/*
+ * Routing stuff
+ */
+#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
+enum qeth_routing_types {
+ NO_ROUTER = 0, /* TODO: set to bit flag used in IPA Command */
+ PRIMARY_ROUTER = 1,
+ SECONDARY_ROUTER = 2,
+ MULTICAST_ROUTER = 3,
+ PRIMARY_CONNECTOR = 4,
+ SECONDARY_CONNECTOR = 5,
+};
+
+
+/* IPA Commands */
+enum qeth_ipa_cmds {
+ IPA_CMD_STARTLAN = 0x01,
+ IPA_CMD_STOPLAN = 0x02,
+ IPA_CMD_SETVMAC = 0x21,
+ IPA_CMD_DELVMAC = 0x22,
+ IPA_CMD_SETGMAC = 0x23,
+ IPA_CMD_DELGMAC = 0x24,
+ IPA_CMD_SETVLAN = 0x25,
+ IPA_CMD_DELVLAN = 0x26,
+ IPA_CMD_SETIP = 0xb1,
+ IPA_CMD_DELIP = 0xb7,
+ IPA_CMD_QIPASSIST = 0xb2,
+ IPA_CMD_SETASSPARMS = 0xb3,
+ IPA_CMD_SETIPM = 0xb4,
+ IPA_CMD_DELIPM = 0xb5,
+ IPA_CMD_SETRTG = 0xb6,
+ IPA_CMD_SETADAPTERPARMS = 0xb8,
+ IPA_CMD_IPFRAME = 0xb9,
+ IPA_CMD_ADD_ADDR_ENTRY = 0xc1,
+ IPA_CMD_DELETE_ADDR_ENTRY = 0xc2,
+ IPA_CMD_CREATE_ADDR = 0xc3,
+ IPA_CMD_DESTROY_ADDR = 0xc4,
+ IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1,
+ IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2,
+};
+
+enum qeth_ip_ass_cmds {
+ IPA_CMD_ASS_START = 0x0001,
+ IPA_CMD_ASS_STOP = 0x0002,
+ IPA_CMD_ASS_CONFIGURE = 0x0003,
+ IPA_CMD_ASS_ENABLE = 0x0004,
+};
+
+enum qeth_arp_process_subcmds {
+ IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003,
+ IPA_CMD_ASS_ARP_QUERY_CACHE = 0x0004,
+ IPA_CMD_ASS_ARP_ADD_ENTRY = 0x0005,
+ IPA_CMD_ASS_ARP_REMOVE_ENTRY = 0x0006,
+ IPA_CMD_ASS_ARP_FLUSH_CACHE = 0x0007,
+ IPA_CMD_ASS_ARP_QUERY_INFO = 0x0104,
+ IPA_CMD_ASS_ARP_QUERY_STATS = 0x0204,
+};
+
+/* Return Codes for IPA Commands */
+enum qeth_ipa_return_codes {
+ IPA_RC_SUCCESS = 0x0000,
+ IPA_RC_NOTSUPP = 0x0001,
+ IPA_RC_NO_ACCESS = 0x0002,
+ IPA_RC_FAILED = 0x0003,
+ IPA_RC_DATA_MISMATCH = 0xe001,
+ IPA_RC_INVALID_LAN_TYPE = 0xe003,
+ IPA_RC_INVALID_LAN_NO = 0xe004,
+ IPA_RC_IPADDR_ALREADY_REG = 0xe005,
+ IPA_RC_IPADDR_TABLE_FULL = 0xe006,
+ IPA_RC_IPADDR_ALREADY_USED = 0xe00a,
+ IPA_RC_ASSNO_NOT_SUPP = 0xe00d,
+ IPA_RC_ASSCMD_START_FAILED = 0xe00e,
+ IPA_RC_ASSCMD_PART_SUCCESS = 0xe00f,
+ IPA_RC_IPADDR_NOT_DEFINED = 0xe010,
+ IPA_RC_LAN_OFFLINE = 0xe080,
+};
+
+/* IPA function flags; each flag marks availability of respective function */
+enum qeth_ipa_funcs {
+ IPA_ARP_PROCESSING = 0x00000001L,
+ IPA_INBOUND_CHECKSUM = 0x00000002L,
+ IPA_OUTBOUND_CHECKSUM = 0x00000004L,
+ IPA_IP_FRAGMENTATION = 0x00000008L,
+ IPA_FILTERING = 0x00000010L,
+ IPA_IPV6 = 0x00000020L,
+ IPA_MULTICASTING = 0x00000040L,
+ IPA_IP_REASSEMBLY = 0x00000080L,
+ IPA_QUERY_ARP_COUNTERS = 0x00000100L,
+ IPA_QUERY_ARP_ADDR_INFO = 0x00000200L,
+ IPA_SETADAPTERPARMS = 0x00000400L,
+ IPA_VLAN_PRIO = 0x00000800L,
+ IPA_PASSTHRU = 0x00001000L,
+ IPA_FULL_VLAN = 0x00004000L,
+ IPA_SOURCE_MAC = 0x00010000L,
+ IPA_OSA_MC_ROUTER = 0x00020000L,
+ IPA_QUERY_ARP_ASSIST = 0x00040000L,
+ IPA_INBOUND_TSO = 0x00080000L,
+ IPA_OUTBOUND_TSO = 0x00100000L,
+};
+
+/* SETIP/DELIP IPA Command: ***************************************************/
+enum qeth_ipa_setdelip_flags {
+ QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */
+ QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */
+ QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. ARP */
+ QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L,
+ QETH_IPA_DELIP_VIPA_FLAG = 0x40L,
+ QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L,
+};
+
+/* SETADAPTER IPA Command: ****************************************************/
+enum qeth_ipa_setadp_cmd {
+ IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x01,
+ IPA_SETADP_ALTER_MAC_ADDRESS = 0x02,
+ IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x04,
+ IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x08,
+ IPA_SETADP_SET_ADDRESSING_MODE = 0x10,
+ IPA_SETADP_SET_CONFIG_PARMS = 0x20,
+ IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x40,
+ IPA_SETADP_SET_BROADCAST_MODE = 0x80,
+ IPA_SETADP_SEND_OSA_MESSAGE = 0x0100,
+ IPA_SETADP_SET_SNMP_CONTROL = 0x0200,
+ IPA_SETADP_READ_SNMP_PARMS = 0x0400,
+ IPA_SETADP_WRITE_SNMP_PARMS = 0x0800,
+ IPA_SETADP_QUERY_CARD_INFO = 0x1000,
+};
+enum qeth_ipa_mac_ops {
+ CHANGE_ADDR_READ_MAC = 0,
+ CHANGE_ADDR_REPLACE_MAC = 1,
+ CHANGE_ADDR_ADD_MAC = 2,
+ CHANGE_ADDR_DEL_MAC = 4,
+ CHANGE_ADDR_RESET_MAC = 8,
+};
+enum qeth_ipa_addr_ops {
+ CHANGE_ADDR_READ_ADDR = 0,
+ CHANGE_ADDR_ADD_ADDR = 1,
+ CHANGE_ADDR_DEL_ADDR = 2,
+ CHANGE_ADDR_FLUSH_ADDR_TABLE = 4,
+
+
+};
+/* (SET)DELIP(M) IPA stuff ***************************************************/
+struct qeth_ipacmd_setdelip4 {
+ __u8 ip_addr[4];
+ __u8 mask[4];
+ __u32 flags;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setdelip6 {
+ __u8 ip_addr[16];
+ __u8 mask[16];
+ __u32 flags;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setdelipm {
+ __u8 mac[6];
+ __u8 padding[2];
+ __u8 ip6[12];
+ __u8 ip4[4];
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_layer2setdelmac {
+ __u32 mac_length;
+ __u8 mac[6];
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_layer2setdelvlan {
+ __u16 vlan_id;
+} __attribute__ ((packed));
+
+
+struct qeth_ipacmd_setassparms_hdr {
+ __u32 assist_no;
+ __u16 length;
+ __u16 command_code;
+ __u16 return_code;
+ __u8 number_of_replies;
+ __u8 seq_no;
+} __attribute__((packed));
+
+struct qeth_arp_query_data {
+ __u16 request_bits;
+ __u16 reply_bits;
+ __u32 no_entries;
+ char data;
+} __attribute__((packed));
+
+/* used as parameter for arp_query reply */
+struct qeth_arp_query_info {
+ __u32 udata_len;
+ __u16 mask_bits;
+ __u32 udata_offset;
+ __u32 no_entries;
+ char *udata;
+};
+
+/* SETASSPARMS IPA Command: */
+struct qeth_ipacmd_setassparms {
+ struct qeth_ipacmd_setassparms_hdr hdr;
+ union {
+ __u32 flags_32bit;
+ struct qeth_arp_cache_entry add_arp_entry;
+ struct qeth_arp_query_data query_arp;
+ __u8 ip[16];
+ } data;
+} __attribute__ ((packed));
+
+
+/* SETRTG IPA Command: ****************************************************/
+struct qeth_set_routing {
+ __u8 type;
+};
+
+/* SETADAPTERPARMS IPA Command: *******************************************/
+struct qeth_query_cmds_supp {
+ __u32 no_lantypes_supp;
+ __u8 lan_type;
+ __u8 reserved1[3];
+ __u32 supported_cmds;
+ __u8 reserved2[8];
+} __attribute__ ((packed));
+
+struct qeth_change_addr {
+ __u32 cmd;
+ __u32 addr_size;
+ __u32 no_macs;
+ __u8 addr[OSA_ADDR_LEN];
+} __attribute__ ((packed));
+
+
+struct qeth_snmp_cmd {
+ __u8 token[16];
+ __u32 request;
+ __u32 interface;
+ __u32 returncode;
+ __u32 firmwarelevel;
+ __u32 seqno;
+ __u8 data;
+} __attribute__ ((packed));
+
+struct qeth_snmp_ureq_hdr {
+ __u32 data_len;
+ __u32 req_len;
+ __u32 reserved1;
+ __u32 reserved2;
+} __attribute__ ((packed));
+
+struct qeth_snmp_ureq {
+ struct qeth_snmp_ureq_hdr hdr;
+ struct qeth_snmp_cmd cmd;
+} __attribute__((packed));
+
+struct qeth_ipacmd_setadpparms_hdr {
+ __u32 supp_hw_cmds;
+ __u32 reserved1;
+ __u16 cmdlength;
+ __u16 reserved2;
+ __u32 command_code;
+ __u16 return_code;
+ __u8 used_total;
+ __u8 seq_no;
+ __u32 reserved3;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setadpparms {
+ struct qeth_ipacmd_setadpparms_hdr hdr;
+ union {
+ struct qeth_query_cmds_supp query_cmds_supp;
+ struct qeth_change_addr change_addr;
+ struct qeth_snmp_cmd snmp;
+ __u32 mode;
+ } data;
+} __attribute__ ((packed));
+
+/* IPFRAME IPA Command: ***************************************************/
+/* TODO: define in analogy to the commands defined above */
+
+/* ADD_ADDR_ENTRY IPA Command: ********************************************/
+/* TODO: define in analogy to the commands defined above */
+
+/* DELETE_ADDR_ENTRY IPA Command: *****************************************/
+/* TODO: define in analogy to the commands defined above */
+
+/* CREATE_ADDR IPA Command: ***********************************************/
+struct qeth_create_destroy_address {
+ __u8 unique_id[8];
+} __attribute__ ((packed));
+
+/* REGISTER_LOCAL_ADDR IPA Command: ***************************************/
+/* TODO: define in analogy to the commands defined above */
+
+/* UNREGISTER_LOCAL_ADDR IPA Command: *************************************/
+/* TODO: define in analogy to the commands defined above */
+
+/* Header for each IPA command */
+struct qeth_ipacmd_hdr {
+ __u8 command;
+ __u8 initiator;
+ __u16 seqno;
+ __u16 return_code;
+ __u8 adapter_type;
+ __u8 rel_adapter_no;
+ __u8 prim_version_no;
+ __u8 param_count;
+ __u16 prot_version;
+ __u32 ipa_supported;
+ __u32 ipa_enabled;
+} __attribute__ ((packed));
+
+/* The IPA command itself */
+struct qeth_ipa_cmd {
+ struct qeth_ipacmd_hdr hdr;
+ union {
+ struct qeth_ipacmd_setdelip4 setdelip4;
+ struct qeth_ipacmd_setdelip6 setdelip6;
+ struct qeth_ipacmd_setdelipm setdelipm;
+ struct qeth_ipacmd_setassparms setassparms;
+ struct qeth_ipacmd_layer2setdelmac setdelmac;
+ struct qeth_ipacmd_layer2setdelvlan setdelvlan;
+ struct qeth_create_destroy_address create_destroy_addr;
+ struct qeth_ipacmd_setadpparms setadapterparms;
+ struct qeth_set_routing setrtg;
+ } data;
+} __attribute__ ((packed));
+
+/*
+ * special command for ARP processing.
+ * this is not included in the setassparms command above, because it
+ * would cause problems with the size of struct qeth_ipacmd_setassparms
+ */
+enum qeth_ipa_arp_return_codes {
+ QETH_IPA_ARP_RC_SUCCESS = 0x0000,
+ QETH_IPA_ARP_RC_FAILED = 0x0001,
+ QETH_IPA_ARP_RC_NOTSUPP = 0x0002,
+ QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
+ QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004,
+ QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
+};
+
+#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
+ sizeof(struct qeth_ipacmd_setassparms_hdr))
+#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
+ QETH_SETASS_BASE_LEN)
+#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
+ sizeof(struct qeth_ipacmd_setadpparms_hdr))
+#define QETH_SNMP_SETADP_CMDLENGTH 16
+
+#define QETH_ARP_DATA_SIZE 3968
+#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
+/* Helper functions */
+#define IS_IPA_REPLY(cmd) (cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST)
+
+/*****************************************************************************/
+/* END OF IP Assist related definitions */
+/*****************************************************************************/
+
+
+extern unsigned char WRITE_CCW[];
+extern unsigned char READ_CCW[];
+
+extern unsigned char CM_ENABLE[];
+#define CM_ENABLE_SIZE 0x63
+#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer+0x2c)
+#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
+#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer+0x5b)
+
+#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
+ (PDU_ENCAPSULATION(buffer)+ 0x13)
+
+
+extern unsigned char CM_SETUP[];
+#define CM_SETUP_SIZE 0x64
+#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer+0x2c)
+#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51)
+#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a)
+
+#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
+ (PDU_ENCAPSULATION(buffer) + 0x1a)
+
+extern unsigned char ULP_ENABLE[];
+#define ULP_ENABLE_SIZE 0x6b
+#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer+0x61)
+#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer+0x2c)
+#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
+#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer+0x62)
+#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
+ (PDU_ENCAPSULATION(buffer) + 0x13)
+#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
+ (PDU_ENCAPSULATION(buffer)+ 0x1f)
+#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
+ (PDU_ENCAPSULATION(buffer) + 0x17)
+#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
+ (PDU_ENCAPSULATION(buffer)+ 0x2b)
+/* Layer 2 definitions */
+#define QETH_PROT_LAYER2 0x08
+#define QETH_PROT_TCPIP 0x03
+#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50)
+#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19)
+
+extern unsigned char ULP_SETUP[];
+#define ULP_SETUP_SIZE 0x6c
+#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer+0x2c)
+#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51)
+#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a)
+#define QETH_ULP_SETUP_CUA(buffer) (buffer+0x68)
+#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer+0x6a)
+
+#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
+ (PDU_ENCAPSULATION(buffer)+0x1a)
+
+
+extern unsigned char DM_ACT[];
+#define DM_ACT_SIZE 0x55
+#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer+0x2c)
+#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer+0x51)
+
+
+
+#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer+4)
+#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer+0x1c)
+#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer+0x20)
+
+extern unsigned char IDX_ACTIVATE_READ[];
+extern unsigned char IDX_ACTIVATE_WRITE[];
+
+#define IDX_ACTIVATE_SIZE 0x22
+#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer+0x0c)
+#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b]&0x80)
+#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer+0x10)
+#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer+0x16)
+#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer+0x1e)
+#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer+0x20)
+#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08]&3)==2)
+#define QETH_IDX_REPLY_LEVEL(buffer) (buffer+0x12)
+
+#define PDU_ENCAPSULATION(buffer) \
+ (buffer + *(buffer + (*(buffer+0x0b)) + \
+ *(buffer + *(buffer+0x0b)+0x11) +0x07))
+
+#define IS_IPA(buffer) \
+ ((buffer) && \
+ ( *(buffer + ((*(buffer+0x0b))+4) )==0xc1) )
+
+#define ADDR_FRAME_TYPE_DIX 1
+#define ADDR_FRAME_TYPE_802_3 2
+#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10
+#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20
+
+#endif
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
new file mode 100644
index 000000000000..04719196fd20
--- /dev/null
+++ b/drivers/s390/net/qeth_proc.c
@@ -0,0 +1,495 @@
+/*
+ *
+ * linux/drivers/s390/net/qeth_proc.c ($Revision: 1.13 $)
+ *
+ * Linux on zSeries OSA Express and HiperSockets support
+ * This file contains code related to procfs.
+ *
+ * Copyright 2000,2003 IBM Corporation
+ *
+ * Author(s): Thomas Spatzier <tspat@de.ibm.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/list.h>
+#include <linux/rwsem.h>
+
+#include "qeth.h"
+#include "qeth_mpc.h"
+#include "qeth_fs.h"
+
+const char *VERSION_QETH_PROC_C = "$Revision: 1.13 $";
+
+/***** /proc/qeth *****/
+#define QETH_PROCFILE_NAME "qeth"
+static struct proc_dir_entry *qeth_procfile;
+
+static void *
+qeth_procfile_seq_start(struct seq_file *s, loff_t *offset)
+{
+ struct list_head *next_card = NULL;
+ int i = 0;
+
+ down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+
+ if (*offset == 0)
+ return SEQ_START_TOKEN;
+
+ /* get card at pos *offset */
+ list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices)
+ if (++i == *offset)
+ return next_card;
+
+ return NULL;
+}
+
+static void
+qeth_procfile_seq_stop(struct seq_file *s, void* it)
+{
+ up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+}
+
+static void *
+qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+ struct list_head *next_card = NULL;
+ struct list_head *current_card;
+
+ if (it == SEQ_START_TOKEN) {
+ next_card = qeth_ccwgroup_driver.driver.devices.next;
+ if (next_card->next == next_card) /* list empty */
+ return NULL;
+ (*offset)++;
+ } else {
+ current_card = (struct list_head *)it;
+ if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
+ return NULL; /* end of list reached */
+ next_card = current_card->next;
+ (*offset)++;
+ }
+
+ return next_card;
+}
+
+static inline const char *
+qeth_get_router_str(struct qeth_card *card, int ipv)
+{
+ int routing_type = 0;
+
+ if (ipv == 4){
+ routing_type = card->options.route4.type;
+ } else {
+#ifdef CONFIG_QETH_IPV6
+ routing_type = card->options.route6.type;
+#else
+ return "n/a";
+#endif /* CONFIG_QETH_IPV6 */
+ }
+
+ if (routing_type == PRIMARY_ROUTER)
+ return "pri";
+ else if (routing_type == SECONDARY_ROUTER)
+ return "sec";
+ else if (routing_type == MULTICAST_ROUTER) {
+ if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+ return "mc+";
+ return "mc";
+ } else if (routing_type == PRIMARY_CONNECTOR) {
+ if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+ return "p+c";
+ return "p.c";
+ } else if (routing_type == SECONDARY_CONNECTOR) {
+ if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+ return "s+c";
+ return "s.c";
+ } else if (routing_type == NO_ROUTER)
+ return "no";
+ else
+ return "unk";
+}
+
+static int
+qeth_procfile_seq_show(struct seq_file *s, void *it)
+{
+ struct device *device;
+ struct qeth_card *card;
+ char tmp[12]; /* for qeth_get_prioq_str */
+
+ if (it == SEQ_START_TOKEN){
+ seq_printf(s, "devices CHPID interface "
+ "cardtype port chksum prio-q'ing rtr4 "
+ "rtr6 fsz cnt\n");
+ seq_printf(s, "-------------------------- ----- ---------- "
+ "-------------- ---- ------ ---------- ---- "
+ "---- ----- -----\n");
+ } else {
+ device = list_entry(it, struct device, driver_list);
+ card = device->driver_data;
+ seq_printf(s, "%s/%s/%s x%02X %-10s %-14s %-4i ",
+ CARD_RDEV_ID(card),
+ CARD_WDEV_ID(card),
+ CARD_DDEV_ID(card),
+ card->info.chpid,
+ QETH_CARD_IFNAME(card),
+ qeth_get_cardname_short(card),
+ card->info.portno);
+ if (card->lan_online)
+ seq_printf(s, "%-6s %-10s %-4s %-4s %-5s %-5i\n",
+ qeth_get_checksum_str(card),
+ qeth_get_prioq_str(card, tmp),
+ qeth_get_router_str(card, 4),
+ qeth_get_router_str(card, 6),
+ qeth_get_bufsize_str(card),
+ card->qdio.in_buf_pool.buf_count);
+ else
+ seq_printf(s, " +++ LAN OFFLINE +++\n");
+ }
+ return 0;
+}
+
+static struct seq_operations qeth_procfile_seq_ops = {
+ .start = qeth_procfile_seq_start,
+ .stop = qeth_procfile_seq_stop,
+ .next = qeth_procfile_seq_next,
+ .show = qeth_procfile_seq_show,
+};
+
+static int
+qeth_procfile_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &qeth_procfile_seq_ops);
+}
+
+static struct file_operations qeth_procfile_fops = {
+ .owner = THIS_MODULE,
+ .open = qeth_procfile_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/***** /proc/qeth_perf *****/
+#define QETH_PERF_PROCFILE_NAME "qeth_perf"
+static struct proc_dir_entry *qeth_perf_procfile;
+
+#ifdef CONFIG_QETH_PERF_STATS
+
+static void *
+qeth_perf_procfile_seq_start(struct seq_file *s, loff_t *offset)
+{
+ struct list_head *next_card = NULL;
+ int i = 0;
+
+ down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+ /* get card at pos *offset */
+ list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){
+ if (i == *offset)
+ return next_card;
+ i++;
+ }
+ return NULL;
+}
+
+static void
+qeth_perf_procfile_seq_stop(struct seq_file *s, void* it)
+{
+ up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+}
+
+static void *
+qeth_perf_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+ struct list_head *current_card = (struct list_head *)it;
+
+ if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
+ return NULL; /* end of list reached */
+ (*offset)++;
+ return current_card->next;
+}
+
+static int
+qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
+{
+ struct device *device;
+ struct qeth_card *card;
+
+ device = list_entry(it, struct device, driver_list);
+ card = device->driver_data;
+ seq_printf(s, "For card with devnos %s/%s/%s (%s):\n",
+ CARD_RDEV_ID(card),
+ CARD_WDEV_ID(card),
+ CARD_DDEV_ID(card),
+ QETH_CARD_IFNAME(card)
+ );
+ seq_printf(s, " Skb's/buffers received : %li/%i\n"
+ " Skb's/buffers sent : %li/%i\n\n",
+ card->stats.rx_packets, card->perf_stats.bufs_rec,
+ card->stats.tx_packets, card->perf_stats.bufs_sent
+ );
+ seq_printf(s, " Skb's/buffers sent without packing : %li/%i\n"
+ " Skb's/buffers sent with packing : %i/%i\n\n",
+ card->stats.tx_packets - card->perf_stats.skbs_sent_pack,
+ card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack,
+ card->perf_stats.skbs_sent_pack,
+ card->perf_stats.bufs_sent_pack
+ );
+ seq_printf(s, " Skbs sent in SG mode : %i\n"
+ " Skb fragments sent in SG mode : %i\n\n",
+ card->perf_stats.sg_skbs_sent,
+ card->perf_stats.sg_frags_sent);
+ seq_printf(s, " large_send tx (in Kbytes) : %i\n"
+ " large_send count : %i\n\n",
+ card->perf_stats.large_send_bytes >> 10,
+ card->perf_stats.large_send_cnt);
+ seq_printf(s, " Packing state changes no pkg.->packing : %i/%i\n"
+ " Watermarks L/H : %i/%i\n"
+ " Current buffer usage (outbound q's) : "
+ "%i/%i/%i/%i\n\n",
+ card->perf_stats.sc_dp_p, card->perf_stats.sc_p_dp,
+ QETH_LOW_WATERMARK_PACK, QETH_HIGH_WATERMARK_PACK,
+ atomic_read(&card->qdio.out_qs[0]->used_buffers),
+ (card->qdio.no_out_queues > 1)?
+ atomic_read(&card->qdio.out_qs[1]->used_buffers)
+ : 0,
+ (card->qdio.no_out_queues > 2)?
+ atomic_read(&card->qdio.out_qs[2]->used_buffers)
+ : 0,
+ (card->qdio.no_out_queues > 3)?
+ atomic_read(&card->qdio.out_qs[3]->used_buffers)
+ : 0
+ );
+ seq_printf(s, " Inbound handler time (in us) : %i\n"
+ " Inbound handler count : %i\n"
+ " Inbound do_QDIO time (in us) : %i\n"
+ " Inbound do_QDIO count : %i\n\n"
+ " Outbound handler time (in us) : %i\n"
+ " Outbound handler count : %i\n\n"
+ " Outbound time (in us, incl QDIO) : %i\n"
+ " Outbound count : %i\n"
+ " Outbound do_QDIO time (in us) : %i\n"
+ " Outbound do_QDIO count : %i\n\n",
+ card->perf_stats.inbound_time,
+ card->perf_stats.inbound_cnt,
+ card->perf_stats.inbound_do_qdio_time,
+ card->perf_stats.inbound_do_qdio_cnt,
+ card->perf_stats.outbound_handler_time,
+ card->perf_stats.outbound_handler_cnt,
+ card->perf_stats.outbound_time,
+ card->perf_stats.outbound_cnt,
+ card->perf_stats.outbound_do_qdio_time,
+ card->perf_stats.outbound_do_qdio_cnt
+ );
+ return 0;
+}
+
+static struct seq_operations qeth_perf_procfile_seq_ops = {
+ .start = qeth_perf_procfile_seq_start,
+ .stop = qeth_perf_procfile_seq_stop,
+ .next = qeth_perf_procfile_seq_next,
+ .show = qeth_perf_procfile_seq_show,
+};
+
+static int
+qeth_perf_procfile_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &qeth_perf_procfile_seq_ops);
+}
+
+static struct file_operations qeth_perf_procfile_fops = {
+ .owner = THIS_MODULE,
+ .open = qeth_perf_procfile_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#define qeth_perf_procfile_created qeth_perf_procfile
+#else
+#define qeth_perf_procfile_created 1
+#endif /* CONFIG_QETH_PERF_STATS */
+
+/***** /proc/qeth_ipa_takeover *****/
+#define QETH_IPATO_PROCFILE_NAME "qeth_ipa_takeover"
+static struct proc_dir_entry *qeth_ipato_procfile;
+
+static void *
+qeth_ipato_procfile_seq_start(struct seq_file *s, loff_t *offset)
+{
+ struct list_head *next_card = NULL;
+ int i = 0;
+
+ down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+ /* TODO: finish this */
+ /*
+ * maybe SEQ_START_TOKEN can be returned for offset 0
+ * output driver settings then;
+ * else output setting for respective card
+ */
+ /* get card at pos *offset */
+ list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){
+ if (i == *offset)
+ return next_card;
+ i++;
+ }
+ return NULL;
+}
+
+static void
+qeth_ipato_procfile_seq_stop(struct seq_file *s, void* it)
+{
+ up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+}
+
+static void *
+qeth_ipato_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+ struct list_head *current_card = (struct list_head *)it;
+
+ /* TODO: finish this */
+ /*
+ * maybe SEQ_START_TOKEN can be returned for offset 0
+ * output driver settings then;
+ * else output setting for respective card
+ */
+ if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
+ return NULL; /* end of list reached */
+ (*offset)++;
+ return current_card->next;
+}
+
+static int
+qeth_ipato_procfile_seq_show(struct seq_file *s, void *it)
+{
+ struct device *device;
+ struct qeth_card *card;
+
+ /* TODO: finish this */
+ /*
+ * maybe SEQ_START_TOKEN can be returned for offset 0
+ * output driver settings then;
+ * else output setting for respective card
+ */
+ device = list_entry(it, struct device, driver_list);
+ card = device->driver_data;
+
+ return 0;
+}
+
+static struct seq_operations qeth_ipato_procfile_seq_ops = {
+ .start = qeth_ipato_procfile_seq_start,
+ .stop = qeth_ipato_procfile_seq_stop,
+ .next = qeth_ipato_procfile_seq_next,
+ .show = qeth_ipato_procfile_seq_show,
+};
+
+static int
+qeth_ipato_procfile_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &qeth_ipato_procfile_seq_ops);
+}
+
+static struct file_operations qeth_ipato_procfile_fops = {
+ .owner = THIS_MODULE,
+ .open = qeth_ipato_procfile_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+int __init
+qeth_create_procfs_entries(void)
+{
+ qeth_procfile = create_proc_entry(QETH_PROCFILE_NAME,
+ S_IFREG | 0444, NULL);
+ if (qeth_procfile)
+ qeth_procfile->proc_fops = &qeth_procfile_fops;
+
+#ifdef CONFIG_QETH_PERF_STATS
+ qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME,
+ S_IFREG | 0444, NULL);
+ if (qeth_perf_procfile)
+ qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops;
+#endif /* CONFIG_QETH_PERF_STATS */
+
+ qeth_ipato_procfile = create_proc_entry(QETH_IPATO_PROCFILE_NAME,
+ S_IFREG | 0444, NULL);
+ if (qeth_ipato_procfile)
+ qeth_ipato_procfile->proc_fops = &qeth_ipato_procfile_fops;
+
+ if (qeth_procfile &&
+ qeth_ipato_procfile &&
+ qeth_perf_procfile_created)
+ return 0;
+ else
+ return -ENOMEM;
+}
+
+void __exit
+qeth_remove_procfs_entries(void)
+{
+ if (qeth_procfile)
+ remove_proc_entry(QETH_PROCFILE_NAME, NULL);
+ if (qeth_perf_procfile)
+ remove_proc_entry(QETH_PERF_PROCFILE_NAME, NULL);
+ if (qeth_ipato_procfile)
+ remove_proc_entry(QETH_IPATO_PROCFILE_NAME, NULL);
+}
+
+
+/* ONLY FOR DEVELOPMENT! -> make it as module */
+/*
+static void
+qeth_create_sysfs_entries(void)
+{
+ struct device *dev;
+
+ down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+
+ list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices,
+ driver_list)
+ qeth_create_device_attributes(dev);
+
+ up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+}
+
+static void
+qeth_remove_sysfs_entries(void)
+{
+ struct device *dev;
+
+ down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+
+ list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices,
+ driver_list)
+ qeth_remove_device_attributes(dev);
+
+ up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+}
+
+static int __init
+qeth_fs_init(void)
+{
+ printk(KERN_INFO "qeth_fs_init\n");
+ qeth_create_procfs_entries();
+ qeth_create_sysfs_entries();
+
+ return 0;
+}
+
+static void __exit
+qeth_fs_exit(void)
+{
+ printk(KERN_INFO "qeth_fs_exit\n");
+ qeth_remove_procfs_entries();
+ qeth_remove_sysfs_entries();
+}
+
+
+module_init(qeth_fs_init);
+module_exit(qeth_fs_exit);
+
+MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
new file mode 100644
index 000000000000..240348398211
--- /dev/null
+++ b/drivers/s390/net/qeth_sys.c
@@ -0,0 +1,1788 @@
+/*
+ *
+ * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.51 $)
+ *
+ * Linux on zSeries OSA Express and HiperSockets support
+ * This file contains code related to sysfs.
+ *
+ * Copyright 2000,2003 IBM Corporation
+ *
+ * Author(s): Thomas Spatzier <tspat@de.ibm.com>
+ * Frank Pavlic <pavlic@de.ibm.com>
+ *
+ */
+#include <linux/list.h>
+#include <linux/rwsem.h>
+
+#include <asm/ebcdic.h>
+
+#include "qeth.h"
+#include "qeth_mpc.h"
+#include "qeth_fs.h"
+
+const char *VERSION_QETH_SYS_C = "$Revision: 1.51 $";
+
+/*****************************************************************************/
+/* */
+/* /sys-fs stuff UNDER DEVELOPMENT !!! */
+/* */
+/*****************************************************************************/
+//low/high watermark
+
+static ssize_t
+qeth_dev_state_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+	const char *state;
+
+	if (!card)
+		return -EINVAL;
+
+	/* map the card's state machine onto a human readable string */
+	switch (card->state) {
+	case CARD_STATE_DOWN:
+		state = "DOWN";
+		break;
+	case CARD_STATE_HARDSETUP:
+		state = "HARDSETUP";
+		break;
+	case CARD_STATE_SOFTSETUP:
+		state = "SOFTSETUP";
+		break;
+	case CARD_STATE_UP:
+		state = card->lan_online ? "UP (LAN ONLINE)" :
+			"UP (LAN OFFLINE)";
+		break;
+	case CARD_STATE_RECOVER:
+		state = "RECOVER";
+		break;
+	default:
+		state = "UNKNOWN";
+		break;
+	}
+	return sprintf(buf, "%s\n", state);
+}
+
+static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
+
+static ssize_t
+qeth_dev_chpid_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	/* CHPID of the channel path this card is attached to */
+	if (card == NULL)
+		return -EINVAL;
+	return sprintf(buf, "%02X\n", card->info.chpid);
+}
+
+static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
+
+static ssize_t
+qeth_dev_if_name_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	/* name of the network interface associated with this card */
+	if (card == NULL)
+		return -EINVAL;
+	return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
+}
+
+static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
+
+static ssize_t
+qeth_dev_card_type_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	/* short human readable card type name */
+	if (card == NULL)
+		return -EINVAL;
+	return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
+}
+
+static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
+
+static ssize_t
+qeth_dev_portno_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->info.portno);
+}
+
+static ssize_t
+qeth_dev_portno_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+	char *tmp;
+	unsigned int portno;
+
+	if (!card)
+		return -EINVAL;
+
+	/* the relative port number may only be changed while the
+	 * device is offline (DOWN) or in recovery */
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER))
+		return -EPERM;
+
+	portno = simple_strtoul(buf, &tmp, 16);
+	/* portno is unsigned, so only the upper bound needs checking;
+	 * the former (portno < 0) test was always false */
+	if (portno > MAX_PORTNO) {
+		PRINT_WARN("portno 0x%X is out of range\n", portno);
+		return -EINVAL;
+	}
+
+	card->info.portno = portno;
+	return count;
+}
+
+static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
+
+static ssize_t
+qeth_dev_portname_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+ char portname[9] = {0, };
+
+ if (!card)
+ return -EINVAL;
+
+ if (card->info.portname_required) {
+ memcpy(portname, card->info.portname + 1, 8);
+ EBCASC(portname, 8);
+ return sprintf(buf, "%s\n", portname);
+ } else
+ return sprintf(buf, "no portname required\n");
+}
+
+static ssize_t
+qeth_dev_portname_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ tmp = strsep((char **) &buf, "\n");
+ if ((strlen(tmp) > 8) || (strlen(tmp) < 2))
+ return -EINVAL;
+
+ card->info.portname[0] = strlen(tmp);
+ /* for beauty reasons */
+ for (i = 1; i < 9; i++)
+ card->info.portname[i] = ' ';
+ strcpy(card->info.portname + 1, tmp);
+ ASCEBC(card->info.portname + 1, 8);
+
+ return count;
+}
+
+static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
+ qeth_dev_portname_store);
+
+static ssize_t
+qeth_dev_checksum_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%s checksumming\n", qeth_get_checksum_str(card));
+}
+
+static ssize_t
+qeth_dev_checksum_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+
+ if (!card)
+ return -EINVAL;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ tmp = strsep((char **) &buf, "\n");
+ if (!strcmp(tmp, "sw_checksumming"))
+ card->options.checksum_type = SW_CHECKSUMMING;
+ else if (!strcmp(tmp, "hw_checksumming"))
+ card->options.checksum_type = HW_CHECKSUMMING;
+ else if (!strcmp(tmp, "no_checksumming"))
+ card->options.checksum_type = NO_CHECKSUMMING;
+ else {
+ PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(checksumming, 0644, qeth_dev_checksum_show,
+ qeth_dev_checksum_store);
+
+static ssize_t
+qeth_dev_prioqing_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ switch (card->qdio.do_prio_queueing) {
+ case QETH_PRIO_Q_ING_PREC:
+ return sprintf(buf, "%s\n", "by precedence");
+ case QETH_PRIO_Q_ING_TOS:
+ return sprintf(buf, "%s\n", "by type of service");
+ default:
+ return sprintf(buf, "always queue %i\n",
+ card->qdio.default_out_queue);
+ }
+}
+
+static ssize_t
+qeth_dev_prioqing_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+
+ if (!card)
+ return -EINVAL;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ /* check if 1920 devices are supported ,
+ * if though we have to permit priority queueing
+ */
+ if (card->qdio.no_out_queues == 1) {
+ PRINT_WARN("Priority queueing disabled due "
+ "to hardware limitations!\n");
+ card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+ return -EPERM;
+ }
+
+ tmp = strsep((char **) &buf, "\n");
+ if (!strcmp(tmp, "prio_queueing_prec"))
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
+ else if (!strcmp(tmp, "prio_queueing_tos"))
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
+ else if (!strcmp(tmp, "no_prio_queueing:0")) {
+ card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.default_out_queue = 0;
+ } else if (!strcmp(tmp, "no_prio_queueing:1")) {
+ card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.default_out_queue = 1;
+ } else if (!strcmp(tmp, "no_prio_queueing:2")) {
+ card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.default_out_queue = 2;
+ } else if (!strcmp(tmp, "no_prio_queueing:3")) {
+ card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.default_out_queue = 3;
+ } else if (!strcmp(tmp, "no_prio_queueing")) {
+ card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else {
+ PRINT_WARN("Unknown queueing type '%s'\n", tmp);
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
+ qeth_dev_prioqing_store);
+
+static ssize_t
+qeth_dev_bufcnt_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
+}
+
+static ssize_t
+qeth_dev_bufcnt_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+ int cnt, old_cnt;
+ int rc;
+
+ if (!card)
+ return -EINVAL;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ old_cnt = card->qdio.in_buf_pool.buf_count;
+ cnt = simple_strtoul(buf, &tmp, 10);
+ cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
+ ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
+ if (old_cnt != cnt) {
+ if ((rc = qeth_realloc_buffer_pool(card, cnt)))
+ PRINT_WARN("Error (%d) while setting "
+ "buffer count.\n", rc);
+ }
+ return count;
+}
+
+static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
+ qeth_dev_bufcnt_store);
+
+static inline ssize_t
+qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
+ char *buf)
+{
+ switch (route->type) {
+ case PRIMARY_ROUTER:
+ return sprintf(buf, "%s\n", "primary router");
+ case SECONDARY_ROUTER:
+ return sprintf(buf, "%s\n", "secondary router");
+ case MULTICAST_ROUTER:
+ if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+ return sprintf(buf, "%s\n", "multicast router+");
+ else
+ return sprintf(buf, "%s\n", "multicast router");
+ case PRIMARY_CONNECTOR:
+ if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+ return sprintf(buf, "%s\n", "primary connector+");
+ else
+ return sprintf(buf, "%s\n", "primary connector");
+ case SECONDARY_CONNECTOR:
+ if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+ return sprintf(buf, "%s\n", "secondary connector+");
+ else
+ return sprintf(buf, "%s\n", "secondary connector");
+ default:
+ return sprintf(buf, "%s\n", "no");
+ }
+}
+
+static ssize_t
+qeth_dev_route4_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return qeth_dev_route_show(card, &card->options.route4, buf);
+}
+
+/*
+ * Parse a routing-type keyword written to the route4/route6 attribute
+ * and store it in *route.  If the card is already (soft)set up and the
+ * type actually changed, push the new setting to the hardware.
+ */
+static inline ssize_t
+qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
+		enum qeth_prot_versions prot, const char *buf, size_t count)
+{
+	enum qeth_routing_types old_route_type = route->type;
+	char *tmp;
+	int rc = 0;
+
+	tmp = strsep((char **) &buf, "\n");
+
+	if (!strcmp(tmp, "no_router")) {
+		route->type = NO_ROUTER;
+	} else if (!strcmp(tmp, "primary_connector")) {
+		route->type = PRIMARY_CONNECTOR;
+	} else if (!strcmp(tmp, "secondary_connector")) {
+		route->type = SECONDARY_CONNECTOR;
+	} else if (!strcmp(tmp, "primary_router")) {
+		route->type = PRIMARY_ROUTER;
+	} else if (!strcmp(tmp, "secondary_router")) {
+		route->type = SECONDARY_ROUTER;
+	} else if (!strcmp(tmp, "multicast_router")) {
+		/* the original had this branch twice; the duplicate
+		 * was dead code and has been removed */
+		route->type = MULTICAST_ROUTER;
+	} else {
+		PRINT_WARN("Invalid routing type '%s'.\n", tmp);
+		return -EINVAL;
+	}
+	if (((card->state == CARD_STATE_SOFTSETUP) ||
+	     (card->state == CARD_STATE_UP)) &&
+	    (old_route_type != route->type)) {
+		if (prot == QETH_PROT_IPV4)
+			rc = qeth_setrouting_v4(card);
+		else if (prot == QETH_PROT_IPV6)
+			rc = qeth_setrouting_v6(card);
+	}
+	/* NOTE(review): rc from qeth_setrouting_* is ignored here, so the
+	 * write reports success even if the hardware call failed -- kept
+	 * as-is to preserve existing behavior; consider returning rc */
+	return count;
+}
+
+static ssize_t
+qeth_dev_route4_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return qeth_dev_route_store(card, &card->options.route4,
+ QETH_PROT_IPV4, buf, count);
+}
+
+static DEVICE_ATTR(route4, 0644, qeth_dev_route4_show, qeth_dev_route4_store);
+
+#ifdef CONFIG_QETH_IPV6
+static ssize_t
+qeth_dev_route6_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ if (!qeth_is_supported(card, IPA_IPV6))
+ return sprintf(buf, "%s\n", "n/a");
+
+ return qeth_dev_route_show(card, &card->options.route6, buf);
+}
+
+static ssize_t
+qeth_dev_route6_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ if (!qeth_is_supported(card, IPA_IPV6)){
+ PRINT_WARN("IPv6 not supported for interface %s.\n"
+ "Routing status no changed.\n",
+ QETH_CARD_IFNAME(card));
+ return -ENOTSUPP;
+ }
+
+ return qeth_dev_route_store(card, &card->options.route6,
+ QETH_PROT_IPV6, buf, count);
+}
+
+static DEVICE_ATTR(route6, 0644, qeth_dev_route6_show, qeth_dev_route6_store);
+#endif
+
+static ssize_t
+qeth_dev_add_hhlen_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->options.add_hhlen);
+}
+
+static ssize_t
+qeth_dev_add_hhlen_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ i = simple_strtoul(buf, &tmp, 10);
+ if ((i < 0) || (i > MAX_ADD_HHLEN)) {
+ PRINT_WARN("add_hhlen out of range\n");
+ return -EINVAL;
+ }
+ card->options.add_hhlen = i;
+
+ return count;
+}
+
+static DEVICE_ATTR(add_hhlen, 0644, qeth_dev_add_hhlen_show,
+ qeth_dev_add_hhlen_store);
+
+static ssize_t
+qeth_dev_fake_ll_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->options.fake_ll? 1:0);
+}
+
+static ssize_t
+qeth_dev_fake_ll_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ i = simple_strtoul(buf, &tmp, 16);
+ if ((i != 0) && (i != 1)) {
+ PRINT_WARN("fake_ll: write 0 or 1 to this file!\n");
+ return -EINVAL;
+ }
+ card->options.fake_ll = i;
+ return count;
+}
+
+static DEVICE_ATTR(fake_ll, 0644, qeth_dev_fake_ll_show,
+ qeth_dev_fake_ll_store);
+
+static ssize_t
+qeth_dev_fake_broadcast_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
+}
+
+static ssize_t
+qeth_dev_fake_broadcast_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+	char *end;
+	int value;
+
+	if (!card)
+		return -EINVAL;
+
+	/* only changeable while the device is offline or recovering */
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER))
+		return -EPERM;
+
+	value = simple_strtoul(buf, &end, 16);
+	if (value != 0 && value != 1) {
+		PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
+		return -EINVAL;
+	}
+	card->options.fake_broadcast = value;
+	return count;
+}
+
+static DEVICE_ATTR(fake_broadcast, 0644, qeth_dev_fake_broadcast_show,
+ qeth_dev_fake_broadcast_store);
+
+static ssize_t
+qeth_dev_recover_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+	char *end;
+	int value;
+
+	if (!card)
+		return -EINVAL;
+
+	/* recovery only makes sense for a card that is up and running */
+	if (card->state != CARD_STATE_UP)
+		return -EPERM;
+
+	value = simple_strtoul(buf, &end, 16);
+	/* writing "1" schedules an asynchronous recovery; any other
+	 * value is accepted but ignored */
+	if (value == 1)
+		qeth_schedule_recovery(card);
+
+	return count;
+}
+
+static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
+
+static ssize_t
+qeth_dev_broadcast_mode_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+ (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
+ return sprintf(buf, "n/a\n");
+
+ return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
+ QETH_TR_BROADCAST_ALLRINGS)?
+ "all rings":"local");
+}
+
+static ssize_t
+qeth_dev_broadcast_mode_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+	char *tmp;
+
+	if (!card)
+		return -EINVAL;
+
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER))
+		return -EPERM;
+
+	/* broadcast mode is only meaningful on token ring links */
+	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
+		PRINT_WARN("Device is not a tokenring device!\n");
+		return -EINVAL;
+	}
+
+	tmp = strsep((char **) &buf, "\n");
+
+	if (!strcmp(tmp, "local")) {
+		card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
+		return count;
+	}
+	if (!strcmp(tmp, "all_rings")) {
+		card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
+		return count;
+	}
+	/* every valid keyword returned above; the old trailing
+	 * "return count" after the else branch was unreachable */
+	PRINT_WARN("broadcast_mode: invalid mode %s!\n",
+		   tmp);
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(broadcast_mode, 0644, qeth_dev_broadcast_mode_show,
+ qeth_dev_broadcast_mode_store);
+
+static ssize_t
+qeth_dev_canonical_macaddr_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+ (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
+ return sprintf(buf, "n/a\n");
+
+ return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
+ QETH_TR_MACADDR_CANONICAL)? 1:0);
+}
+
+static ssize_t
+qeth_dev_canonical_macaddr_store(struct device *dev, const char *buf,
+ size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+ (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
+ PRINT_WARN("Device is not a tokenring device!\n");
+ return -EINVAL;
+ }
+
+ i = simple_strtoul(buf, &tmp, 16);
+ if ((i == 0) || (i == 1))
+ card->options.macaddr_mode = i?
+ QETH_TR_MACADDR_CANONICAL :
+ QETH_TR_MACADDR_NONCANONICAL;
+ else {
+ PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(canonical_macaddr, 0644, qeth_dev_canonical_macaddr_show,
+ qeth_dev_canonical_macaddr_store);
+
+static ssize_t
+qeth_dev_layer2_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
+}
+
+static ssize_t
+qeth_dev_layer2_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ if (((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER)) ||
+ (card->info.type != QETH_CARD_TYPE_OSAE))
+ return -EPERM;
+
+ i = simple_strtoul(buf, &tmp, 16);
+ if ((i == 0) || (i == 1))
+ card->options.layer2 = i;
+ else {
+ PRINT_WARN("layer2: write 0 or 1 to this file!\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
+ qeth_dev_layer2_store);
+
+static ssize_t
+qeth_dev_large_send_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ switch (card->options.large_send) {
+ case QETH_LARGE_SEND_NO:
+ return sprintf(buf, "%s\n", "no");
+ case QETH_LARGE_SEND_EDDP:
+ return sprintf(buf, "%s\n", "EDDP");
+ case QETH_LARGE_SEND_TSO:
+ return sprintf(buf, "%s\n", "TSO");
+ default:
+ return sprintf(buf, "%s\n", "N/A");
+ }
+}
+
+static ssize_t
+qeth_dev_large_send_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ enum qeth_large_send_types type;
+ int rc = 0;
+ char *tmp;
+
+ if (!card)
+ return -EINVAL;
+
+ tmp = strsep((char **) &buf, "\n");
+
+ if (!strcmp(tmp, "no")){
+ type = QETH_LARGE_SEND_NO;
+ } else if (!strcmp(tmp, "EDDP")) {
+ type = QETH_LARGE_SEND_EDDP;
+ } else if (!strcmp(tmp, "TSO")) {
+ type = QETH_LARGE_SEND_TSO;
+ } else {
+ PRINT_WARN("large_send: invalid mode %s!\n", tmp);
+ return -EINVAL;
+ }
+ if (card->options.large_send == type)
+ return count;
+ card->options.large_send = type;
+ if ((rc = qeth_set_large_send(card)))
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
+ qeth_dev_large_send_store);
+
+static ssize_t
+qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value )
+{
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", value);
+}
+
+/*
+ * Common store helper for the blkt/total, blkt/inter and
+ * blkt/inter_jumbo attributes: parse a decimal value from buf and
+ * store it in *value if it lies within [0, max_value].
+ */
+static ssize_t
+qeth_dev_blkt_store(struct qeth_card *card, const char *buf, size_t count,
+		int *value, int max_value)
+{
+	char *tmp;
+	int i;
+
+	if (!card)
+		return -EINVAL;
+
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER))
+		return -EPERM;
+
+	i = simple_strtoul(buf, &tmp, 10);
+	/* also reject negative values: a huge unsigned input can wrap
+	 * to a negative int, and the warning below promises a range of
+	 * 0..max_value */
+	if ((i < 0) || (i > max_value)) {
+		PRINT_WARN("blkt total time: write values between"
+			   " 0 and %d to this file!\n", max_value);
+		return -EINVAL;
+	}
+	*value = i;
+	return count;
+}
+
+static ssize_t
+qeth_dev_blkt_total_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
+}
+
+
+static ssize_t
+qeth_dev_blkt_total_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ return qeth_dev_blkt_store(card, buf, count,
+ &card->info.blkt.time_total,1000);
+}
+
+
+
+static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
+ qeth_dev_blkt_total_store);
+
+static ssize_t
+qeth_dev_blkt_inter_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
+}
+
+
+static ssize_t
+qeth_dev_blkt_inter_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ return qeth_dev_blkt_store(card, buf, count,
+ &card->info.blkt.inter_packet,100);
+}
+
+static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
+ qeth_dev_blkt_inter_store);
+
+static ssize_t
+qeth_dev_blkt_inter_jumbo_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ return qeth_dev_blkt_show(buf, card,
+ card->info.blkt.inter_packet_jumbo);
+}
+
+
+static ssize_t
+qeth_dev_blkt_inter_jumbo_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ return qeth_dev_blkt_store(card, buf, count,
+ &card->info.blkt.inter_packet_jumbo,100);
+}
+
+static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
+ qeth_dev_blkt_inter_jumbo_store);
+
+static struct device_attribute * qeth_blkt_device_attrs[] = {
+ &dev_attr_total,
+ &dev_attr_inter,
+ &dev_attr_inter_jumbo,
+ NULL,
+};
+
+static struct attribute_group qeth_device_blkt_group = {
+ .name = "blkt",
+ .attrs = (struct attribute **)qeth_blkt_device_attrs,
+};
+
+static struct device_attribute * qeth_device_attrs[] = {
+ &dev_attr_state,
+ &dev_attr_chpid,
+ &dev_attr_if_name,
+ &dev_attr_card_type,
+ &dev_attr_portno,
+ &dev_attr_portname,
+ &dev_attr_checksumming,
+ &dev_attr_priority_queueing,
+ &dev_attr_buffer_count,
+ &dev_attr_route4,
+#ifdef CONFIG_QETH_IPV6
+ &dev_attr_route6,
+#endif
+ &dev_attr_add_hhlen,
+ &dev_attr_fake_ll,
+ &dev_attr_fake_broadcast,
+ &dev_attr_recover,
+ &dev_attr_broadcast_mode,
+ &dev_attr_canonical_macaddr,
+ &dev_attr_layer2,
+ &dev_attr_large_send,
+ NULL,
+};
+
+static struct attribute_group qeth_device_attr_group = {
+ .attrs = (struct attribute **)qeth_device_attrs,
+};
+
+
+#define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \
+struct device_attribute dev_attr_##_id = { \
+ .attr = {.name=__stringify(_name), .mode=_mode, .owner=THIS_MODULE },\
+ .show = _show, \
+ .store = _store, \
+};
+
+int
+qeth_check_layer2(struct qeth_card *card)
+{
+	/* IP address takeover and friends are layer-3 features; refuse
+	 * them when the card is configured for layer-2 operation */
+	return card->options.layer2 ? -EPERM : 0;
+}
+
+
+static ssize_t
+qeth_dev_ipato_enable_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ if (qeth_check_layer2(card))
+ return -EPERM;
+ return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
+}
+
+static ssize_t
+qeth_dev_ipato_enable_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+
+ if (!card)
+ return -EINVAL;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ if (qeth_check_layer2(card))
+ return -EPERM;
+
+ tmp = strsep((char **) &buf, "\n");
+ if (!strcmp(tmp, "toggle")){
+ card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
+ } else if (!strcmp(tmp, "1")){
+ card->ipato.enabled = 1;
+ } else if (!strcmp(tmp, "0")){
+ card->ipato.enabled = 0;
+ } else {
+ PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
+ "this file\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
+ qeth_dev_ipato_enable_show,
+ qeth_dev_ipato_enable_store);
+
+static ssize_t
+qeth_dev_ipato_invert4_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ if (qeth_check_layer2(card))
+ return -EPERM;
+
+ return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
+}
+
+static ssize_t
+qeth_dev_ipato_invert4_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+
+ if (!card)
+ return -EINVAL;
+
+ if (qeth_check_layer2(card))
+ return -EPERM;
+
+ tmp = strsep((char **) &buf, "\n");
+ if (!strcmp(tmp, "toggle")){
+ card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
+ } else if (!strcmp(tmp, "1")){
+ card->ipato.invert4 = 1;
+ } else if (!strcmp(tmp, "0")){
+ card->ipato.invert4 = 0;
+ } else {
+ PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
+ "this file\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
+ qeth_dev_ipato_invert4_show,
+ qeth_dev_ipato_invert4_store);
+
+/*
+ * List all IP address takeover entries of the given protocol, one
+ * "<address>/<mask bits>" per line.  Returns the number of bytes
+ * written to buf, or -EPERM if the card runs in layer-2 mode.
+ */
+static inline ssize_t
+qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
+			enum qeth_prot_versions proto)
+{
+	struct qeth_ipato_entry *ipatoe;
+	unsigned long flags;
+	char addr_str[40];
+	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
+	int i = 0;
+
+	if (qeth_check_layer2(card))
+		return -EPERM;
+
+	/* worst-case printed address length: 12 for IPv4, 40 for IPv6 */
+	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
+	/* add strlen for "/<mask>\n" */
+	entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
+	/* ip_lock protects the ipato entry list while we walk it */
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry(ipatoe, &card->ipato.entries, entry){
+		if (ipatoe->proto != proto)
+			continue;
+		/* String must not be longer than PAGE_SIZE. So we check if
+		 * string length gets near PAGE_SIZE. Then we can savely display
+		 * the next IPv6 address (worst case, compared to IPv4) */
+		if ((PAGE_SIZE - i) <= entry_len)
+			break;
+		qeth_ipaddr_to_string(proto, ipatoe->addr, addr_str);
+		i += snprintf(buf + i, PAGE_SIZE - i,
+			      "%s/%i\n", addr_str, ipatoe->mask_bits);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+
+	return i;
+}
+
+static ssize_t
+qeth_dev_ipato_add4_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
+}
+
+/*
+ * Parse an "<ip addr>/<mask bits>" string as written to the
+ * ipa_takeover add/del attributes.  Fills addr and *mask_bits;
+ * returns 0 on success or -EINVAL on malformed input.
+ */
+static inline int
+qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
+		  u8 *addr, int *mask_bits)
+{
+	const char *start, *end;
+	char *tmp;
+	char buffer[49] = {0, };
+
+	start = buf;
+	/* get address string */
+	end = strchr(start, '/');
+	if (!end){
+		PRINT_WARN("Invalid format for ipato_addx/delx. "
+			   "Use <ip addr>/<mask bits>\n");
+		return -EINVAL;
+	}
+	/* bound the copy: the previous unchecked strncpy could overflow
+	 * buffer[] when the address part was sizeof(buffer) or longer */
+	if ((size_t)(end - start) >= sizeof(buffer)) {
+		PRINT_WARN("Invalid IP address format!\n");
+		return -EINVAL;
+	}
+	strncpy(buffer, start, end - start);
+	if (qeth_string_to_ipaddr(buffer, proto, addr)){
+		PRINT_WARN("Invalid IP address format!\n");
+		return -EINVAL;
+	}
+	start = end + 1;
+	/* NOTE(review): mask_bits is not range-checked here; callers
+	 * presumably validate it -- TODO confirm */
+	*mask_bits = simple_strtoul(start, &tmp, 10);
+
+	return 0;
+}
+
+static inline ssize_t
+qeth_dev_ipato_add_store(const char *buf, size_t count,
+ struct qeth_card *card, enum qeth_prot_versions proto)
+{
+ struct qeth_ipato_entry *ipatoe;
+ u8 addr[16];
+ int mask_bits;
+ int rc;
+
+ if (qeth_check_layer2(card))
+ return -EPERM;
+ if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
+ return rc;
+
+ if (!(ipatoe = kmalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){
+ PRINT_WARN("No memory to allocate ipato entry\n");
+ return -ENOMEM;
+ }
+ memset(ipatoe, 0, sizeof(struct qeth_ipato_entry));
+ ipatoe->proto = proto;
+ memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
+ ipatoe->mask_bits = mask_bits;
+
+ if ((rc = qeth_add_ipato_entry(card, ipatoe))){
+ kfree(ipatoe);
+ return rc;
+ }
+
+ return count;
+}
+
+static ssize_t
+qeth_dev_ipato_add4_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
+ qeth_dev_ipato_add4_show,
+ qeth_dev_ipato_add4_store);
+
+static inline ssize_t
+qeth_dev_ipato_del_store(const char *buf, size_t count,
+ struct qeth_card *card, enum qeth_prot_versions proto)
+{
+ u8 addr[16];
+ int mask_bits;
+ int rc;
+
+ if (qeth_check_layer2(card))
+ return -EPERM;
+ if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
+ return rc;
+
+ qeth_del_ipato_entry(card, proto, addr, mask_bits);
+
+ return count;
+}
+
+static ssize_t
+qeth_dev_ipato_del4_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
+ qeth_dev_ipato_del4_store);
+
+#ifdef CONFIG_QETH_IPV6
+static ssize_t
+qeth_dev_ipato_invert6_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ if (qeth_check_layer2(card))
+ return -EPERM;
+
+ return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
+}
+
+static ssize_t
+qeth_dev_ipato_invert6_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+
+ if (!card)
+ return -EINVAL;
+
+ if (qeth_check_layer2(card))
+ return -EPERM;
+
+ tmp = strsep((char **) &buf, "\n");
+ if (!strcmp(tmp, "toggle")){
+ card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
+ } else if (!strcmp(tmp, "1")){
+ card->ipato.invert6 = 1;
+ } else if (!strcmp(tmp, "0")){
+ card->ipato.invert6 = 0;
+ } else {
+ PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
+ "this file\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
+ qeth_dev_ipato_invert6_show,
+ qeth_dev_ipato_invert6_store);
+
+
+static ssize_t
+qeth_dev_ipato_add6_show(struct device *dev, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
+}
+
+static ssize_t
+qeth_dev_ipato_add6_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
+ qeth_dev_ipato_add6_show,
+ qeth_dev_ipato_add6_store);
+
+static ssize_t
+qeth_dev_ipato_del6_store(struct device *dev, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
+ qeth_dev_ipato_del6_store);
+#endif /* CONFIG_QETH_IPV6 */
+
+static struct device_attribute * qeth_ipato_device_attrs[] = {
+ &dev_attr_ipato_enable,
+ &dev_attr_ipato_invert4,
+ &dev_attr_ipato_add4,
+ &dev_attr_ipato_del4,
+#ifdef CONFIG_QETH_IPV6
+ &dev_attr_ipato_invert6,
+ &dev_attr_ipato_add6,
+ &dev_attr_ipato_del6,
+#endif
+ NULL,
+};
+
+static struct attribute_group qeth_device_ipato_group = {
+ .name = "ipa_takeover",
+ .attrs = (struct attribute **)qeth_ipato_device_attrs,
+};
+
+/*
+ * Format the card's configured VIPA (virtual IP) addresses of the given
+ * protocol into @buf, one address per line, followed by a blank line.
+ * Returns the number of bytes written, or -EPERM on a layer-2 card.
+ */
+static inline ssize_t
+qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
+			enum qeth_prot_versions proto)
+{
+	struct qeth_ipaddr *ipaddr;
+	char addr_str[40];
+	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
+	unsigned long flags;
+	int i = 0;
+
+	if (qeth_check_layer2(card))
+		return -EPERM;
+
+	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
+	entry_len += 2; /* \n + terminator */
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry(ipaddr, &card->ip_list, entry){
+		/* only VIPA entries of the requested protocol are listed */
+		if (ipaddr->proto != proto)
+			continue;
+		if (ipaddr->type != QETH_IP_TYPE_VIPA)
+			continue;
+		/* String must not be longer than PAGE_SIZE. So we check if
+		 * string length gets near PAGE_SIZE. Then we can savely display
+		 * the next IPv6 address (worst case, compared to IPv4) */
+		if ((PAGE_SIZE - i) <= entry_len)
+			break;
+		qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
+		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+
+	return i;
+}
+
+/* sysfs show for vipa/add4: list the card's IPv4 VIPA addresses. */
+static ssize_t
+qeth_dev_vipa_add4_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
+}
+
+/* Parse a user-supplied VIPA address string into the binary form in
+ * @addr; returns 0 on success, -EINVAL (with a warning) on bad input. */
+static inline int
+qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
+		 u8 *addr)
+{
+	if (qeth_string_to_ipaddr(buf, proto, addr)){
+		PRINT_WARN("Invalid IP address format!\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Parse @buf and register the address as a VIPA on @card.
+ * Returns @count on success, -EPERM on a layer-2 card, or the error
+ * from parsing / qeth_add_vipa().
+ */
+static inline ssize_t
+qeth_dev_vipa_add_store(const char *buf, size_t count,
+			 struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16] = {0, };
+	int rc;
+
+	if (qeth_check_layer2(card))
+		return -EPERM;
+	if ((rc = qeth_parse_vipae(buf, proto, addr)))
+		return rc;
+
+	if ((rc = qeth_add_vipa(card, proto, addr)))
+		return rc;
+
+	return count;
+}
+
+/* sysfs store for vipa/add4: add an IPv4 VIPA address. */
+static ssize_t
+qeth_dev_vipa_add4_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
+ qeth_dev_vipa_add4_show,
+ qeth_dev_vipa_add4_store);
+
+/*
+ * Parse @buf and remove the matching VIPA from @card.
+ * qeth_del_vipa() returns void, so deletion itself is best-effort;
+ * only layer-2 mode and parse errors are reported to the caller.
+ */
+static inline ssize_t
+qeth_dev_vipa_del_store(const char *buf, size_t count,
+			 struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16];
+	int rc;
+
+	if (qeth_check_layer2(card))
+		return -EPERM;
+	if ((rc = qeth_parse_vipae(buf, proto, addr)))
+		return rc;
+
+	qeth_del_vipa(card, proto, addr);
+
+	return count;
+}
+
+/* sysfs store for vipa/del4: delete an IPv4 VIPA address. */
+static ssize_t
+qeth_dev_vipa_del4_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
+ qeth_dev_vipa_del4_store);
+
+#ifdef CONFIG_QETH_IPV6
+/* sysfs show for vipa/add6: list the card's IPv6 VIPA addresses. */
+static ssize_t
+qeth_dev_vipa_add6_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
+}
+
+/* sysfs store for vipa/add6: add an IPv6 VIPA address. */
+static ssize_t
+qeth_dev_vipa_add6_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
+ qeth_dev_vipa_add6_show,
+ qeth_dev_vipa_add6_store);
+
+/* sysfs store for vipa/del6: delete an IPv6 VIPA address.
+ * Note: no separate qeth_check_layer2() here — qeth_dev_vipa_del_store()
+ * already performs that check, so the previous extra call was redundant;
+ * this now mirrors vipa_del4 exactly. */
+static ssize_t
+qeth_dev_vipa_del6_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
+ qeth_dev_vipa_del6_store);
+#endif /* CONFIG_QETH_IPV6 */
+
+/* Attributes published under the "vipa" sysfs group; IPv6 entries are
+ * conditional on CONFIG_QETH_IPV6. */
+static struct device_attribute * qeth_vipa_device_attrs[] = {
+	&dev_attr_vipa_add4,
+	&dev_attr_vipa_del4,
+#ifdef CONFIG_QETH_IPV6
+	&dev_attr_vipa_add6,
+	&dev_attr_vipa_del6,
+#endif
+	NULL,
+};
+
+static struct attribute_group qeth_device_vipa_group = {
+	.name = "vipa",
+	.attrs = (struct attribute **)qeth_vipa_device_attrs,
+};
+
+/*
+ * Format the card's configured RXIP addresses of the given protocol into
+ * @buf, one per line, followed by a blank line. Same layout logic as
+ * qeth_dev_vipa_add_show(), but filters on QETH_IP_TYPE_RXIP.
+ * Returns bytes written, or -EPERM on a layer-2 card.
+ */
+static inline ssize_t
+qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
+		       enum qeth_prot_versions proto)
+{
+	struct qeth_ipaddr *ipaddr;
+	char addr_str[40];
+	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
+	unsigned long flags;
+	int i = 0;
+
+	if (qeth_check_layer2(card))
+		return -EPERM;
+
+	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
+	entry_len += 2; /* \n + terminator */
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry(ipaddr, &card->ip_list, entry){
+		if (ipaddr->proto != proto)
+			continue;
+		if (ipaddr->type != QETH_IP_TYPE_RXIP)
+			continue;
+		/* String must not be longer than PAGE_SIZE. So we check if
+		 * string length gets near PAGE_SIZE. Then we can savely display
+		 * the next IPv6 address (worst case, compared to IPv4) */
+		if ((PAGE_SIZE - i) <= entry_len)
+			break;
+		qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
+		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+
+	return i;
+}
+
+/* sysfs show for rxip/add4: list the card's IPv4 RXIP addresses. */
+static ssize_t
+qeth_dev_rxip_add4_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
+}
+
+/* Parse a user-supplied RXIP address string into the binary form in
+ * @addr; returns 0 on success, -EINVAL (with a warning) on bad input. */
+static inline int
+qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
+		 u8 *addr)
+{
+	if (qeth_string_to_ipaddr(buf, proto, addr)){
+		PRINT_WARN("Invalid IP address format!\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Parse @buf and register the address as an RXIP on @card.
+ * Returns @count on success, -EPERM on a layer-2 card, or the error
+ * from parsing / qeth_add_rxip().
+ */
+static inline ssize_t
+qeth_dev_rxip_add_store(const char *buf, size_t count,
+			struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16] = {0, };
+	int rc;
+
+	if (qeth_check_layer2(card))
+		return -EPERM;
+	if ((rc = qeth_parse_rxipe(buf, proto, addr)))
+		return rc;
+
+	if ((rc = qeth_add_rxip(card, proto, addr)))
+		return rc;
+
+	return count;
+}
+
+/* sysfs store for rxip/add4: add an IPv4 RXIP address. */
+static ssize_t
+qeth_dev_rxip_add4_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
+ qeth_dev_rxip_add4_show,
+ qeth_dev_rxip_add4_store);
+
+/*
+ * Parse @buf and remove the matching RXIP from @card.
+ * qeth_del_rxip() returns void, so deletion is best-effort; only
+ * layer-2 mode and parse errors are reported to the caller.
+ */
+static inline ssize_t
+qeth_dev_rxip_del_store(const char *buf, size_t count,
+			struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16];
+	int rc;
+
+	if (qeth_check_layer2(card))
+		return -EPERM;
+	if ((rc = qeth_parse_rxipe(buf, proto, addr)))
+		return rc;
+
+	qeth_del_rxip(card, proto, addr);
+
+	return count;
+}
+
+/* sysfs store for rxip/del4: delete an IPv4 RXIP address. */
+static ssize_t
+qeth_dev_rxip_del4_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
+ qeth_dev_rxip_del4_store);
+
+#ifdef CONFIG_QETH_IPV6
+/* sysfs show for rxip/add6: list the card's IPv6 RXIP addresses. */
+static ssize_t
+qeth_dev_rxip_add6_show(struct device *dev, char *buf)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
+}
+
+/* sysfs store for rxip/add6: add an IPv6 RXIP address. */
+static ssize_t
+qeth_dev_rxip_add6_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
+ qeth_dev_rxip_add6_show,
+ qeth_dev_rxip_add6_store);
+
+/* sysfs store for rxip/del6: delete an IPv6 RXIP address. */
+static ssize_t
+qeth_dev_rxip_del6_store(struct device *dev, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev->driver_data;
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
+ qeth_dev_rxip_del6_store);
+#endif /* CONFIG_QETH_IPV6 */
+
+/* Attributes published under the "rxip" sysfs group; IPv6 entries are
+ * conditional on CONFIG_QETH_IPV6. */
+static struct device_attribute * qeth_rxip_device_attrs[] = {
+	&dev_attr_rxip_add4,
+	&dev_attr_rxip_del4,
+#ifdef CONFIG_QETH_IPV6
+	&dev_attr_rxip_add6,
+	&dev_attr_rxip_del6,
+#endif
+	NULL,
+};
+
+static struct attribute_group qeth_device_rxip_group = {
+	.name = "rxip",
+	.attrs = (struct attribute **)qeth_rxip_device_attrs,
+};
+
+/*
+ * Create all qeth sysfs attribute groups on @dev. On any failure every
+ * group created so far is removed and the error is returned, leaving the
+ * device with no partially-registered groups.
+ *
+ * Fixes two error-path bugs in the original: the rxip-failure branch
+ * removed the earlier groups but fell through (missing "return ret;")
+ * and went on to create the blkt group on top of the torn-down state,
+ * and a blkt-group failure leaked all previously created groups.
+ */
+int
+qeth_create_device_attributes(struct device *dev)
+{
+	int ret;
+
+	if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group)))
+		return ret;
+	if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){
+		sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+		return ret;
+	}
+	if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group))){
+		sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
+		return ret;
+	}
+	if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group))){
+		sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
+		return ret;
+	}
+	if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group))){
+		sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
+	}
+	return ret;
+}
+
+/* Remove every sysfs attribute group created by
+ * qeth_create_device_attributes(). */
+void
+qeth_remove_device_attributes(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
+}
+
+/**********************/
+/* DRIVER ATTRIBUTES */
+/**********************/
+/*
+ * Driver "group" attribute: parse "busid1,busid2,busid3\n" from @buf
+ * and create a ccwgroup device from the three bus ids.
+ * Returns @count on success, -EINVAL on malformed input, or the error
+ * from ccwgroup_create().
+ *
+ * Fix: the copy length must be clamped to BUS_ID_SIZE - 1, not
+ * BUS_ID_SIZE — otherwise "bus_ids[i][len] = '\0'" with
+ * len == BUS_ID_SIZE writes one byte past the end of the array.
+ */
+static ssize_t
+qeth_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	const char *start, *end;
+	char bus_ids[3][BUS_ID_SIZE], *argv[3];
+	int i;
+	int err;
+
+	start = buf;
+	for (i = 0; i < 3; i++) {
+		/* fields are comma-separated; the last ends at newline */
+		static const char delim[] = { ',', ',', '\n' };
+		int len;
+
+		if (!(end = strchr(start, delim[i])))
+			return -EINVAL;
+		len = min_t(ptrdiff_t, BUS_ID_SIZE - 1, end - start);
+		strncpy(bus_ids[i], start, len);
+		bus_ids[i][len] = '\0';
+		start = end + 1;
+		argv[i] = bus_ids[i];
+	}
+	err = ccwgroup_create(qeth_root_dev, qeth_ccwgroup_driver.driver_id,
+			&qeth_ccw_driver, 3, argv);
+	if (err)
+		return err;
+	else
+		return count;
+}
+
+
+static DRIVER_ATTR(group, 0200, 0, qeth_driver_group_store);
+
+/*
+ * Driver "notifier_register" attribute. Writing "unregister" removes the
+ * current task from the notifier list; writing a decimal signal number
+ * registers the current task to be notified with that signal.
+ * NOTE(review): the accepted range is 0..32 inclusive — signal numbers
+ * are normally 1..32, so 0 and 32 boundaries look suspicious; confirm
+ * against qeth_notifier_register() before tightening.
+ */
+static ssize_t
+qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf,
+				size_t count)
+{
+	int rc;
+	int signum;
+	char *tmp, *tmp2;
+
+	/* strip the trailing newline sysfs hands us */
+	tmp = strsep((char **) &buf, "\n");
+	if (!strncmp(tmp, "unregister", 10)){
+		if ((rc = qeth_notifier_unregister(current)))
+			return rc;
+		return count;
+	}
+
+	signum = simple_strtoul(tmp, &tmp2, 10);
+	if ((signum < 0) || (signum > 32)){
+		PRINT_WARN("Signal number %d is out of range\n", signum);
+		return -EINVAL;
+	}
+	if ((rc = qeth_notifier_register(current, signum)))
+		return rc;
+
+	return count;
+}
+
+static DRIVER_ATTR(notifier_register, 0200, 0,
+ qeth_driver_notifier_register_store);
+
+/*
+ * Create the driver-level "group" and "notifier_register" attributes.
+ * Fix: if creating the second attribute fails, the first is now removed
+ * so no half-registered state is left behind.
+ */
+int
+qeth_create_driver_attributes(void)
+{
+	int rc;
+
+	if ((rc = driver_create_file(&qeth_ccwgroup_driver.driver,
+				     &driver_attr_group)))
+		return rc;
+	rc = driver_create_file(&qeth_ccwgroup_driver.driver,
+				&driver_attr_notifier_register);
+	if (rc)
+		driver_remove_file(&qeth_ccwgroup_driver.driver,
+				   &driver_attr_group);
+	return rc;
+}
+
+/* Remove the driver-level attributes created by
+ * qeth_create_driver_attributes(). */
+void
+qeth_remove_driver_attributes(void)
+{
+	driver_remove_file(&qeth_ccwgroup_driver.driver,
+			&driver_attr_group);
+	driver_remove_file(&qeth_ccwgroup_driver.driver,
+			&driver_attr_notifier_register);
+}
diff --git a/drivers/s390/net/qeth_tso.c b/drivers/s390/net/qeth_tso.c
new file mode 100644
index 000000000000..c91976274e7b
--- /dev/null
+++ b/drivers/s390/net/qeth_tso.c
@@ -0,0 +1,285 @@
+/*
+ * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
+ *
+ * Header file for qeth TCP Segmentation Offload support.
+ *
+ * Copyright 2004 IBM Corporation
+ *
+ * Author(s): Frank Pavlic <pavlic@de.ibm.com>
+ *
+ * $Revision: 1.6 $ $Date: 2005/03/24 09:04:18 $
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "qeth.h"
+#include "qeth_mpc.h"
+#include "qeth_tso.h"
+
+/**
+ * skb already partially prepared
+ * classic qdio header in skb->data
+ * */
+/**
+ * skb already partially prepared
+ * classic qdio header in skb->data
+ *
+ * Grows the skb headroom if needed and pushes room for the TSO
+ * extension header. Returns the pushed header, or NULL if the
+ * headroom reallocation failed.
+ * */
+static inline struct qeth_hdr_tso *
+qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(trace, 5, "tsoprsk");
+	rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso));
+	if (rc)
+		return NULL;
+
+	return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
+}
+
+/**
+ * fill header for a TSO packet
+ */
+/**
+ * fill header for a TSO packet
+ *
+ * Expects the qeth_hdr_tso at skb->data (see qeth_tso_prepare_packet).
+ * Sets the fixed extension-header fields and derives mss, datagram
+ * header length and payload length from the skb.
+ */
+static inline void
+qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
+{
+	struct qeth_hdr_tso *hdr;
+	struct tcphdr *tcph;
+	struct iphdr *iph;
+
+	QETH_DBF_TEXT(trace, 5, "tsofhdr");
+
+	hdr = (struct qeth_hdr_tso *) skb->data;
+	iph = skb->nh.iph;
+	tcph = skb->h.th;
+	/*fix header to TSO values ...*/
+	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	/*set values which are fix for the first approach ...*/
+	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+	hdr->ext.imb_hdr_no  = 1;
+	hdr->ext.hdr_type    = 1;
+	hdr->ext.hdr_version = 1;
+	hdr->ext.hdr_len     = 28;
+	/*insert non-fix values */
+	hdr->ext.mss = skb_shinfo(skb)->tso_size;
+	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+				       sizeof(struct qeth_hdr_tso));
+}
+
+/**
+ * change some header values as requested by hardware
+ */
+/**
+ * change some header values as requested by hardware
+ *
+ * Zeroes the length/checksum fields and seeds the TCP pseudo-header
+ * checksum so the adapter can finish checksumming per segment.
+ * NOTE(review): skb->protocol is compared against ETH_P_IPV6 without
+ * htons() — skb->protocol is network byte order elsewhere in the stack;
+ * confirm which convention this driver uses before changing.
+ */
+static inline void
+qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct ipv6hdr *ip6h;
+	struct tcphdr *tcph;
+
+	iph = skb->nh.iph;
+	ip6h = skb->nh.ipv6h;
+	tcph = skb->h.th;
+
+	tcph->check = 0;
+	if (skb->protocol == ETH_P_IPV6) {
+		ip6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					       0, IPPROTO_TCP, 0);
+		return;
+	}
+	/*OSA want us to set these values ...*/
+	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					 0, IPPROTO_TCP, 0);
+	iph->tot_len = 0;
+	iph->check = 0;
+}
+
+/*
+ * Prepare an skb for TSO transmission: push and zero the TSO qdio
+ * header, fill the l3 part via qeth_fill_header(), then the TSO
+ * extension and the TCP/IP fixups. Returns the header or NULL on
+ * headroom-allocation failure.
+ */
+static inline struct qeth_hdr_tso *
+qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
+			int ipv, int cast_type)
+{
+	struct qeth_hdr_tso *hdr;
+	int rc = 0;
+
+	QETH_DBF_TEXT(trace, 5, "tsoprep");
+
+	/*get headroom for tso qdio header */
+	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
+	if (hdr == NULL) {
+		QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
+		return NULL;
+	}
+	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
+	/*fill first 32 bytes of qdio header as used
+	 *FIXME: TSO has two struct members
+	 * with different names but same size
+	 * */
+	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
+	qeth_tso_fill_header(card, skb);
+	qeth_tso_set_tcpip_header(card, skb);
+	return hdr;
+}
+
+/*
+ * Switch @queue to non-packing mode and, if the current buffer is empty
+ * but already has elements filled in, prime it and advance to a fresh
+ * one. Returns the number of buffers that now need flushing (0 or 1).
+ */
+static inline int
+qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
+{
+	struct qeth_qdio_out_buffer *buffer;
+	int flush_cnt = 0;
+
+	QETH_DBF_TEXT(trace, 5, "tsobuf");
+
+	/* force to non-packing*/
+	if (queue->do_pack)
+		queue->do_pack = 0;
+	buffer = &queue->bufs[queue->next_buf_to_fill];
+	/* get a new buffer if current is already in use*/
+	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
+	    (buffer->next_element_to_fill > 0)) {
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+					  QDIO_MAX_BUFFERS_PER_Q;
+		flush_cnt++;
+	}
+	return flush_cnt;
+}
+
+/*
+ * Map every page fragment of @skb into consecutive SBAL elements of
+ * @buf, starting at buf->next_element_to_fill. All but the last
+ * fragment are flagged MIDDLE_FRAG; the last gets LAST_FRAG.
+ */
+static inline void
+__qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
+			  struct sk_buff *skb)
+{
+	struct skb_frag_struct *frag;
+	struct qdio_buffer *buffer;
+	int fragno, cnt, element;
+	unsigned long addr;
+
+	QETH_DBF_TEXT(trace, 6, "tsfilfrg");
+
+	/*initialize variables ...*/
+	fragno = skb_shinfo(skb)->nr_frags;
+	buffer = buf->buffer;
+	element = buf->next_element_to_fill;
+	/*fill buffer elements .....*/
+	for (cnt = 0; cnt < fragno; cnt++) {
+		frag = &skb_shinfo(skb)->frags[cnt];
+		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
+			frag->page_offset;
+		buffer->element[element].addr = (char *)addr;
+		buffer->element[element].length = frag->size;
+		if (cnt < (fragno - 1))
+			buffer->element[element].flags =
+				SBAL_FLAGS_MIDDLE_FRAG;
+		else
+			buffer->element[element].flags =
+				SBAL_FLAGS_LAST_FRAG;
+		element++;
+	}
+	buf->next_element_to_fill = element;
+}
+
+/*
+ * Fill one qdio output buffer with @skb for TSO: the first SBAL element
+ * carries only the qdio+TSO+datagram headers (FIRST_FRAG); the payload
+ * follows either as page fragments (via __qeth_tso_fill_buffer_frag) or
+ * as linear data split along page boundaries. Takes a reference on the
+ * skb and queues it on the buffer's skb_list; always primes the buffer
+ * and returns 1 (one buffer to flush).
+ */
+static inline int
+qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
+		   struct sk_buff *skb)
+{
+	int length, length_here, element;
+	int hdr_len;
+	struct qdio_buffer *buffer;
+	struct qeth_hdr_tso *hdr;
+	char *data;
+
+	QETH_DBF_TEXT(trace, 3, "tsfilbuf");
+
+	/*increment user count and queue skb ...*/
+	atomic_inc(&skb->users);
+	skb_queue_tail(&buf->skb_list, skb);
+
+	/*initialize all variables...*/
+	buffer = buf->buffer;
+	hdr = (struct qeth_hdr_tso *)skb->data;
+	hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
+	data = skb->data + hdr_len;
+	length = skb->len - hdr_len;
+	element = buf->next_element_to_fill;
+	/*fill first buffer entry only with header information */
+	buffer->element[element].addr = skb->data;
+	buffer->element[element].length = hdr_len;
+	buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
+	buf->next_element_to_fill++;
+
+	if (skb_shinfo(skb)->nr_frags > 0) {
+		 __qeth_tso_fill_buffer_frag(buf, skb);
+		 goto out;
+	}
+
+	/*start filling buffer entries ...*/
+	element++;
+	while (length > 0) {
+		/* length_here is the remaining amount of data in this page */
+		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
+		if (length < length_here)
+			length_here = length;
+		buffer->element[element].addr = data;
+		buffer->element[element].length = length_here;
+		length -= length_here;
+		if (!length)
+			buffer->element[element].flags =
+				SBAL_FLAGS_LAST_FRAG;
+		else
+			buffer->element[element].flags =
+				SBAL_FLAGS_MIDDLE_FRAG;
+		data += length_here;
+		element++;
+	}
+	/*set the buffer to primed ...*/
+	buf->next_element_to_fill = element;
+out:
+	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+	return 1;
+}
+
+/*
+ * Transmit @skb via TSO on @queue: prepare the TSO header, lock the
+ * queue (spin on the state word), force non-packing, fill one SBAL and
+ * flush. Returns 0 on success (or silent drop), -ENOMEM/-EINVAL on
+ * preparation failure.
+ * NOTE(review): the dropped paths jump to "out" and still fall through
+ * to the tx_packets/tx_bytes accounting and return 0 — dropped frames
+ * are counted as transmitted; confirm whether that is intended.
+ */
+int
+qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
+		    struct qeth_qdio_out_q *queue, int ipv, int cast_type)
+{
+	int flush_cnt = 0;
+	struct qeth_hdr_tso *hdr;
+	struct qeth_qdio_out_buffer *buffer;
+	int start_index;
+
+	QETH_DBF_TEXT(trace, 3, "tsosend");
+
+	if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
+	        return -ENOMEM;
+	/*check if skb fits in one SBAL ...*/
+	if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
+		return -EINVAL;
+	/*lock queue, force switching to non-packing and send it ...*/
+	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
+				       QETH_OUT_Q_LOCKED,
+				       &queue->state));
+	start_index = queue->next_buf_to_fill;
+	buffer = &queue->bufs[queue->next_buf_to_fill];
+	/*check if card is too busy ...*/
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
+		card->stats.tx_dropped++;
+		goto out;
+	}
+	/*let's force to non-packing and get a new SBAL*/
+	flush_cnt += qeth_tso_get_queue_buffer(queue);
+	buffer = &queue->bufs[queue->next_buf_to_fill];
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
+		card->stats.tx_dropped++;
+		goto out;
+	}
+	flush_cnt += qeth_tso_fill_buffer(buffer, skb);
+	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+				   QDIO_MAX_BUFFERS_PER_Q;
+out:
+	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+	if (flush_cnt)
+		qeth_flush_buffers(queue, 0, start_index, flush_cnt);
+	/*do some statistics */
+	card->stats.tx_packets++;
+	card->stats.tx_bytes += skb->len;
+	return 0;
+}
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
new file mode 100644
index 000000000000..83504dee3f57
--- /dev/null
+++ b/drivers/s390/net/qeth_tso.h
@@ -0,0 +1,58 @@
+/*
+ * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.4 $)
+ *
+ * Header file for qeth TCP Segmentation Offload support.
+ *
+ * Copyright 2004 IBM Corporation
+ *
+ * Author(s): Frank Pavlic <pavlic@de.ibm.com>
+ *
+ * $Revision: 1.4 $ $Date: 2005/03/24 09:04:18 $
+ *
+ */
+#ifndef __QETH_TSO_H__
+#define __QETH_TSO_H__
+
+
+extern int
+qeth_tso_send_packet(struct qeth_card *, struct sk_buff *,
+ struct qeth_qdio_out_q *, int , int);
+
+/* On-the-wire TSO extension that follows the classic qeth l3 header;
+ * layout is fixed by the adapter interface, hence packed. */
+struct qeth_hdr_ext_tso {
+        __u16 hdr_tot_len;
+        __u8  imb_hdr_no;
+        __u8  reserved;
+        __u8  hdr_type;
+        __u8  hdr_version;
+        __u16 hdr_len;
+        __u32 payload_len;
+        __u16 mss;
+        __u16 dg_hdr_len;
+        __u8  padding[16];
+} __attribute__ ((packed));
+
+/* Complete TSO qdio header: classic l3 header plus TSO extension. */
+struct qeth_hdr_tso {
+        struct qeth_hdr hdr; 	/*hdr->hdr.l3.xxx*/
+	struct qeth_hdr_ext_tso ext;
+} __attribute__ ((packed));
+
+/*some helper functions*/
+
+/*
+ * Return the number of SBAL elements needed to map hdr+skb: one per
+ * page fragment plus one for the header, or — for linear skbs — one
+ * plus the number of page boundaries the data crosses. Returns 0 (and
+ * logs an error) if the packet would exceed the card's element limit.
+ */
+static inline int
+qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
+{
+	int elements_needed = 0;
+
+        if (skb_shinfo(skb)->nr_frags > 0)
+                elements_needed = (skb_shinfo(skb)->nr_frags + 1);
+        if (elements_needed == 0 )
+                elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
+                        + skb->len) >> PAGE_SHIFT);
+        if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
+                PRINT_ERR("qeth_do_send_packet: invalid size of "
+                          "IP packet. Discarded.");
+                return 0;
+        }
+        return elements_needed;
+}
+#endif /* __QETH_TSO_H__ */
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
new file mode 100644
index 000000000000..a3d285859564
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.c
@@ -0,0 +1,180 @@
+/*
+ * IUCV special message driver
+ *
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+
+#include "iucv.h"
+
+struct smsg_callback {
+ struct list_head list;
+ char *prefix;
+ int len;
+ void (*callback)(char *str);
+};
+
+MODULE_AUTHOR
+ ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
+MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
+
+static iucv_handle_t smsg_handle;
+static unsigned short smsg_pathid;
+static DEFINE_SPINLOCK(smsg_list_lock);
+static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
+
+/* IUCV connection-complete callback — intentionally a no-op; the path
+ * is established in smsg_init() and nothing else needs to happen here. */
+static void
+smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data)
+{
+}
+
+
+/*
+ * IUCV message-pending callback: receive the SMSG text, convert it from
+ * EBCDIC, and dispatch it to the first registered callback whose prefix
+ * matches. The message is rejected if no buffer can be allocated.
+ * Runs in interrupt context, hence GFP_ATOMIC and plain spin_lock.
+ */
+static void
+smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
+{
+	struct smsg_callback *cb;
+	unsigned char *msg;
+	unsigned short len;
+	int rc;
+
+	len = eib->ln1msg2.ipbfln1f;
+	msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA);
+	if (!msg) {
+		iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
+		return;
+	}
+	rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
+			  msg, len, 0, 0, 0);
+	if (rc == 0) {
+		msg[len] = 0;
+		EBCASC(msg, len);
+		spin_lock(&smsg_list_lock);
+		/* offset 8 skips the 8-character sender id prepended by VM */
+		list_for_each_entry(cb, &smsg_list, list)
+			if (strncmp(msg + 8, cb->prefix, cb->len) == 0) {
+				cb->callback(msg + 8);
+				break;
+			}
+		spin_unlock(&smsg_list_lock);
+	}
+	kfree(msg);
+}
+
+static iucv_interrupt_ops_t smsg_ops = {
+ .ConnectionComplete = smsg_connection_complete,
+ .MessagePending = smsg_message_pending,
+};
+
+static struct device_driver smsg_driver = {
+ .name = "SMSGIUCV",
+ .bus = &iucv_bus,
+};
+
+/*
+ * Register @callback to be invoked for SMSG messages starting with
+ * @prefix. The prefix string is referenced, not copied — the caller
+ * must keep it alive until smsg_unregister_callback().
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+int
+smsg_register_callback(char *prefix, void (*callback)(char *str))
+{
+	struct smsg_callback *cb;
+
+	cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
+	if (!cb)
+		return -ENOMEM;
+	cb->prefix = prefix;
+	cb->len = strlen(prefix);
+	cb->callback = callback;
+	spin_lock(&smsg_list_lock);
+	list_add_tail(&cb->list, &smsg_list);
+	spin_unlock(&smsg_list_lock);
+	return 0;
+}
+
+/*
+ * Remove and free the callback entry matching @prefix and @callback.
+ * A no-op if no matching entry is registered (kfree(NULL) is safe).
+ * Fix: use NULL for the pointer initializer instead of the integer 0.
+ */
+void
+smsg_unregister_callback(char *prefix, void (*callback)(char *str))
+{
+	struct smsg_callback *cb, *tmp;
+
+	spin_lock(&smsg_list_lock);
+	cb = NULL;
+	list_for_each_entry(tmp, &smsg_list, list)
+		if (tmp->callback == callback &&
+		    strcmp(tmp->prefix, prefix) == 0) {
+			cb = tmp;
+			list_del(&cb->list);
+			break;
+		}
+	spin_unlock(&smsg_list_lock);
+	kfree(cb);
+}
+
+/* Module exit: tear down in reverse of smsg_init() — disable SMSG,
+ * sever the IUCV path, unregister the program and the driver.
+ * NOTE(review): smsg_handle is an iucv_handle_t compared with "> 0";
+ * if that type is a pointer this relies on it being non-NULL — confirm
+ * against iucv.h. */
+static void __exit
+smsg_exit(void)
+{
+	if (smsg_handle > 0) {
+		cpcmd("SET SMSG OFF", 0, 0);
+		iucv_sever(smsg_pathid, 0);
+		iucv_unregister_program(smsg_handle);
+		driver_unregister(&smsg_driver);
+	}
+	return;
+}
+
+/*
+ * Module init: register the IUCV driver and program, connect to the
+ * z/VM *MSG service and enable SMSG delivery via IUCV. Each failure
+ * path unwinds everything set up before it.
+ */
+static int __init
+smsg_init(void)
+{
+	/* all-ones mask: accept every program-mask bit */
+	static unsigned char pgmmask[24] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+	int rc;
+
+	rc = driver_register(&smsg_driver);
+	if (rc != 0) {
+		printk(KERN_ERR "SMSGIUCV: failed to register driver.\n");
+		return rc;
+	}
+	smsg_handle = iucv_register_program("SMSGIUCV        ", "*MSG    ",
+					    pgmmask, &smsg_ops, 0);
+	if (!smsg_handle) {
+		printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
+		driver_unregister(&smsg_driver);
+		return -EIO;	/* better errno ? */
+	}
+	rc = iucv_connect (&smsg_pathid, 1, 0, "*MSG    ", 0, 0, 0, 0,
+			   smsg_handle, 0);
+	if (rc) {
+		printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
+		iucv_unregister_program(smsg_handle);
+		driver_unregister(&smsg_driver);
+		smsg_handle = 0;
+		return -EIO;
+	}
+	cpcmd("SET SMSG IUCV", 0, 0);
+	return 0;
+}
+
+module_init(smsg_init);
+module_exit(smsg_exit);
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(smsg_register_callback);
+EXPORT_SYMBOL(smsg_unregister_callback);
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
new file mode 100644
index 000000000000..04cd87152964
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.h
@@ -0,0 +1,10 @@
+/*
+ * IUCV special message driver
+ *
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+int smsg_register_callback(char *, void (*)(char *));
+void smsg_unregister_callback(char *, void (*)(char *));
+
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
new file mode 100644
index 000000000000..ffa996c8a908
--- /dev/null
+++ b/drivers/s390/s390mach.c
@@ -0,0 +1,219 @@
+/*
+ * drivers/s390/s390mach.c
+ * S/390 machine check handler
+ *
+ * S390 version
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/workqueue.h>
+
+#include <asm/lowcore.h>
+
+#include "s390mach.h"
+
+#define DBG printk
+// #define DBG(args,...) do {} while (0);
+
+static struct semaphore m_sem;
+
+extern int css_process_crw(int);
+extern int chsc_process_crw(void);
+extern int chp_process_crw(int, int);
+extern void css_reiterate_subchannels(void);
+
+extern struct workqueue_struct *slow_path_wq;
+extern struct work_struct slow_path_work;
+
+/* Fatal machine-check path: log @msg, stop the other CPUs on SMP and
+ * put this CPU into disabled wait — does not return. */
+static void
+s390_handle_damage(char *msg)
+{
+	printk(KERN_EMERG "%s\n", msg);
+#ifdef CONFIG_SMP
+	smp_send_stop();
+#endif
+	disabled_wait((unsigned long) __builtin_return_address(0));
+}
+
+/*
+ * Retrieve CRWs and call function to handle event.
+ *
+ * Note : we currently process CRWs for io and chsc subchannels only
+ */
+/*
+ * Retrieve CRWs and call function to handle event.
+ *
+ * Note : we currently process CRWs for io and chsc subchannels only
+ *
+ * Kernel-thread body: sleeps on the semaphore that
+ * s390_do_machine_check() ups when a channel report is pending, then
+ * drains all pending CRWs with stcrw() and dispatches by reporting
+ * source. Handlers returning -EAGAIN schedule the slow-path worker.
+ * Loops forever via "goto repeat"; the trailing return is unreachable.
+ * NOTE(review): the down_interruptible() return value is ignored — a
+ * signal would just re-enter the drain loop early.
+ */
+static int
+s390_collect_crw_info(void *param)
+{
+	struct crw crw;
+	int ccode, ret, slow;
+	struct semaphore *sem;
+
+	sem = (struct semaphore *)param;
+	/* Set a nice name. */
+	daemonize("kmcheck");
+repeat:
+	down_interruptible(sem);
+	slow = 0;
+	while (1) {
+		ccode = stcrw(&crw);
+		if (ccode != 0)
+			break;
+		DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
+		    "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		    crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc,
+		    crw.erc, crw.rsid);
+		/* Check for overflows. */
+		if (crw.oflw) {
+			pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
+			css_reiterate_subchannels();
+			slow = 1;
+			continue;
+		}
+		switch (crw.rsc) {
+		case CRW_RSC_SCH:
+			pr_debug("source is subchannel %04X\n", crw.rsid);
+			ret = css_process_crw (crw.rsid);
+			if (ret == -EAGAIN)
+				slow = 1;
+			break;
+		case CRW_RSC_MONITOR:
+			pr_debug("source is monitoring facility\n");
+			break;
+		case CRW_RSC_CPATH:
+			pr_debug("source is channel path %02X\n", crw.rsid);
+			switch (crw.erc) {
+			case CRW_ERC_IPARM: /* Path has come. */
+				ret = chp_process_crw(crw.rsid, 1);
+				break;
+			case CRW_ERC_PERRI: /* Path has gone. */
+			case CRW_ERC_PERRN:
+				ret = chp_process_crw(crw.rsid, 0);
+				break;
+			default:
+				pr_debug("Don't know how to handle erc=%x\n",
+					 crw.erc);
+				ret = 0;
+			}
+			if (ret == -EAGAIN)
+				slow = 1;
+			break;
+		case CRW_RSC_CONFIG:
+			pr_debug("source is configuration-alert facility\n");
+			break;
+		case CRW_RSC_CSS:
+			pr_debug("source is channel subsystem\n");
+			ret = chsc_process_crw();
+			if (ret == -EAGAIN)
+				slow = 1;
+			break;
+		default:
+			pr_debug("unknown source\n");
+			break;
+		}
+	}
+	if (slow)
+		queue_work(slow_path_wq, &slow_path_work);
+	goto repeat;
+	return 0;
+}
+
+/*
+ * machine check handler.
+ */
+/*
+ * machine check handler.
+ *
+ * Inspects the machine-check interruption code in the lowcore and
+ * reacts per condition: unrecoverable damage conditions go to
+ * s390_handle_damage() (does not return); corrected storage errors are
+ * only logged; a pending channel report wakes the kmcheck thread via
+ * the m_sem semaphore.
+ */
+void
+s390_do_machine_check(void)
+{
+	struct mci *mci;
+
+	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
+
+	if (mci->sd)		/* system damage */
+		s390_handle_damage("received system damage machine check\n");
+
+	if (mci->pd)		/* instruction processing damage */
+		s390_handle_damage("received instruction processing "
+				   "damage machine check\n");
+
+	if (mci->se)		/* storage error uncorrected */
+		s390_handle_damage("received storage error uncorrected "
+				   "machine check\n");
+
+	if (mci->sc)		/* storage error corrected */
+		printk(KERN_WARNING
+		       "received storage error corrected machine check\n");
+
+	if (mci->ke)		/* storage key-error uncorrected */
+		s390_handle_damage("received storage key-error uncorrected "
+				   "machine check\n");
+
+	if (mci->ds && mci->fa)	/* storage degradation */
+		s390_handle_damage("received storage degradation machine "
+				   "check\n");
+
+	if (mci->cp)		/* channel report word pending */
+		up(&m_sem);
+
+#ifdef CONFIG_MACHCHK_WARNING
+/*
+ * The warning may remain for a prolonged period on the bare iron.
+ * (actually till the machine is powered off, or until the problem is gone)
+ * So we just stop listening for the WARNING MCH and prevent continuously
+ * being interrupted.  One caveat is however, that we must do this per
+ * processor and cannot use the smp version of ctl_clear_bit().
+ * On VM we only get one interrupt per virtally presented machinecheck.
+ * Though one suffices, we may get one interrupt per (virtual) processor.
+ */
+	if (mci->w) {	/* WARNING pending ? */
+		static int mchchk_wng_posted = 0;
+		/*
+		 * Use single machine clear, as we cannot handle smp right now
+		 */
+		__ctl_clear_bit(14, 24);	/* Disable WARNING MCH */
+		if (xchg(&mchchk_wng_posted, 1) == 0)
+			kill_proc(1, SIGPWR, 1);
+	}
+#endif
+}
+
+/*
+ * s390_init_machine_check
+ *
+ * initialize machine check handling
+ */
+/*
+ * s390_init_machine_check
+ *
+ * initialize machine check handling: prepare the kmcheck wakeup
+ * semaphore (created locked) and set the control-register 14 bits that
+ * select which machine-check subclasses are enabled.
+ */
+static int
+machine_check_init(void)
+{
+	init_MUTEX_LOCKED(&m_sem);
+	ctl_clear_bit(14, 25);	/* disable damage MCH */
+	ctl_set_bit(14, 26);	/* enable degradation MCH */
+	ctl_set_bit(14, 27);	/* enable system recovery MCH */
+#ifdef CONFIG_MACHCHK_WARNING
+	ctl_set_bit(14, 24);	/* enable warning MCH */
+#endif
+	return 0;
+}
+
+/*
+ * Initialize the machine check handler really early to be able to
+ * catch all machine checks that happen during boot
+ */
+arch_initcall(machine_check_init);
+
+/*
+ * Machine checks for the channel subsystem must be enabled
+ * after the channel subsystem is initialized
+ */
+/* Start the kmcheck thread that drains CRWs, then enable channel-report
+ * machine checks — must run after the channel subsystem initialized. */
+static int __init
+machine_check_crw_init (void)
+{
+	kernel_thread(s390_collect_crw_info, &m_sem, CLONE_FS|CLONE_FILES);
+	ctl_set_bit(14, 28);	/* enable channel report MCH */
+	return 0;
+}
+
+device_initcall (machine_check_crw_init);
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
new file mode 100644
index 000000000000..7e26f0f1b0dc
--- /dev/null
+++ b/drivers/s390/s390mach.h
@@ -0,0 +1,79 @@
+/*
+ * drivers/s390/s390mach.h
+ * S/390 data definitions for machine check processing
+ *
+ * S390 version
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ */
+
+#ifndef __s390mach_h
+#define __s390mach_h
+
+#include <asm/types.h>
+
+struct mci {
+ __u32 sd : 1; /* 00 system damage */
+ __u32 pd : 1; /* 01 instruction-processing damage */
+ __u32 sr : 1; /* 02 system recovery */
+ __u32 to_be_defined_1 : 4; /* 03-06 */
+ __u32 dg : 1; /* 07 degradation */
+ __u32 w : 1; /* 08 warning pending */
+ __u32 cp : 1; /* 09 channel-report pending */
+ __u32 to_be_defined_2 : 6; /* 10-15 */
+ __u32 se : 1; /* 16 storage error uncorrected */
+ __u32 sc : 1; /* 17 storage error corrected */
+ __u32 ke : 1; /* 18 storage-key error uncorrected */
+ __u32 ds : 1; /* 19 storage degradation */
+ __u32 to_be_defined_3 : 4; /* 20-23 */
+ __u32 fa : 1; /* 24 failing storage address validity */
+ __u32 to_be_defined_4 : 7; /* 25-31 */
+ __u32 ie : 1; /* 32 indirect storage error */
+ __u32 to_be_defined_5 : 31; /* 33-63 */
+};
+
+/*
+ * Channel Report Word
+ */
+struct crw {
+ __u32 res1 : 1; /* reserved zero */
+ __u32 slct : 1; /* solicited */
+ __u32 oflw : 1; /* overflow */
+ __u32 chn : 1; /* chained */
+ __u32 rsc : 4; /* reporting source code */
+ __u32 anc : 1; /* ancillary report */
+ __u32 res2 : 1; /* reserved zero */
+ __u32 erc : 6; /* error-recovery code */
+ __u32 rsid : 16; /* reporting-source ID */
+} __attribute__ ((packed));
+
+#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
+#define CRW_RSC_SCH 0x3 /* subchannel */
+#define CRW_RSC_CPATH 0x4 /* channel path */
+#define CRW_RSC_CONFIG 0x9 /* configuration-alert facility */
+#define CRW_RSC_CSS 0xB /* channel subsystem */
+
+#define CRW_ERC_EVENT 0x00 /* event information pending */
+#define CRW_ERC_AVAIL 0x01 /* available */
+#define CRW_ERC_INIT 0x02 /* initialized */
+#define CRW_ERC_TERROR 0x03 /* temporary error */
+#define CRW_ERC_IPARM 0x04 /* installed parm initialized */
+#define CRW_ERC_TERM 0x05 /* terminal */
+#define CRW_ERC_PERRN 0x06 /* perm. error, fac. not init */
+#define CRW_ERC_PERRI 0x07 /* perm. error, facility init */
+#define CRW_ERC_PMOD 0x08 /* installed parameters modified */
+
+extern __inline__ int stcrw(struct crw *pcrw )
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ "STCRW 0(%1)\n\t"
+ "IPM %0\n\t"
+ "SRL %0,28\n\t"
+ : "=d" (ccode) : "a" (pcrw)
+ : "cc", "1" );
+ return ccode;
+}
+
+#endif /* __s390mach */
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
new file mode 100644
index 000000000000..fc145307a7d4
--- /dev/null
+++ b/drivers/s390/scsi/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the S/390 specific device drivers
+#
+
+zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
+ zfcp_fsf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
+ zfcp_sysfs_unit.o zfcp_sysfs_driver.o
+
+obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
new file mode 100644
index 000000000000..6a43322ccb0a
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -0,0 +1,1977 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_aux.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * (C) Copyright IBM Corp. 2002, 2004
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Raimund Schroeder <raimund.schroeder@de.ibm.com>
+ * Aron Zeh
+ * Wolfgang Taphorn
+ * Stefan Bader <stefan.bader@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_AUX_REVISION "$Revision: 1.145 $"
+
+#include "zfcp_ext.h"
+
+/* accumulated log level (module parameter) */
+static u32 loglevel = ZFCP_LOG_LEVEL_DEFAULTS;
+static char *device;
+/*********************** FUNCTION PROTOTYPES *********************************/
+
+/* written against the module interface */
+static int __init zfcp_module_init(void);
+
+/* FCP related */
+static void zfcp_ns_gid_pn_handler(unsigned long);
+
+/* miscellaneous */
+static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
+static inline void zfcp_sg_list_free(struct zfcp_sg_list *);
+static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
+ void __user *, size_t);
+static inline int zfcp_sg_list_copy_to_user(void __user *,
+ struct zfcp_sg_list *, size_t);
+
+static int zfcp_cfdc_dev_ioctl(struct inode *, struct file *,
+ unsigned int, unsigned long);
+
+#define ZFCP_CFDC_IOC_MAGIC 0xDD
+#define ZFCP_CFDC_IOC \
+ _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_sense_data)
+
+#ifdef CONFIG_COMPAT
+static struct ioctl_trans zfcp_ioctl_trans = {ZFCP_CFDC_IOC, (void*) sys_ioctl};
+#endif
+
+static struct file_operations zfcp_cfdc_fops = {
+ .ioctl = zfcp_cfdc_dev_ioctl
+};
+
+static struct miscdevice zfcp_cfdc_misc = {
+ .minor = ZFCP_CFDC_DEV_MINOR,
+ .name = ZFCP_CFDC_DEV_NAME,
+ .fops = &zfcp_cfdc_fops
+};
+
+/*********************** KERNEL/MODULE PARAMETERS ***************************/
+
+/* declare driver module init/cleanup functions */
+module_init(zfcp_module_init);
+
+MODULE_AUTHOR("Heiko Carstens <heiko.carstens@de.ibm.com>, "
+ "Andreas Herrman <aherrman@de.ibm.com>, "
+ "Martin Peschke <mpeschke@de.ibm.com>, "
+ "Raimund Schroeder <raimund.schroeder@de.ibm.com>, "
+ "Wolfgang Taphorn <taphorn@de.ibm.com>, "
+ "Aron Zeh <arzeh@de.ibm.com>, "
+ "IBM Deutschland Entwicklung GmbH");
+MODULE_DESCRIPTION
+ ("FCP (SCSI over Fibre Channel) HBA driver for IBM eServer zSeries");
+MODULE_LICENSE("GPL");
+
+module_param(device, charp, 0);
+MODULE_PARM_DESC(device, "specify initial device");
+
+module_param(loglevel, uint, 0);
+MODULE_PARM_DESC(loglevel,
+ "log levels, 8 nibbles: "
+ "FC ERP QDIO CIO Config FSF SCSI Other, "
+ "levels: 0=none 1=normal 2=devel 3=trace");
+
+#ifdef ZFCP_PRINT_FLAGS
+u32 flags_dump = 0;
+module_param(flags_dump, uint, 0);
+#endif
+
+/****************************************************************/
+/************** Functions without logging ***********************/
+/****************************************************************/
+
+void
+_zfcp_hex_dump(char *addr, int count)
+{
+ int i;
+ for (i = 0; i < count; i++) {
+ printk("%02x", addr[i]);
+ if ((i % 4) == 3)
+ printk(" ");
+ if ((i % 32) == 31)
+ printk("\n");
+ }
+ if (((i-1) % 32) != 31)
+ printk("\n");
+}
+
+/****************************************************************/
+/************** Uncategorised Functions *************************/
+/****************************************************************/
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
+
+static inline int
+zfcp_fsf_req_is_scsi_cmnd(struct zfcp_fsf_req *fsf_req)
+{
+ return ((fsf_req->fsf_command == FSF_QTCB_FCP_CMND) &&
+ !(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT));
+}
+
+void
+zfcp_cmd_dbf_event_fsf(const char *text, struct zfcp_fsf_req *fsf_req,
+ void *add_data, int add_length)
+{
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct scsi_cmnd *scsi_cmnd;
+ int level = 3;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dbf_lock, flags);
+ if (zfcp_fsf_req_is_scsi_cmnd(fsf_req)) {
+ scsi_cmnd = fsf_req->data.send_fcp_command_task.scsi_cmnd;
+ debug_text_event(adapter->cmd_dbf, level, "fsferror");
+ debug_text_event(adapter->cmd_dbf, level, text);
+ debug_event(adapter->cmd_dbf, level, &fsf_req,
+ sizeof (unsigned long));
+ debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
+ sizeof (u32));
+ debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
+ sizeof (unsigned long));
+ debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
+ min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
+ for (i = 0; i < add_length; i += ZFCP_CMD_DBF_LENGTH)
+ debug_event(adapter->cmd_dbf,
+ level,
+ (char *) add_data + i,
+ min(ZFCP_CMD_DBF_LENGTH, add_length - i));
+ }
+ spin_unlock_irqrestore(&adapter->dbf_lock, flags);
+}
+
+/* XXX additionally log unit if available */
+/* ---> introduce new parameter for unit, see 2.4 code */
+void
+zfcp_cmd_dbf_event_scsi(const char *text, struct scsi_cmnd *scsi_cmnd)
+{
+ struct zfcp_adapter *adapter;
+ union zfcp_req_data *req_data;
+ struct zfcp_fsf_req *fsf_req;
+ int level = ((host_byte(scsi_cmnd->result) != 0) ? 1 : 5);
+ unsigned long flags;
+
+ adapter = (struct zfcp_adapter *) scsi_cmnd->device->host->hostdata[0];
+ req_data = (union zfcp_req_data *) scsi_cmnd->host_scribble;
+ fsf_req = (req_data ? req_data->send_fcp_command_task.fsf_req : NULL);
+ spin_lock_irqsave(&adapter->dbf_lock, flags);
+ debug_text_event(adapter->cmd_dbf, level, "hostbyte");
+ debug_text_event(adapter->cmd_dbf, level, text);
+ debug_event(adapter->cmd_dbf, level, &scsi_cmnd->result, sizeof (u32));
+ debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
+ sizeof (unsigned long));
+ debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
+ min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
+ if (likely(fsf_req)) {
+ debug_event(adapter->cmd_dbf, level, &fsf_req,
+ sizeof (unsigned long));
+ debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
+ sizeof (u32));
+ } else {
+ debug_text_event(adapter->cmd_dbf, level, "");
+ debug_text_event(adapter->cmd_dbf, level, "");
+ }
+ spin_unlock_irqrestore(&adapter->dbf_lock, flags);
+}
+
+void
+zfcp_in_els_dbf_event(struct zfcp_adapter *adapter, const char *text,
+ struct fsf_status_read_buffer *status_buffer, int length)
+{
+ int level = 1;
+ int i;
+
+ debug_text_event(adapter->in_els_dbf, level, text);
+ debug_event(adapter->in_els_dbf, level, &status_buffer->d_id, 8);
+ for (i = 0; i < length; i += ZFCP_IN_ELS_DBF_LENGTH)
+ debug_event(adapter->in_els_dbf,
+ level,
+ (char *) status_buffer->payload + i,
+ min(ZFCP_IN_ELS_DBF_LENGTH, length - i));
+}
+
+/**
+ * zfcp_device_setup - setup function
+ * @str: pointer to parameter string
+ *
+ * Parse "device=..." parameter string.
+ */
+static int __init
+zfcp_device_setup(char *str)
+{
+ char *tmp;
+
+ if (!str)
+ return 0;
+
+ tmp = strchr(str, ',');
+ if (!tmp)
+ goto err_out;
+ *tmp++ = '\0';
+ strncpy(zfcp_data.init_busid, str, BUS_ID_SIZE);
+ zfcp_data.init_busid[BUS_ID_SIZE-1] = '\0';
+
+ zfcp_data.init_wwpn = simple_strtoull(tmp, &tmp, 0);
+ if (*tmp++ != ',')
+ goto err_out;
+ if (*tmp == '\0')
+ goto err_out;
+
+ zfcp_data.init_fcp_lun = simple_strtoull(tmp, &tmp, 0);
+ if (*tmp != '\0')
+ goto err_out;
+ return 1;
+
+ err_out:
+ ZFCP_LOG_NORMAL("Parse error for device parameter string %s\n", str);
+ return 0;
+}
+
+static void __init
+zfcp_init_device_configure(void)
+{
+ struct zfcp_adapter *adapter;
+ struct zfcp_port *port;
+ struct zfcp_unit *unit;
+
+ down(&zfcp_data.config_sema);
+ read_lock_irq(&zfcp_data.config_lock);
+ adapter = zfcp_get_adapter_by_busid(zfcp_data.init_busid);
+ if (adapter)
+ zfcp_adapter_get(adapter);
+ read_unlock_irq(&zfcp_data.config_lock);
+
+ if (adapter == NULL)
+ goto out_adapter;
+ port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0);
+ if (!port)
+ goto out_port;
+ unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun);
+ if (!unit)
+ goto out_unit;
+ up(&zfcp_data.config_sema);
+ ccw_device_set_online(adapter->ccw_device);
+ zfcp_erp_wait(adapter);
+ down(&zfcp_data.config_sema);
+ zfcp_unit_put(unit);
+ out_unit:
+ zfcp_port_put(port);
+ out_port:
+ zfcp_adapter_put(adapter);
+ out_adapter:
+ up(&zfcp_data.config_sema);
+ return;
+}
+
+static int __init
+zfcp_module_init(void)
+{
+
+ int retval = 0;
+
+ atomic_set(&zfcp_data.loglevel, loglevel);
+
+ /* initialize adapter list */
+ INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
+
+ /* initialize adapters to be removed list head */
+ INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
+
+ zfcp_transport_template = fc_attach_transport(&zfcp_transport_functions);
+ if (!zfcp_transport_template)
+ return -ENODEV;
+
+ retval = register_ioctl32_conversion(zfcp_ioctl_trans.cmd,
+ zfcp_ioctl_trans.handler);
+ if (retval != 0) {
+ ZFCP_LOG_INFO("registration of ioctl32 conversion failed\n");
+ goto out;
+ }
+
+ retval = misc_register(&zfcp_cfdc_misc);
+ if (retval != 0) {
+ ZFCP_LOG_INFO("registration of misc device "
+ "zfcp_cfdc failed\n");
+ goto out_misc_register;
+ } else {
+ ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
+ ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
+ }
+
+ /* Initialise proc semaphores */
+ sema_init(&zfcp_data.config_sema, 1);
+
+ /* initialise configuration rw lock */
+ rwlock_init(&zfcp_data.config_lock);
+
+ /* save address of data structure managing the driver module */
+ zfcp_data.scsi_host_template.module = THIS_MODULE;
+
+ /* setup dynamic I/O */
+ retval = zfcp_ccw_register();
+ if (retval) {
+ ZFCP_LOG_NORMAL("registration with common I/O layer failed\n");
+ goto out_ccw_register;
+ }
+
+ if (zfcp_device_setup(device))
+ zfcp_init_device_configure();
+
+ goto out;
+
+ out_ccw_register:
+ misc_deregister(&zfcp_cfdc_misc);
+ out_misc_register:
+ unregister_ioctl32_conversion(zfcp_ioctl_trans.cmd);
+ out:
+ return retval;
+}
+
+/*
+ * function: zfcp_cfdc_dev_ioctl
+ *
+ * purpose: Handle control file upload/download transaction via IOCTL
+ * interface
+ *
+ * returns: 0 - Operation completed successfuly
+ * -ENOTTY - Unknown IOCTL command
+ * -EINVAL - Invalid sense data record
+ * -ENXIO - The FCP adapter is not available
+ * -EOPNOTSUPP - The FCP adapter does not have CFDC support
+ * -ENOMEM - Insufficient memory
+ * -EFAULT - User space memory I/O operation fault
+ * -EPERM - Cannot create or queue FSF request or create SBALs
+ * -ERESTARTSYS- Received signal (is mapped to EAGAIN by VFS)
+ */
+static int
+zfcp_cfdc_dev_ioctl(struct inode *inode, struct file *file,
+ unsigned int command, unsigned long buffer)
+{
+ struct zfcp_cfdc_sense_data *sense_data, __user *sense_data_user;
+ struct zfcp_adapter *adapter = NULL;
+ struct zfcp_fsf_req *fsf_req = NULL;
+ struct zfcp_sg_list *sg_list = NULL;
+ u32 fsf_command, option;
+ char *bus_id = NULL;
+ int retval = 0;
+
+ sense_data = kmalloc(sizeof(struct zfcp_cfdc_sense_data), GFP_KERNEL);
+ if (sense_data == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ sg_list = kmalloc(sizeof(struct zfcp_sg_list), GFP_KERNEL);
+ if (sg_list == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ memset(sg_list, 0, sizeof(*sg_list));
+
+ if (command != ZFCP_CFDC_IOC) {
+ ZFCP_LOG_INFO("IOC request code 0x%x invalid\n", command);
+ retval = -ENOTTY;
+ goto out;
+ }
+
+ if ((sense_data_user = (void __user *) buffer) == NULL) {
+ ZFCP_LOG_INFO("sense data record is required\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ retval = copy_from_user(sense_data, sense_data_user,
+ sizeof(struct zfcp_cfdc_sense_data));
+ if (retval) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (sense_data->signature != ZFCP_CFDC_SIGNATURE) {
+ ZFCP_LOG_INFO("invalid sense data request signature 0x%08x\n",
+ ZFCP_CFDC_SIGNATURE);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ switch (sense_data->command) {
+
+ case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
+ fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+ option = FSF_CFDC_OPTION_NORMAL_MODE;
+ break;
+
+ case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
+ fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+ option = FSF_CFDC_OPTION_FORCE;
+ break;
+
+ case ZFCP_CFDC_CMND_FULL_ACCESS:
+ fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+ option = FSF_CFDC_OPTION_FULL_ACCESS;
+ break;
+
+ case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
+ fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+ option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
+ break;
+
+ case ZFCP_CFDC_CMND_UPLOAD:
+ fsf_command = FSF_QTCB_UPLOAD_CONTROL_FILE;
+ option = 0;
+ break;
+
+ default:
+ ZFCP_LOG_INFO("invalid command code 0x%08x\n",
+ sense_data->command);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ bus_id = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
+ if (bus_id == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ snprintf(bus_id, BUS_ID_SIZE, "%d.%d.%04x",
+ (sense_data->devno >> 24),
+ (sense_data->devno >> 16) & 0xFF,
+ (sense_data->devno & 0xFFFF));
+
+ read_lock_irq(&zfcp_data.config_lock);
+ adapter = zfcp_get_adapter_by_busid(bus_id);
+ if (adapter)
+ zfcp_adapter_get(adapter);
+ read_unlock_irq(&zfcp_data.config_lock);
+
+ kfree(bus_id);
+
+ if (adapter == NULL) {
+ ZFCP_LOG_INFO("invalid adapter\n");
+ retval = -ENXIO;
+ goto out;
+ }
+
+ if (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE) {
+ retval = zfcp_sg_list_alloc(sg_list,
+ ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
+ if (retval) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if ((sense_data->command & ZFCP_CFDC_DOWNLOAD) &&
+ (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE)) {
+ retval = zfcp_sg_list_copy_from_user(
+ sg_list, &sense_data_user->control_file,
+ ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
+ if (retval) {
+ retval = -EFAULT;
+ goto out;
+ }
+ }
+
+ retval = zfcp_fsf_control_file(adapter, &fsf_req, fsf_command,
+ option, sg_list);
+ if (retval)
+ goto out;
+
+ if ((fsf_req->qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ (fsf_req->qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+ retval = -ENXIO;
+ goto out;
+ }
+
+ sense_data->fsf_status = fsf_req->qtcb->header.fsf_status;
+ memcpy(&sense_data->fsf_status_qual,
+ &fsf_req->qtcb->header.fsf_status_qual,
+ sizeof(union fsf_status_qual));
+ memcpy(&sense_data->payloads, &fsf_req->qtcb->bottom.support.els, 256);
+
+ retval = copy_to_user(sense_data_user, sense_data,
+ sizeof(struct zfcp_cfdc_sense_data));
+ if (retval) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (sense_data->command & ZFCP_CFDC_UPLOAD) {
+ retval = zfcp_sg_list_copy_to_user(
+ &sense_data_user->control_file, sg_list,
+ ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
+ if (retval) {
+ retval = -EFAULT;
+ goto out;
+ }
+ }
+
+ out:
+ if (fsf_req != NULL)
+ zfcp_fsf_req_cleanup(fsf_req);
+
+ if ((adapter != NULL) && (retval != -ENXIO))
+ zfcp_adapter_put(adapter);
+
+ if (sg_list != NULL) {
+ zfcp_sg_list_free(sg_list);
+ kfree(sg_list);
+ }
+
+ if (sense_data != NULL)
+ kfree(sense_data);
+
+ return retval;
+}
+
+
+/**
+ * zfcp_sg_list_alloc - create a scatter-gather list of the specified size
+ * @sg_list: structure describing a scatter gather list
+ * @size: size of scatter-gather list
+ * Return: 0 on success, else -ENOMEM
+ *
+ * In sg_list->sg a pointer to the created scatter-gather list is returned,
+ * or NULL if we run out of memory. sg_list->count specifies the number of
+ * elements of the scatter-gather list. The maximum size of a single element
+ * in the scatter-gather list is PAGE_SIZE.
+ */
+static inline int
+zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
+{
+ struct scatterlist *sg;
+ unsigned int i;
+ int retval = 0;
+ void *address;
+
+ BUG_ON(sg_list == NULL);
+
+ sg_list->count = size >> PAGE_SHIFT;
+ if (size & ~PAGE_MASK)
+ sg_list->count++;
+ sg_list->sg = kmalloc(sg_list->count * sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (sg_list->sg == NULL) {
+ sg_list->count = 0;
+ retval = -ENOMEM;
+ goto out;
+ }
+ memset(sg_list->sg, 0, sg_list->count * sizeof(struct scatterlist));
+
+ for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) {
+ sg->length = min(size, PAGE_SIZE);
+ sg->offset = 0;
+ address = (void *) get_zeroed_page(GFP_KERNEL);
+ if (address == NULL) {
+ sg_list->count = i;
+ zfcp_sg_list_free(sg_list);
+ retval = -ENOMEM;
+ goto out;
+ }
+ zfcp_address_to_sg(address, sg);
+ size -= sg->length;
+ }
+
+ out:
+ return retval;
+}
+
+
+/**
+ * zfcp_sg_list_free - free memory of a scatter-gather list
+ * @sg_list: structure describing a scatter-gather list
+ *
+ * Memory for each element in the scatter-gather list is freed.
+ * Finally sg_list->sg is freed itself and sg_list->count is reset.
+ */
+static inline void
+zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
+{
+ struct scatterlist *sg;
+ unsigned int i;
+
+ BUG_ON(sg_list == NULL);
+
+ for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++)
+ free_page((unsigned long) zfcp_sg_to_address(sg));
+
+ sg_list->count = 0;
+ kfree(sg_list->sg);
+}
+
+/**
+ * zfcp_sg_size - determine size of a scatter-gather list
+ * @sg: array of (struct scatterlist)
+ * @sg_count: elements in array
+ * Return: size of entire scatter-gather list
+ */
+size_t
+zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
+{
+ unsigned int i;
+ struct scatterlist *p;
+ size_t size;
+
+ size = 0;
+ for (i = 0, p = sg; i < sg_count; i++, p++) {
+ BUG_ON(p == NULL);
+ size += p->length;
+ }
+
+ return size;
+}
+
+
+/**
+ * zfcp_sg_list_copy_from_user -copy data from user space to scatter-gather list
+ * @sg_list: structure describing a scatter-gather list
+ * @user_buffer: pointer to buffer in user space
+ * @size: number of bytes to be copied
+ * Return: 0 on success, -EFAULT if copy_from_user fails.
+ */
+static inline int
+zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
+ void __user *user_buffer,
+ size_t size)
+{
+ struct scatterlist *sg;
+ unsigned int length;
+ void *zfcp_buffer;
+ int retval = 0;
+
+ BUG_ON(sg_list == NULL);
+
+ if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
+ return -EFAULT;
+
+ for (sg = sg_list->sg; size > 0; sg++) {
+ length = min((unsigned int)size, sg->length);
+ zfcp_buffer = zfcp_sg_to_address(sg);
+ if (copy_from_user(zfcp_buffer, user_buffer, length)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ user_buffer += length;
+ size -= length;
+ }
+
+ out:
+ return retval;
+}
+
+
+/**
+ * zfcp_sg_list_copy_to_user - copy data from scatter-gather list to user space
+ * @user_buffer: pointer to buffer in user space
+ * @sg_list: structure describing a scatter-gather list
+ * @size: number of bytes to be copied
+ * Return: 0 on success, -EFAULT if copy_to_user fails
+ */
+static inline int
+zfcp_sg_list_copy_to_user(void __user *user_buffer,
+ struct zfcp_sg_list *sg_list,
+ size_t size)
+{
+ struct scatterlist *sg;
+ unsigned int length;
+ void *zfcp_buffer;
+ int retval = 0;
+
+ BUG_ON(sg_list == NULL);
+
+ if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
+ return -EFAULT;
+
+ for (sg = sg_list->sg; size > 0; sg++) {
+ length = min((unsigned int) size, sg->length);
+ zfcp_buffer = zfcp_sg_to_address(sg);
+ if (copy_to_user(user_buffer, zfcp_buffer, length)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ user_buffer += length;
+ size -= length;
+ }
+
+ out:
+ return retval;
+}
+
+
+#undef ZFCP_LOG_AREA
+
+/****************************************************************/
+/****** Functions for configuration/set-up of structures ********/
+/****************************************************************/
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
+
+/**
+ * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
+ * @port: pointer to port to search for unit
+ * @fcp_lun: FCP LUN to search for
+ * Traverse list of all units of a port and return pointer to a unit
+ * with the given FCP LUN.
+ */
+struct zfcp_unit *
+zfcp_get_unit_by_lun(struct zfcp_port *port, fcp_lun_t fcp_lun)
+{
+ struct zfcp_unit *unit;
+ int found = 0;
+
+ list_for_each_entry(unit, &port->unit_list_head, list) {
+ if ((unit->fcp_lun == fcp_lun) &&
+ !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status))
+ {
+ found = 1;
+ break;
+ }
+ }
+ return found ? unit : NULL;
+}
+
+/**
+ * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
+ * @adapter: pointer to adapter to search for port
+ * @wwpn: wwpn to search for
+ * Traverse list of all ports of an adapter and return pointer to a port
+ * with the given wwpn.
+ */
+struct zfcp_port *
+zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, wwn_t wwpn)
+{
+ struct zfcp_port *port;
+ int found = 0;
+
+ list_for_each_entry(port, &adapter->port_list_head, list) {
+ if ((port->wwpn == wwpn) &&
+ !(atomic_read(&port->status) &
+ (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE))) {
+ found = 1;
+ break;
+ }
+ }
+ return found ? port : NULL;
+}
+
+/**
+ * zfcp_get_port_by_did - find port in port list of adapter by d_id
+ * @adapter: pointer to adapter to search for port
+ * @d_id: d_id to search for
+ * Traverse list of all ports of an adapter and return pointer to a port
+ * with the given d_id.
+ */
+struct zfcp_port *
+zfcp_get_port_by_did(struct zfcp_adapter *adapter, u32 d_id)
+{
+ struct zfcp_port *port;
+ int found = 0;
+
+ list_for_each_entry(port, &adapter->port_list_head, list) {
+ if ((port->d_id == d_id) &&
+ !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status))
+ {
+ found = 1;
+ break;
+ }
+ }
+ return found ? port : NULL;
+}
+
+/**
+ * zfcp_get_adapter_by_busid - find adpater in adapter list by bus_id
+ * @bus_id: bus_id to search for
+ * Traverse list of all adapters and return pointer to an adapter
+ * with the given bus_id.
+ */
+struct zfcp_adapter *
+zfcp_get_adapter_by_busid(char *bus_id)
+{
+ struct zfcp_adapter *adapter;
+ int found = 0;
+
+ list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list) {
+ if ((strncmp(bus_id, zfcp_get_busid_by_adapter(adapter),
+ BUS_ID_SIZE) == 0) &&
+ !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE,
+ &adapter->status)){
+ found = 1;
+ break;
+ }
+ }
+ return found ? adapter : NULL;
+}
+
+/**
+ * zfcp_unit_enqueue - enqueue unit to unit list of a port.
+ * @port: pointer to port where unit is added
+ * @fcp_lun: FCP LUN of unit to be enqueued
+ * Return: pointer to enqueued unit on success, NULL on error
+ * Locks: config_sema must be held to serialize changes to the unit list
+ *
+ * Sets up some unit internal structures and creates sysfs entry.
+ */
+struct zfcp_unit *
+zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
+{
+ struct zfcp_unit *unit, *tmp_unit;
+ scsi_lun_t scsi_lun;
+ int found;
+
+ /*
+ * check that there is no unit with this FCP_LUN already in list
+ * and enqueue it.
+ * Note: Unlike for the adapter and the port, this is an error
+ */
+ read_lock_irq(&zfcp_data.config_lock);
+ unit = zfcp_get_unit_by_lun(port, fcp_lun);
+ read_unlock_irq(&zfcp_data.config_lock);
+ if (unit)
+ return NULL;
+
+ unit = kmalloc(sizeof (struct zfcp_unit), GFP_KERNEL);
+ if (!unit)
+ return NULL;
+ memset(unit, 0, sizeof (struct zfcp_unit));
+
+ /* initialise reference count stuff */
+ atomic_set(&unit->refcount, 0);
+ init_waitqueue_head(&unit->remove_wq);
+
+ unit->port = port;
+ unit->fcp_lun = fcp_lun;
+
+ /* setup for sysfs registration */
+ snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun);
+ unit->sysfs_device.parent = &port->sysfs_device;
+ unit->sysfs_device.release = zfcp_sysfs_unit_release;
+ dev_set_drvdata(&unit->sysfs_device, unit);
+
+ /* mark unit unusable as long as sysfs registration is not complete */
+ atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
+
+ if (device_register(&unit->sysfs_device)) {
+ kfree(unit);
+ return NULL;
+ }
+
+ if (zfcp_sysfs_unit_create_files(&unit->sysfs_device)) {
+ device_unregister(&unit->sysfs_device);
+ return NULL;
+ }
+
+ zfcp_unit_get(unit);
+
+ scsi_lun = 0;
+ found = 0;
+ write_lock_irq(&zfcp_data.config_lock);
+ list_for_each_entry(tmp_unit, &port->unit_list_head, list) {
+ if (tmp_unit->scsi_lun != scsi_lun) {
+ found = 1;
+ break;
+ }
+ scsi_lun++;
+ }
+ unit->scsi_lun = scsi_lun;
+ if (found)
+ list_add_tail(&unit->list, &tmp_unit->list);
+ else
+ list_add_tail(&unit->list, &port->unit_list_head);
+ atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
+ atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
+ write_unlock_irq(&zfcp_data.config_lock);
+
+ port->units++;
+ zfcp_port_get(port);
+
+ return unit;
+}
+
+void
+zfcp_unit_dequeue(struct zfcp_unit *unit)
+{
+ zfcp_unit_wait(unit);
+ write_lock_irq(&zfcp_data.config_lock);
+ list_del(&unit->list);
+ write_unlock_irq(&zfcp_data.config_lock);
+ unit->port->units--;
+ zfcp_port_put(unit->port);
+ zfcp_sysfs_unit_remove_files(&unit->sysfs_device);
+ device_unregister(&unit->sysfs_device);
+}
+
+static void *
+zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size)
+{
+ return kmalloc((size_t) size, gfp_mask);
+}
+
+static void
+zfcp_mempool_free(void *element, void *size)
+{
+ kfree(element);
+}
+
+/*
+ * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
+ * commands.
+ * It also genrates fcp-nameserver request/response buffer and unsolicited
+ * status read fsf_req buffers.
+ *
+ * locks: must only be called with zfcp_data.config_sema taken
+ */
+static int
+zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
+{
+ adapter->pool.fsf_req_erp =
+ mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR,
+ zfcp_mempool_alloc, zfcp_mempool_free, (void *)
+ sizeof(struct zfcp_fsf_req_pool_element));
+
+ if (NULL == adapter->pool.fsf_req_erp)
+ return -ENOMEM;
+
+ adapter->pool.fsf_req_scsi =
+ mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR,
+ zfcp_mempool_alloc, zfcp_mempool_free, (void *)
+ sizeof(struct zfcp_fsf_req_pool_element));
+
+ if (NULL == adapter->pool.fsf_req_scsi)
+ return -ENOMEM;
+
+ adapter->pool.fsf_req_abort =
+ mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR,
+ zfcp_mempool_alloc, zfcp_mempool_free, (void *)
+ sizeof(struct zfcp_fsf_req_pool_element));
+
+ if (NULL == adapter->pool.fsf_req_abort)
+ return -ENOMEM;
+
+ adapter->pool.fsf_req_status_read =
+ mempool_create(ZFCP_POOL_STATUS_READ_NR,
+ zfcp_mempool_alloc, zfcp_mempool_free,
+ (void *) sizeof(struct zfcp_fsf_req));
+
+ if (NULL == adapter->pool.fsf_req_status_read)
+ return -ENOMEM;
+
+ adapter->pool.data_status_read =
+ mempool_create(ZFCP_POOL_STATUS_READ_NR,
+ zfcp_mempool_alloc, zfcp_mempool_free,
+ (void *) sizeof(struct fsf_status_read_buffer));
+
+ if (NULL == adapter->pool.data_status_read)
+ return -ENOMEM;
+
+ adapter->pool.data_gid_pn =
+ mempool_create(ZFCP_POOL_DATA_GID_PN_NR,
+ zfcp_mempool_alloc, zfcp_mempool_free, (void *)
+ sizeof(struct zfcp_gid_pn_data));
+
+ if (NULL == adapter->pool.data_gid_pn)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * zfcp_free_low_mem_buffers - free memory pools of an adapter
+ * @adapter: pointer to zfcp_adapter for which memory pools should be freed
+ * locking: zfcp_data.config_sema must be held
+ */
+static void
+zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
+{
+ if (adapter->pool.fsf_req_erp)
+ mempool_destroy(adapter->pool.fsf_req_erp);
+ if (adapter->pool.fsf_req_scsi)
+ mempool_destroy(adapter->pool.fsf_req_scsi);
+ if (adapter->pool.fsf_req_abort)
+ mempool_destroy(adapter->pool.fsf_req_abort);
+ if (adapter->pool.fsf_req_status_read)
+ mempool_destroy(adapter->pool.fsf_req_status_read);
+ if (adapter->pool.data_status_read)
+ mempool_destroy(adapter->pool.data_status_read);
+ if (adapter->pool.data_gid_pn)
+ mempool_destroy(adapter->pool.data_gid_pn);
+}
+
+/**
+ * zfcp_adapter_debug_register - registers debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be registered
+ * return: -ENOMEM on error, 0 otherwise
+ *
+ * Registers four debug areas (SCSI command failures, aborts, incoming
+ * ELS, erp events), each named after the adapter's bus id.
+ * NOTE(review): debug_register() may return NULL; views and log levels
+ * are attached *before* the combined NULL check at the end, so this
+ * relies on debug_register_view()/debug_set_level() tolerating a NULL
+ * handle -- confirm against the s390 debug facility.
+ */
+int
+zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
+{
+	char dbf_name[20];
+
+	/* debug feature area which records SCSI command failures (hostbyte) */
+	spin_lock_init(&adapter->dbf_lock);
+
+	sprintf(dbf_name, ZFCP_CMD_DBF_NAME "%s",
+		zfcp_get_busid_by_adapter(adapter));
+	adapter->cmd_dbf = debug_register(dbf_name, ZFCP_CMD_DBF_INDEX,
+					  ZFCP_CMD_DBF_AREAS,
+					  ZFCP_CMD_DBF_LENGTH);
+	debug_register_view(adapter->cmd_dbf, &debug_hex_ascii_view);
+	debug_set_level(adapter->cmd_dbf, ZFCP_CMD_DBF_LEVEL);
+
+	/* debug feature area which records SCSI command aborts */
+	sprintf(dbf_name, ZFCP_ABORT_DBF_NAME "%s",
+		zfcp_get_busid_by_adapter(adapter));
+	adapter->abort_dbf = debug_register(dbf_name, ZFCP_ABORT_DBF_INDEX,
+					    ZFCP_ABORT_DBF_AREAS,
+					    ZFCP_ABORT_DBF_LENGTH);
+	debug_register_view(adapter->abort_dbf, &debug_hex_ascii_view);
+	debug_set_level(adapter->abort_dbf, ZFCP_ABORT_DBF_LEVEL);
+
+	/* debug feature area which records incoming ELS commands */
+	sprintf(dbf_name, ZFCP_IN_ELS_DBF_NAME "%s",
+		zfcp_get_busid_by_adapter(adapter));
+	adapter->in_els_dbf = debug_register(dbf_name, ZFCP_IN_ELS_DBF_INDEX,
+					     ZFCP_IN_ELS_DBF_AREAS,
+					     ZFCP_IN_ELS_DBF_LENGTH);
+	debug_register_view(adapter->in_els_dbf, &debug_hex_ascii_view);
+	debug_set_level(adapter->in_els_dbf, ZFCP_IN_ELS_DBF_LEVEL);
+
+	/* debug feature area which records erp events */
+	sprintf(dbf_name, ZFCP_ERP_DBF_NAME "%s",
+		zfcp_get_busid_by_adapter(adapter));
+	adapter->erp_dbf = debug_register(dbf_name, ZFCP_ERP_DBF_INDEX,
+					  ZFCP_ERP_DBF_AREAS,
+					  ZFCP_ERP_DBF_LENGTH);
+	debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
+	debug_set_level(adapter->erp_dbf, ZFCP_ERP_DBF_LEVEL);
+
+	/* all four areas must have been registered, else roll back */
+	if (!(adapter->cmd_dbf && adapter->abort_dbf &&
+	      adapter->in_els_dbf && adapter->erp_dbf)) {
+		zfcp_adapter_debug_unregister(adapter);
+		return -ENOMEM;
+	}
+
+	return 0;
+
+}
+
+/**
+ * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be unregistered
+ *
+ * Also called from the failure path of zfcp_adapter_debug_register(),
+ * where some of the handles may be NULL.
+ * NOTE(review): relies on debug_unregister() accepting a NULL handle --
+ * confirm against the s390 debug facility.
+ */
+void
+zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
+{
+	debug_unregister(adapter->abort_dbf);
+	debug_unregister(adapter->cmd_dbf);
+	debug_unregister(adapter->erp_dbf);
+	debug_unregister(adapter->in_els_dbf);
+	/* clear the handles so a later unregister/re-register is safe */
+	adapter->abort_dbf = NULL;
+	adapter->cmd_dbf = NULL;
+	adapter->erp_dbf = NULL;
+	adapter->in_els_dbf = NULL;
+}
+
+/*
+ * Empty release callback for the "generic_services" pseudo device.  Its
+ * storage is embedded in struct zfcp_adapter and is freed together with
+ * the adapter, so nothing needs to be done here; the callback is
+ * presumably provided only to satisfy the driver core, which expects a
+ * release handler on every registered device.
+ */
+void
+zfcp_dummy_release(struct device *dev)
+{
+	return;
+}
+
+/*
+ * Enqueues an adapter at the end of the adapter list in the driver data.
+ * All adapter internal structures are set up and sysfs entries are
+ * created.
+ *
+ * returns: pointer to the newly enqueued zfcp_adapter on success,
+ *	    NULL on any failure (note: despite older revisions of this
+ *	    comment, no ZFCP_KNOWN / -ENOMEM integer codes are returned;
+ *	    callers must test for NULL)
+ * locks:   config_sema must be held to serialise changes to the adapter list
+ */
+struct zfcp_adapter *
+zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+{
+	int retval = 0;
+	struct zfcp_adapter *adapter;
+
+	/*
+	 * Note: It is safe to release the list_lock, as any list changes
+	 * are protected by the config_sema, which must be held to get here
+	 */
+
+	/* try to allocate new adapter data structure (zeroed) */
+	adapter = kmalloc(sizeof (struct zfcp_adapter), GFP_KERNEL);
+	if (!adapter) {
+		ZFCP_LOG_INFO("error: allocation of base adapter "
+			      "structure failed\n");
+		goto out;
+	}
+	memset(adapter, 0, sizeof (struct zfcp_adapter));
+
+	ccw_device->handler = NULL;
+
+	/* save ccw_device pointer */
+	adapter->ccw_device = ccw_device;
+
+	retval = zfcp_qdio_allocate_queues(adapter);
+	if (retval)
+		goto queues_alloc_failed;
+
+	retval = zfcp_qdio_allocate(adapter);
+	if (retval)
+		goto qdio_allocate_failed;
+
+	retval = zfcp_allocate_low_mem_buffers(adapter);
+	if (retval) {
+		ZFCP_LOG_INFO("error: pool allocation failed\n");
+		goto failed_low_mem_buffers;
+	}
+
+	/* initialise reference count stuff */
+	atomic_set(&adapter->refcount, 0);
+	init_waitqueue_head(&adapter->remove_wq);
+
+	/* initialise list of ports */
+	INIT_LIST_HEAD(&adapter->port_list_head);
+
+	/* initialise list of ports to be removed */
+	INIT_LIST_HEAD(&adapter->port_remove_lh);
+
+	/* initialize list of fsf requests */
+	rwlock_init(&adapter->fsf_req_list_lock);
+	INIT_LIST_HEAD(&adapter->fsf_req_list_head);
+
+	/* initialize abort lock */
+	rwlock_init(&adapter->abort_lock);
+
+	/* initialise some erp stuff */
+	init_waitqueue_head(&adapter->erp_thread_wqh);
+	init_waitqueue_head(&adapter->erp_done_wqh);
+
+	/* initialize lock of associated request queue */
+	rwlock_init(&adapter->request_queue.queue_lock);
+
+	/* initialise SCSI ER timer */
+	init_timer(&adapter->scsi_er_timer);
+
+	/* set FC service class used per default */
+	adapter->fc_service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
+
+	/* adapter name is the bus id, converted to EBCDIC for the hardware */
+	sprintf(adapter->name, "%s", zfcp_get_busid_by_adapter(adapter));
+	ASCEBC(adapter->name, strlen(adapter->name));
+
+	/* mark adapter unusable as long as sysfs registration is not complete */
+	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
+
+	/* NOTE(review): redundant -- ccw_device was already stored above */
+	adapter->ccw_device = ccw_device;
+	dev_set_drvdata(&ccw_device->dev, adapter);
+
+	if (zfcp_sysfs_adapter_create_files(&ccw_device->dev))
+		goto sysfs_failed;
+
+	/* pseudo device grouping the well-known-address service ports */
+	adapter->generic_services.parent = &adapter->ccw_device->dev;
+	adapter->generic_services.release = zfcp_dummy_release;
+	snprintf(adapter->generic_services.bus_id, BUS_ID_SIZE,
+		 "generic_services");
+
+	if (device_register(&adapter->generic_services))
+		goto generic_services_failed;
+
+	/* put allocated adapter at list tail */
+	write_lock_irq(&zfcp_data.config_lock);
+	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
+	list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
+	write_unlock_irq(&zfcp_data.config_lock);
+
+	zfcp_data.adapters++;
+
+	goto out;
+
+	/* error unwinding: undo in reverse order of construction */
+ generic_services_failed:
+	zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
+ sysfs_failed:
+	dev_set_drvdata(&ccw_device->dev, NULL);
+ failed_low_mem_buffers:
+	zfcp_free_low_mem_buffers(adapter);
+	if (qdio_free(ccw_device) != 0)
+		ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
+				zfcp_get_busid_by_adapter(adapter));
+ qdio_allocate_failed:
+	zfcp_qdio_free_queues(adapter);
+ queues_alloc_failed:
+	kfree(adapter);
+	adapter = NULL;
+ out:
+	return adapter;
+}
+
+/*
+ * zfcp_adapter_dequeue - remove an adapter from the adapter list and free it
+ *
+ * Note: despite older revisions of this comment the function returns
+ * void; when FSF requests are still pending the adapter is deliberately
+ * left in place and only a "still in use" message is logged.
+ * locks: adapter list write lock is assumed to be held by caller
+ *	  adapter->fsf_req_list_lock is taken and released within this
+ *	  function and must not be held on entry
+ */
+void
+zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
+{
+	int retval = 0;
+	unsigned long flags;
+
+	device_unregister(&adapter->generic_services);
+	zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
+	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
+	/* sanity check: no pending FSF requests */
+	read_lock_irqsave(&adapter->fsf_req_list_lock, flags);
+	retval = !list_empty(&adapter->fsf_req_list_head);
+	read_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
+	if (retval) {
+		ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
+				"%i requests outstanding\n",
+				zfcp_get_busid_by_adapter(adapter), adapter,
+				atomic_read(&adapter->fsf_reqs_active));
+		retval = -EBUSY;
+		goto out;
+	}
+
+	/* remove specified adapter data structure from list */
+	write_lock_irq(&zfcp_data.config_lock);
+	list_del(&adapter->list);
+	write_unlock_irq(&zfcp_data.config_lock);
+
+	/* decrease number of adapters in list */
+	zfcp_data.adapters--;
+
+	ZFCP_LOG_TRACE("adapter %s (%p) removed from list, "
+		       "%i adapters still in list\n",
+		       zfcp_get_busid_by_adapter(adapter),
+		       adapter, zfcp_data.adapters);
+
+	retval = qdio_free(adapter->ccw_device);
+	if (retval)
+		ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
+				zfcp_get_busid_by_adapter(adapter));
+
+	zfcp_free_low_mem_buffers(adapter);
+	/* free memory of adapter data structure and queues */
+	zfcp_qdio_free_queues(adapter);
+	ZFCP_LOG_TRACE("freeing adapter structure\n");
+	kfree(adapter);
+ out:
+	return;
+}
+
+/**
+ * zfcp_port_enqueue - enqueue port to port list of adapter
+ * @adapter: adapter where remote port is added
+ * @wwpn: WWPN of the remote port to be enqueued
+ * @status: initial status for the port
+ * @d_id: destination id of the remote port to be enqueued
+ * Return: pointer to enqueued port on success, NULL on error
+ * Locks: config_sema must be held to serialize changes to the port list
+ *
+ * All port internal structures are set up and the sysfs entry is generated.
+ * d_id is used to enqueue ports with a well known address like the Directory
+ * Service for nameserver lookup.
+ */
+struct zfcp_port *
+zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
+		  u32 d_id)
+{
+	struct zfcp_port *port, *tmp_port;
+	int check_wwpn;
+	scsi_id_t scsi_id;
+	int found;
+
+	/* well-known-address ports carry no WWPN and skip the lookup */
+	check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN);
+
+	/*
+	 * check that there is no port with this WWPN already in list
+	 */
+	if (check_wwpn) {
+		read_lock_irq(&zfcp_data.config_lock);
+		port = zfcp_get_port_by_wwpn(adapter, wwpn);
+		read_unlock_irq(&zfcp_data.config_lock);
+		if (port)
+			return NULL;
+	}
+
+	port = kmalloc(sizeof (struct zfcp_port), GFP_KERNEL);
+	if (!port)
+		return NULL;
+	memset(port, 0, sizeof (struct zfcp_port));
+
+	/* initialise reference count stuff */
+	atomic_set(&port->refcount, 0);
+	init_waitqueue_head(&port->remove_wq);
+
+	INIT_LIST_HEAD(&port->unit_list_head);
+	INIT_LIST_HEAD(&port->unit_remove_lh);
+
+	port->adapter = adapter;
+
+	if (check_wwpn)
+		port->wwpn = wwpn;
+
+	atomic_set_mask(status, &port->status);
+
+	/* setup for sysfs registration */
+	if (status & ZFCP_STATUS_PORT_WKA) {
+		/* WKA ports get a symbolic bus id derived from their d_id */
+		switch (d_id) {
+		case ZFCP_DID_DIRECTORY_SERVICE:
+			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
+				 "directory");
+			break;
+		case ZFCP_DID_MANAGEMENT_SERVICE:
+			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
+				 "management");
+			break;
+		case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
+			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
+				 "key_distribution");
+			break;
+		case ZFCP_DID_ALIAS_SERVICE:
+			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
+				 "alias");
+			break;
+		case ZFCP_DID_TIME_SERVICE:
+			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
+				 "time");
+			break;
+		default:
+			/* unknown well-known address: refuse enqueue */
+			kfree(port);
+			return NULL;
+		}
+		port->d_id = d_id;
+		port->sysfs_device.parent = &adapter->generic_services;
+	} else {
+		snprintf(port->sysfs_device.bus_id,
+			 BUS_ID_SIZE, "0x%016llx", wwpn);
+		port->sysfs_device.parent = &adapter->ccw_device->dev;
+	}
+	port->sysfs_device.release = zfcp_sysfs_port_release;
+	dev_set_drvdata(&port->sysfs_device, port);
+
+	/* mark port unusable as long as sysfs registration is not complete */
+	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
+
+	if (device_register(&port->sysfs_device)) {
+		/* NOTE(review): kfree() after a failed device_register() --
+		 * the driver core may require put_device() here instead;
+		 * confirm against the driver model documentation */
+		kfree(port);
+		return NULL;
+	}
+
+	if (zfcp_sysfs_port_create_files(&port->sysfs_device, status)) {
+		/* the release callback (zfcp_sysfs_port_release) presumably
+		 * frees the port structure -- hence no kfree() here */
+		device_unregister(&port->sysfs_device);
+		return NULL;
+	}
+
+	zfcp_port_get(port);
+
+	/*
+	 * Find the lowest unused scsi_id (starting at 1) and the insertion
+	 * point that keeps the port list sorted by scsi_id; this assumes
+	 * the list is already sorted, which this insertion scheme maintains.
+	 */
+	scsi_id = 1;
+	found = 0;
+	write_lock_irq(&zfcp_data.config_lock);
+	list_for_each_entry(tmp_port, &adapter->port_list_head, list) {
+		if (atomic_test_mask(ZFCP_STATUS_PORT_NO_SCSI_ID,
+				     &tmp_port->status))
+			continue;
+		if (tmp_port->scsi_id != scsi_id) {
+			found = 1;
+			break;
+		}
+		scsi_id++;
+	}
+	port->scsi_id = scsi_id;
+	if (found)
+		list_add_tail(&port->list, &tmp_port->list);
+	else
+		list_add_tail(&port->list, &adapter->port_list_head);
+	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
+	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
+	/* remember the first directory-service port for nameserver lookups */
+	if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
+		if (!adapter->nameserver_port)
+			adapter->nameserver_port = port;
+	adapter->ports++;
+	write_unlock_irq(&zfcp_data.config_lock);
+
+	zfcp_adapter_get(adapter);
+
+	return port;
+}
+
+/**
+ * zfcp_port_dequeue - remove a port from its adapter's port list and free it
+ * @port: port to be removed
+ *
+ * Waits until all users have dropped their references, unlinks the port
+ * under the config lock, drops the adapter reference taken at enqueue
+ * time and unregisters the sysfs device (whose release callback
+ * presumably frees the port structure -- see zfcp_sysfs_port_release).
+ */
+void
+zfcp_port_dequeue(struct zfcp_port *port)
+{
+	zfcp_port_wait(port);
+	write_lock_irq(&zfcp_data.config_lock);
+	list_del(&port->list);
+	port->adapter->ports--;
+	write_unlock_irq(&zfcp_data.config_lock);
+	zfcp_adapter_put(port->adapter);
+	zfcp_sysfs_port_remove_files(&port->sysfs_device,
+				     atomic_read(&port->status));
+	device_unregister(&port->sysfs_device);
+}
+
+/**
+ * zfcp_nameserver_enqueue - enqueue the well-known nameserver port
+ * @adapter: adapter the nameserver port is created for
+ * Return: 0 on success, -ENXIO if the port could not be enqueued
+ *
+ * zfcp_port_enqueue() stores the directory-service port in
+ * adapter->nameserver_port; the reference returned here is dropped
+ * again immediately since that field retains the port.
+ */
+int
+zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
+{
+	struct zfcp_port *port;
+
+	port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
+				 ZFCP_DID_DIRECTORY_SERVICE);
+	if (!port) {
+		ZFCP_LOG_INFO("error: enqueue of nameserver port for "
+			      "adapter %s failed\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		return -ENXIO;
+	}
+	zfcp_port_put(port);
+
+	return 0;
+}
+
+#undef ZFCP_LOG_AREA
+
+/****************************************************************/
+/******* Fibre Channel Standard related Functions **************/
+/****************************************************************/
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC
+
+/**
+ * zfcp_fsf_incoming_els_rscn - handle incoming RSCN (state change) ELS
+ * @adapter: adapter which received the unsolicited notification
+ * @status_buffer: status-read buffer holding the RSCN payload
+ *
+ * For each affected-address entry the address format selects a d_id
+ * mask (port/area/domain/fabric).  Every known non-WKA port whose d_id
+ * matches under that mask gets a link test; ports without a valid d_id
+ * are reopened via erp.
+ */
+void
+zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
+			   struct fsf_status_read_buffer *status_buffer)
+{
+	struct fcp_rscn_head *fcp_rscn_head;
+	struct fcp_rscn_element *fcp_rscn_element;
+	struct zfcp_port *port;
+	u16 i;
+	u16 no_entries;
+	u32 range_mask;
+	unsigned long flags;
+
+	/* head and elements share the payload; element 0 is the head */
+	fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload;
+	fcp_rscn_element = (struct fcp_rscn_element *) status_buffer->payload;
+
+	/* see FC-FS */
+	no_entries = (fcp_rscn_head->payload_len / 4);
+
+	zfcp_in_els_dbf_event(adapter, "##rscn", status_buffer,
+			      fcp_rscn_head->payload_len);
+
+	debug_text_event(adapter->erp_dbf, 1, "unsol_els_rscn:");
+	for (i = 1; i < no_entries; i++) {
+		/* skip head and start with 1st element */
+		fcp_rscn_element++;
+		switch (fcp_rscn_element->addr_format) {
+		case ZFCP_PORT_ADDRESS:
+			ZFCP_LOG_FLAGS(1, "ZFCP_PORT_ADDRESS\n");
+			range_mask = ZFCP_PORTS_RANGE_PORT;
+			break;
+		case ZFCP_AREA_ADDRESS:
+			ZFCP_LOG_FLAGS(1, "ZFCP_AREA_ADDRESS\n");
+			range_mask = ZFCP_PORTS_RANGE_AREA;
+			break;
+		case ZFCP_DOMAIN_ADDRESS:
+			ZFCP_LOG_FLAGS(1, "ZFCP_DOMAIN_ADDRESS\n");
+			range_mask = ZFCP_PORTS_RANGE_DOMAIN;
+			break;
+		case ZFCP_FABRIC_ADDRESS:
+			ZFCP_LOG_FLAGS(1, "ZFCP_FABRIC_ADDRESS\n");
+			range_mask = ZFCP_PORTS_RANGE_FABRIC;
+			break;
+		default:
+			ZFCP_LOG_INFO("incoming RSCN with unknown "
+				      "address format\n");
+			continue;
+		}
+		read_lock_irqsave(&zfcp_data.config_lock, flags);
+		list_for_each_entry(port, &adapter->port_list_head, list) {
+			if (atomic_test_mask
+			    (ZFCP_STATUS_PORT_WKA, &port->status))
+				continue;
+			/* Do we know this port? If not skip it. */
+			if (!atomic_test_mask
+			    (ZFCP_STATUS_PORT_DID_DID, &port->status)) {
+				ZFCP_LOG_INFO("incoming RSCN, trying to open "
+					      "port 0x%016Lx\n", port->wwpn);
+				debug_text_event(adapter->erp_dbf, 1,
+						 "unsol_els_rscnu:");
+				zfcp_erp_port_reopen(port,
+						     ZFCP_STATUS_COMMON_ERP_FAILED);
+				continue;
+			}
+
+			/*
+			 * FIXME: race: d_id might being invalidated
+			 * (...DID_DID reset)
+			 */
+			if ((port->d_id & range_mask)
+			    == (fcp_rscn_element->nport_did & range_mask)) {
+				ZFCP_LOG_TRACE("reopen did 0x%08x\n",
+					       fcp_rscn_element->nport_did);
+				/*
+				 * Unfortunately, an RSCN does not specify the
+				 * type of change a target underwent. We assume
+				 * that it makes sense to reopen the link.
+				 * FIXME: Shall we try to find out more about
+				 * the target and link state before closing it?
+				 * How to accomplish this? (nameserver?)
+				 * Where would such code be put in?
+				 * (inside or outside erp)
+				 */
+				ZFCP_LOG_INFO("incoming RSCN, trying to open "
+					      "port 0x%016Lx\n", port->wwpn);
+				debug_text_event(adapter->erp_dbf, 1,
+						 "unsol_els_rscnk:");
+				zfcp_test_link(port);
+			}
+		}
+		read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	}
+}
+
+/*
+ * Handle an incoming PLOGI: look up the port with the sender's WWPN and
+ * force-reopen it.
+ *
+ * NOTE(review): when the loop completes without a match,
+ * list_for_each_entry leaves 'port' pointing at the list head container,
+ * never NULL -- the "!port" test below cannot trigger and the wwpn
+ * re-check is the actual not-found guard.
+ */
+static void
+zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
+			    struct fsf_status_read_buffer *status_buffer)
+{
+	logi *els_logi = (logi *) status_buffer->payload;
+	struct zfcp_port *port;
+	unsigned long flags;
+
+	zfcp_in_els_dbf_event(adapter, "##plogi", status_buffer, 28);
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	list_for_each_entry(port, &adapter->port_list_head, list) {
+		if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn))
+			break;
+	}
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+
+	if (!port || (port->wwpn != (*(wwn_t *) & els_logi->nport_wwn))) {
+		ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexisting port "
+			       "with d_id 0x%08x on adapter %s\n",
+			       status_buffer->d_id,
+			       zfcp_get_busid_by_adapter(adapter));
+	} else {
+		debug_text_event(adapter->erp_dbf, 1, "unsol_els_plogi:");
+		debug_event(adapter->erp_dbf, 1, &els_logi->nport_wwn, 8);
+		zfcp_erp_port_forced_reopen(port, 0);
+	}
+}
+
+/*
+ * Handle an incoming LOGO: look up the port the remote N_Port logged
+ * out from (by WWPN) and force-reopen it.
+ *
+ * NOTE(review): as in the PLOGI handler, 'port' is never NULL after
+ * list_for_each_entry; the wwpn re-check is the actual not-found guard.
+ */
+static void
+zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
+			   struct fsf_status_read_buffer *status_buffer)
+{
+	struct fcp_logo *els_logo = (struct fcp_logo *) status_buffer->payload;
+	struct zfcp_port *port;
+	unsigned long flags;
+
+	zfcp_in_els_dbf_event(adapter, "##logo", status_buffer, 16);
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	list_for_each_entry(port, &adapter->port_list_head, list) {
+		if (port->wwpn == els_logo->nport_wwpn)
+			break;
+	}
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+
+	if (!port || (port->wwpn != els_logo->nport_wwpn)) {
+		ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexisting port "
+			       "with d_id 0x%08x on adapter %s\n",
+			       status_buffer->d_id,
+			       zfcp_get_busid_by_adapter(adapter));
+	} else {
+		debug_text_event(adapter->erp_dbf, 1, "unsol_els_logo:");
+		debug_event(adapter->erp_dbf, 1, &els_logo->nport_wwpn, 8);
+		zfcp_erp_port_forced_reopen(port, 0);
+	}
+}
+
+/*
+ * Log an incoming ELS that none of the specific handlers recognized;
+ * the first payload word is the ELS command code.
+ */
+static void
+zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
+			      struct fsf_status_read_buffer *status_buffer)
+{
+	zfcp_in_els_dbf_event(adapter, "##undef", status_buffer, 24);
+	ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
+			"for adapter %s\n", *(u32 *) (status_buffer->payload),
+			zfcp_get_busid_by_adapter(adapter));
+
+}
+
+/**
+ * zfcp_fsf_incoming_els - dispatch an unsolicited incoming ELS
+ * @fsf_req: status-read FSF request carrying the ELS payload
+ *
+ * The first 4 payload bytes select the handler.  RSCN is matched on the
+ * command byte only (high 16 bits), since the low bytes of its first
+ * word encode page/payload lengths rather than the command.
+ */
+void
+zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
+{
+	struct fsf_status_read_buffer *status_buffer;
+	u32 els_type;
+	struct zfcp_adapter *adapter;
+
+	status_buffer = fsf_req->data.status_read.buffer;
+	els_type = *(u32 *) (status_buffer->payload);
+	adapter = fsf_req->adapter;
+
+	if (els_type == LS_PLOGI)
+		zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
+	else if (els_type == LS_LOGO)
+		zfcp_fsf_incoming_els_logo(adapter, status_buffer);
+	else if ((els_type & 0xffff0000) == LS_RSCN)
+		/* we are only concerned with the command, not the length */
+		zfcp_fsf_incoming_els_rscn(adapter, status_buffer);
+	else
+		zfcp_fsf_incoming_els_unknown(adapter, status_buffer);
+}
+
+
+/**
+ * zfcp_gid_pn_buffers_alloc - allocate buffers for GID_PN nameserver request
+ * @gid_pn: pointer to return pointer to struct zfcp_gid_pn_data
+ * @pool: pointer to mempool_t if non-null memory pool is used for allocation
+ * Return: 0 on success, -ENOMEM if no memory is available
+ *
+ * The structure is zeroed and the request/response CT IUs are wired up
+ * to their scatter-gather entries.  The pool back-pointer must be
+ * stored only AFTER the memset: the previous code assigned it before
+ * zeroing the structure, so it was always wiped out and
+ * zfcp_gid_pn_buffers_free() would kfree() pool-allocated memory
+ * instead of returning it with mempool_free(), slowly depleting the
+ * emergency pool.
+ */
+static int
+zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
+{
+	struct zfcp_gid_pn_data *data;
+
+	if (pool != NULL)
+		data = mempool_alloc(pool, GFP_ATOMIC);
+	else
+		data = kmalloc(sizeof(struct zfcp_gid_pn_data), GFP_ATOMIC);
+
+	if (NULL == data)
+		return -ENOMEM;
+
+	memset(data, 0, sizeof(*data));
+	/* set after memset; stays NULL for the kmalloc case */
+	data->ct.pool = pool;
+	data->ct.req = &data->req;
+	data->ct.resp = &data->resp;
+	data->ct.req_count = data->ct.resp_count = 1;
+	zfcp_address_to_sg(&data->ct_iu_req, &data->req);
+	zfcp_address_to_sg(&data->ct_iu_resp, &data->resp);
+	data->req.length = sizeof(struct ct_iu_gid_pn_req);
+	data->resp.length = sizeof(struct ct_iu_gid_pn_resp);
+
+	*gid_pn = data;
+	return 0;
+}
+
+/**
+ * zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
+ * @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
+ *
+ * Returns the buffer to the mempool it was taken from, or kfree()s it
+ * when it was allocated without a pool.
+ */
+static void
+zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
+{
+	if (gid_pn->ct.pool == NULL)
+		kfree(gid_pn);
+	else
+		mempool_free(gid_pn, gid_pn->ct.pool);
+}
+
+/**
+ * zfcp_ns_gid_pn_request - initiate GID_PN nameserver request
+ * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
+ * Return: 0 on success, -ENOMEM on buffer allocation failure, otherwise
+ *	the error code returned by zfcp_fsf_send_ct()
+ *
+ * On a successful send the gid_pn buffers are owned by the request and
+ * are freed by zfcp_ns_gid_pn_handler(); on send failure they are freed
+ * here.
+ */
+int
+zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
+{
+	int ret;
+	struct ct_iu_gid_pn_req *ct_iu_req;
+	struct zfcp_gid_pn_data *gid_pn;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	ret = zfcp_gid_pn_buffers_alloc(&gid_pn, adapter->pool.data_gid_pn);
+	if (ret < 0) {
+		ZFCP_LOG_INFO("error: buffer allocation for gid_pn nameserver "
+			      "request failed for adapter %s\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		goto out;
+	}
+
+	/* setup nameserver request */
+	ct_iu_req = zfcp_sg_to_address(gid_pn->ct.req);
+	ct_iu_req->header.revision = ZFCP_CT_REVISION;
+	ct_iu_req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
+	ct_iu_req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
+	ct_iu_req->header.options = ZFCP_CT_SYNCHRONOUS;
+	ct_iu_req->header.cmd_rsp_code = ZFCP_CT_GID_PN;
+	ct_iu_req->header.max_res_size = ZFCP_CT_MAX_SIZE;
+	ct_iu_req->wwpn = erp_action->port->wwpn;
+
+	/* setup parameters for send generic command */
+	gid_pn->ct.port = adapter->nameserver_port;
+	gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
+	gid_pn->ct.handler_data = (unsigned long) gid_pn;
+	gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
+	gid_pn->ct.timer = &erp_action->timer;
+	gid_pn->port = erp_action->port;
+
+	ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
+			       erp_action);
+	if (ret) {
+		ZFCP_LOG_INFO("error: initiation of gid_pn nameserver request "
+			      "failed for adapter %s\n",
+			      zfcp_get_busid_by_adapter(adapter));
+
+		zfcp_gid_pn_buffers_free(gid_pn);
+	}
+
+ out:
+	return ret;
+}
+
+/**
+ * zfcp_ns_gid_pn_handler - handler for GID_PN nameserver request
+ * @data: unsigned long, contains pointer to struct zfcp_gid_pn_data
+ *
+ * On success the looked-up d_id is stored in the port and the
+ * ZFCP_STATUS_PORT_DID_DID flag is set.  A rejected or failed CT
+ * request marks the port's WWPN invalid; a request/response WWPN
+ * mismatch is only logged.  The gid_pn buffers are always freed here.
+ */
+static void zfcp_ns_gid_pn_handler(unsigned long data)
+{
+	struct zfcp_port *port;
+	struct zfcp_send_ct *ct;
+	struct ct_iu_gid_pn_req *ct_iu_req;
+	struct ct_iu_gid_pn_resp *ct_iu_resp;
+	struct zfcp_gid_pn_data *gid_pn;
+
+
+	gid_pn = (struct zfcp_gid_pn_data *) data;
+	port = gid_pn->port;
+	ct = &gid_pn->ct;
+	ct_iu_req = zfcp_sg_to_address(ct->req);
+	ct_iu_resp = zfcp_sg_to_address(ct->resp);
+
+	if ((ct->status != 0) || zfcp_check_ct_response(&ct_iu_resp->header)) {
+		/* FIXME: do we need some specific erp entry points */
+		atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
+		goto failed;
+	}
+	/* paranoia */
+	if (ct_iu_req->wwpn != port->wwpn) {
+		ZFCP_LOG_NORMAL("bug: wwpn 0x%016Lx returned by nameserver "
+				"lookup does not match expected wwpn 0x%016Lx "
+				"for adapter %s\n", ct_iu_req->wwpn, port->wwpn,
+				zfcp_get_busid_by_port(port));
+		goto mismatch;
+	}
+
+	/* looks like a valid d_id */
+	port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
+	atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
+	ZFCP_LOG_DEBUG("adapter %s:  wwpn=0x%016Lx ---> d_id=0x%08x\n",
+		       zfcp_get_busid_by_port(port), port->wwpn, port->d_id);
+	goto out;
+
+ mismatch:
+	ZFCP_LOG_DEBUG("CT IUs do not match:\n");
+	ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_req,
+		      sizeof(struct ct_iu_gid_pn_req));
+	ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_resp,
+		      sizeof(struct ct_iu_gid_pn_resp));
+
+ failed:
+	ZFCP_LOG_NORMAL("warning: failed gid_pn nameserver request for wwpn "
+			"0x%016Lx for adapter %s\n",
+			port->wwpn, zfcp_get_busid_by_port(port));
+ out:
+	zfcp_gid_pn_buffers_free(gid_pn);
+	return;
+}
+
+/*
+ * The reason-code tables below are searched linearly by
+ * zfcp_rc_description(); each is terminated by a {0, NULL} sentinel.
+ */
+/* reject CT_IU reason codes acc. to FC-GS-4 */
+static const struct zfcp_rc_entry zfcp_ct_rc[] = {
+	{0x01, "invalid command code"},
+	{0x02, "invalid version level"},
+	{0x03, "logical error"},
+	{0x04, "invalid CT_IU size"},
+	{0x05, "logical busy"},
+	{0x07, "protocol error"},
+	{0x09, "unable to perform command request"},
+	{0x0b, "command not supported"},
+	{0x0d, "server not available"},
+	{0x0e, "session could not be established"},
+	{0xff, "vendor specific error"},
+	{0, NULL},
+};
+
+/* LS_RJT reason codes acc. to FC-FS */
+static const struct zfcp_rc_entry zfcp_ls_rjt_rc[] = {
+	{0x01, "invalid LS_Command code"},
+	{0x03, "logical error"},
+	{0x05, "logical busy"},
+	{0x07, "protocol error"},
+	{0x09, "unable to perform command request"},
+	{0x0b, "command not supported"},
+	{0x0e, "command already in progress"},
+	{0xff, "vendor specific error"},
+	{0, NULL},
+};
+
+/* reject reason codes according to FC-PH/FC-FS */
+static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
+	{0x01, "invalid D_ID"},
+	{0x02, "invalid S_ID"},
+	{0x03, "Nx_Port not available, temporary"},
+	{0x04, "Nx_Port not available, permament"},
+	{0x05, "class not supported"},
+	{0x06, "delimiter usage error"},
+	{0x07, "TYPE not supported"},
+	{0x08, "invalid Link_Control"},
+	{0x09, "invalid R_CTL field"},
+	{0x0a, "invalid F_CTL field"},
+	{0x0b, "invalid OX_ID"},
+	{0x0c, "invalid RX_ID"},
+	{0x0d, "invalid SEQ_ID"},
+	{0x0e, "invalid DF_CTL"},
+	{0x0f, "invalid SEQ_CNT"},
+	{0x10, "invalid parameter field"},
+	{0x11, "exchange error"},
+	{0x12, "protocol error"},
+	{0x13, "incorrect length"},
+	{0x14, "unsupported ACK"},
+	{0x15, "class of service not supported by entity at FFFFFE"},
+	{0x16, "login required"},
+	{0x17, "excessive sequences attempted"},
+	{0x18, "unable to establish exchange"},
+	{0x1a, "fabric path not available"},
+	{0x1b, "invalid VC_ID (class 4)"},
+	{0x1c, "invalid CS_CTL field"},
+	{0x1d, "insufficient resources for VC (class 4)"},
+	{0x1f, "invalid class of service"},
+	{0x20, "preemption request rejected"},
+	{0x21, "preemption not enabled"},
+	{0x22, "multicast error"},
+	{0x23, "multicast error terminate"},
+	{0x24, "process login required"},
+	{0xff, "vendor specific reject"},
+	{0, NULL},
+};
+
+/**
+ * zfcp_rc_description - return description for given reason code
+ * @code: reason code
+ * @rc_table: table of reason codes and descriptions, terminated by an
+ *	entry with code 0 and/or a NULL description
+ * Return: matching description string, or "unknown reason code"
+ */
+static inline const char *
+zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
+{
+	const char *descr = "unknown reason code";
+
+	do {
+		if (code == rc_table->code) {
+			descr = rc_table->description;
+			break;
+		}
+		rc_table++;
+	} while (rc_table->code && rc_table->description);
+
+	return descr;
+}
+
+/**
+ * zfcp_check_ct_response - evaluate reason code for CT_IU
+ * @rjt: response payload to an CT_IU request
+ * Return: 0 for accept CT_IU, 1 for reject CT_IU or invalid response code
+ */
+int
+zfcp_check_ct_response(struct ct_hdr *rjt)
+{
+	if (rjt->cmd_rsp_code == ZFCP_CT_ACCEPT)
+		return 0;
+
+	if (rjt->cmd_rsp_code != ZFCP_CT_REJECT) {
+		ZFCP_LOG_NORMAL("error: invalid Generic Service command/"
+				"response code (0x%04hx)\n",
+				rjt->cmd_rsp_code);
+		return 1;
+	}
+
+	ZFCP_LOG_INFO("Generic Service command rejected\n");
+	ZFCP_LOG_INFO("%s (0x%02x, 0x%02x, 0x%02x)\n",
+		      zfcp_rc_description(rjt->reason_code, zfcp_ct_rc),
+		      (u32) rjt->reason_code, (u32) rjt->reason_code_expl,
+		      (u32) rjt->vendor_unique);
+
+	return 1;
+}
+
+/**
+ * zfcp_print_els_rjt - print reject parameter and description for ELS reject
+ * @rjt_par: reject parameter acc. to FC-PH/FC-FS
+ * @rc_table: table of reason codes and descriptions
+ *
+ * Logs the four reject parameter bytes (action, reason code, reason
+ * explanation, vendor unique) together with the looked-up description.
+ */
+static inline void
+zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
+		   const struct zfcp_rc_entry *rc_table)
+{
+	ZFCP_LOG_INFO("%s (%02x %02x %02x %02x)\n",
+		      zfcp_rc_description(rjt_par->reason_code, rc_table),
+		      (u32) rjt_par->action, (u32) rjt_par->reason_code,
+		      (u32) rjt_par->reason_expl, (u32) rjt_par->vendor_unique);
+}
+
+/**
+ * zfcp_handle_els_rjt - evaluate status qualifier/reason code on ELS reject
+ * @sq: status qualifier word
+ * @rjt_par: reject parameter as described in FC-PH and FC-FS
+ * Return: -EREMOTEIO for LS_RJT, -EREMCHG for invalid D_ID, -EIO else
+ */
+int
+zfcp_handle_els_rjt(u32 sq, struct zfcp_ls_rjt_par *rjt_par)
+{
+	int ret = -EIO;
+
+	if (sq == FSF_IOSTAT_NPORT_RJT) {
+		ZFCP_LOG_INFO("ELS rejected (P_RJT)\n");
+		zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
+		/* invalid d_id */
+		if (rjt_par->reason_code == 0x01)
+			ret = -EREMCHG;
+	} else if (sq == FSF_IOSTAT_FABRIC_RJT) {
+		ZFCP_LOG_INFO("ELS rejected (F_RJT)\n");
+		zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
+		/* invalid d_id */
+		if (rjt_par->reason_code == 0x01)
+			ret = -EREMCHG;
+	} else if (sq == FSF_IOSTAT_LS_RJT) {
+		ZFCP_LOG_INFO("ELS rejected (LS_RJT)\n");
+		zfcp_print_els_rjt(rjt_par, zfcp_ls_rjt_rc);
+		ret = -EREMOTEIO;
+	} else
+		ZFCP_LOG_INFO("unexpected SQ: 0x%02x\n", sq);
+
+	return ret;
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
new file mode 100644
index 000000000000..0fc46381fc22
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -0,0 +1,312 @@
+/*
+ * linux/drivers/s390/scsi/zfcp_ccw.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * CCW driver related routines
+ *
+ * (C) Copyright IBM Corp. 2003, 2004
+ *
+ * Authors:
+ * Martin Peschke <mpeschke@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_CCW_C_REVISION "$Revision: 1.58 $"
+
+#include "zfcp_ext.h"
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
+
+static int zfcp_ccw_probe(struct ccw_device *);
+static void zfcp_ccw_remove(struct ccw_device *);
+static int zfcp_ccw_set_online(struct ccw_device *);
+static int zfcp_ccw_set_offline(struct ccw_device *);
+static int zfcp_ccw_notify(struct ccw_device *, int);
+static void zfcp_ccw_shutdown(struct device *);
+
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+ {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
+ ZFCP_CONTROL_UNIT_MODEL,
+ ZFCP_DEVICE_TYPE,
+ ZFCP_DEVICE_MODEL)},
+ {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
+ ZFCP_CONTROL_UNIT_MODEL,
+ ZFCP_DEVICE_TYPE,
+ ZFCP_DEVICE_MODEL_PRIV)},
+ {},
+};
+
+static struct ccw_driver zfcp_ccw_driver = {
+ .owner = THIS_MODULE,
+ .name = ZFCP_NAME,
+ .ids = zfcp_ccw_device_id,
+ .probe = zfcp_ccw_probe,
+ .remove = zfcp_ccw_remove,
+ .set_online = zfcp_ccw_set_online,
+ .set_offline = zfcp_ccw_set_offline,
+ .notify = zfcp_ccw_notify,
+ .driver = {
+ .shutdown = zfcp_ccw_shutdown,
+ },
+};
+
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+/**
+ * zfcp_ccw_probe - probe function of zfcp driver
+ * @ccw_device: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets up the initial
+ * data structures for each fcp adapter, which was detected by the system.
+ * Also the sysfs files for this adapter will be created by this function.
+ * In addition the nameserver port will be added to the ports of the adapter
+ * and its sysfs representation will be created too.
+ */
+static int
+zfcp_ccw_probe(struct ccw_device *ccw_device)
+{
+	struct zfcp_adapter *adapter;
+	int retval = 0;
+
+	/* config_sema serializes all adapter/port/unit list changes */
+	down(&zfcp_data.config_sema);
+	adapter = zfcp_adapter_enqueue(ccw_device);
+	if (!adapter)
+		retval = -EINVAL;	/* enqueue failed; nothing to undo */
+	else
+		ZFCP_LOG_DEBUG("Probed adapter %s\n",
+			       zfcp_get_busid_by_adapter(adapter));
+	up(&zfcp_data.config_sema);
+	return retval;
+}
+
+/**
+ * zfcp_ccw_remove - remove function of zfcp driver
+ * @ccw_device: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and removes an adapter
+ * from the system. Task of this function is to get rid of all units and
+ * ports that belong to this adapter. And in addition all resources of this
+ * adapter will be freed too.
+ */
+static void
+zfcp_ccw_remove(struct ccw_device *ccw_device)
+{
+	struct zfcp_adapter *adapter;
+	struct zfcp_port *port, *p;
+	struct zfcp_unit *unit, *u;
+
+	/* take the device offline before tearing down our data structures */
+	ccw_device_set_offline(ccw_device);
+	down(&zfcp_data.config_sema);
+	adapter = dev_get_drvdata(&ccw_device->dev);
+
+	ZFCP_LOG_DEBUG("Removing adapter %s\n",
+		       zfcp_get_busid_by_adapter(adapter));
+	/*
+	 * Phase 1 (under config_lock): move every unit and port onto the
+	 * respective *_remove_lh list and flag it REMOVE so that no new
+	 * references are taken.
+	 */
+	write_lock_irq(&zfcp_data.config_lock);
+	list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
+		list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
+			list_move(&unit->list, &port->unit_remove_lh);
+			atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
+					&unit->status);
+		}
+		list_move(&port->list, &adapter->port_remove_lh);
+		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
+	}
+	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
+	write_unlock_irq(&zfcp_data.config_lock);
+
+	/*
+	 * Phase 2 (lock dropped): dequeue units first, then their port —
+	 * dequeue routines may sleep, hence outside the spinlock.
+	 */
+	list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) {
+		list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) {
+			zfcp_unit_dequeue(unit);
+		}
+		zfcp_port_dequeue(port);
+	}
+	/* wait for outstanding references before freeing the adapter */
+	zfcp_adapter_wait(adapter);
+	zfcp_adapter_dequeue(adapter);
+
+	up(&zfcp_data.config_sema);
+}
+
+/**
+ * zfcp_ccw_set_online - set_online function of zfcp driver
+ * @ccw_device: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state online. Setting an fcp device online means that it will be
+ * registered with the SCSI stack, that the QDIO queues will be set up
+ * and that the adapter will be opened (asynchronously).
+ */
+static int
+zfcp_ccw_set_online(struct ccw_device *ccw_device)
+{
+	struct zfcp_adapter *adapter;
+	int retval;
+
+	down(&zfcp_data.config_sema);
+	adapter = dev_get_drvdata(&ccw_device->dev);
+
+	/* bring-up order: debug feature -> erp thread -> scsi host;
+	 * the goto labels below unwind in exactly the reverse order */
+	retval = zfcp_adapter_debug_register(adapter);
+	if (retval)
+		goto out;
+	retval = zfcp_erp_thread_setup(adapter);
+	if (retval) {
+		ZFCP_LOG_INFO("error: start of error recovery thread for "
+			      "adapter %s failed\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		goto out_erp_thread;
+	}
+
+	retval = zfcp_adapter_scsi_register(adapter);
+	if (retval)
+		goto out_scsi_register;
+	/* mark the adapter runnable and kick off an (asynchronous) reopen,
+	 * then wait until error recovery has settled */
+	zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
+				       ZFCP_SET);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_wait(adapter);
+	goto out;
+
+ out_scsi_register:
+	zfcp_erp_thread_kill(adapter);
+ out_erp_thread:
+	zfcp_adapter_debug_unregister(adapter);
+ out:
+	up(&zfcp_data.config_sema);
+	return retval;
+}
+
+/**
+ * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * @ccw_device: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline. Setting an fcp device offline means that it will be
+ * unregistered from the SCSI stack and that the adapter will be shut down
+ * asynchronously.
+ */
+static int
+zfcp_ccw_set_offline(struct ccw_device *ccw_device)
+{
+	struct zfcp_adapter *adapter;
+
+	down(&zfcp_data.config_sema);
+	adapter = dev_get_drvdata(&ccw_device->dev);
+	/* shut down and wait for erp to finish, then undo the set_online
+	 * steps in reverse order (scsi host, erp thread, debug feature) */
+	zfcp_erp_adapter_shutdown(adapter, 0);
+	zfcp_erp_wait(adapter);
+	zfcp_adapter_scsi_unregister(adapter);
+	zfcp_erp_thread_kill(adapter);
+	zfcp_adapter_debug_unregister(adapter);
+	up(&zfcp_data.config_sema);
+	return 0;	/* offlining is unconditionally reported as success */
+}
+
+/**
+ * zfcp_ccw_notify
+ * @ccw_device: pointer to belonging ccw device
+ * @event: indicates if adapter was detached or attached
+ *
+ * This function gets called by the common i/o layer if an adapter has gone
+ * or reappeared.
+ */
+static int
+zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
+{
+	struct zfcp_adapter *adapter;
+
+	down(&zfcp_data.config_sema);
+	adapter = dev_get_drvdata(&ccw_device->dev);
+	switch (event) {
+	case CIO_GONE:
+		/* device vanished: shut the adapter down */
+		ZFCP_LOG_NORMAL("adapter %s: device gone\n",
+				zfcp_get_busid_by_adapter(adapter));
+		debug_text_event(adapter->erp_dbf,1,"dev_gone");
+		zfcp_erp_adapter_shutdown(adapter, 0);
+		break;
+	case CIO_NO_PATH:
+		/* all channel paths lost: treated like a gone device */
+		ZFCP_LOG_NORMAL("adapter %s: no path\n",
+				zfcp_get_busid_by_adapter(adapter));
+		debug_text_event(adapter->erp_dbf,1,"no_path");
+		zfcp_erp_adapter_shutdown(adapter, 0);
+		break;
+	case CIO_OPER:
+		/* device is back: clear a possible ERP_FAILED state and
+		 * trigger an asynchronous reopen */
+		ZFCP_LOG_NORMAL("adapter %s: operational again\n",
+				zfcp_get_busid_by_adapter(adapter));
+		debug_text_event(adapter->erp_dbf,1,"dev_oper");
+		zfcp_erp_modify_adapter_status(adapter,
+					       ZFCP_STATUS_COMMON_RUNNING,
+					       ZFCP_SET);
+		zfcp_erp_adapter_reopen(adapter,
+					ZFCP_STATUS_COMMON_ERP_FAILED);
+		break;
+	}
+	/* block until triggered error recovery has completed */
+	zfcp_erp_wait(adapter);
+	up(&zfcp_data.config_sema);
+	return 1;	/* tell the common i/o layer the event was handled */
+}
+
+/**
+ * zfcp_ccw_register - ccw register function
+ *
+ * Registers the driver at the common i/o layer. This function will be called
+ * at module load time/system start.
+ */
+int __init
+zfcp_ccw_register(void)
+{
+	int retval;
+
+	retval = ccw_driver_register(&zfcp_ccw_driver);
+	if (retval)
+		goto out;
+	retval = zfcp_sysfs_driver_create_files(&zfcp_ccw_driver.driver);
+	if (retval)
+		/* undo driver registration if sysfs setup failed */
+		ccw_driver_unregister(&zfcp_ccw_driver);
+ out:
+	return retval;
+}
+
+/**
+ * zfcp_ccw_unregister - ccw unregister function
+ *
+ * Unregisters the driver from common i/o layer. Function will be called at
+ * module unload/system shutdown.
+ */
+void __exit
+zfcp_ccw_unregister(void)
+{
+	/* reverse order of zfcp_ccw_register(): sysfs files first */
+	zfcp_sysfs_driver_remove_files(&zfcp_ccw_driver.driver);
+	ccw_driver_unregister(&zfcp_ccw_driver);
+}
+
+/**
+ * zfcp_ccw_shutdown - gets called on reboot/shutdown
+ *
+ * Makes sure that QDIO queues are down when the system gets stopped.
+ */
+static void
+zfcp_ccw_shutdown(struct device *dev)
+{
+	struct zfcp_adapter *adapter;
+
+	down(&zfcp_data.config_sema);
+	adapter = dev_get_drvdata(dev);
+	/* synchronous shutdown so QDIO queues are down before reboot */
+	zfcp_erp_adapter_shutdown(adapter, 0);
+	zfcp_erp_wait(adapter);
+	up(&zfcp_data.config_sema);
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
new file mode 100644
index 000000000000..53fcccbb424c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -0,0 +1,1121 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_def.h
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * (C) Copyright IBM Corp. 2002, 2004
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Raimund Schroeder <raimund.schroeder@de.ibm.com>
+ * Aron Zeh
+ * Wolfgang Taphorn
+ * Stefan Bader <stefan.bader@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ * Volker Sameske <sameske@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#ifndef ZFCP_DEF_H
+#define ZFCP_DEF_H
+
+#define ZFCP_DEF_REVISION "$Revision: 1.111 $"
+
+/*************************** INCLUDES *****************************************/
+
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include "../../fc4/fc.h"
+#include "zfcp_fsf.h"
+#include <asm/ccwdev.h>
+#include <asm/qdio.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <linux/mempool.h>
+#include <linux/syscalls.h>
+#include <linux/ioctl.h>
+#include <linux/ioctl32.h>
+
+/************************ DEBUG FLAGS *****************************************/
+
+#define ZFCP_PRINT_FLAGS
+
+/********************* GENERAL DEFINES *********************************/
+
+/* zfcp version number, it consists of major, minor, and patch-level number */
+#define ZFCP_VERSION "4.2.0"
+
+/**
+ * zfcp_sg_to_address - determine kernel address from struct scatterlist
+ * @list: struct scatterlist
+ * Return: kernel address
+ */
+static inline void *
+zfcp_sg_to_address(struct scatterlist *list)
+{
+	/* assumes list->page is a mapped (lowmem) page, since page_address()
+	 * is used without kmap — TODO confirm for all zfcp buffers */
+	return (void *) (page_address(list->page) + list->offset);
+}
+
+/**
+ * zfcp_address_to_sg - set up struct scatterlist from kernel address
+ * @address: kernel address
+ * @list: struct scatterlist
+ */
+static inline void
+zfcp_address_to_sg(void *address, struct scatterlist *list)
+{
+	/* page + offset-within-page; note: list->length is NOT set here,
+	 * the caller is responsible for it */
+	list->page = virt_to_page(address);
+	list->offset = ((unsigned long) address) & (PAGE_SIZE - 1);
+}
+
+/********************* SCSI SPECIFIC DEFINES *********************************/
+
+/* 32 bit for SCSI ID and LUN as long as the SCSI stack uses this type */
+typedef u32 scsi_id_t;
+typedef u32 scsi_lun_t;
+
+#define ZFCP_ERP_SCSI_LOW_MEM_TIMEOUT (100*HZ)
+#define ZFCP_SCSI_ER_TIMEOUT (100*HZ)
+
+/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
+
+/* Adapter Identification Parameters */
+#define ZFCP_CONTROL_UNIT_TYPE 0x1731
+#define ZFCP_CONTROL_UNIT_MODEL 0x03
+#define ZFCP_DEVICE_TYPE 0x1732
+#define ZFCP_DEVICE_MODEL 0x03
+#define ZFCP_DEVICE_MODEL_PRIV 0x04
+
+/* allow as many chained SBALs as are supported by hardware */
+#define ZFCP_MAX_SBALS_PER_REQ FSF_MAX_SBALS_PER_REQ
+#define ZFCP_MAX_SBALS_PER_CT_REQ FSF_MAX_SBALS_PER_REQ
+#define ZFCP_MAX_SBALS_PER_ELS_REQ FSF_MAX_SBALS_PER_ELS_REQ
+
+/* DMQ bug workaround: don't use last SBALE */
+#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+
+/* index of last SBALE (with respect to DMQ bug workaround) */
+#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1)
+
+/* max. number of (data buffer) SBALEs in largest SBAL chain */
+#define ZFCP_MAX_SBALES_PER_REQ \
+ (ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
+ /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
+
+/* FIXME(tune): free space should be one max. SBAL chain plus what? */
+#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
+ - (ZFCP_MAX_SBALS_PER_REQ + 4))
+
+#define ZFCP_SBAL_TIMEOUT (5*HZ)
+
+#define ZFCP_TYPE2_RECOVERY_TIME (8*HZ)
+
+/* queue polling (values in microseconds) */
+#define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */
+#define ZFCP_MAX_OUTPUT_THRESHOLD 1000 /* FIXME: tune */
+#define ZFCP_MIN_INPUT_THRESHOLD 1 /* ignored by QDIO layer */
+#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */
+
+#define QDIO_SCSI_QFMT 1 /* 1 for FSF */
+
+/********************* FSF SPECIFIC DEFINES *********************************/
+
+#define ZFCP_ULP_INFO_VERSION 26
+#define ZFCP_QTCB_VERSION FSF_QTCB_CURRENT_VERSION
+/* ATTENTION: value must not be used by hardware */
+#define FSF_QTCB_UNSOLICITED_STATUS 0x6305
+#define ZFCP_STATUS_READ_FAILED_THRESHOLD 3
+#define ZFCP_STATUS_READS_RECOM FSF_STATUS_READS_RECOM
+#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 6
+#define ZFCP_EXCHANGE_CONFIG_DATA_SLEEP 50
+
+/* timeout value for "default timer" for fsf requests */
+#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
+
+/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
+
+typedef unsigned long long wwn_t;
+typedef unsigned int fc_id_t;
+typedef unsigned long long fcp_lun_t;
+/* data length field may be at variable position in FCP-2 FCP_CMND IU */
+typedef unsigned int fcp_dl_t;
+
+#define ZFCP_FC_SERVICE_CLASS_DEFAULT FSF_CLASS_3
+
+/* timeout for name-server lookup (in seconds) */
+#define ZFCP_NS_GID_PN_TIMEOUT 10
+
+/* largest SCSI command we can process */
+/* FCP-2 (FCP_CMND IU) allows up to (255-3+16) */
+#define ZFCP_MAX_SCSI_CMND_LENGTH 255
+/* maximum number of commands in LUN queue (tagged queueing) */
+#define ZFCP_CMND_PER_LUN 32
+
+/* task attribute values in FCP-2 FCP_CMND IU */
+#define SIMPLE_Q 0
+#define HEAD_OF_Q 1
+#define ORDERED_Q 2
+#define ACA_Q 4
+#define UNTAGGED 5
+
+/* task management flags in FCP-2 FCP_CMND IU */
+#define FCP_CLEAR_ACA 0x40
+#define FCP_TARGET_RESET 0x20
+#define FCP_LOGICAL_UNIT_RESET 0x10
+#define FCP_CLEAR_TASK_SET 0x04
+#define FCP_ABORT_TASK_SET 0x02
+
+#define FCP_CDB_LENGTH 16
+
+#define ZFCP_DID_MASK 0x00FFFFFF
+
+/* FCP(-2) FCP_CMND IU */
+struct fcp_cmnd_iu {
+	/* NOTE(review): bitfield layout is compiler/ABI dependent; this
+	 * mapping presumably matches s390 (big-endian) wire order — confirm
+	 * before reusing on another architecture */
+	fcp_lun_t fcp_lun;	   /* FCP logical unit number */
+	u8 crn;	           /* command reference number */
+	u8 reserved0:5;	   /* reserved */
+	u8 task_attribute:3;	   /* task attribute */
+	u8 task_management_flags; /* task management flags */
+	u8 add_fcp_cdb_length:6;  /* additional FCP_CDB length */
+	u8 rddata:1;              /* read data */
+	u8 wddata:1;              /* write data */
+	u8 fcp_cdb[FCP_CDB_LENGTH];	/* fixed 16-byte SCSI CDB part */
+} __attribute__((packed));
+
+/* FCP(-2) FCP_RSP IU */
+struct fcp_rsp_iu {
+	u8  reserved0[10];
+	union {
+		struct {
+			u8 reserved1:3;
+			u8 fcp_conf_req:1;
+			u8 fcp_resid_under:1;
+			u8 fcp_resid_over:1;
+			u8 fcp_sns_len_valid:1;
+			u8 fcp_rsp_len_valid:1;
+		} bits;			/* individual validity flags */
+		u8 value;		/* same byte accessed as a whole */
+	} validity;
+	u8  scsi_status;
+	/* the *_valid/over/under bits above presumably gate which of the
+	 * following counts are meaningful — see FCP-2 FCP_RSP IU */
+	u32 fcp_resid;
+	u32 fcp_sns_len;
+	u32 fcp_rsp_len;
+} __attribute__((packed));
+
+
+#define RSP_CODE_GOOD 0
+#define RSP_CODE_LENGTH_MISMATCH 1
+#define RSP_CODE_FIELD_INVALID 2
+#define RSP_CODE_RO_MISMATCH 3
+#define RSP_CODE_TASKMAN_UNSUPP 4
+#define RSP_CODE_TASKMAN_FAILED 5
+
+/* see fc-fs */
+#define LS_FAN 0x60000000
+#define LS_RSCN 0x61040000
+
+struct fcp_rscn_head {
+ u8 command;
+ u8 page_length; /* always 0x04 */
+ u16 payload_len;
+} __attribute__((packed));
+
+struct fcp_rscn_element {
+ u8 reserved:2;
+ u8 event_qual:4;
+ u8 addr_format:2;
+ u32 nport_did:24;
+} __attribute__((packed));
+
+#define ZFCP_PORT_ADDRESS 0x0
+#define ZFCP_AREA_ADDRESS 0x1
+#define ZFCP_DOMAIN_ADDRESS 0x2
+#define ZFCP_FABRIC_ADDRESS 0x3
+
+#define ZFCP_PORTS_RANGE_PORT 0xFFFFFF
+#define ZFCP_PORTS_RANGE_AREA 0xFFFF00
+#define ZFCP_PORTS_RANGE_DOMAIN 0xFF0000
+#define ZFCP_PORTS_RANGE_FABRIC 0x000000
+
+#define ZFCP_NO_PORTS_PER_AREA 0x100
+#define ZFCP_NO_PORTS_PER_DOMAIN 0x10000
+#define ZFCP_NO_PORTS_PER_FABRIC 0x1000000
+
+struct fcp_fan {
+ u32 command;
+ u32 fport_did;
+ wwn_t fport_wwpn;
+ wwn_t fport_wwname;
+} __attribute__((packed));
+
+/* see fc-ph */
+struct fcp_logo {
+ u32 command;
+ u32 nport_did;
+ wwn_t nport_wwpn;
+} __attribute__((packed));
+
+/*
+ * FC-FS stuff
+ */
+#define R_A_TOV 10 /* seconds */
+#define ZFCP_ELS_TIMEOUT (2 * R_A_TOV)
+
+#define ZFCP_LS_RLS 0x0f
+#define ZFCP_LS_ADISC 0x52
+#define ZFCP_LS_RPS 0x56
+#define ZFCP_LS_RSCN 0x61
+#define ZFCP_LS_RNID 0x78
+
+struct zfcp_ls_rjt_par {
+ u8 action;
+ u8 reason_code;
+ u8 reason_expl;
+ u8 vendor_unique;
+} __attribute__ ((packed));
+
+struct zfcp_ls_adisc {
+ u8 code;
+ u8 field[3];
+ u32 hard_nport_id;
+ u64 wwpn;
+ u64 wwnn;
+ u32 nport_id;
+} __attribute__ ((packed));
+
+struct zfcp_ls_adisc_acc {
+ u8 code;
+ u8 field[3];
+ u32 hard_nport_id;
+ u64 wwpn;
+ u64 wwnn;
+ u32 nport_id;
+} __attribute__ ((packed));
+
+struct zfcp_rc_entry {
+ u8 code;
+ const char *description;
+};
+
+/*
+ * FC-GS-2 stuff
+ */
+#define ZFCP_CT_REVISION 0x01
+#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
+#define ZFCP_CT_NAME_SERVER 0x02
+#define ZFCP_CT_SYNCHRONOUS 0x00
+#define ZFCP_CT_GID_PN 0x0121
+#define ZFCP_CT_MAX_SIZE 0x1020
+#define ZFCP_CT_ACCEPT 0x8002
+#define ZFCP_CT_REJECT 0x8001
+
+/*
+ * FC-GS-4 stuff
+ */
+#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
+
+
+/***************** S390 DEBUG FEATURE SPECIFIC DEFINES ***********************/
+
+/* debug feature entries per adapter */
+#define ZFCP_ERP_DBF_INDEX 1
+#define ZFCP_ERP_DBF_AREAS 2
+#define ZFCP_ERP_DBF_LENGTH 16
+#define ZFCP_ERP_DBF_LEVEL 3
+#define ZFCP_ERP_DBF_NAME "zfcperp"
+
+#define ZFCP_CMD_DBF_INDEX 2
+#define ZFCP_CMD_DBF_AREAS 1
+#define ZFCP_CMD_DBF_LENGTH 8
+#define ZFCP_CMD_DBF_LEVEL 3
+#define ZFCP_CMD_DBF_NAME "zfcpcmd"
+
+#define ZFCP_ABORT_DBF_INDEX 2
+#define ZFCP_ABORT_DBF_AREAS 1
+#define ZFCP_ABORT_DBF_LENGTH 8
+#define ZFCP_ABORT_DBF_LEVEL 6
+#define ZFCP_ABORT_DBF_NAME "zfcpabt"
+
+#define ZFCP_IN_ELS_DBF_INDEX 2
+#define ZFCP_IN_ELS_DBF_AREAS 1
+#define ZFCP_IN_ELS_DBF_LENGTH 8
+#define ZFCP_IN_ELS_DBF_LEVEL 6
+#define ZFCP_IN_ELS_DBF_NAME "zfcpels"
+
+/******************** LOGGING MACROS AND DEFINES *****************************/
+
+/*
+ * Logging may be applied on certain kinds of driver operations
+ * independently. Additionally, different log-levels are supported for
+ * each of these areas.
+ */
+
+#define ZFCP_NAME "zfcp"
+
+/* read-only LUN sharing switch initial value */
+#define ZFCP_RO_LUN_SHARING_DEFAULTS 0
+
+/* independent log areas */
+#define ZFCP_LOG_AREA_OTHER 0
+#define ZFCP_LOG_AREA_SCSI 1
+#define ZFCP_LOG_AREA_FSF 2
+#define ZFCP_LOG_AREA_CONFIG 3
+#define ZFCP_LOG_AREA_CIO 4
+#define ZFCP_LOG_AREA_QDIO 5
+#define ZFCP_LOG_AREA_ERP 6
+#define ZFCP_LOG_AREA_FC 7
+
+/* log level values*/
+#define ZFCP_LOG_LEVEL_NORMAL 0
+#define ZFCP_LOG_LEVEL_INFO 1
+#define ZFCP_LOG_LEVEL_DEBUG 2
+#define ZFCP_LOG_LEVEL_TRACE 3
+
+/*
+ * this allows removal of logging code by the preprocessor
+ * (the most detailed log level still to be compiled in is specified,
+ * higher log levels are removed)
+ */
+#define ZFCP_LOG_LEVEL_LIMIT ZFCP_LOG_LEVEL_TRACE
+
+/* get "loglevel" nibble assignment */
+#define ZFCP_GET_LOG_VALUE(zfcp_lognibble) \
+ ((atomic_read(&zfcp_data.loglevel) >> (zfcp_lognibble<<2)) & 0xF)
+
+/* set "loglevel" nibble */
+#define ZFCP_SET_LOG_NIBBLE(value, zfcp_lognibble) \
+ (value << (zfcp_lognibble << 2))
+
+/* all log-level defaults are combined to generate initial log-level */
+#define ZFCP_LOG_LEVEL_DEFAULTS \
+ (ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_OTHER) | \
+ ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_SCSI) | \
+ ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FSF) | \
+ ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CONFIG) | \
+ ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CIO) | \
+ ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_QDIO) | \
+ ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_ERP) | \
+ ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FC))
+
+/* check whether we have the right level for logging */
+#define ZFCP_LOG_CHECK(level) \
+ ((ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA)) >= level)
+
+/* logging routine for zfcp */
+#define _ZFCP_LOG(fmt, args...) \
+ printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __FUNCTION__, \
+ __LINE__ , ##args)
+
+#define ZFCP_LOG(level, fmt, args...) \
+do { \
+ if (ZFCP_LOG_CHECK(level)) \
+ _ZFCP_LOG(fmt, ##args); \
+} while (0)
+
+#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
+# define ZFCP_LOG_NORMAL(fmt, args...)
+#else
+# define ZFCP_LOG_NORMAL(fmt, args...) \
+do { \
+ if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_NORMAL)) \
+ printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
+} while (0)
+#endif
+
+#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
+# define ZFCP_LOG_INFO(fmt, args...)
+#else
+# define ZFCP_LOG_INFO(fmt, args...) \
+do { \
+ if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_INFO)) \
+ printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
+} while (0)
+#endif
+
+#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
+# define ZFCP_LOG_DEBUG(fmt, args...)
+#else
+# define ZFCP_LOG_DEBUG(fmt, args...) \
+ ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
+#endif
+
+#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
+# define ZFCP_LOG_TRACE(fmt, args...)
+#else
+# define ZFCP_LOG_TRACE(fmt, args...) \
+ ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
+#endif
+
+#ifndef ZFCP_PRINT_FLAGS
+# define ZFCP_LOG_FLAGS(level, fmt, args...)
+#else
+extern u32 flags_dump;
+# define ZFCP_LOG_FLAGS(level, fmt, args...) \
+do { \
+ if (level <= flags_dump) \
+ _ZFCP_LOG(fmt, ##args); \
+} while (0)
+#endif
+
+/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
+
+/*
+ * Note, the leftmost status byte is common among adapter, port
+ * and unit
+ */
+#define ZFCP_COMMON_FLAGS 0xfff00000
+#define ZFCP_SPECIFIC_FLAGS 0x000fffff
+
+/* common status bits */
+#define ZFCP_STATUS_COMMON_REMOVE 0x80000000
+#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
+#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
+#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
+#define ZFCP_STATUS_COMMON_OPENING 0x08000000
+#define ZFCP_STATUS_COMMON_OPEN 0x04000000
+#define ZFCP_STATUS_COMMON_CLOSING 0x02000000
+#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
+#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
+
+/* adapter status */
+#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
+#define ZFCP_STATUS_ADAPTER_REGISTERED 0x00000004
+#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
+#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
+#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
+#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
+#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
+#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
+
+#define ZFCP_STATUS_ADAPTER_SCSI_UP \
+ (ZFCP_STATUS_COMMON_UNBLOCKED | \
+ ZFCP_STATUS_ADAPTER_REGISTERED)
+
+
+/* FC-PH/FC-GS well-known address identifiers for generic services */
+#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
+#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
+#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
+#define ZFCP_DID_ALIAS_SERVICE 0xFFFFF8
+#define ZFCP_DID_KEY_DISTRIBUTION_SERVICE 0xFFFFF7
+
+/* remote port status */
+#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
+#define ZFCP_STATUS_PORT_DID_DID 0x00000002
+#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
+#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
+#define ZFCP_STATUS_PORT_NO_SCSI_ID 0x00000010
+#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
+#define ZFCP_STATUS_PORT_ACCESS_DENIED 0x00000040
+
+/* for ports with well known addresses */
+#define ZFCP_STATUS_PORT_WKA \
+ (ZFCP_STATUS_PORT_NO_WWPN | \
+ ZFCP_STATUS_PORT_NO_SCSI_ID)
+
+/* logical unit status */
+#define ZFCP_STATUS_UNIT_NOTSUPPUNITRESET 0x00000001
+#define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002
+#define ZFCP_STATUS_UNIT_SHARED 0x00000004
+#define ZFCP_STATUS_UNIT_READONLY 0x00000008
+
+/* FSF request status (this does not have a common part) */
+#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
+#define ZFCP_STATUS_FSFREQ_POOL 0x00000001
+#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
+#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
+#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
+#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
+#define ZFCP_STATUS_FSFREQ_ABORTING 0x00000020
+#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
+#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
+#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
+#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
+#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400
+#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
+#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
+
+/*********************** ERROR RECOVERY PROCEDURE DEFINES ********************/
+
+#define ZFCP_MAX_ERPS 3
+
+#define ZFCP_ERP_FSFREQ_TIMEOUT (30 * HZ)
+#define ZFCP_ERP_MEMWAIT_TIMEOUT HZ
+
+#define ZFCP_STATUS_ERP_TIMEDOUT 0x10000000
+#define ZFCP_STATUS_ERP_CLOSE_ONLY 0x01000000
+#define ZFCP_STATUS_ERP_DISMISSING 0x00100000
+#define ZFCP_STATUS_ERP_DISMISSED 0x00200000
+#define ZFCP_STATUS_ERP_LOWMEM 0x00400000
+
+#define ZFCP_ERP_STEP_UNINITIALIZED 0x00000000
+#define ZFCP_ERP_STEP_FSF_XCONFIG 0x00000001
+#define ZFCP_ERP_STEP_PHYS_PORT_CLOSING 0x00000010
+#define ZFCP_ERP_STEP_PORT_CLOSING 0x00000100
+#define ZFCP_ERP_STEP_NAMESERVER_OPEN 0x00000200
+#define ZFCP_ERP_STEP_NAMESERVER_LOOKUP 0x00000400
+#define ZFCP_ERP_STEP_PORT_OPENING 0x00000800
+#define ZFCP_ERP_STEP_UNIT_CLOSING 0x00001000
+#define ZFCP_ERP_STEP_UNIT_OPENING 0x00002000
+
+/* Ordered by escalation level (necessary for proper erp-code operation) */
+#define ZFCP_ERP_ACTION_REOPEN_ADAPTER 0x4
+#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED 0x3
+#define ZFCP_ERP_ACTION_REOPEN_PORT 0x2
+#define ZFCP_ERP_ACTION_REOPEN_UNIT 0x1
+
+#define ZFCP_ERP_ACTION_RUNNING 0x1
+#define ZFCP_ERP_ACTION_READY 0x2
+
+#define ZFCP_ERP_SUCCEEDED 0x0
+#define ZFCP_ERP_FAILED 0x1
+#define ZFCP_ERP_CONTINUES 0x2
+#define ZFCP_ERP_EXIT 0x3
+#define ZFCP_ERP_DISMISSED 0x4
+#define ZFCP_ERP_NOMEM 0x5
+
+
+/******************** CFDC SPECIFIC STUFF *****************************/
+
+/* Firewall data channel sense data record */
+struct zfcp_cfdc_sense_data {
+ u32 signature; /* Request signature */
+ u32 devno; /* FCP adapter device number */
+ u32 command; /* Command code */
+ u32 fsf_status; /* FSF request status and status qualifier */
+ u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+ u8 payloads[256]; /* Access conflicts list */
+ u8 control_file[0]; /* Access control table */
+};
+
+#define ZFCP_CFDC_SIGNATURE 0xCFDCACDF
+
+#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
+#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
+#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
+#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
+#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
+
+#define ZFCP_CFDC_DOWNLOAD 0x00000001
+#define ZFCP_CFDC_UPLOAD 0x00000002
+#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
+
+#define ZFCP_CFDC_DEV_NAME "zfcp_cfdc"
+#define ZFCP_CFDC_DEV_MAJOR MISC_MAJOR
+#define ZFCP_CFDC_DEV_MINOR MISC_DYNAMIC_MINOR
+
+#define ZFCP_CFDC_MAX_CONTROL_FILE_SIZE 127 * 1024
+
+/************************* STRUCTURE DEFINITIONS *****************************/
+
+struct zfcp_fsf_req;
+
+/* holds various memory pools of an adapter */
+struct zfcp_adapter_mempool {
+ mempool_t *fsf_req_erp;
+ mempool_t *fsf_req_scsi;
+ mempool_t *fsf_req_abort;
+ mempool_t *fsf_req_status_read;
+ mempool_t *data_status_read;
+ mempool_t *data_gid_pn;
+};
+
+struct zfcp_exchange_config_data{
+};
+
+struct zfcp_open_port {
+ struct zfcp_port *port;
+};
+
+struct zfcp_close_port {
+ struct zfcp_port *port;
+};
+
+struct zfcp_open_unit {
+ struct zfcp_unit *unit;
+};
+
+struct zfcp_close_unit {
+ struct zfcp_unit *unit;
+};
+
+struct zfcp_close_physical_port {
+ struct zfcp_port *port;
+};
+
+struct zfcp_send_fcp_command_task {
+ struct zfcp_fsf_req *fsf_req;
+ struct zfcp_unit *unit;
+ struct scsi_cmnd *scsi_cmnd;
+ unsigned long start_jiffies;
+};
+
+struct zfcp_send_fcp_command_task_management {
+ struct zfcp_unit *unit;
+};
+
+struct zfcp_abort_fcp_command {
+ struct zfcp_fsf_req *fsf_req;
+ struct zfcp_unit *unit;
+};
+
+/*
+ * header for CT_IU
+ */
+struct ct_hdr {
+ u8 revision; // 0x01
+ u8 in_id[3]; // 0x00
+ u8 gs_type; // 0xFC Directory Service
+ u8 gs_subtype; // 0x02 Name Server
+ u8 options; // 0x00 single bidirectional exchange
+ u8 reserved0;
+ u16 cmd_rsp_code; // 0x0121 GID_PN, or 0x0100 GA_NXT
+ u16 max_res_size; // <= (4096 - 16) / 4
+ u8 reserved1;
+ u8 reason_code;
+ u8 reason_code_expl;
+ u8 vendor_unique;
+} __attribute__ ((packed));
+
+/* nameserver request CT_IU -- for requests where
+ * a port name is required */
+struct ct_iu_gid_pn_req {
+ struct ct_hdr header;
+ wwn_t wwpn;
+} __attribute__ ((packed));
+
+/* FS_ACC IU and data unit for GID_PN nameserver request */
+struct ct_iu_gid_pn_resp {
+ struct ct_hdr header;
+ fc_id_t d_id;
+} __attribute__ ((packed));
+
+typedef void (*zfcp_send_ct_handler_t)(unsigned long);
+
+/**
+ * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
+ * @port: port where the request is sent to
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+ * @req_count: number of elements in request scatter-gather list
+ * @resp_count: number of elements in response scatter-gather list
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @pool: pointer to memory pool for ct request structure
+ * @timeout: FSF timeout for this request
+ * @timer: timer (e.g. for request initiated by erp)
+ * @completion: completion for synchronization purposes
+ * @status: used to pass error status to calling function
+ */
+struct zfcp_send_ct {
+ struct zfcp_port *port;
+ struct scatterlist *req;
+ struct scatterlist *resp;
+ unsigned int req_count;
+ unsigned int resp_count;
+ zfcp_send_ct_handler_t handler;
+ unsigned long handler_data;
+ mempool_t *pool;
+ int timeout;
+ struct timer_list *timer;
+ struct completion *completion;
+ int status;
+};
+
+/* used for name server requests in error recovery */
+struct zfcp_gid_pn_data {
+ struct zfcp_send_ct ct;
+ struct scatterlist req;
+ struct scatterlist resp;
+ struct ct_iu_gid_pn_req ct_iu_req;
+ struct ct_iu_gid_pn_resp ct_iu_resp;
+ struct zfcp_port *port;
+};
+
+/* callback invoked when the response to an ELS request arrives; the
+ * argument is the handler_data cookie from struct zfcp_send_els */
+typedef void (*zfcp_send_els_handler_t)(unsigned long);
+
+/**
+ * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
+ * @adapter: adapter where request is sent from
+ * @d_id: destination id of port where request is sent to
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+ * @req_count: number of elements in request scatter-gather list
+ * @resp_count: number of elements in response scatter-gather list
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @timer: timer (e.g. for request initiated by erp)
+ * @completion: completion for synchronization purposes
+ * @ls_code: hex code of ELS command
+ * @status: used to pass error status to calling function
+ */
+struct zfcp_send_els {
+	struct zfcp_adapter *adapter;
+	fc_id_t d_id;
+	struct scatterlist *req;
+	struct scatterlist *resp;
+	unsigned int req_count;
+	unsigned int resp_count;
+	zfcp_send_els_handler_t handler;
+	unsigned long handler_data;
+	struct timer_list *timer;
+	struct completion *completion;
+	int ls_code;
+	int status;
+};
+
+struct zfcp_status_read {
+ struct fsf_status_read_buffer *buffer;
+};
+
+struct zfcp_fsf_done {
+ struct completion *complete;
+ int status;
+};
+
+/* request specific data */
+union zfcp_req_data {
+ struct zfcp_exchange_config_data exchange_config_data;
+ struct zfcp_open_port open_port;
+ struct zfcp_close_port close_port;
+ struct zfcp_open_unit open_unit;
+ struct zfcp_close_unit close_unit;
+ struct zfcp_close_physical_port close_physical_port;
+ struct zfcp_send_fcp_command_task send_fcp_command_task;
+ struct zfcp_send_fcp_command_task_management
+ send_fcp_command_task_management;
+ struct zfcp_abort_fcp_command abort_fcp_command;
+ struct zfcp_send_ct *send_ct;
+ struct zfcp_send_els *send_els;
+ struct zfcp_status_read status_read;
+ struct fsf_qtcb_bottom_port *port_data;
+};
+
+/* one QDIO queue (request or response direction) of an adapter */
+struct zfcp_qdio_queue {
+	struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
+	u8		   free_index;	      /* index of next free bfr
+						 in queue (free_count>0) */
+	atomic_t           free_count;	      /* number of free buffers
+						 in queue */
+	rwlock_t	   queue_lock;	      /* lock for operations on queue */
+        int                distance_from_int; /* SBALs used since PCI indication
+                                                 was last set */
+};
+
+/* one pending error recovery action for an adapter, port or unit;
+ * queued on the adapter's erp_ready_head/erp_running_head lists */
+struct zfcp_erp_action {
+	struct list_head list;
+	int action;	              /* requested action code */
+	struct zfcp_adapter *adapter; /* device which should be recovered */
+	struct zfcp_port *port;
+	struct zfcp_unit *unit;
+	volatile u32 status;	      /* recovery status */
+	u32 step;	              /* active step of this erp action */
+	struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
+					 for this action */
+	struct timer_list timer;
+};
+
+
+/* per-FCP-subchannel adapter instance; anchors ports, queues, erp state */
+struct zfcp_adapter {
+	struct list_head	list;              /* list of adapters */
+	atomic_t                refcount;          /* reference count */
+	wait_queue_head_t	remove_wq;         /* can be used to wait for
+						      refcount drop to zero */
+	wwn_t			wwnn;	           /* WWNN */
+	wwn_t			wwpn;	           /* WWPN */
+	fc_id_t			s_id;	           /* N_Port ID */
+	struct ccw_device       *ccw_device;	   /* S/390 ccw device */
+	u8			fc_service_class;
+	u32			fc_topology;	   /* FC topology */
+	u32			fc_link_speed;	   /* FC interface speed */
+	u32			hydra_version;	   /* Hydra version */
+	u32			fsf_lic_version;
+	u32			supported_features;/* of FCP channel */
+	u32			hardware_version;  /* of FCP channel */
+	u8			serial_number[32]; /* of hardware */
+	struct Scsi_Host	*scsi_host;	   /* Pointer to mid-layer */
+	unsigned short          scsi_host_no;      /* Assigned host number */
+	unsigned char		name[9];
+	struct list_head	port_list_head;    /* remote port list */
+	struct list_head        port_remove_lh;    /* head of ports to be
+						      removed */
+	u32			ports;	           /* number of remote ports */
+	struct timer_list       scsi_er_timer;     /* SCSI err recovery watch */
+	struct list_head	fsf_req_list_head; /* head of FSF req list */
+	rwlock_t		fsf_req_list_lock; /* lock for ops on list of
+						      FSF requests */
+	atomic_t       		fsf_reqs_active;   /* # active FSF reqs */
+	struct zfcp_qdio_queue	request_queue;	   /* request queue */
+	u32			fsf_req_seq_no;	   /* FSF cmnd seq number */
+	wait_queue_head_t	request_wq;	   /* can be used to wait for
+						      more available SBALs */
+	struct zfcp_qdio_queue	response_queue;	   /* response queue */
+	rwlock_t		abort_lock;        /* Protects against SCSI
+						      stack abort/command
+						      completion races */
+	u16			status_read_failed; /* # failed status reads */
+	atomic_t		status;	           /* status of this adapter */
+	struct list_head	erp_ready_head;	   /* error recovery for this
+						      adapter/devices */
+	struct list_head	erp_running_head;
+	rwlock_t		erp_lock;
+	struct semaphore	erp_ready_sem;
+	wait_queue_head_t	erp_thread_wqh;
+	wait_queue_head_t	erp_done_wqh;
+	struct zfcp_erp_action	erp_action;	   /* pending error recovery */
+        atomic_t                erp_counter;
+	u32			erp_total_count;   /* total nr of enqueued erp
+						      actions */
+	u32			erp_low_mem_count; /* nr of erp actions waiting
+						      for memory */
+	struct zfcp_port	*nameserver_port;  /* adapter's nameserver */
+	debug_info_t		*erp_dbf;          /* S/390 debug features */
+	debug_info_t		*abort_dbf;
+	debug_info_t		*in_els_dbf;
+	debug_info_t		*cmd_dbf;
+	spinlock_t		dbf_lock;
+	struct zfcp_adapter_mempool	pool;      /* Adapter memory pools */
+	struct qdio_initialize  qdio_init_data;    /* for qdio_establish */
+	struct device           generic_services;  /* directory for WKA ports */
+};
+
+/*
+ * the struct device sysfs_device must be at the beginning of this structure.
+ * pointer to struct device is used to free port structure in release function
+ * of the device. don't change!
+ */
+struct zfcp_port {
+ struct device sysfs_device; /* sysfs device */
+ struct list_head list; /* list of remote ports */
+ atomic_t refcount; /* reference count */
+ wait_queue_head_t remove_wq; /* can be used to wait for
+ refcount drop to zero */
+ struct zfcp_adapter *adapter; /* adapter used to access port */
+ struct list_head unit_list_head; /* head of logical unit list */
+ struct list_head unit_remove_lh; /* head of luns to be removed
+ list */
+ u32 units; /* # of logical units in list */
+ atomic_t status; /* status of this remote port */
+ scsi_id_t scsi_id; /* own SCSI ID */
+ wwn_t wwnn; /* WWNN if known */
+ wwn_t wwpn; /* WWPN */
+ fc_id_t d_id; /* D_ID */
+ u32 handle; /* handle assigned by FSF */
+ struct zfcp_erp_action erp_action; /* pending error recovery */
+ atomic_t erp_counter;
+};
+
+/* the struct device sysfs_device must be at the beginning of this structure.
+ * pointer to struct device is used to free unit structure in release function
+ * of the device. don't change!
+ */
+struct zfcp_unit {
+ struct device sysfs_device; /* sysfs device */
+ struct list_head list; /* list of logical units */
+ atomic_t refcount; /* reference count */
+ wait_queue_head_t remove_wq; /* can be used to wait for
+ refcount drop to zero */
+ struct zfcp_port *port; /* remote port of unit */
+ atomic_t status; /* status of this logical unit */
+ scsi_lun_t scsi_lun; /* own SCSI LUN */
+ fcp_lun_t fcp_lun; /* own FCP_LUN */
+ u32 handle; /* handle assigned by FSF */
+ struct scsi_device *device; /* scsi device struct pointer */
+ struct zfcp_erp_action erp_action; /* pending error recovery */
+ atomic_t erp_counter;
+};
+
+/* FSF request -- one in-flight command towards the FCP channel */
+struct zfcp_fsf_req {
+	struct list_head       list;	       /* list of FSF requests */
+	struct zfcp_adapter    *adapter;       /* adapter request belongs to */
+	u8		       sbal_number;    /* nr of SBALs free for use */
+	u8		       sbal_first;     /* first SBAL for this request */
+	u8		       sbal_last;      /* last possible SBAL for
+						  this request */
+	u8		       sbal_curr;      /* current SBAL during creation
+						  of request */
+	u8		       sbale_curr;     /* current SBALE during creation
+						  of request */
+	wait_queue_head_t      completion_wq;  /* can be used by a routine
+						  to wait for completion */
+	volatile u32	       status;	       /* status of this request */
+	u32		       fsf_command;    /* FSF Command copy */
+	struct fsf_qtcb	       *qtcb;	       /* address of associated QTCB */
+	u32		       seq_no;         /* Sequence number of request */
+	union zfcp_req_data    data;           /* Info fields of request */
+	struct zfcp_erp_action *erp_action;    /* used if this request is
+						  issued on behalf of erp */
+	mempool_t	       *pool;	       /* used if request was allocated
+						  from emergency pool */
+};
+
+typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
+
+/* driver data */
+struct zfcp_data {
+ struct scsi_host_template scsi_host_template;
+ atomic_t status; /* Module status flags */
+ struct list_head adapter_list_head; /* head of adapter list */
+ struct list_head adapter_remove_lh; /* head of adapters to be
+ removed */
+ rwlock_t status_read_lock; /* for status read thread */
+ struct list_head status_read_receive_head;
+ struct list_head status_read_send_head;
+ struct semaphore status_read_sema;
+ wait_queue_head_t status_read_thread_wqh;
+ u32 adapters; /* # of adapters in list */
+ rwlock_t config_lock; /* serialises changes
+ to adapter/port/unit
+ lists */
+ struct semaphore config_sema; /* serialises configuration
+ changes */
+ atomic_t loglevel; /* current loglevel */
+ char init_busid[BUS_ID_SIZE];
+ wwn_t init_wwpn;
+ fcp_lun_t init_fcp_lun;
+ char *driver_version;
+};
+
+/**
+ * struct zfcp_sg_list - struct describing a scatter-gather list
+ * @sg: pointer to array of (struct scatterlist)
+ * @count: number of elements in scatter-gather list
+ */
+struct zfcp_sg_list {
+ struct scatterlist *sg;
+ unsigned int count;
+};
+
+/* number of elements for various memory pools */
+#define ZFCP_POOL_FSF_REQ_ERP_NR 1
+#define ZFCP_POOL_FSF_REQ_SCSI_NR 1
+#define ZFCP_POOL_FSF_REQ_ABORT_NR 1
+#define ZFCP_POOL_STATUS_READ_NR ZFCP_STATUS_READS_RECOM
+#define ZFCP_POOL_DATA_GID_PN_NR 1
+
+/* struct used by memory pools for fsf_requests */
+struct zfcp_fsf_req_pool_element {
+ struct zfcp_fsf_req fsf_req;
+ struct fsf_qtcb qtcb;
+};
+
+/********************** ZFCP SPECIFIC DEFINES ********************************/
+
+#define ZFCP_FSFREQ_CLEANUP_TIMEOUT HZ/10
+
+#define ZFCP_KNOWN 0x00000001
+#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
+#define ZFCP_WAIT_FOR_SBAL 0x00000004
+#define ZFCP_REQ_NO_QTCB 0x00000008
+
+#define ZFCP_SET 0x00000100
+#define ZFCP_CLEAR 0x00000200
+
+#define ZFCP_INTERRUPTIBLE 1
+#define ZFCP_UNINTERRUPTIBLE 0
+
+#ifndef atomic_test_mask
+/* test whether all bits of @mask are set in the atomic counter @target;
+ * @mask is parenthesized so expressions like (A | B) bind correctly
+ * against the & and == operators in the expansion */
+#define atomic_test_mask(mask, target) \
+	((atomic_read(target) & (mask)) == (mask))
+#endif
+
+extern void _zfcp_hex_dump(char *, int);
+#define ZFCP_HEX_DUMP(level, addr, count) \
+ if (ZFCP_LOG_CHECK(level)) { \
+ _zfcp_hex_dump(addr, count); \
+ }
+
+/* bus id (ccw device name) helpers; arguments are parenthesized so an
+ * expression argument (e.g. a cast) dereferences correctly */
+#define zfcp_get_busid_by_adapter(adapter) ((adapter)->ccw_device->dev.bus_id)
+#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter((port)->adapter))
+#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port((unit)->port))
+
+/*
+ * functions needed for reference/usage counting
+ */
+
+/**
+ * zfcp_unit_get - take a reference on a unit
+ * @unit: unit to reference
+ */
+static inline void
+zfcp_unit_get(struct zfcp_unit *unit)
+{
+	atomic_inc(&unit->refcount);
+}
+
+/**
+ * zfcp_unit_put - drop a reference on a unit
+ * @unit: unit to release
+ *
+ * Wakes up waiters in zfcp_unit_wait() when the last reference is dropped.
+ */
+static inline void
+zfcp_unit_put(struct zfcp_unit *unit)
+{
+	if (atomic_dec_return(&unit->refcount) == 0)
+		wake_up(&unit->remove_wq);
+}
+
+/**
+ * zfcp_unit_wait - wait until all references on a unit are gone
+ * @unit: unit to wait for
+ */
+static inline void
+zfcp_unit_wait(struct zfcp_unit *unit)
+{
+	wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
+}
+
+/**
+ * zfcp_port_get - take a reference on a remote port
+ * @port: port to reference
+ */
+static inline void
+zfcp_port_get(struct zfcp_port *port)
+{
+	atomic_inc(&port->refcount);
+}
+
+/**
+ * zfcp_port_put - drop a reference on a remote port
+ * @port: port to release
+ *
+ * Wakes up waiters in zfcp_port_wait() when the last reference is dropped.
+ */
+static inline void
+zfcp_port_put(struct zfcp_port *port)
+{
+	if (atomic_dec_return(&port->refcount) == 0)
+		wake_up(&port->remove_wq);
+}
+
+/**
+ * zfcp_port_wait - wait until all references on a port are gone
+ * @port: port to wait for
+ */
+static inline void
+zfcp_port_wait(struct zfcp_port *port)
+{
+	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
+}
+
+/**
+ * zfcp_adapter_get - take a reference on an adapter
+ * @adapter: adapter to reference
+ */
+static inline void
+zfcp_adapter_get(struct zfcp_adapter *adapter)
+{
+	atomic_inc(&adapter->refcount);
+}
+
+/**
+ * zfcp_adapter_put - drop a reference on an adapter
+ * @adapter: adapter to release
+ *
+ * Wakes up waiters in zfcp_adapter_wait() when the last reference is dropped.
+ */
+static inline void
+zfcp_adapter_put(struct zfcp_adapter *adapter)
+{
+	if (atomic_dec_return(&adapter->refcount) == 0)
+		wake_up(&adapter->remove_wq);
+}
+
+/**
+ * zfcp_adapter_wait - wait until all references on an adapter are gone
+ * @adapter: adapter to wait for
+ */
+static inline void
+zfcp_adapter_wait(struct zfcp_adapter *adapter)
+{
+	wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
+}
+
+#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
new file mode 100644
index 000000000000..cfc0d8c588df
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -0,0 +1,3585 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_erp.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * (C) Copyright IBM Corp. 2002, 2004
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Raimund Schroeder <raimund.schroeder@de.ibm.com>
+ * Aron Zeh
+ * Wolfgang Taphorn
+ * Stefan Bader <stefan.bader@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
+
+#define ZFCP_ERP_REVISION "$Revision: 1.86 $"
+
+#include "zfcp_ext.h"
+
+static int zfcp_erp_adisc(struct zfcp_adapter *, fc_id_t);
+static void zfcp_erp_adisc_handler(unsigned long);
+
+static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int);
+static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int);
+static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int);
+static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int);
+
+static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int);
+static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int);
+
+static void zfcp_erp_adapter_block(struct zfcp_adapter *, int);
+static void zfcp_erp_adapter_unblock(struct zfcp_adapter *);
+static void zfcp_erp_port_block(struct zfcp_port *, int);
+static void zfcp_erp_port_unblock(struct zfcp_port *);
+static void zfcp_erp_unit_block(struct zfcp_unit *, int);
+static void zfcp_erp_unit_unblock(struct zfcp_unit *);
+
+static int zfcp_erp_thread(void *);
+
+static int zfcp_erp_strategy(struct zfcp_erp_action *);
+
+static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *);
+static int zfcp_erp_strategy_memwait(struct zfcp_erp_action *);
+static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *, int);
+static int zfcp_erp_strategy_check_unit(struct zfcp_unit *, int);
+static int zfcp_erp_strategy_check_port(struct zfcp_port *, int);
+static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int);
+static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *,
+ struct zfcp_port *,
+ struct zfcp_unit *, int);
+static inline int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
+static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *,
+ struct zfcp_port *,
+ struct zfcp_unit *, int);
+static int zfcp_erp_strategy_check_queues(struct zfcp_adapter *);
+static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
+
+static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
+static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
+static int zfcp_erp_adapter_strategy_open_fsf_statusread(
+ struct zfcp_erp_action *);
+
+static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *);
+static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *);
+
+static int zfcp_erp_port_strategy(struct zfcp_erp_action *);
+static int zfcp_erp_port_strategy_clearstati(struct zfcp_port *);
+static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *);
+static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *);
+static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *);
+static int zfcp_erp_port_strategy_open_nameserver_wakeup(
+ struct zfcp_erp_action *);
+static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *);
+static int zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *);
+static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *);
+
+static int zfcp_erp_unit_strategy(struct zfcp_erp_action *);
+static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
+static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
+static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
+
+static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
+static int zfcp_erp_action_dismiss_port(struct zfcp_port *);
+static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
+static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
+
+static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
+ struct zfcp_port *, struct zfcp_unit *);
+static int zfcp_erp_action_dequeue(struct zfcp_erp_action *);
+static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
+ struct zfcp_port *, struct zfcp_unit *,
+ int);
+
+static void zfcp_erp_action_ready(struct zfcp_erp_action *);
+static int zfcp_erp_action_exists(struct zfcp_erp_action *);
+
+static inline void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
+static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *);
+
+static void zfcp_erp_memwait_handler(unsigned long);
+static void zfcp_erp_timeout_handler(unsigned long);
+static inline void zfcp_erp_timeout_init(struct zfcp_erp_action *);
+
+/**
+ * zfcp_fsf_request_timeout_handler - reopen adapter when a request timed out
+ * @data: adapter pointer, cast to unsigned long for the timer API
+ *
+ * Invoked when a request (ELS, Generic Service or SCSI command) exceeded
+ * its time limit, which is taken to mean the adapter got stuck; a reopen
+ * of the adapter is therefore triggered. Not meant for error recovery,
+ * SCSI abort commands or SCSI requests from the SCSI mid-layer.
+ */
+void
+zfcp_fsf_request_timeout_handler(unsigned long data)
+{
+	zfcp_erp_adapter_reopen((struct zfcp_adapter *) data, 0);
+}
+
+/**
+ * zfcp_fsf_scsi_er_timeout_handler - handle a stuck SCSI error recovery
+ * @data: adapter pointer, cast to unsigned long for the timer API
+ *
+ * Called whenever a SCSI error recovery action (abort/reset) does not
+ * return. Reopening the adapter guarantees that the command is returned
+ * by zfcp (it will not come back via the adapter anymore), so its buffer
+ * can be reused.
+ */
+void
+zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+
+	ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
+			"Restarting all operations on the adapter %s\n",
+			zfcp_get_busid_by_adapter(adapter));
+	debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
+	zfcp_erp_adapter_reopen(adapter, 0);
+}
+
+/*
+ * zfcp_erp_adapter_reopen_internal - schedule reopen recovery for an adapter
+ *
+ * purpose:	called if an adapter failed; blocks the adapter and enqueues
+ *		a REOPEN_ADAPTER erp action which is processed asynchronously
+ *		by the erp thread. Callers must hold zfcp_data.config_lock
+ *		(read) and adapter->erp_lock (write) -- use
+ *		zfcp_erp_adapter_reopen() for correct locking.
+ *
+ * returns:	0	- initiated action successfully
+ *		<0	- failed to initiate action (-EIO if the adapter is
+ *			  already marked ERP_FAILED)
+ */
+int
+zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
+{
+	int retval;
+
+	debug_text_event(adapter->erp_dbf, 5, "a_ro");
+	ZFCP_LOG_DEBUG("reopen adapter %s\n",
+		       zfcp_get_busid_by_adapter(adapter));
+
+	/* block I/O and clear the requested status bits */
+	zfcp_erp_adapter_block(adapter, clear_mask);
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
+		ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n",
+			       zfcp_get_busid_by_adapter(adapter));
+		debug_text_event(adapter->erp_dbf, 5, "a_ro_f");
+		/* ensure propagation of failed status to new devices */
+		zfcp_erp_adapter_failed(adapter);
+		retval = -EIO;
+		goto out;
+	}
+	retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
+					 adapter, NULL, NULL);
+
+ out:
+	return retval;
+}
+
+/**
+ * zfcp_erp_adapter_reopen - initiate reopen recovery for an adapter
+ * @adapter: adapter to be reopened
+ * @clear_mask: status flags to clear while blocking the adapter
+ *
+ * Locking wrapper for zfcp_erp_adapter_reopen_internal(): takes the
+ * config lock (read) and the adapter's erp lock (write) around the call.
+ *
+ * Returns: 0 if the recovery action was initiated, <0 otherwise
+ */
+int
+zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask)
+{
+	unsigned long flags;
+	int retval;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	write_lock(&adapter->erp_lock);
+	retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask);
+	write_unlock(&adapter->erp_lock);
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	return retval;
+}
+
+/**
+ * zfcp_erp_adapter_shutdown - shut down an adapter via error recovery
+ * @adapter: adapter to shut down
+ * @clear_mask: additional status flags to clear
+ *
+ * Reopens the adapter while also clearing the RUNNING and ERP_FAILED
+ * flags, which leaves the adapter closed after recovery completes.
+ *
+ * Returns: 0 if the recovery action was initiated, <0 otherwise
+ */
+int
+zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask)
+{
+	return zfcp_erp_adapter_reopen(adapter,
+				       ZFCP_STATUS_COMMON_RUNNING |
+				       ZFCP_STATUS_COMMON_ERP_FAILED |
+				       clear_mask);
+}
+
+/**
+ * zfcp_erp_port_shutdown - shut down a remote port via error recovery
+ * @port: port to shut down
+ * @clear_mask: additional status flags to clear
+ *
+ * Reopens the port while also clearing the RUNNING and ERP_FAILED
+ * flags, which leaves the port closed after recovery completes.
+ *
+ * Returns: 0 if the recovery action was initiated, <0 otherwise
+ */
+int
+zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask)
+{
+	return zfcp_erp_port_reopen(port,
+				    ZFCP_STATUS_COMMON_RUNNING |
+				    ZFCP_STATUS_COMMON_ERP_FAILED |
+				    clear_mask);
+}
+
+/**
+ * zfcp_erp_unit_shutdown - shut down a logical unit via error recovery
+ * @unit: unit to shut down
+ * @clear_mask: additional status flags to clear
+ *
+ * Reopens the unit while also clearing the RUNNING and ERP_FAILED
+ * flags, which leaves the unit closed after recovery completes.
+ *
+ * Returns: 0 if the recovery action was initiated, <0 otherwise
+ */
+int
+zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask)
+{
+	return zfcp_erp_unit_reopen(unit,
+				    ZFCP_STATUS_COMMON_RUNNING |
+				    ZFCP_STATUS_COMMON_ERP_FAILED |
+				    clear_mask);
+}
+
+
+/**
+ * zfcp_erp_adisc - send ADISC ELS command
+ * @adapter: adapter structure
+ * @d_id: d_id of port where ADISC is sent to
+ *
+ * Allocates a zfcp_send_els descriptor, two single-entry scatter-gather
+ * lists backed by the two halves of one zeroed page, and a timer, then
+ * hands the request to zfcp_fsf_send_els(). On success the resources are
+ * released by zfcp_erp_adisc_handler(); on failure they are freed here.
+ * Returns 0 on success, -ENOMEM or the zfcp_fsf_send_els() error otherwise.
+ */
+int
+zfcp_erp_adisc(struct zfcp_adapter *adapter, fc_id_t d_id)
+{
+	struct zfcp_send_els *send_els;
+	struct zfcp_ls_adisc *adisc;
+	void *address = NULL;
+	int retval = 0;
+	struct timer_list *timer;
+
+	send_els = kmalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC);
+	if (send_els == NULL)
+		goto nomem;
+	memset(send_els, 0, sizeof(*send_els));
+
+	send_els->req = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
+	if (send_els->req == NULL)
+		goto nomem;
+	memset(send_els->req, 0, sizeof(*send_els->req));
+
+	send_els->resp = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
+	if (send_els->resp == NULL)
+		goto nomem;
+	memset(send_els->resp, 0, sizeof(*send_els->resp));
+
+	address = (void *) get_zeroed_page(GFP_ATOMIC);
+	if (address == NULL)
+		goto nomem;
+
+	/* request buffer in the first half of the page, response in the
+	 * second half */
+	zfcp_address_to_sg(address, send_els->req);
+	address += PAGE_SIZE >> 1;
+	zfcp_address_to_sg(address, send_els->resp);
+	send_els->req_count = send_els->resp_count = 1;
+
+	send_els->adapter = adapter;
+	send_els->d_id = d_id;
+	send_els->handler = zfcp_erp_adisc_handler;
+	send_els->handler_data = (unsigned long) send_els;
+
+	adisc = zfcp_sg_to_address(send_els->req);
+	send_els->ls_code = adisc->code = ZFCP_LS_ADISC;
+
+	send_els->req->length = sizeof(struct zfcp_ls_adisc);
+	send_els->resp->length = sizeof(struct zfcp_ls_adisc_acc);
+
+	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
+	   without FC-AL-2 capability, so we don't set it */
+	adisc->wwpn = adapter->wwpn;
+	adisc->wwnn = adapter->wwnn;
+	adisc->nport_id = adapter->s_id;
+	ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x "
+		      "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
+		      "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
+		      adapter->s_id, d_id, (wwn_t) adisc->wwpn,
+		      (wwn_t) adisc->wwnn, adisc->hard_nport_id,
+		      adisc->nport_id);
+
+	timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC);
+	if (!timer)
+		goto nomem;
+
+	init_timer(timer);
+	timer->function = zfcp_fsf_request_timeout_handler;
+	timer->data = (unsigned long) adapter;
+	/* NOTE(review): expires holds the plain timeout value; presumably
+	 * zfcp_fsf_send_els() arms the timer relative to jiffies -- confirm */
+	timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
+	send_els->timer = timer;
+
+	retval = zfcp_fsf_send_els(send_els);
+	if (retval != 0) {
+		ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
+				"0x%08x on adapter %s\n", d_id,
+				zfcp_get_busid_by_adapter(adapter));
+		del_timer(send_els->timer);
+		goto freemem;
+	}
+
+	goto out;
+
+ nomem:
+	retval = -ENOMEM;
+ freemem:
+	/* address != NULL implies send_els and send_els->req were allocated,
+	 * so dereferencing send_els->req->page here is safe */
+	if (address != NULL)
+		__free_pages(send_els->req->page, 0);
+	if (send_els != NULL) {
+		kfree(send_els->timer);
+		kfree(send_els->req);
+		kfree(send_els->resp);
+		kfree(send_els);
+	}
+ out:
+	return retval;
+}
+
+
+/**
+ * zfcp_erp_adisc_handler - handler for ADISC ELS command
+ * @data: pointer to struct zfcp_send_els
+ *
+ * If ADISC failed (LS_RJT or timed out) forced reopen of the port is
+ * triggered. On a good response the port's WWNN is recorded if still
+ * unknown, and a changed WWPN triggers a reopen of the port. In all
+ * cases the resources allocated by zfcp_erp_adisc() are freed here.
+ */
+void
+zfcp_erp_adisc_handler(unsigned long data)
+{
+	struct zfcp_send_els *send_els;
+	struct zfcp_port *port;
+	struct zfcp_adapter *adapter;
+	fc_id_t d_id;
+	struct zfcp_ls_adisc_acc *adisc;
+
+	send_els = (struct zfcp_send_els *) data;
+
+	/* the request completed, stop the request timeout watchdog */
+	del_timer(send_els->timer);
+
+	adapter = send_els->adapter;
+	d_id = send_els->d_id;
+
+	read_lock(&zfcp_data.config_lock);
+	port = zfcp_get_port_by_did(send_els->adapter, send_els->d_id);
+	read_unlock(&zfcp_data.config_lock);
+
+	/* NOTE(review): the reference dropped by zfcp_port_put() at "out"
+	 * is presumably the one taken in zfcp_test_link() -- confirm */
+	BUG_ON(port == NULL);
+
+	/* request rejected or timed out */
+	if (send_els->status != 0) {
+		ZFCP_LOG_NORMAL("ELS request rejected/timed out, "
+				"force physical port reopen "
+				"(adapter %s, port d_id=0x%08x)\n",
+				zfcp_get_busid_by_adapter(adapter), d_id);
+		debug_text_event(adapter->erp_dbf, 3, "forcreop");
+		if (zfcp_erp_port_forced_reopen(port, 0))
+			ZFCP_LOG_NORMAL("failed reopen of port "
+					"(adapter %s, wwpn=0x%016Lx)\n",
+					zfcp_get_busid_by_port(port),
+					port->wwpn);
+		goto out;
+	}
+
+	adisc = zfcp_sg_to_address(send_els->resp);
+
+	ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id "
+		      "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
+		      "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
+		      d_id, adapter->s_id, (wwn_t) adisc->wwpn,
+		      (wwn_t) adisc->wwnn, adisc->hard_nport_id,
+		      adisc->nport_id);
+
+	/* set wwnn for port */
+	if (port->wwnn == 0)
+		port->wwnn = adisc->wwnn;
+
+	/* the peer's WWPN changed for this d_id: reopen the port */
+	if (port->wwpn != adisc->wwpn) {
+		ZFCP_LOG_NORMAL("d_id assignment changed, reopening "
+				"port (adapter %s, wwpn=0x%016Lx, "
+				"adisc_resp_wwpn=0x%016Lx)\n",
+				zfcp_get_busid_by_port(port),
+				port->wwpn, (wwn_t) adisc->wwpn);
+		if (zfcp_erp_port_reopen(port, 0))
+			ZFCP_LOG_NORMAL("failed reopen of port "
+					"(adapter %s, wwpn=0x%016Lx)\n",
+					zfcp_get_busid_by_port(port),
+					port->wwpn);
+	}
+
+ out:
+	/* release port reference and free everything zfcp_erp_adisc()
+	 * allocated: the shared req/resp page, timer, SG lists, descriptor */
+	zfcp_port_put(port);
+	__free_pages(send_els->req->page, 0);
+	kfree(send_els->timer);
+	kfree(send_els->req);
+	kfree(send_els->resp);
+	kfree(send_els);
+}
+
+
+/**
+ * zfcp_test_link - lightweight link test procedure
+ * @port: port to be tested
+ *
+ * Test status of a link to a remote port using the ELS command ADISC.
+ * If the ADISC cannot be sent, fall back to a forced reopen of the port.
+ *
+ * Returns: 0 if the ADISC was sent or the fallback reopen was initiated,
+ * -EPERM if the fallback reopen failed as well.
+ */
+int
+zfcp_test_link(struct zfcp_port *port)
+{
+	int retval;
+
+	/* reference for the ADISC handler; dropped again on send failure */
+	zfcp_port_get(port);
+	retval = zfcp_erp_adisc(port->adapter, port->d_id);
+	if (retval != 0) {
+		zfcp_port_put(port);
+		/* fixed: log format had a stray trailing blank after \n */
+		ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx "
+				"on adapter %s\n", port->wwpn,
+				zfcp_get_busid_by_port(port));
+		retval = zfcp_erp_port_forced_reopen(port, 0);
+		if (retval != 0) {
+			ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx "
+					"on adapter %s failed\n", port->wwpn,
+					zfcp_get_busid_by_port(port));
+			retval = -EPERM;
+		}
+	}
+
+	return retval;
+}
+
+
+/*
+ * zfcp_erp_port_forced_reopen_internal - schedule forced reopen of a port
+ *
+ * purpose:	called if a port failed to be opened normally; blocks the
+ *		port and enqueues a REOPEN_PORT_FORCED erp action which is
+ *		processed asynchronously by the erp thread. Callers must
+ *		hold zfcp_data.config_lock (read) and adapter->erp_lock
+ *		(write) -- use zfcp_erp_port_forced_reopen() for correct
+ *		locking.
+ *
+ * returns:	0	- initiated action successfully
+ *		<0	- failed to initiate action (-EIO if the port is
+ *			  already marked ERP_FAILED)
+ */
+static int
+zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask)
+{
+	int retval;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 5, "pf_ro");
+	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+
+	ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n",
+		       port->wwpn, zfcp_get_busid_by_port(port));
+
+	/* block I/O and clear the requested status bits */
+	zfcp_erp_port_block(port, clear_mask);
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
+		ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx "
+			       "on adapter %s\n", port->wwpn,
+			       zfcp_get_busid_by_port(port));
+		debug_text_event(adapter->erp_dbf, 5, "pf_ro_f");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		retval = -EIO;
+		goto out;
+	}
+
+	retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+					 port->adapter, port, NULL);
+
+ out:
+	return retval;
+}
+
+/**
+ * zfcp_erp_port_forced_reopen - initiate forced reopen of a remote port
+ * @port: port to be reopened
+ * @clear_mask: status flags to clear while blocking the port
+ *
+ * Locking wrapper for zfcp_erp_port_forced_reopen_internal(): takes the
+ * config lock (read) and the adapter's erp lock (write) around the call.
+ *
+ * Returns: 0 if the recovery action was initiated, <0 otherwise
+ */
+int
+zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+	unsigned long flags;
+	int retval;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	write_lock(&adapter->erp_lock);
+	retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask);
+	write_unlock(&adapter->erp_lock);
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	return retval;
+}
+
+/*
+ * zfcp_erp_port_reopen_internal - schedule reopen recovery for a port
+ *
+ * purpose:	called if a port is to be opened; blocks the port and
+ *		enqueues a REOPEN_PORT erp action which is processed
+ *		asynchronously by the erp thread. Callers must hold
+ *		zfcp_data.config_lock (read) and adapter->erp_lock (write)
+ *		-- use zfcp_erp_port_reopen() for correct locking.
+ *
+ * returns:	0	- initiated action successfully
+ *		<0	- failed to initiate action (-EIO if the port is
+ *			  already marked ERP_FAILED)
+ */
+static int
+zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask)
+{
+	int retval;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 5, "p_ro");
+	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+
+	ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n",
+		       port->wwpn, zfcp_get_busid_by_port(port));
+
+	/* block I/O and clear the requested status bits */
+	zfcp_erp_port_block(port, clear_mask);
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
+		ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx "
+			       "on adapter %s\n", port->wwpn,
+			       zfcp_get_busid_by_port(port));
+		debug_text_event(adapter->erp_dbf, 5, "p_ro_f");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		/* ensure propagation of failed status to new devices */
+		zfcp_erp_port_failed(port);
+		retval = -EIO;
+		goto out;
+	}
+
+	retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
+					 port->adapter, port, NULL);
+
+ out:
+	return retval;
+}
+
+/**
+ * zfcp_erp_port_reopen - initiate reopen of a remote port
+ * @port: port to be reopened
+ * @clear_mask: specifies flags in port status to be cleared
+ * Return: 0 on success, < 0 on error
+ *
+ * Locking wrapper for zfcp_erp_port_reopen_internal(); it takes the
+ * config lock (read) and the adapter's erp lock (write) around the call.
+ * An error recovery task is initiated to do the reopen. To wait for the
+ * completion of the reopen zfcp_erp_wait should be used.
+ */
+int
+zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+	unsigned long flags;
+	int retval;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	write_lock(&adapter->erp_lock);
+	retval = zfcp_erp_port_reopen_internal(port, clear_mask);
+	write_unlock(&adapter->erp_lock);
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	return retval;
+}
+
+/*
+ * zfcp_erp_unit_reopen_internal - schedule reopen recovery for a unit
+ *
+ * purpose:	called if a unit is to be opened; blocks the unit and
+ *		enqueues a REOPEN_UNIT erp action which is processed
+ *		asynchronously by the erp thread. Callers must hold
+ *		zfcp_data.config_lock (read) and adapter->erp_lock (write)
+ *		-- use zfcp_erp_unit_reopen() for correct locking.
+ *
+ * returns:	0	- initiated action successfully
+ *		<0	- failed to initiate action (-EIO if the unit is
+ *			  already marked ERP_FAILED)
+ */
+static int
+zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask)
+{
+	int retval;
+	struct zfcp_adapter *adapter = unit->port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 5, "u_ro");
+	debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
+	ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx "
+		       "on adapter %s\n", unit->fcp_lun,
+		       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+
+	/* block I/O and clear the requested status bits */
+	zfcp_erp_unit_block(unit, clear_mask);
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
+		ZFCP_LOG_DEBUG("skipped reopen of failed unit 0x%016Lx "
+			       "on port 0x%016Lx on adapter %s\n",
+			       unit->fcp_lun, unit->port->wwpn,
+			       zfcp_get_busid_by_unit(unit));
+		debug_text_event(adapter->erp_dbf, 5, "u_ro_f");
+		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
+			    sizeof (fcp_lun_t));
+		retval = -EIO;
+		goto out;
+	}
+
+	retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
+					 unit->port->adapter, unit->port, unit);
+ out:
+	return retval;
+}
+
+/**
+ * zfcp_erp_unit_reopen - initiate reopen of a unit
+ * @unit: unit to be reopened
+ * @clear_mask: specifies flags in unit status to be cleared
+ * Return: 0 on success, < 0 on error
+ *
+ * This is a wrapper for zfcp_erp_unit_reopen_internal. It ensures correct
+ * locking. An error recovery task is initiated to do the reopen.
+ * To wait for the completion of the reopen zfcp_erp_wait should be used.
+ */
+int
+zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
+{
+	int retval;
+	unsigned long flags;
+	struct zfcp_adapter *adapter;
+	struct zfcp_port *port;
+
+	port = unit->port;
+	adapter = port->adapter;
+
+	/* lock order: config_lock (read) before adapter->erp_lock (write) */
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	write_lock(&adapter->erp_lock);
+	retval = zfcp_erp_unit_reopen_internal(unit, clear_mask);
+	write_unlock(&adapter->erp_lock);
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+
+	return retval;
+}
+
+/*
+ * zfcp_erp_adapter_block - disable I/O on an adapter
+ *
+ * Clears ZFCP_STATUS_COMMON_UNBLOCKED (plus the bits in clear_mask) in
+ * the adapter status so that no new I/O is accepted while recovery runs.
+ */
+static void
+zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
+{
+	u32 mask = ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask;
+
+	debug_text_event(adapter->erp_dbf, 6, "a_bl");
+	zfcp_erp_modify_adapter_status(adapter, mask, ZFCP_CLEAR);
+}
+
+/*
+ * zfcp_erp_adapter_unblock - re-enable I/O on an adapter
+ *
+ * Sets ZFCP_STATUS_COMMON_UNBLOCKED in the adapter status.
+ */
+static void
+zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
+{
+	debug_text_event(adapter->erp_dbf, 6, "a_ubl");
+	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+}
+
+/*
+ * zfcp_erp_port_block - disable I/O on a remote port
+ *
+ * Clears ZFCP_STATUS_COMMON_UNBLOCKED (plus the bits in clear_mask) in
+ * the port status so that no new I/O is accepted while recovery runs.
+ */
+static void
+zfcp_erp_port_block(struct zfcp_port *port, int clear_mask)
+{
+	u32 mask = ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask;
+
+	debug_text_event(port->adapter->erp_dbf, 6, "p_bl");
+	debug_event(port->adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
+	zfcp_erp_modify_port_status(port, mask, ZFCP_CLEAR);
+}
+
+/*
+ * zfcp_erp_port_unblock - re-enable I/O on a remote port
+ *
+ * Sets ZFCP_STATUS_COMMON_UNBLOCKED in the port status.
+ */
+static void
+zfcp_erp_port_unblock(struct zfcp_port *port)
+{
+	debug_text_event(port->adapter->erp_dbf, 6, "p_ubl");
+	debug_event(port->adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
+	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+}
+
+/*
+ * zfcp_erp_unit_block - disable I/O on a unit
+ *
+ * Clears ZFCP_STATUS_COMMON_UNBLOCKED (plus the bits in clear_mask) in
+ * the unit status so that no new I/O is accepted while recovery runs.
+ */
+static void
+zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
+{
+	u32 mask = ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask;
+	struct zfcp_adapter *adapter = unit->port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 6, "u_bl");
+	debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
+	zfcp_erp_modify_unit_status(unit, mask, ZFCP_CLEAR);
+}
+
+/*
+ * zfcp_erp_unit_unblock - re-enable I/O on a unit
+ *
+ * Sets ZFCP_STATUS_COMMON_UNBLOCKED in the unit status.
+ */
+static void
+zfcp_erp_unit_unblock(struct zfcp_unit *unit)
+{
+	debug_text_event(unit->port->adapter->erp_dbf, 6, "u_ubl");
+	debug_event(unit->port->adapter->erp_dbf, 6, &unit->fcp_lun,
+		    sizeof (fcp_lun_t));
+	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
+}
+
+/*
+ * function: zfcp_erp_action_ready
+ *
+ * purpose: moves an erp_action to the 'ready' queue and wakes up the
+ *	erp thread waiting on erp_ready_sem
+ *
+ * note: the action must be moved to the ready list *before* the
+ *	semaphore is raised, so the thread finds work when it wakes
+ */
+static void
+zfcp_erp_action_ready(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	debug_text_event(adapter->erp_dbf, 4, "a_ar");
+	debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof (int));
+
+	zfcp_erp_action_to_ready(erp_action);
+	up(&adapter->erp_ready_sem);
+}
+
+/*
+ * function: zfcp_erp_action_exists
+ *
+ * purpose: determines in which queue (if any) an erp_action currently is
+ *
+ * returns: <0 (-EINVAL) - erp_action not found in any list
+ *	ZFCP_ERP_ACTION_READY - erp_action is in ready list
+ *	ZFCP_ERP_ACTION_RUNNING - erp_action is in running list
+ *
+ * locks: erp_lock must be held
+ */
+static int
+zfcp_erp_action_exists(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_erp_action *act;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	/* search in running list */
+	list_for_each_entry(act, &adapter->erp_running_head, list)
+		if (act == erp_action)
+			return ZFCP_ERP_ACTION_RUNNING;
+
+	/* search in ready list */
+	list_for_each_entry(act, &adapter->erp_ready_head, list)
+		if (act == erp_action)
+			return ZFCP_ERP_ACTION_READY;
+
+	return -EINVAL;
+}
+
+/*
+ * purpose: checks current status of action (timed out, dismissed, ...)
+ *	and does appropriate preparations (dismiss fsf request, ...)
+ *
+ * locks: called under erp_lock (disabled interrupts)
+ *
+ * returns: 0
+ */
+static int
+zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
+{
+	int retval = 0;
+	struct zfcp_fsf_req *fsf_req;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	if (erp_action->fsf_req) {
+		/* take lock to ensure that request is not being deleted meanwhile */
+		write_lock(&adapter->fsf_req_list_lock);
+		/* check whether fsf req does still exist; if the list is
+		 * exhausted, fsf_req ends up != erp_action->fsf_req */
+		list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
+		    if (fsf_req == erp_action->fsf_req)
+			break;
+		if (fsf_req == erp_action->fsf_req) {
+			/* fsf_req still exists */
+			debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
+			debug_event(adapter->erp_dbf, 3, &fsf_req,
+				    sizeof (unsigned long));
+			/* dismiss fsf_req of timed out or dismissed erp_action */
+			if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
+						  ZFCP_STATUS_ERP_TIMEDOUT)) {
+				debug_text_event(adapter->erp_dbf, 3,
+						 "a_ca_disreq");
+				fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+			}
+			if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+				/* fixed: message ended in "\n " (stray
+				 * trailing blank after the newline) */
+				ZFCP_LOG_NORMAL("error: erp step timed out "
+						"(action=%d, fsf_req=%p)\n",
+						erp_action->action,
+						erp_action->fsf_req);
+			}
+			/*
+			 * If fsf_req is neither dismissed nor completed
+			 * then keep it running asynchronously and don't mess
+			 * with the association of erp_action and fsf_req.
+			 */
+			if (fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
+					       ZFCP_STATUS_FSFREQ_DISMISSED)) {
+				/* forget about association between fsf_req
+				   and erp_action */
+				fsf_req->erp_action = NULL;
+				erp_action->fsf_req = NULL;
+			}
+		} else {
+			debug_text_event(adapter->erp_dbf, 3, "a_ca_gonereq");
+			/*
+			 * even if this fsf_req has gone, forget about
+			 * association between erp_action and fsf_req
+			 */
+			erp_action->fsf_req = NULL;
+		}
+		write_unlock(&adapter->fsf_req_list_lock);
+	} else
+		debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
+
+	return retval;
+}
+
+/*
+ * purpose: generic handler for asynchronous events related to erp_action
+ *	events (normal completion, time-out, dismissing, retry after
+ *	low memory condition)
+ *
+ * note: deletion of timer is not required (e.g. in case of a time-out),
+ *	but a second try does no harm,
+ *	we leave it in here to allow for greater simplification
+ *
+ * returns: 0 - there was an action to handle
+ *	!0 - otherwise
+ */
+static int
+zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
+			      unsigned long set_mask)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	if (zfcp_erp_action_exists(erp_action) != ZFCP_ERP_ACTION_RUNNING) {
+		/* action is ready or gone - nothing to do */
+		debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
+		debug_event(adapter->erp_dbf, 3, &erp_action->action,
+			    sizeof (int));
+		return 1;
+	}
+
+	debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex");
+	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
+	if (!(set_mask & ZFCP_STATUS_ERP_TIMEDOUT))
+		del_timer(&erp_action->timer);
+	erp_action->status |= set_mask;
+	zfcp_erp_action_ready(erp_action);
+
+	return 0;
+}
+
+/*
+ * purpose: generic handler for asynchronous events related to erp_action
+ *	events (normal completion, time-out, dismissing, retry after
+ *	low memory condition); locked wrapper around
+ *	zfcp_erp_async_handler_nolock
+ *
+ * returns: 0 - there was an action to handle
+ *	!0 - otherwise
+ */
+int
+zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
+		       unsigned long set_mask)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	unsigned long flags;
+	int retval;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	retval = zfcp_erp_async_handler_nolock(erp_action, set_mask);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	return retval;
+}
+
+/*
+ * purpose: timer callback for an erp_action which slept waiting for
+ *	memory becoming available,
+ *	will trigger that this action will be continued
+ */
+static void
+zfcp_erp_memwait_handler(unsigned long data)
+{
+	struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	debug_text_event(adapter->erp_dbf, 2, "a_mwh");
+	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
+
+	/* no status bits to set - just wake the action for a retry */
+	zfcp_erp_async_handler(erp_action, 0);
+}
+
+/*
+ * purpose: timer callback, called if an asynchronous erp step timed out;
+ *	action gets an appropriate flag and will be processed
+ *	accordingly
+ */
+static void
+zfcp_erp_timeout_handler(unsigned long data)
+{
+	struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	debug_text_event(adapter->erp_dbf, 2, "a_th");
+	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
+
+	zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
+}
+
+/*
+ * purpose: is called for an erp_action which needs to be ended
+ *	though not being done,
+ *	this is usually required if a higher-level action supersedes it,
+ *	action gets an appropriate flag and will be processed
+ *	accordingly
+ *
+ * locks: erp_lock held (thus we need to call another handler variant)
+ */
+static int
+zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	debug_text_event(adapter->erp_dbf, 2, "a_adis");
+	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
+
+	zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
+
+	return 0;
+}
+
+/*
+ * zfcp_erp_thread_setup - initialize erp bookkeeping and start erp thread
+ *
+ * Initializes erp_lock, the ready/running queues and erp_ready_sem,
+ * spawns the per-adapter erp thread and waits until the thread has
+ * signalled ZFCP_STATUS_ADAPTER_ERP_THREAD_UP.
+ *
+ * returns: 0 on success, 1 if the thread could not be created
+ *	(note: not a negative errno - callers test for non-zero)
+ */
+int
+zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
+{
+	int retval = 0;
+
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
+
+	rwlock_init(&adapter->erp_lock);
+	INIT_LIST_HEAD(&adapter->erp_ready_head);
+	INIT_LIST_HEAD(&adapter->erp_running_head);
+	sema_init(&adapter->erp_ready_sem, 0);
+
+	retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
+	if (retval < 0) {
+		ZFCP_LOG_NORMAL("error: creation of erp thread failed for "
+				"adapter %s\n",
+				zfcp_get_busid_by_adapter(adapter));
+		debug_text_event(adapter->erp_dbf, 5, "a_thset_fail");
+	} else {
+		/* wait until the thread reports that it is up */
+		wait_event(adapter->erp_thread_wqh,
+			   atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
+					    &adapter->status));
+		debug_text_event(adapter->erp_dbf, 5, "a_thset_ok");
+	}
+
+	return (retval < 0);
+}
+
+/*
+ * function: zfcp_erp_thread_kill
+ *
+ * purpose: asks the erp thread to terminate and waits until it is gone
+ *
+ * returns: 0
+ *
+ * context: process (i.e. proc-fs or rmmod/insmod)
+ *
+ * note: The caller of this routine ensures that the specified
+ *	adapter has been shut down and that this operation
+ *	has been completed. Thus, there are no pending erp_actions
+ *	which would need to be handled here.
+ */
+int
+zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
+{
+	int retval = 0;
+
+	/* set the kill flag, then wake the thread via its semaphore */
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
+	up(&adapter->erp_ready_sem);
+
+	/* the thread clears THREAD_UP and wakes us when it exits */
+	wait_event(adapter->erp_thread_wqh,
+		   !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
+				     &adapter->status));
+
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
+			  &adapter->status);
+
+	debug_text_event(adapter->erp_dbf, 5, "a_thki_ok");
+
+	return retval;
+}
+
+/*
+ * purpose: is run as a kernel thread,
+ *	goes through list of error recovery actions of associated adapter
+ *	and delegates single action to execution
+ *
+ * returns: 0
+ */
+static int
+zfcp_erp_thread(void *data)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+	struct list_head *next;
+	struct zfcp_erp_action *erp_action;
+	unsigned long flags;
+
+	daemonize("zfcperp%s", zfcp_get_busid_by_adapter(adapter));
+	/* Block all signals */
+	siginitsetinv(&current->blocked, 0);
+	/* tell zfcp_erp_thread_setup() that we are up and running */
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
+	debug_text_event(adapter->erp_dbf, 5, "a_th_run");
+	wake_up(&adapter->erp_thread_wqh);
+
+	while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
+				 &adapter->status)) {
+
+		/* peek at the tail of the ready queue under the erp lock */
+		write_lock_irqsave(&adapter->erp_lock, flags);
+		next = adapter->erp_ready_head.prev;
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+		if (next != &adapter->erp_ready_head) {
+			erp_action =
+			    list_entry(next, struct zfcp_erp_action, list);
+			/*
+			 * process action (incl. [re]moving it
+			 * from 'ready' queue)
+			 */
+			zfcp_erp_strategy(erp_action);
+		}
+
+		/*
+		 * sleep as long as there is nothing to do, i.e.
+		 * no action in 'ready' queue to be processed and
+		 * thread is not to be killed
+		 * NOTE(review): return value of down_interruptible() is
+		 * intentionally ignored; an interruption simply causes
+		 * another pass through the loop
+		 */
+		down_interruptible(&adapter->erp_ready_sem);
+		debug_text_event(adapter->erp_dbf, 5, "a_th_woken");
+	}
+
+	/* tell zfcp_erp_thread_kill() that we are gone */
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
+	debug_text_event(adapter->erp_dbf, 5, "a_th_stop");
+	wake_up(&adapter->erp_thread_wqh);
+
+	return 0;
+}
+
+/*
+ * function: zfcp_erp_strategy
+ *
+ * purpose: drives single error recovery action and schedules higher and
+ *	subordinate actions, if necessary
+ *
+ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
+ *	ZFCP_ERP_SUCCEEDED - action finished successfully (deqd)
+ *	ZFCP_ERP_FAILED - action finished unsuccessfully (deqd)
+ *	ZFCP_ERP_EXIT - action finished (dequeued), offline
+ *	ZFCP_ERP_DISMISSED - action canceled (dequeued)
+ */
+static int
+zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
+{
+	int retval = 0;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	/* snapshot target/action data - erp_action may be dequeued below */
+	struct zfcp_port *port = erp_action->port;
+	struct zfcp_unit *unit = erp_action->unit;
+	int action = erp_action->action;
+	u32 status = erp_action->status;
+	unsigned long flags;
+
+	/* serialise dismissing, timing out, moving, enqueueing */
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	write_lock(&adapter->erp_lock);
+
+	/* dequeue dismissed action and leave, if required */
+	retval = zfcp_erp_strategy_check_action(erp_action, retval);
+	if (retval == ZFCP_ERP_DISMISSED) {
+		debug_text_event(adapter->erp_dbf, 4, "a_st_dis1");
+		goto unlock;
+	}
+
+	/*
+	 * move action to 'running' queue before processing it
+	 * (to avoid a race condition regarding moving the
+	 * action to the 'running' queue and back)
+	 */
+	zfcp_erp_action_to_running(erp_action);
+
+	/*
+	 * try to process action as far as possible,
+	 * no lock to allow for blocking operations (kmalloc, qdio, ...),
+	 * afterwards the lock is required again for the following reasons:
+	 * - dequeueing of finished action and enqueueing of
+	 *   follow-up actions must be atomic so that any other
+	 *   reopen-routine does not believe there is nothing to do
+	 *   and that it is safe to enqueue something else,
+	 * - we want to force any control thread which is dismissing
+	 *   actions to finish this before we decide about
+	 *   necessary steps to be taken here further
+	 */
+	write_unlock(&adapter->erp_lock);
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	retval = zfcp_erp_strategy_do_action(erp_action);
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	write_lock(&adapter->erp_lock);
+
+	/*
+	 * check for dismissed status again to avoid follow-up actions,
+	 * failing of targets and so on for dismissed actions
+	 */
+	retval = zfcp_erp_strategy_check_action(erp_action, retval);
+
+	switch (retval) {
+	case ZFCP_ERP_DISMISSED:
+		/* leave since this action has ridden to its ancestors */
+		debug_text_event(adapter->erp_dbf, 6, "a_st_dis2");
+		goto unlock;
+	case ZFCP_ERP_NOMEM:
+		/* no memory to continue immediately, let it sleep */
+		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
+			++adapter->erp_low_mem_count;
+			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
+		}
+		/* This condition is true if there is no memory available
+		   for any erp_action on this adapter. This implies that there
+		   are no elements in the memory pool(s) left for erp_actions.
+		   This might happen if an erp_action that used a memory pool
+		   element was timed out.
+		 */
+		if (adapter->erp_total_count == adapter->erp_low_mem_count) {
+			debug_text_event(adapter->erp_dbf, 3, "a_st_lowmem");
+			ZFCP_LOG_NORMAL("error: no mempool elements available, "
+					"restarting I/O on adapter %s "
+					"to free mempool\n",
+					zfcp_get_busid_by_adapter(adapter));
+			zfcp_erp_adapter_reopen_internal(adapter, 0);
+		} else {
+			debug_text_event(adapter->erp_dbf, 2, "a_st_memw");
+			retval = zfcp_erp_strategy_memwait(erp_action);
+		}
+		goto unlock;
+	case ZFCP_ERP_CONTINUES:
+		/* leave since this action runs asynchronously */
+		debug_text_event(adapter->erp_dbf, 6, "a_st_cont");
+		if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
+			--adapter->erp_low_mem_count;
+			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
+		}
+		goto unlock;
+	}
+	/* ok, finished action (whatever its result is) */
+
+	/* check for unrecoverable targets */
+	retval = zfcp_erp_strategy_check_target(erp_action, retval);
+
+	/* action must be dequeued (here to allow for further ones) */
+	zfcp_erp_action_dequeue(erp_action);
+
+	/*
+	 * put this target through the erp mill again if someone has
+	 * requested to change the status of a target being online
+	 * to offline or the other way around
+	 * (old retval is preserved if nothing has to be done here)
+	 */
+	retval = zfcp_erp_strategy_statechange(action, status, adapter,
+					       port, unit, retval);
+
+	/*
+	 * leave if target is in permanent error state or if
+	 * action is repeated in order to process state change
+	 */
+	if (retval == ZFCP_ERP_EXIT) {
+		debug_text_event(adapter->erp_dbf, 2, "a_st_exit");
+		goto unlock;
+	}
+
+	/* trigger follow up actions */
+	zfcp_erp_strategy_followup_actions(action, adapter, port, unit, retval);
+
+ unlock:
+	write_unlock(&adapter->erp_lock);
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+
+	/* cleanup must run without the erp lock held */
+	if (retval != ZFCP_ERP_CONTINUES)
+		zfcp_erp_action_cleanup(action, adapter, port, unit, retval);
+
+	/*
+	 * a few tasks remain when the erp queues are empty
+	 * (don't do that if the last action evaluated was dismissed
+	 * since this clearly indicates that there is more to come) :
+	 * - close the name server port if it is open yet
+	 *   (enqueues another [probably] final action)
+	 * - otherwise, wake up whoever wants to be woken when we are
+	 *   done with erp
+	 */
+	if (retval != ZFCP_ERP_DISMISSED)
+		zfcp_erp_strategy_check_queues(adapter);
+
+	debug_text_event(adapter->erp_dbf, 6, "a_st_done");
+
+	return retval;
+}
+
+/*
+ * function: zfcp_erp_strategy_check_action
+ *
+ * purpose: sorts out the fsf request associated with the action and
+ *	dequeues the action if it has been dismissed meanwhile
+ *
+ * returns: ZFCP_ERP_DISMISSED - if action has been dismissed
+ *	retval - otherwise
+ */
+static int
+zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	zfcp_erp_strategy_check_fsfreq(erp_action);
+
+	debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int));
+	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
+		debug_text_event(adapter->erp_dbf, 3, "a_stcd_dis");
+		zfcp_erp_action_dequeue(erp_action);
+		retval = ZFCP_ERP_DISMISSED;
+	} else
+		debug_text_event(adapter->erp_dbf, 5, "a_stcd_nodis");
+
+	return retval;
+}
+
+/*
+ * function: zfcp_erp_strategy_do_action
+ *
+ * purpose: dispatches the erp_action to the strategy routine matching
+ *	its action code
+ *
+ * returns: result of the selected strategy routine,
+ *	ZFCP_ERP_FAILED for an unknown action code
+ */
+static int
+zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
+{
+	int retval = ZFCP_ERP_FAILED;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	/*
+	 * try to execute/continue action as far as possible,
+	 * note: no lock in subsequent strategy routines
+	 * (this allows these routine to call schedule, e.g.
+	 * kmalloc with such flags or qdio_initialize & friends)
+	 * Note: in case of timeout, the separate strategies will fail
+	 * anyhow. No need for a special action. Even worse, a nameserver
+	 * failure would not wake up waiting ports without the call.
+	 */
+	switch (erp_action->action) {
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		retval = zfcp_erp_adapter_strategy(erp_action);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		retval = zfcp_erp_port_forced_strategy(erp_action);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		retval = zfcp_erp_port_strategy(erp_action);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+		retval = zfcp_erp_unit_strategy(erp_action);
+		break;
+
+	default:
+		debug_text_exception(adapter->erp_dbf, 1, "a_stda_bug");
+		debug_event(adapter->erp_dbf, 1, &erp_action->action,
+			    sizeof (int));
+		ZFCP_LOG_NORMAL("bug: unknown erp action requested on "
+				"adapter %s (action=%d)\n",
+				zfcp_get_busid_by_adapter(erp_action->adapter),
+				erp_action->action);
+	}
+
+	return retval;
+}
+
+/*
+ * function: zfcp_erp_strategy_memwait
+ *
+ * purpose: triggers retry of this action after a certain amount of time
+ *	by means of timer provided by erp_action
+ *	(ZFCP_ERP_MEMWAIT_TIMEOUT, handled by zfcp_erp_memwait_handler)
+ *
+ * returns: ZFCP_ERP_CONTINUES - erp_action sleeps in erp running queue
+ */
+static int
+zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
+{
+	int retval = ZFCP_ERP_CONTINUES;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	debug_text_event(adapter->erp_dbf, 6, "a_mwinit");
+	debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
+	init_timer(&erp_action->timer);
+	erp_action->timer.function = zfcp_erp_memwait_handler;
+	erp_action->timer.data = (unsigned long) erp_action;
+	erp_action->timer.expires = jiffies + ZFCP_ERP_MEMWAIT_TIMEOUT;
+	add_timer(&erp_action->timer);
+
+	return retval;
+}
+
+/*
+ * function: zfcp_erp_adapter_failed
+ *
+ * purpose: sets ZFCP_STATUS_COMMON_ERP_FAILED on the adapter; the common
+ *	flag is propagated to all underlying ports and units by
+ *	zfcp_erp_modify_adapter_status
+ */
+void
+zfcp_erp_adapter_failed(struct zfcp_adapter *adapter)
+{
+	zfcp_erp_modify_adapter_status(adapter,
+				       ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
+	ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n",
+			zfcp_get_busid_by_adapter(adapter));
+	debug_text_event(adapter->erp_dbf, 2, "a_afail");
+}
+
+/*
+ * function: zfcp_erp_port_failed
+ *
+ * purpose: sets ZFCP_STATUS_COMMON_ERP_FAILED on the port; the common
+ *	flag is propagated to all underlying units by
+ *	zfcp_erp_modify_port_status
+ */
+void
+zfcp_erp_port_failed(struct zfcp_port *port)
+{
+	zfcp_erp_modify_port_status(port,
+				    ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
+
+	/* well-known-address ports are logged by d_id, others by wwpn */
+	if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
+		ZFCP_LOG_NORMAL("port erp failed (adapter %s, "
+				"port d_id=0x%08x)\n",
+				zfcp_get_busid_by_port(port), port->d_id);
+	else
+		ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n",
+				zfcp_get_busid_by_port(port), port->wwpn);
+
+	debug_text_event(port->adapter->erp_dbf, 2, "p_pfail");
+	debug_event(port->adapter->erp_dbf, 2, &port->wwpn, sizeof (wwn_t));
+}
+
+/*
+ * function: zfcp_erp_unit_failed
+ *
+ * purpose: sets ZFCP_STATUS_COMMON_ERP_FAILED on the unit
+ */
+void
+zfcp_erp_unit_failed(struct zfcp_unit *unit)
+{
+	zfcp_erp_modify_unit_status(unit,
+				    ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
+
+	/* fixed: adjacent literals used to yield a double space
+	 * ("0x%016Lx  on adapter") in the emitted message */
+	ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx "
+			"on adapter %s\n", unit->fcp_lun,
+			unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+	debug_text_event(unit->port->adapter->erp_dbf, 2, "u_ufail");
+	debug_event(unit->port->adapter->erp_dbf, 2,
+		    &unit->fcp_lun, sizeof (fcp_lun_t));
+}
+
+/*
+ * function: zfcp_erp_strategy_check_target
+ *
+ * purpose: increments the erp action count on the device currently in
+ *	recovery if the action failed or resets the count in case of
+ *	success. If a maximum count is exceeded the device is marked
+ *	as ERP_FAILED.
+ *	The 'blocked' state of a target which has been recovered
+ *	successfully is reset.
+ *
+ * returns: ZFCP_ERP_CONTINUES - action continues (not considered)
+ *	ZFCP_ERP_SUCCEEDED - action finished successfully
+ *	ZFCP_ERP_EXIT - action failed and will not continue
+ */
+static int
+zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, int result)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_port *port = erp_action->port;
+	struct zfcp_unit *unit = erp_action->unit;
+
+	debug_text_event(adapter->erp_dbf, 5, "a_stct_norm");
+	debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int));
+	debug_event(adapter->erp_dbf, 5, &result, sizeof (int));
+
+	/* delegate to the check routine matching the recovery target */
+	switch (erp_action->action) {
+
+	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+		result = zfcp_erp_strategy_check_unit(unit, result);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		result = zfcp_erp_strategy_check_port(port, result);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		result = zfcp_erp_strategy_check_adapter(adapter, result);
+		break;
+	}
+
+	return result;
+}
+
+/*
+ * function: zfcp_erp_strategy_statechange
+ *
+ * purpose: re-enqueues recovery for a target whose desired online/offline
+ *	state changed while the action was running; the target's
+ *	failed state is cleared on the new attempt
+ *
+ * returns: ZFCP_ERP_EXIT if recovery was re-triggered, retval otherwise
+ */
+static int
+zfcp_erp_strategy_statechange(int action,
+			      u32 status,
+			      struct zfcp_adapter *adapter,
+			      struct zfcp_port *port,
+			      struct zfcp_unit *unit, int retval)
+{
+	debug_text_event(adapter->erp_dbf, 3, "a_stsc");
+	debug_event(adapter->erp_dbf, 3, &action, sizeof (int));
+
+	switch (action) {
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		if (zfcp_erp_strategy_statechange_detected(&adapter->status,
+							   status)) {
+			zfcp_erp_adapter_reopen_internal(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+			retval = ZFCP_ERP_EXIT;
+		}
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		if (zfcp_erp_strategy_statechange_detected(&port->status,
+							   status)) {
+			zfcp_erp_port_reopen_internal(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+			retval = ZFCP_ERP_EXIT;
+		}
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+		if (zfcp_erp_strategy_statechange_detected(&unit->status,
+							   status)) {
+			zfcp_erp_unit_reopen_internal(unit, ZFCP_STATUS_COMMON_ERP_FAILED);
+			retval = ZFCP_ERP_EXIT;
+		}
+		break;
+	}
+
+	return retval;
+}
+
+/*
+ * function: zfcp_erp_strategy_statechange_detected
+ *
+ * purpose: detects a mismatch between the target's RUNNING flag and the
+ *	CLOSE_ONLY intent recorded in the erp status at enqueue time
+ *
+ * returns: non-zero if the desired online/offline state changed
+ */
+static inline int
+zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status)
+{
+	return
+	    /* take it online */
+	    (atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
+	     (ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status)) ||
+	    /* take it offline */
+	    (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
+	     !(ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status));
+}
+
+/*
+ * zfcp_erp_strategy_check_unit - account for result of unit recovery
+ *
+ * Resets the erp counter and unblocks the unit on success; counts
+ * failures and marks the unit ERP_FAILED once ZFCP_MAX_ERPS retries
+ * have been exceeded.  A unit in failed state stays blocked and the
+ * result becomes ZFCP_ERP_EXIT.
+ */
+static int
+zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
+{
+	struct zfcp_adapter *adapter = unit->port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 5, "u_stct");
+	debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
+
+	if (result == ZFCP_ERP_SUCCEEDED) {
+		atomic_set(&unit->erp_counter, 0);
+		zfcp_erp_unit_unblock(unit);
+	} else if (result == ZFCP_ERP_FAILED) {
+		atomic_inc(&unit->erp_counter);
+		if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
+			zfcp_erp_unit_failed(unit);
+	}
+	/* ZFCP_ERP_EXIT: nothing to account */
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
+		zfcp_erp_unit_block(unit, 0); /* for ZFCP_ERP_SUCCEEDED */
+		result = ZFCP_ERP_EXIT;
+	}
+
+	return result;
+}
+
+/*
+ * zfcp_erp_strategy_check_port - account for result of port recovery
+ *
+ * Resets the erp counter and unblocks the port on success; counts
+ * failures and marks the port ERP_FAILED once ZFCP_MAX_ERPS retries
+ * have been exceeded.  A port in failed state stays blocked and the
+ * result becomes ZFCP_ERP_EXIT.
+ */
+static int
+zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 5, "p_stct");
+	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+
+	if (result == ZFCP_ERP_SUCCEEDED) {
+		atomic_set(&port->erp_counter, 0);
+		zfcp_erp_port_unblock(port);
+	} else if (result == ZFCP_ERP_FAILED) {
+		atomic_inc(&port->erp_counter);
+		if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
+			zfcp_erp_port_failed(port);
+	}
+	/* ZFCP_ERP_EXIT: nothing to account */
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
+		zfcp_erp_port_block(port, 0); /* for ZFCP_ERP_SUCCEEDED */
+		result = ZFCP_ERP_EXIT;
+	}
+
+	return result;
+}
+
+/*
+ * zfcp_erp_strategy_check_adapter - account for result of adapter recovery
+ *
+ * Resets the erp counter and unblocks the adapter on success; counts
+ * failures and marks the adapter ERP_FAILED once ZFCP_MAX_ERPS retries
+ * have been exceeded.  An adapter in failed state stays blocked and the
+ * result becomes ZFCP_ERP_EXIT.
+ */
+static int
+zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result)
+{
+	debug_text_event(adapter->erp_dbf, 5, "a_stct");
+
+	if (result == ZFCP_ERP_SUCCEEDED) {
+		atomic_set(&adapter->erp_counter, 0);
+		zfcp_erp_adapter_unblock(adapter);
+	} else if (result == ZFCP_ERP_FAILED) {
+		atomic_inc(&adapter->erp_counter);
+		if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
+			zfcp_erp_adapter_failed(adapter);
+	}
+	/* ZFCP_ERP_EXIT: nothing to account */
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
+		zfcp_erp_adapter_block(adapter, 0); /* for ZFCP_ERP_SUCCEEDED */
+		result = ZFCP_ERP_EXIT;
+	}
+
+	return result;
+}
+
+/*
+ * zfcp_erp_strategy_followup_actions - schedule follow-up recovery
+ *
+ * On success of a finished action the next lower-level recovery is
+ * enqueued (adapter -> all ports, forced port -> port, port -> all
+ * units); on failure the action is escalated one level up instead.
+ * A successful unit reopen needs no follow-up.
+ */
+static int
+zfcp_erp_strategy_followup_actions(int action,
+				   struct zfcp_adapter *adapter,
+				   struct zfcp_port *port,
+				   struct zfcp_unit *unit, int status)
+{
+	debug_text_event(adapter->erp_dbf, 5, "a_stfol");
+	debug_event(adapter->erp_dbf, 5, &action, sizeof (int));
+
+	/* initiate follow-up actions depending on success of finished action */
+	switch (action) {
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		if (status != ZFCP_ERP_SUCCEEDED)
+			zfcp_erp_adapter_reopen_internal(adapter, 0);
+		else
+			zfcp_erp_port_reopen_all_internal(adapter, 0);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		if (status != ZFCP_ERP_SUCCEEDED)
+			zfcp_erp_adapter_reopen_internal(adapter, 0);
+		else
+			zfcp_erp_port_reopen_internal(port, 0);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		if (status != ZFCP_ERP_SUCCEEDED)
+			zfcp_erp_port_forced_reopen_internal(port, 0);
+		else
+			zfcp_erp_unit_reopen_all_internal(port, 0);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+		/* no follow-up on success */
+		if (status != ZFCP_ERP_SUCCEEDED)
+			zfcp_erp_port_reopen_internal(unit->port, 0);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * function: zfcp_erp_strategy_check_queues
+ *
+ * purpose: if both erp queues are empty, clears the ERP_PENDING flag
+ *	and wakes everybody sleeping in zfcp_erp_wait
+ *
+ * returns: 0
+ */
+static int
+zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	read_lock(&adapter->erp_lock);
+	if (list_empty(&adapter->erp_ready_head) &&
+	    list_empty(&adapter->erp_running_head)) {
+		debug_text_event(adapter->erp_dbf, 4, "a_cq_wake");
+		atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+				  &adapter->status);
+		wake_up(&adapter->erp_done_wqh);
+	} else
+		debug_text_event(adapter->erp_dbf, 5, "a_cq_notempty");
+	read_unlock(&adapter->erp_lock);
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+
+	return 0;
+}
+
+/**
+ * zfcp_erp_wait - wait for completion of error recovery on an adapter
+ * @adapter: adapter for which to wait for completion of its error recovery
+ * Return: 0
+ *
+ * Sleeps until ZFCP_STATUS_ADAPTER_ERP_PENDING is cleared, which is
+ * done by zfcp_erp_strategy_check_queues once both erp queues are empty.
+ */
+int
+zfcp_erp_wait(struct zfcp_adapter *adapter)
+{
+	int retval = 0;
+
+	wait_event(adapter->erp_done_wqh,
+		   !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+				     &adapter->status));
+
+	return retval;
+}
+
+/*
+ * function: zfcp_erp_modify_adapter_status
+ *
+ * purpose: sets (ZFCP_SET) or clears (ZFCP_CLEAR) the given mask in the
+ *	adapter status; the bits covered by ZFCP_COMMON_FLAGS are
+ *	propagated to all ports (and from there to all units);
+ *	clearing ERP_FAILED also resets the erp retry counter
+ */
+void
+zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter,
+			       u32 mask, int set_or_clear)
+{
+	struct zfcp_port *port;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+
+	if (set_or_clear == ZFCP_SET) {
+		atomic_set_mask(mask, &adapter->status);
+		debug_text_event(adapter->erp_dbf, 3, "a_mod_as_s");
+	} else {
+		atomic_clear_mask(mask, &adapter->status);
+		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+			atomic_set(&adapter->erp_counter, 0);
+		debug_text_event(adapter->erp_dbf, 3, "a_mod_as_c");
+	}
+	debug_event(adapter->erp_dbf, 3, &mask, sizeof (u32));
+
+	/* Deal with all underlying devices, only pass common_mask */
+	if (common_mask)
+		list_for_each_entry(port, &adapter->port_list_head, list)
+		    zfcp_erp_modify_port_status(port, common_mask,
+						set_or_clear);
+}
+
+/*
+ * function:	zfcp_erp_modify_port_status
+ *
+ * purpose:	sets (ZFCP_SET) or clears (ZFCP_CLEAR) the given mask in the
+ *		port's status; when ERP_FAILED is cleared the port's ERP retry
+ *		counter is reset; any bits in ZFCP_COMMON_FLAGS are propagated
+ *		to all units of the port
+ */
+void
+zfcp_erp_modify_port_status(struct zfcp_port *port, u32 mask, int set_or_clear)
+{
+	struct zfcp_unit *unit;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+
+	if (set_or_clear == ZFCP_SET) {
+		atomic_set_mask(mask, &port->status);
+		debug_text_event(port->adapter->erp_dbf, 3, "p_mod_ps_s");
+	} else {
+		atomic_clear_mask(mask, &port->status);
+		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+			/* restart ERP retry accounting when FAILED is reset */
+			atomic_set(&port->erp_counter, 0);
+		debug_text_event(port->adapter->erp_dbf, 3, "p_mod_ps_c");
+	}
+	debug_event(port->adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
+	debug_event(port->adapter->erp_dbf, 3, &mask, sizeof (u32));
+
+	/* Modify status of all underlying devices, only pass common mask */
+	if (common_mask)
+		list_for_each_entry(unit, &port->unit_list_head, list)
+			zfcp_erp_modify_unit_status(unit, common_mask,
+						    set_or_clear);
+}
+
+/*
+ * function:	zfcp_erp_modify_unit_status
+ *
+ * purpose:	sets (ZFCP_SET) or clears (ZFCP_CLEAR) the given mask in the
+ *		unit's status; when ERP_FAILED is cleared the unit's ERP retry
+ *		counter is reset
+ */
+void
+zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear)
+{
+	if (set_or_clear == ZFCP_SET) {
+		atomic_set_mask(mask, &unit->status);
+		debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_s");
+	} else {
+		atomic_clear_mask(mask, &unit->status);
+		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
+			/* restart ERP retry accounting when FAILED is reset */
+			atomic_set(&unit->erp_counter, 0);
+		}
+		debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_c");
+	}
+	debug_event(unit->port->adapter->erp_dbf, 3, &unit->fcp_lun,
+		    sizeof (fcp_lun_t));
+	debug_event(unit->port->adapter->erp_dbf, 3, &mask, sizeof (u32));
+}
+
+/*
+ * function:	zfcp_erp_port_reopen_all
+ *
+ * purpose:	Wrapper for zfcp_erp_port_reopen_all_internal
+ *		used to ensure the correct locking (config_lock for
+ *		reading, the adapter's erp_lock for writing)
+ *
+ * returns:	0	- initiated action successfully
+ *		<0	- failed to initiate action
+ */
+int
+zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask)
+{
+	int retval;
+	unsigned long flags;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	write_lock(&adapter->erp_lock);
+	retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask);
+	write_unlock(&adapter->erp_lock);
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_reopen_all_internal
+ *
+ * purpose:	walks the adapter's port list and triggers a reopen action
+ *		for every port, skipping well-known-address (WKA) ports;
+ *		caller is responsible for holding the required locks
+ *		(see zfcp_erp_port_reopen_all)
+ *
+ * returns:	0 always
+ */
+static int
+zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter, int clear_mask)
+{
+	struct zfcp_port *port;
+
+	list_for_each_entry(port, &adapter->port_list_head, list) {
+		if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
+			continue;	/* never recover WKA ports here */
+		zfcp_erp_port_reopen_internal(port, clear_mask);
+	}
+
+	return 0;
+}
+
+/*
+ * function:	zfcp_erp_unit_reopen_all_internal
+ *
+ * purpose:	walks the port's unit list and triggers a reopen action for
+ *		every unit; caller is responsible for holding the required
+ *		locks
+ *
+ * returns:	0 always
+ */
+static int
+zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port, int clear_mask)
+{
+	struct zfcp_unit *unit;
+
+	list_for_each_entry(unit, &port->unit_list_head, list)
+		zfcp_erp_unit_reopen_internal(unit, clear_mask);
+
+	return 0;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy
+ *
+ * purpose:	this routine executes the 'Reopen Adapter' action
+ *		(the entire action is processed synchronously, since
+ *		there are no actions which might be run concurrently
+ *		per definition); the adapter is first closed, then -
+ *		unless this is a close-only request - opened again
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- action finished successfully
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ *		ZFCP_ERP_EXIT		- close-only request, no reopen done
+ */
+static int
+zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	retval = zfcp_erp_adapter_strategy_close(erp_action);
+	if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+		retval = ZFCP_ERP_EXIT;
+	else
+		retval = zfcp_erp_adapter_strategy_open(erp_action);
+
+	debug_text_event(adapter->erp_dbf, 3, "a_ast/ret");
+	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
+	debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
+
+	if (retval == ZFCP_ERP_FAILED) {
+		/* sleep ZFCP_TYPE2_RECOVERY_TIME before ERP retries, to give
+		 * the adapter a chance to recover on its own */
+		ZFCP_LOG_INFO("Waiting to allow the adapter %s "
+			      "to recover itself\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		msleep(jiffies_to_msecs(ZFCP_TYPE2_RECOVERY_TIME));
+	}
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_close
+ *
+ * purpose:	runs the generic adapter strategy in close mode, with the
+ *		CLOSING flag set in the adapter status for the duration
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- action finished successfully
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ */
+static int
+zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	int retval;
+
+	atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &adapter->status);
+	retval = zfcp_erp_adapter_strategy_generic(erp_action, 1);
+	atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &adapter->status);
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_open
+ *
+ * purpose:	runs the generic adapter strategy in open mode, with the
+ *		OPENING flag set in the adapter status for the duration
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- action finished successfully
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ */
+static int
+zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	int retval;
+
+	atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &adapter->status);
+	retval = zfcp_erp_adapter_strategy_generic(erp_action, 0);
+	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &adapter->status);
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_generic
+ *
+ * purpose:	closes (close != 0) or opens (close == 0) an adapter;
+ *		open path: bring up QDIO, then FSF; on success the
+ *		COMMON_OPEN flag is set; the close path (and a failure
+ *		of the FSF step) falls through the label ladder below
+ *		and tears down QDIO and FSF state again
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- action finished successfully
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ */
+static int
+zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
+{
+	int retval = ZFCP_ERP_SUCCEEDED;
+
+	if (close)
+		goto close_only;
+
+	retval = zfcp_erp_adapter_strategy_open_qdio(erp_action);
+	if (retval != ZFCP_ERP_SUCCEEDED)
+		goto failed_qdio;
+
+	retval = zfcp_erp_adapter_strategy_open_fsf(erp_action);
+	if (retval != ZFCP_ERP_SUCCEEDED)
+		goto failed_openfcp;
+
+	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &erp_action->adapter->status);
+	goto out;
+
+ close_only:
+	atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+			  &erp_action->adapter->status);
+
+ failed_openfcp:
+	/* close path falls through here: shut down QDIO and FSF state */
+	zfcp_erp_adapter_strategy_close_qdio(erp_action);
+	zfcp_erp_adapter_strategy_close_fsf(erp_action);
+ failed_qdio:
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_open_qdio
+ *
+ * purpose:	setup QDIO operation for specified adapter: establish and
+ *		activate the QDIO queues, prime the response queue with
+ *		empty SBALs via do_QDIO, and reset the request queue
+ *		accounting; on failure any partially set up QDIO state
+ *		is shut down again
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- successful setup
+ *		ZFCP_ERP_FAILED		- failed setup
+ */
+int
+zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	int i;
+	volatile struct qdio_buffer_element *sbale;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
+		ZFCP_LOG_NORMAL("bug: second attempt to set up QDIO on "
+				"adapter %s\n",
+				zfcp_get_busid_by_adapter(adapter));
+		goto failed_sanity;
+	}
+
+	if (qdio_establish(&adapter->qdio_init_data) != 0) {
+		ZFCP_LOG_INFO("error: establishment of QDIO queues failed "
+			      "on adapter %s\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		goto failed_qdio_establish;
+	}
+	debug_text_event(adapter->erp_dbf, 3, "qdio_est");
+
+	if (qdio_activate(adapter->ccw_device, 0) != 0) {
+		ZFCP_LOG_INFO("error: activation of QDIO queues failed "
+			      "on adapter %s\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		goto failed_qdio_activate;
+	}
+	debug_text_event(adapter->erp_dbf, 3, "qdio_act");
+
+	/*
+	 * put buffers into response queue,
+	 */
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+		sbale = &(adapter->response_queue.buffer[i]->element[0]);
+		sbale->length = 0;
+		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
+		sbale->addr = 0;
+	}
+
+	ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
+		       "queue_no=%i, index_in_queue=%i, count=%i)\n",
+		       zfcp_get_busid_by_adapter(adapter),
+		       QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q);
+
+	retval = do_QDIO(adapter->ccw_device,
+			 QDIO_FLAG_SYNC_INPUT,
+			 0, 0, QDIO_MAX_BUFFERS_PER_Q, NULL);
+
+	if (retval) {
+		ZFCP_LOG_NORMAL("bug: setup of QDIO failed (retval=%d)\n",
+				retval);
+		goto failed_do_qdio;
+	} else {
+		adapter->response_queue.free_index = 0;
+		atomic_set(&adapter->response_queue.free_count, 0);
+		ZFCP_LOG_DEBUG("%i buffers successfully enqueued to "
+			       "response queue\n", QDIO_MAX_BUFFERS_PER_Q);
+	}
+	/* set index of first available SBALS / number of available SBALS */
+	adapter->request_queue.free_index = 0;
+	atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q);
+	adapter->request_queue.distance_from_int = 0;
+
+	/* initialize waitqueue used to wait for free SBALs in requests queue */
+	init_waitqueue_head(&adapter->request_wq);
+
+	/* ok, we did it - skip all cleanups for different failures */
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	retval = ZFCP_ERP_SUCCEEDED;
+	goto out;
+
+ failed_do_qdio:
+	/* NOP */
+
+ failed_qdio_activate:
+	debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
+	/* retry shutdown while qdio layer reports it is still in progress */
+	while (qdio_shutdown(adapter->ccw_device,
+			     QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
+		msleep(1000);
+	debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
+
+ failed_qdio_establish:
+ failed_sanity:
+	retval = ZFCP_ERP_FAILED;
+
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_close_qdio
+ *
+ * purpose:	cleans up QDIO operation for the specified adapter: shuts
+ *		down the QDIO queues, zeroes any request SBALs still in
+ *		flight and resets the queue accounting
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- successful cleanup
+ *		ZFCP_ERP_FAILED		- QDIO was not up in the first place
+ */
+int
+zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
+{
+	int retval = ZFCP_ERP_SUCCEEDED;
+	int first_used;
+	int used_count;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
+		ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
+			       "queues on adapter %s\n",
+			       zfcp_get_busid_by_adapter(adapter));
+		retval = ZFCP_ERP_FAILED;
+		goto out;
+	}
+
+	/*
+	 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
+	 * do_QDIO won't be called while qdio_shutdown is in progress.
+	 */
+
+	write_lock_irq(&adapter->request_queue.queue_lock);
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	write_unlock_irq(&adapter->request_queue.queue_lock);
+
+	debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
+	/* retry shutdown while qdio layer reports it is still in progress */
+	while (qdio_shutdown(adapter->ccw_device,
+			     QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
+		msleep(1000);
+	debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
+
+	/*
+	 * First we had to stop QDIO operation.
+	 * Now it is safe to take the following actions.
+	 */
+
+	/* Cleanup only necessary when there are unacknowledged buffers */
+	if (atomic_read(&adapter->request_queue.free_count)
+	    < QDIO_MAX_BUFFERS_PER_Q) {
+		first_used = (adapter->request_queue.free_index +
+			      atomic_read(&adapter->request_queue.free_count))
+			% QDIO_MAX_BUFFERS_PER_Q;
+		used_count = QDIO_MAX_BUFFERS_PER_Q -
+			atomic_read(&adapter->request_queue.free_count);
+		zfcp_qdio_zero_sbals(adapter->request_queue.buffer,
+				     first_used, used_count);
+	}
+	adapter->response_queue.free_index = 0;
+	atomic_set(&adapter->response_queue.free_count, 0);
+	adapter->request_queue.free_index = 0;
+	atomic_set(&adapter->request_queue.free_count, 0);
+	adapter->request_queue.distance_from_int = 0;
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_open_fsf
+ *
+ * purpose:	initializes FSF operation for the specified adapter:
+ *		exchanges configuration data with the adapter and then
+ *		posts the desired number of unsolicited status reads
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- successful initialization
+ *		ZFCP_ERP_FAILED		- failed to initialize FSF operation
+ */
+static int
+zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+
+	/* step 1: 'exchange configuration data' */
+	retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
+	if (retval == ZFCP_ERP_FAILED)
+		return retval;
+
+	/* step 2: start the desired number of Status Reads */
+	return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_open_fsf_xconfig
+ *
+ * purpose:	performs the FSF 'exchange configuration data' step of
+ *		adapter recovery, retrying up to
+ *		ZFCP_EXCHANGE_CONFIG_DATA_RETRIES times while the host
+ *		connection is still initialising
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- XCONFIG_OK was reached
+ *		ZFCP_ERP_FAILED		- initiation failed, timed out, or
+ *					  retries exhausted
+ */
+static int
+zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
+{
+	int retval = ZFCP_ERP_SUCCEEDED;
+	int retries;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+	retries = ZFCP_EXCHANGE_CONFIG_DATA_RETRIES;
+
+	do {
+		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				  &adapter->status);
+		ZFCP_LOG_DEBUG("Doing exchange config data\n");
+		zfcp_erp_action_to_running(erp_action);
+		zfcp_erp_timeout_init(erp_action);
+		if (zfcp_fsf_exchange_config_data(erp_action)) {
+			retval = ZFCP_ERP_FAILED;
+			debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
+			ZFCP_LOG_INFO("error: initiation of exchange of "
+				      "configuration data failed for "
+				      "adapter %s\n",
+				      zfcp_get_busid_by_adapter(adapter));
+			break;
+		}
+		debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
+		ZFCP_LOG_DEBUG("Xchange underway\n");
+
+		/*
+		 * Why this works:
+		 * Both the normal completion handler as well as the timeout
+		 * handler will do an 'up' when the 'exchange config data'
+		 * request completes or times out. Thus, the signal to go on
+		 * won't be lost utilizing this semaphore.
+		 * Furthermore, this 'adapter_reopen' action is
+		 * guaranteed to be the only action being there (highest action
+		 * which prevents other actions from being created).
+		 * Resulting from that, the wake signal recognized here
+		 * _must_ be the one belonging to the 'exchange config
+		 * data' request.
+		 */
+		down(&adapter->erp_ready_sem);
+		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+			ZFCP_LOG_INFO("error: exchange of configuration data "
+				      "for adapter %s timed out\n",
+				      zfcp_get_busid_by_adapter(adapter));
+			break;
+		}
+		if (atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				     &adapter->status)) {
+			ZFCP_LOG_DEBUG("host connection still initialising... "
+				       "waiting and retrying...\n");
+			/* sleep a little bit before retry */
+			msleep(jiffies_to_msecs(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP));
+		}
+	} while ((retries--) &&
+		 atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				  &adapter->status));
+
+	/* XCONFIG_OK is set by the request's completion handling, not here */
+	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+			      &adapter->status)) {
+		ZFCP_LOG_INFO("error: exchange of configuration data for "
+			      "adapter %s failed\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		retval = ZFCP_ERP_FAILED;
+	}
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_open_fsf_statusread
+ *
+ * purpose:	posts ZFCP_STATUS_READS_RECOM unsolicited status read
+ *		requests on the adapter; stops at the first request that
+ *		cannot be set up
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED	- all status reads posted
+ *		ZFCP_ERP_FAILED		- setup of a status read failed
+ */
+static int
+zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
+					      *erp_action)
+{
+	int retval = ZFCP_ERP_SUCCEEDED;
+	int temp_ret;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	int i;
+
+	adapter->status_read_failed = 0;
+	for (i = 0; i < ZFCP_STATUS_READS_RECOM; i++) {
+		temp_ret = zfcp_fsf_status_read(adapter, ZFCP_WAIT_FOR_SBAL);
+		if (temp_ret < 0) {
+			ZFCP_LOG_INFO("error: set-up of unsolicited status "
+				      "notification failed on adapter %s\n",
+				      zfcp_get_busid_by_adapter(adapter));
+			retval = ZFCP_ERP_FAILED;
+			/* 'i' is not read after the loop, so no need to
+			 * adjust it before breaking out */
+			break;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_adapter_strategy_close_fsf
+ *
+ * purpose:	cleanup FSF operation for specified adapter: dismisses all
+ *		outstanding FSF requests, resets the FSF request sequence
+ *		number and clears the COMMON_OPEN flag for the adapter
+ *		(and, via the common mask, all its ports and units)
+ *
+ * returns:	ZFCP_ERP_SUCCEEDED always
+ */
+static int
+zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
+{
+	int retval = ZFCP_ERP_SUCCEEDED;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	/*
+	 * wake waiting initiators of requests,
+	 * return SCSI commands (with error status),
+	 * clean up all requests (synchronously)
+	 */
+	zfcp_fsf_req_dismiss_all(adapter);
+	/* reset FSF request sequence number */
+	adapter->fsf_req_seq_no = 0;
+	/* all ports and units are closed */
+	zfcp_erp_modify_adapter_status(adapter,
+				       ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_forced_strategy
+ *
+ * purpose:	this routine executes the 'Reopen Physical Port' action:
+ *		step UNINITIALIZED issues a close-physical-port request,
+ *		step PHYS_PORT_CLOSING evaluates its result
+ *
+ * returns:	ZFCP_ERP_CONTINUES	- action continues (asynchronously)
+ *		ZFCP_ERP_SUCCEEDED	- action finished successfully
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ */
+static int
+zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
+{
+	int retval = ZFCP_ERP_FAILED;
+	struct zfcp_port *port = erp_action->port;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	switch (erp_action->step) {
+
+		/*
+		 * FIXME:
+		 * the ULP spec. begs for waiting for outstanding commands
+		 */
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		zfcp_erp_port_strategy_clearstati(port);
+		/*
+		 * it would be sufficient to test only the normal open flag
+		 * since the phys. open flag cannot be set if the normal
+		 * open flag is unset - however, this is for readability ...
+		 */
+		if (atomic_test_mask((ZFCP_STATUS_PORT_PHYS_OPEN |
+				      ZFCP_STATUS_COMMON_OPEN),
+				     &port->status)) {
+			ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
+				       "close physical\n", port->wwpn);
+			retval =
+			    zfcp_erp_port_forced_strategy_close(erp_action);
+		} else
+			retval = ZFCP_ERP_FAILED;
+		break;
+
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+		if (atomic_test_mask(ZFCP_STATUS_PORT_PHYS_OPEN,
+				     &port->status)) {
+			ZFCP_LOG_DEBUG("close physical failed for port "
+				       "0x%016Lx\n", port->wwpn);
+			retval = ZFCP_ERP_FAILED;
+		} else
+			retval = ZFCP_ERP_SUCCEEDED;
+		break;
+	}
+
+	debug_text_event(adapter->erp_dbf, 3, "p_pfst/ret");
+	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
+	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
+	debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_strategy
+ *
+ * purpose:	this routine executes the 'Reopen Port' action: the port is
+ *		closed first if necessary (steps UNINITIALIZED and
+ *		PORT_CLOSING), then reopened via the open strategy unless
+ *		this is a close-only request
+ *
+ * returns:	ZFCP_ERP_CONTINUES	- action continues (asynchronously)
+ *		ZFCP_ERP_SUCCEEDED	- action finished successfully
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ *		ZFCP_ERP_EXIT		- close-only request, nothing reopened
+ */
+static int
+zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
+{
+	int retval = ZFCP_ERP_FAILED;
+	struct zfcp_port *port = erp_action->port;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	switch (erp_action->step) {
+
+		/*
+		 * FIXME:
+		 * the ULP spec. begs for waiting for outstanding commands
+		 */
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		zfcp_erp_port_strategy_clearstati(port);
+		if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
+			ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
+				       "close\n", port->wwpn);
+			retval = zfcp_erp_port_strategy_close(erp_action);
+			goto out;
+		} /* else it's already closed, open it */
+		break;
+
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+		if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
+			ZFCP_LOG_DEBUG("close failed for port 0x%016Lx\n",
+				       port->wwpn);
+			retval = ZFCP_ERP_FAILED;
+			goto out;
+		} /* else it's closed now, open it */
+		break;
+	}
+	if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+		retval = ZFCP_ERP_EXIT;
+	else
+		retval = zfcp_erp_port_strategy_open(erp_action);
+
+ out:
+	debug_text_event(adapter->erp_dbf, 3, "p_pst/ret");
+	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
+	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
+	debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_strategy_open
+ *
+ * purpose:	dispatches the port-open strategy: well-known-address (WKA)
+ *		ports take the nameserver path, all other ports the common
+ *		path
+ *
+ * returns:	result of the selected open strategy
+ */
+static int
+zfcp_erp_port_strategy_open(struct zfcp_erp_action *erp_action)
+{
+	if (atomic_test_mask(ZFCP_STATUS_PORT_WKA,
+			     &erp_action->port->status))
+		return zfcp_erp_port_strategy_open_nameserver(erp_action);
+
+	return zfcp_erp_port_strategy_open_common(erp_action);
+}
+
+/*
+ * function:	zfcp_erp_port_strategy_open_common
+ *
+ * purpose:	open strategy for a regular (non-WKA) port; steps through:
+ *		ensure the nameserver port exists and is open (pausing this
+ *		action until it is), look the port's d_id up at the
+ *		nameserver, then open the port itself
+ *
+ * returns:	ZFCP_ERP_CONTINUES / ZFCP_ERP_SUCCEEDED / ZFCP_ERP_FAILED /
+ *		ZFCP_ERP_EXIT (invalid WWPN, port marked failed)
+ *
+ * FIXME(design): currently only prepared for fabric (nameserver!)
+ */
+static int
+zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action)
+{
+	int retval = 0;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_port *port = erp_action->port;
+
+	switch (erp_action->step) {
+
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+		if (!(adapter->nameserver_port)) {
+			retval = zfcp_nameserver_enqueue(adapter);
+			if (retval != 0) {
+				ZFCP_LOG_NORMAL("error: nameserver port "
+						"unavailable for adapter %s\n",
+						zfcp_get_busid_by_adapter(adapter));
+				retval = ZFCP_ERP_FAILED;
+				break;
+			}
+		}
+		if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+				      &adapter->nameserver_port->status)) {
+			ZFCP_LOG_DEBUG("nameserver port is not open -> open "
+				       "nameserver port\n");
+			/* nameserver port may live again */
+			atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
+					&adapter->nameserver_port->status);
+			/* this action pauses in NAMESERVER_OPEN until the
+			 * nameserver wakeup reactivates it */
+			if (zfcp_erp_port_reopen(adapter->nameserver_port, 0)
+			    >= 0) {
+				erp_action->step =
+					ZFCP_ERP_STEP_NAMESERVER_OPEN;
+				retval = ZFCP_ERP_CONTINUES;
+			} else
+				retval = ZFCP_ERP_FAILED;
+			break;
+		}
+		/* else nameserver port is already open, fall through */
+	case ZFCP_ERP_STEP_NAMESERVER_OPEN:
+		if (!atomic_test_mask(ZFCP_STATUS_COMMON_OPEN,
+				      &adapter->nameserver_port->status)) {
+			ZFCP_LOG_DEBUG("open failed for nameserver port\n");
+			retval = ZFCP_ERP_FAILED;
+		} else {
+			ZFCP_LOG_DEBUG("nameserver port is open -> "
+				       "nameserver look-up for port 0x%016Lx\n",
+				       port->wwpn);
+			retval = zfcp_erp_port_strategy_open_common_lookup
+				(erp_action);
+		}
+		break;
+
+	case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
+		if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) {
+			if (atomic_test_mask
+			    (ZFCP_STATUS_PORT_INVALID_WWPN, &port->status)) {
+				ZFCP_LOG_DEBUG("nameserver look-up failed "
+					       "for port 0x%016Lx "
+					       "(misconfigured WWPN?)\n",
+					       port->wwpn);
+				/* unknown WWPN: no point in retrying */
+				zfcp_erp_port_failed(port);
+				retval = ZFCP_ERP_EXIT;
+			} else {
+				ZFCP_LOG_DEBUG("nameserver look-up failed for "
+					       "port 0x%016Lx\n", port->wwpn);
+				retval = ZFCP_ERP_FAILED;
+			}
+		} else {
+			ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%08x -> "
+				       "trying open\n", port->wwpn, port->d_id);
+			retval = zfcp_erp_port_strategy_open_port(erp_action);
+		}
+		break;
+
+	case ZFCP_ERP_STEP_PORT_OPENING:
+		/* D_ID might have changed during open */
+		if (atomic_test_mask((ZFCP_STATUS_COMMON_OPEN |
+				      ZFCP_STATUS_PORT_DID_DID),
+				     &port->status)) {
+			ZFCP_LOG_DEBUG("port 0x%016Lx is open\n", port->wwpn);
+			retval = ZFCP_ERP_SUCCEEDED;
+		} else {
+			ZFCP_LOG_DEBUG("open failed for port 0x%016Lx\n",
+				       port->wwpn);
+			retval = ZFCP_ERP_FAILED;
+		}
+		break;
+
+	default:
+		ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
+				erp_action->step);
+		retval = ZFCP_ERP_FAILED;
+	}
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_strategy_open_nameserver
+ *
+ * purpose:	open strategy for a well-known-address (WKA) port such as
+ *		the nameserver port; on completion of the open it wakes up
+ *		all port actions that were paused waiting for the
+ *		nameserver
+ *
+ * returns:	ZFCP_ERP_CONTINUES / ZFCP_ERP_SUCCEEDED / ZFCP_ERP_FAILED
+ */
+static int
+zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	struct zfcp_port *port = erp_action->port;
+
+	switch (erp_action->step) {
+
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+		ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%08x -> trying open\n",
+			       port->wwpn, port->d_id);
+		retval = zfcp_erp_port_strategy_open_port(erp_action);
+		break;
+
+	case ZFCP_ERP_STEP_PORT_OPENING:
+		if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
+			ZFCP_LOG_DEBUG("WKA port is open\n");
+			retval = ZFCP_ERP_SUCCEEDED;
+		} else {
+			ZFCP_LOG_DEBUG("open failed for WKA port\n");
+			retval = ZFCP_ERP_FAILED;
+		}
+		/* this is needed anyway (don't care for retval of wakeup) */
+		ZFCP_LOG_DEBUG("continue other open port operations\n");
+		zfcp_erp_port_strategy_open_nameserver_wakeup(erp_action);
+		break;
+
+	default:
+		ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
+				erp_action->step);
+		retval = ZFCP_ERP_FAILED;
+	}
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_strategy_open_nameserver_wakeup
+ *
+ * purpose:	makes the erp thread continue with reopen (physical) port
+ *		actions which have been paused until the name server port
+ *		is opened (or failed); if the nameserver port ended up in
+ *		ERP_FAILED, the waiting ports are marked failed as well
+ *
+ * returns:	0 (a kind of void retval, its not used)
+ */
+static int
+zfcp_erp_port_strategy_open_nameserver_wakeup(struct zfcp_erp_action
+					      *ns_erp_action)
+{
+	int retval = 0;
+	unsigned long flags;
+	struct zfcp_adapter *adapter = ns_erp_action->adapter;
+	struct zfcp_erp_action *erp_action, *tmp;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head,
+				 list) {
+		debug_text_event(adapter->erp_dbf, 4, "p_pstnsw_n");
+		debug_event(adapter->erp_dbf, 4, &erp_action->port->wwpn,
+			    sizeof (wwn_t));
+		/* only actions parked in NAMESERVER_OPEN are woken up */
+		if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
+			debug_text_event(adapter->erp_dbf, 3, "p_pstnsw_w");
+			debug_event(adapter->erp_dbf, 3,
+				    &erp_action->port->wwpn, sizeof (wwn_t));
+			if (atomic_test_mask(
+				    ZFCP_STATUS_COMMON_ERP_FAILED,
+				    &adapter->nameserver_port->status))
+				zfcp_erp_port_failed(erp_action->port);
+			zfcp_erp_action_ready(erp_action);
+		}
+	}
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_forced_strategy_close
+ *
+ * purpose:	sends a close-physical-port request and advances the erp
+ *		action to step PHYS_PORT_CLOSING
+ *
+ * returns:	ZFCP_ERP_CONTINUES	- action continues (asynchronously)
+ *		ZFCP_ERP_NOMEM		- request allocation failed, retry
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ */
+static int
+zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_port *port = erp_action->port;
+
+	zfcp_erp_timeout_init(erp_action);
+	retval = zfcp_fsf_close_physical_port(erp_action);
+	if (retval == -ENOMEM) {
+		debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		retval = ZFCP_ERP_NOMEM;
+		goto out;
+	}
+	erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
+	if (retval != 0) {
+		debug_text_event(adapter->erp_dbf, 5, "o_pfstc_cpf");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		/* could not send 'close', fail */
+		retval = ZFCP_ERP_FAILED;
+		goto out;
+	}
+	debug_text_event(adapter->erp_dbf, 6, "o_pfstc_cpok");
+	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
+	retval = ZFCP_ERP_CONTINUES;
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_strategy_clearstati
+ *
+ * purpose:	resets the transient per-recovery status bits of a port
+ *		before a fresh (forced) reopen attempt
+ *
+ * returns:	0 always
+ */
+static int
+zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+	u32 clear_flags = ZFCP_STATUS_COMMON_OPENING |
+			  ZFCP_STATUS_COMMON_CLOSING |
+			  ZFCP_STATUS_COMMON_ACCESS_DENIED |
+			  ZFCP_STATUS_PORT_DID_DID |
+			  ZFCP_STATUS_PORT_PHYS_CLOSING |
+			  ZFCP_STATUS_PORT_INVALID_WWPN;
+
+	debug_text_event(adapter->erp_dbf, 5, "p_pstclst");
+	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+
+	atomic_clear_mask(clear_flags, &port->status);
+
+	return 0;
+}
+
+/*
+ * function:	zfcp_erp_port_strategy_close
+ *
+ * purpose:	sends a close-port request and advances the erp action to
+ *		step PORT_CLOSING
+ *
+ * returns:	ZFCP_ERP_CONTINUES	- action continues (asynchronously)
+ *		ZFCP_ERP_NOMEM		- request allocation failed, retry
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ */
+static int
+zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_port *port = erp_action->port;
+
+	zfcp_erp_timeout_init(erp_action);
+	retval = zfcp_fsf_close_port(erp_action);
+	if (retval == -ENOMEM) {
+		debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		retval = ZFCP_ERP_NOMEM;
+		goto out;
+	}
+	erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
+	if (retval != 0) {
+		debug_text_event(adapter->erp_dbf, 5, "p_pstc_cpf");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		/* could not send 'close', fail */
+		retval = ZFCP_ERP_FAILED;
+		goto out;
+	}
+	debug_text_event(adapter->erp_dbf, 6, "p_pstc_cpok");
+	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
+	retval = ZFCP_ERP_CONTINUES;
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_strategy_open_port
+ *
+ * purpose:	sends an open-port request and advances the erp action to
+ *		step PORT_OPENING
+ *
+ * returns:	ZFCP_ERP_CONTINUES	- action continues (asynchronously)
+ *		ZFCP_ERP_NOMEM		- request allocation failed, retry
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ */
+static int
+zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_port *port = erp_action->port;
+
+	zfcp_erp_timeout_init(erp_action);
+	retval = zfcp_fsf_open_port(erp_action);
+	if (retval == -ENOMEM) {
+		debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		retval = ZFCP_ERP_NOMEM;
+		goto out;
+	}
+	erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
+	if (retval != 0) {
+		debug_text_event(adapter->erp_dbf, 5, "p_psto_opf");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		/* could not send 'open', fail */
+		retval = ZFCP_ERP_FAILED;
+		goto out;
+	}
+	debug_text_event(adapter->erp_dbf, 6, "p_psto_opok");
+	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
+	retval = ZFCP_ERP_CONTINUES;
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_port_strategy_open_common_lookup
+ *
+ * purpose:	sends a nameserver GID_PN request for the port and advances
+ *		the erp action to step NAMESERVER_LOOKUP
+ *
+ * returns:	ZFCP_ERP_CONTINUES	- action continues (asynchronously)
+ *		ZFCP_ERP_NOMEM		- request allocation failed, retry
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ */
+static int
+zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_port *port = erp_action->port;
+
+	zfcp_erp_timeout_init(erp_action);
+	retval = zfcp_ns_gid_pn_request(erp_action);
+	if (retval == -ENOMEM) {
+		debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		retval = ZFCP_ERP_NOMEM;
+		goto out;
+	}
+	erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
+	if (retval != 0) {
+		debug_text_event(adapter->erp_dbf, 5, "p_pstn_ref");
+		debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+		/* could not send nameserver request, fail */
+		retval = ZFCP_ERP_FAILED;
+		goto out;
+	}
+	debug_text_event(adapter->erp_dbf, 6, "p_pstn_reok");
+	debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
+	retval = ZFCP_ERP_CONTINUES;
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_unit_strategy
+ *
+ * purpose:	this routine executes the 'Reopen Unit' action
+ *		currently no retries; the unit is closed first if
+ *		necessary, then reopened unless this is a close-only
+ *		request
+ *
+ * returns:	ZFCP_ERP_CONTINUES	- action continues (asynchronously)
+ *		ZFCP_ERP_SUCCEEDED	- action finished successfully
+ *		ZFCP_ERP_FAILED		- action finished unsuccessfully
+ *		ZFCP_ERP_EXIT		- close-only request, nothing reopened
+ */
+static int
+zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
+{
+	int retval = ZFCP_ERP_FAILED;
+	struct zfcp_unit *unit = erp_action->unit;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	switch (erp_action->step) {
+
+		/*
+		 * FIXME:
+		 * the ULP spec. begs for waiting for outstanding commands
+		 */
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		zfcp_erp_unit_strategy_clearstati(unit);
+		if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
+			ZFCP_LOG_DEBUG("unit 0x%016Lx is open -> "
+				       "trying close\n", unit->fcp_lun);
+			retval = zfcp_erp_unit_strategy_close(erp_action);
+			break;
+		}
+		/* else it's already closed, fall through */
+	case ZFCP_ERP_STEP_UNIT_CLOSING:
+		if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
+			ZFCP_LOG_DEBUG("close failed for unit 0x%016Lx\n",
+				       unit->fcp_lun);
+			retval = ZFCP_ERP_FAILED;
+		} else {
+			if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+				retval = ZFCP_ERP_EXIT;
+			else {
+				ZFCP_LOG_DEBUG("unit 0x%016Lx is not open -> "
+					       "trying open\n", unit->fcp_lun);
+				retval =
+				    zfcp_erp_unit_strategy_open(erp_action);
+			}
+		}
+		break;
+
+	case ZFCP_ERP_STEP_UNIT_OPENING:
+		if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
+			ZFCP_LOG_DEBUG("unit 0x%016Lx is open\n",
+				       unit->fcp_lun);
+			retval = ZFCP_ERP_SUCCEEDED;
+		} else {
+			ZFCP_LOG_DEBUG("open failed for unit 0x%016Lx\n",
+				       unit->fcp_lun);
+			retval = ZFCP_ERP_FAILED;
+		}
+		break;
+	}
+
+	debug_text_event(adapter->erp_dbf, 3, "u_ust/ret");
+	debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof (fcp_lun_t));
+	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
+	debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
+	return retval;
+}
+
+/*
+ * function:	zfcp_erp_unit_strategy_clearstati
+ *
+ * purpose:	resets the transient per-recovery status bits of a unit
+ *		before a fresh reopen attempt
+ *
+ * returns:	0 always
+ */
+static int
+zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
+{
+	struct zfcp_adapter *adapter = unit->port->adapter;
+	u32 clear_flags = ZFCP_STATUS_COMMON_OPENING |
+			  ZFCP_STATUS_COMMON_CLOSING |
+			  ZFCP_STATUS_COMMON_ACCESS_DENIED |
+			  ZFCP_STATUS_UNIT_SHARED |
+			  ZFCP_STATUS_UNIT_READONLY;
+
+	debug_text_event(adapter->erp_dbf, 5, "u_ustclst");
+	debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
+
+	atomic_clear_mask(clear_flags, &unit->status);
+
+	return 0;
+}
+
+/*
+ * function:
+ *
+ * purpose:
+ *
+ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
+ * ZFCP_ERP_FAILED - action finished unsuccessfully
+ */
+static int
+zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_unit *unit = erp_action->unit;
+	int rc;
+
+	/* arm the FSF request timeout, then try to send a close-unit request */
+	zfcp_erp_timeout_init(erp_action);
+	rc = zfcp_fsf_close_unit(erp_action);
+	if (rc == -ENOMEM) {
+		debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem");
+		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
+			    sizeof (fcp_lun_t));
+		return ZFCP_ERP_NOMEM;
+	}
+	erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
+	if (rc != 0) {
+		debug_text_event(adapter->erp_dbf, 5, "u_ustc_cuf");
+		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
+			    sizeof (fcp_lun_t));
+		/* could not send 'close', fail */
+		return ZFCP_ERP_FAILED;
+	}
+	/* request is on its way; completion continues the action */
+	debug_text_event(adapter->erp_dbf, 6, "u_ustc_cuok");
+	debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
+	return ZFCP_ERP_CONTINUES;
+}
+
+/*
+ * function:
+ *
+ * purpose:
+ *
+ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
+ * ZFCP_ERP_FAILED - action finished unsuccessfully
+ */
+static int
+zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_unit *unit = erp_action->unit;
+	int rc;
+
+	/* arm the FSF request timeout, then try to send an open-unit request */
+	zfcp_erp_timeout_init(erp_action);
+	rc = zfcp_fsf_open_unit(erp_action);
+	if (rc == -ENOMEM) {
+		debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem");
+		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
+			    sizeof (fcp_lun_t));
+		return ZFCP_ERP_NOMEM;
+	}
+	erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
+	if (rc != 0) {
+		debug_text_event(adapter->erp_dbf, 5, "u_usto_ouf");
+		debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
+			    sizeof (fcp_lun_t));
+		/* could not send 'open', fail */
+		return ZFCP_ERP_FAILED;
+	}
+	/* request is on its way; completion continues the action */
+	debug_text_event(adapter->erp_dbf, 6, "u_usto_ouok");
+	debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
+	return ZFCP_ERP_CONTINUES;
+}
+
+/*
+ * function:
+ *
+ * purpose:
+ *
+ * returns:
+ */
+static inline void
+zfcp_erp_timeout_init(struct zfcp_erp_action *erp_action)
+{
+	struct timer_list *timer = &erp_action->timer;
+
+	/* (re)initialize the per-action timer for the next FSF request */
+	init_timer(timer);
+	timer->data = (unsigned long) erp_action;
+	timer->function = zfcp_erp_timeout_handler;
+	/* jiffies will be added in zfcp_fsf_req_send */
+	timer->expires = ZFCP_ERP_FSFREQ_TIMEOUT;
+}
+
+/*
+ * function:
+ *
+ * purpose: enqueue the specified error recovery action, if needed
+ *
+ * returns:
+ */
+static int
+zfcp_erp_action_enqueue(int action,
+			struct zfcp_adapter *adapter,
+			struct zfcp_port *port, struct zfcp_unit *unit)
+{
+	int retval = 1;
+	struct zfcp_erp_action *erp_action = NULL;
+	/* escalation target if the requested action cannot succeed as-is */
+	int stronger_action = 0;
+	u32 status = 0;
+
+	/*
+	 * We need some rules here which check whether we really need
+	 * this action or whether we should just drop it.
+	 * E.g. if there is a unfinished 'Reopen Port' request then we drop a
+	 * 'Reopen Unit' request for an associated unit since we can't
+	 * satisfy this request now. A 'Reopen Port' action will trigger
+	 * 'Reopen Unit' actions when it completes.
+	 * Thus, there are only actions in the queue which can immediately be
+	 * executed. This makes the processing of the action queue more
+	 * efficient.
+	 */
+
+	/* without a running ERP thread nobody would process the action */
+	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
+			      &adapter->status))
+		return -EIO;
+
+	debug_event(adapter->erp_dbf, 4, &action, sizeof (int));
+	/* check whether we really need this */
+	switch (action) {
+	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+		/* drop: the unit is already being recovered */
+		if (atomic_test_mask
+		    (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) {
+			debug_text_event(adapter->erp_dbf, 4, "u_actenq_drp");
+			debug_event(adapter->erp_dbf, 4, &port->wwpn,
+				    sizeof (wwn_t));
+			debug_event(adapter->erp_dbf, 4, &unit->fcp_lun,
+				    sizeof (fcp_lun_t));
+			goto out;
+		}
+		/* drop: the parent port is not running or has failed */
+		if (!atomic_test_mask
+		    (ZFCP_STATUS_COMMON_RUNNING, &port->status) ||
+		    atomic_test_mask
+		    (ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
+			goto out;
+		}
+		/* port is blocked -> escalate to a port reopen instead */
+		if (!atomic_test_mask
+		    (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) {
+			stronger_action = ZFCP_ERP_ACTION_REOPEN_PORT;
+			unit = NULL;
+		}
+		/* fall through !!! */
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		/* drop: the port is already being recovered */
+		if (atomic_test_mask
+		    (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) {
+			debug_text_event(adapter->erp_dbf, 4, "p_actenq_drp");
+			debug_event(adapter->erp_dbf, 4, &port->wwpn,
+				    sizeof (wwn_t));
+			goto out;
+		}
+		/* fall through !!! */
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		/* drop: a forced port reopen is already in progress */
+		if (atomic_test_mask
+		    (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)
+		    && port->erp_action.action ==
+		    ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
+			debug_text_event(adapter->erp_dbf, 4, "pf_actenq_drp");
+			debug_event(adapter->erp_dbf, 4, &port->wwpn,
+				    sizeof (wwn_t));
+			goto out;
+		}
+		/* drop: the adapter is not running or has failed */
+		if (!atomic_test_mask
+		    (ZFCP_STATUS_COMMON_RUNNING, &adapter->status) ||
+		    atomic_test_mask
+		    (ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
+			goto out;
+		}
+		/* adapter is blocked -> escalate to an adapter reopen */
+		if (!atomic_test_mask
+		    (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) {
+			stronger_action = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
+			port = NULL;
+		}
+		/* fall through !!! */
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		/* drop: the adapter is already being recovered */
+		if (atomic_test_mask
+		    (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) {
+			debug_text_event(adapter->erp_dbf, 4, "a_actenq_drp");
+			goto out;
+		}
+		break;
+
+	default:
+		debug_text_exception(adapter->erp_dbf, 1, "a_actenq_bug");
+		debug_event(adapter->erp_dbf, 1, &action, sizeof (int));
+		ZFCP_LOG_NORMAL("bug: unknown erp action requested "
+				"on adapter %s (action=%d)\n",
+				zfcp_get_busid_by_adapter(adapter), action);
+		goto out;
+	}
+
+	/* check whether we need something stronger first */
+	if (stronger_action) {
+		debug_text_event(adapter->erp_dbf, 4, "a_actenq_str");
+		debug_event(adapter->erp_dbf, 4, &stronger_action,
+			    sizeof (int));
+		ZFCP_LOG_DEBUG("stronger erp action %d needed before "
+			       "erp action %d on adapter %s\n",
+			       stronger_action, action,
+			       zfcp_get_busid_by_adapter(adapter));
+		action = stronger_action;
+	}
+
+	/* mark adapter to have some error recovery pending */
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+
+	/* setup error recovery action: take a reference on the target
+	 * object, mark it ERP_INUSE, and pick its embedded erp_action */
+	switch (action) {
+
+	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+		zfcp_unit_get(unit);
+		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
+		erp_action = &unit->erp_action;
+		if (!atomic_test_mask
+		    (ZFCP_STATUS_COMMON_RUNNING, &unit->status))
+			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		zfcp_port_get(port);
+		zfcp_erp_action_dismiss_port(port);
+		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+		erp_action = &port->erp_action;
+		if (!atomic_test_mask
+		    (ZFCP_STATUS_COMMON_RUNNING, &port->status))
+			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		zfcp_adapter_get(adapter);
+		zfcp_erp_action_dismiss_adapter(adapter);
+		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+		erp_action = &adapter->erp_action;
+		if (!atomic_test_mask
+		    (ZFCP_STATUS_COMMON_RUNNING, &adapter->status))
+			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+	}
+
+	debug_text_event(adapter->erp_dbf, 4, "a_actenq");
+
+	memset(erp_action, 0, sizeof (struct zfcp_erp_action));
+	erp_action->adapter = adapter;
+	erp_action->port = port;
+	erp_action->unit = unit;
+	erp_action->action = action;
+	erp_action->status = status;
+
+	++adapter->erp_total_count;
+
+	/* finally put it into 'ready' queue and kick erp thread */
+	list_add(&erp_action->list, &adapter->erp_ready_head);
+	up(&adapter->erp_ready_sem);
+	retval = 0;
+ out:
+	return retval;
+}
+
+/*
+ * function:
+ *
+ * purpose:
+ *
+ * returns:
+ */
+static int
+zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	/* one action less in flight; forget any low-memory bookkeeping */
+	--adapter->erp_total_count;
+	if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
+		--adapter->erp_low_mem_count;
+		erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
+	}
+
+	debug_text_event(adapter->erp_dbf, 4, "a_actdeq");
+	debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof (int));
+	list_del(&erp_action->list);
+
+	/* clear the ERP_INUSE marker on whichever object was being recovered */
+	switch (erp_action->action) {
+	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &erp_action->unit->status);
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &erp_action->port->status);
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &erp_action->adapter->status);
+		break;
+	default:
+		/* bug */
+		break;
+	}
+	return 0;
+}
+
+/**
+ * zfcp_erp_action_cleanup
+ *
+ * Register unit with scsi stack if appropriate and fix reference counts.
+ * Note: Temporary units are not registered with scsi stack.
+ */
+static void
+zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
+			struct zfcp_port *port, struct zfcp_unit *unit,
+			int result)
+{
+	switch (action) {
+	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+		/* register the unit with the SCSI stack only if recovery
+		 * succeeded, the unit is not temporary, and it is not
+		 * registered yet (unit->device still unset) */
+		if ((result == ZFCP_ERP_SUCCEEDED)
+		    && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY,
+					  &unit->status))
+		    && (!unit->device))
+			scsi_add_device(unit->port->adapter->scsi_host, 0,
+					unit->port->scsi_id, unit->scsi_lun);
+		/* drop the reference taken when the action was enqueued */
+		zfcp_unit_put(unit);
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		zfcp_port_put(port);
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		zfcp_adapter_put(adapter);
+		break;
+	default:
+		break;
+	}
+}
+
+
+/*
+ * function:
+ *
+ * purpose:
+ *
+ * returns: FIXME
+ */
+static int
+zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+{
+	struct zfcp_port *port;
+
+	debug_text_event(adapter->erp_dbf, 5, "a_actab");
+	/* dismiss the adapter's own action if one is in use; otherwise
+	 * walk the port list and dismiss subordinate actions instead */
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status))
+		zfcp_erp_action_dismiss(&adapter->erp_action);
+	else
+		list_for_each_entry(port, &adapter->port_list_head, list)
+			zfcp_erp_action_dismiss_port(port);
+
+	return 0;
+}
+
+/*
+ * function:
+ *
+ * purpose:
+ *
+ * returns: FIXME
+ */
+static int
+zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+	struct zfcp_unit *unit;
+
+	debug_text_event(adapter->erp_dbf, 5, "p_actab");
+	debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
+	/* dismiss the port's own action if one is in use; otherwise
+	 * walk the unit list and dismiss subordinate actions instead */
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status))
+		zfcp_erp_action_dismiss(&port->erp_action);
+	else
+		list_for_each_entry(unit, &port->unit_list_head, list)
+			zfcp_erp_action_dismiss_unit(unit);
+
+	return 0;
+}
+
+/*
+ * function:
+ *
+ * purpose:
+ *
+ * returns: FIXME
+ */
+static int
+zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
+{
+	struct zfcp_adapter *adapter = unit->port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 5, "u_actab");
+	debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
+	/* dismiss a pending recovery action for this unit, if any */
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
+		zfcp_erp_action_dismiss(&unit->erp_action);
+
+	return 0;
+}
+
+/*
+ * function:
+ *
+ * purpose: moves erp_action to 'erp running list'
+ *
+ * returns:
+ */
+static inline void
+zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	/* trace the transition, then move the action onto the running list */
+	debug_text_event(adapter->erp_dbf, 6, "a_toru");
+	debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
+	list_move(&erp_action->list, &adapter->erp_running_head);
+}
+
+/*
+ * function:
+ *
+ * purpose: moves erp_action to 'erp ready list'
+ *
+ * returns:
+ */
+static inline void
+zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	/* trace the transition, then move the action onto the ready list */
+	debug_text_event(adapter->erp_dbf, 6, "a_tore");
+	debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
+	list_move(&erp_action->list, &adapter->erp_ready_head);
+}
+
+/*
+ * function: zfcp_erp_port_access_denied
+ *
+ * purpose:
+ */
+void
+zfcp_erp_port_access_denied(struct zfcp_port *port)
+{
+	unsigned long lock_flags;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 3, "p_access_block");
+	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
+	/* mark the port failed and access-denied under the config lock */
+	read_lock_irqsave(&zfcp_data.config_lock, lock_flags);
+	zfcp_erp_modify_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED |
+				    ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
+	read_unlock_irqrestore(&zfcp_data.config_lock, lock_flags);
+}
+
+/*
+ * function: zfcp_erp_unit_access_denied
+ *
+ * purpose:
+ */
+void
+zfcp_erp_unit_access_denied(struct zfcp_unit *unit)
+{
+	struct zfcp_adapter *adapter = unit->port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 3, "u_access_block");
+	debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
+	/* mark the unit failed and access-denied; note no config_lock is
+	 * taken here (unlike zfcp_erp_port_access_denied) */
+	zfcp_erp_modify_unit_status(unit, ZFCP_STATUS_COMMON_ERP_FAILED |
+				    ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
+}
+
+/*
+ * function: zfcp_erp_adapter_access_changed
+ *
+ * purpose:
+ */
+void
+zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter)
+{
+	unsigned long lock_flags;
+	struct zfcp_port *port;
+
+	debug_text_event(adapter->erp_dbf, 3, "a_access_unblock");
+	debug_event(adapter->erp_dbf, 3, &adapter->name, 8);
+
+	/* re-check the nameserver port first, then every other port
+	 * under the config lock */
+	zfcp_erp_port_access_changed(adapter->nameserver_port);
+	read_lock_irqsave(&zfcp_data.config_lock, lock_flags);
+	list_for_each_entry(port, &adapter->port_list_head, list)
+		if (port != adapter->nameserver_port)
+			zfcp_erp_port_access_changed(port);
+	read_unlock_irqrestore(&zfcp_data.config_lock, lock_flags);
+}
+
+/*
+ * function: zfcp_erp_port_access_changed
+ *
+ * purpose:
+ */
+void
+zfcp_erp_port_access_changed(struct zfcp_port *port)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+	struct zfcp_unit *unit;
+
+	debug_text_event(adapter->erp_dbf, 3, "p_access_unblock");
+	debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
+			     &port->status)) {
+		/* access was denied for this port -> force a reopen */
+		ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s "
+				"(due to ACT update)\n",
+				port->wwpn, zfcp_get_busid_by_adapter(adapter));
+		if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+			ZFCP_LOG_NORMAL("failed reopen of port"
+					"(adapter %s, wwpn=0x%016Lx)\n",
+					zfcp_get_busid_by_adapter(adapter),
+					port->wwpn);
+		return;
+	}
+
+	/* access not denied: propagate the check to all subordinate units
+	 * unless this is a well-known-address port */
+	if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
+		list_for_each_entry(unit, &port->unit_list_head, list)
+			zfcp_erp_unit_access_changed(unit);
+}
+
+/*
+ * function: zfcp_erp_unit_access_changed
+ *
+ * purpose:
+ */
+void
+zfcp_erp_unit_access_changed(struct zfcp_unit *unit)
+{
+	struct zfcp_adapter *adapter = unit->port->adapter;
+
+	debug_text_event(adapter->erp_dbf, 3, "u_access_unblock");
+	debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
+
+	if (atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status)) {
+		/* access was denied for this unit -> force a reopen */
+		ZFCP_LOG_NORMAL("reopen of unit 0x%016Lx on port 0x%016Lx "
+				" on adapter %s (due to ACT update)\n",
+				unit->fcp_lun, unit->port->wwpn,
+				zfcp_get_busid_by_adapter(adapter));
+		if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+			ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, "
+					"wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
+					zfcp_get_busid_by_adapter(adapter),
+					unit->port->wwpn, unit->fcp_lun);
+	}
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
new file mode 100644
index 000000000000..d5fd43352071
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -0,0 +1,186 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_ext.h
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * (C) Copyright IBM Corp. 2002, 2004
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Raimund Schroeder <raimund.schroeder@de.ibm.com>
+ * Aron Zeh
+ * Wolfgang Taphorn
+ * Stefan Bader <stefan.bader@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef ZFCP_EXT_H
+#define ZFCP_EXT_H
+
+#define ZFCP_EXT_REVISION "$Revision: 1.62 $"
+
+#include "zfcp_def.h"
+
+extern struct zfcp_data zfcp_data;
+
+/******************************** SYSFS *************************************/
+extern int zfcp_sysfs_driver_create_files(struct device_driver *);
+extern void zfcp_sysfs_driver_remove_files(struct device_driver *);
+extern int zfcp_sysfs_adapter_create_files(struct device *);
+extern void zfcp_sysfs_adapter_remove_files(struct device *);
+extern int zfcp_sysfs_port_create_files(struct device *, u32);
+extern void zfcp_sysfs_port_remove_files(struct device *, u32);
+extern int zfcp_sysfs_unit_create_files(struct device *);
+extern void zfcp_sysfs_unit_remove_files(struct device *);
+extern void zfcp_sysfs_port_release(struct device *);
+extern void zfcp_sysfs_unit_release(struct device *);
+
+/**************************** CONFIGURATION *********************************/
+extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, fcp_lun_t);
+extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, wwn_t);
+extern struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *, u32);
+struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
+extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
+extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
+extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
+extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
+extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t,
+ u32, u32);
+extern void zfcp_port_dequeue(struct zfcp_port *);
+extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
+extern void zfcp_unit_dequeue(struct zfcp_unit *);
+
+/******************************* S/390 IO ************************************/
+extern int zfcp_ccw_register(void);
+extern void zfcp_ccw_unregister(void);
+
+extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int);
+extern int zfcp_qdio_allocate(struct zfcp_adapter *);
+extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
+extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
+extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
+ struct zfcp_fsf_req *);
+extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
+
+extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
+ (struct zfcp_fsf_req *, int, int);
+extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr
+ (struct zfcp_fsf_req *);
+extern int zfcp_qdio_sbals_from_sg
+ (struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int);
+extern int zfcp_qdio_sbals_from_scsicmnd
+ (struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *);
+
+
+/******************************** FSF ****************************************/
+extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
+
+extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
+
+extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_port_data(struct zfcp_adapter *,
+ struct fsf_qtcb_bottom_port *);
+extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
+ u32, u32, struct zfcp_sg_list *);
+extern void zfcp_fsf_request_timeout_handler(unsigned long);
+extern void zfcp_fsf_scsi_er_timeout_handler(unsigned long);
+extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
+extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
+extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
+ unsigned long *, struct zfcp_fsf_req **);
+extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
+ struct zfcp_erp_action *);
+extern int zfcp_fsf_send_els(struct zfcp_send_els *);
+extern int zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *, int, u32 *);
+extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
+ struct zfcp_unit *,
+ struct scsi_cmnd *,
+ struct timer_list*, int);
+extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
+extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
+extern void zfcp_fsf_req_cleanup(struct zfcp_fsf_req *);
+extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management(
+ struct zfcp_adapter *, struct zfcp_unit *, u8, int);
+extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(
+ unsigned long, struct zfcp_adapter *, struct zfcp_unit *, int);
+
+/******************************* FC/FCP **************************************/
+extern int zfcp_nameserver_enqueue(struct zfcp_adapter *);
+extern int zfcp_ns_gid_pn_request(struct zfcp_erp_action *);
+extern int zfcp_check_ct_response(struct ct_hdr *);
+extern int zfcp_handle_els_rjt(u32, struct zfcp_ls_rjt_par *);
+
+/******************************* SCSI ****************************************/
+extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
+extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
+extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
+extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
+extern void set_host_byte(u32 *, char);
+extern void set_driver_byte(u32 *, char);
+extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
+extern void zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *);
+extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
+
+extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
+ struct scsi_cmnd *, struct timer_list *);
+extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *,
+ struct timer_list *);
+extern struct scsi_transport_template *zfcp_transport_template;
+extern struct fc_function_template zfcp_transport_functions;
+
+/******************************** ERP ****************************************/
+extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
+extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
+extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
+extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
+
+extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
+extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
+extern int zfcp_erp_port_shutdown(struct zfcp_port *, int);
+extern int zfcp_erp_port_forced_reopen(struct zfcp_port *, int);
+extern void zfcp_erp_port_failed(struct zfcp_port *);
+extern int zfcp_erp_port_reopen_all(struct zfcp_adapter *, int);
+
+extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u32, int);
+extern int zfcp_erp_unit_reopen(struct zfcp_unit *, int);
+extern int zfcp_erp_unit_shutdown(struct zfcp_unit *, int);
+extern void zfcp_erp_unit_failed(struct zfcp_unit *);
+
+extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
+extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
+extern int zfcp_erp_wait(struct zfcp_adapter *);
+extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
+
+extern int zfcp_test_link(struct zfcp_port *);
+
+extern void zfcp_erp_port_access_denied(struct zfcp_port *);
+extern void zfcp_erp_unit_access_denied(struct zfcp_unit *);
+extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *);
+extern void zfcp_erp_port_access_changed(struct zfcp_port *);
+extern void zfcp_erp_unit_access_changed(struct zfcp_unit *);
+
+/******************************** AUX ****************************************/
+extern void zfcp_cmd_dbf_event_fsf(const char *, struct zfcp_fsf_req *,
+ void *, int);
+extern void zfcp_cmd_dbf_event_scsi(const char *, struct scsi_cmnd *);
+extern void zfcp_in_els_dbf_event(struct zfcp_adapter *, const char *,
+ struct fsf_status_read_buffer *, int);
+#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
new file mode 100644
index 000000000000..578b9fbe5206
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -0,0 +1,5087 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_fsf.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * (C) Copyright IBM Corp. 2002, 2004
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Raimund Schroeder <raimund.schroeder@de.ibm.com>
+ * Aron Zeh
+ * Wolfgang Taphorn
+ * Stefan Bader <stefan.bader@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ * Volker Sameske <sameske@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_FSF_C_REVISION "$Revision: 1.92 $"
+
+#include "zfcp_ext.h"
+
+static int zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *);
+static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_open_port_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_close_port_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_send_fcp_command_task_management_handler(
+ struct zfcp_fsf_req *);
+static int zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_status_read_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *);
+static int zfcp_fsf_control_file_handler(struct zfcp_fsf_req *);
+static inline int zfcp_fsf_req_sbal_check(
+ unsigned long *, struct zfcp_qdio_queue *, int);
+static inline int zfcp_use_one_sbal(
+ struct scatterlist *, int, struct scatterlist *, int);
+static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int);
+static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *);
+static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
+static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
+static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
+static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
+static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
+static void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+
+/*
+ * Association between an FSF command code (array index, FSF_QTCB_*) and
+ * the QTCB type that has to be used when issuing that command.
+ */
+static u32 fsf_qtcb_type[] = {
+	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
+	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
+	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
+	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
+};
+
+/* printable names of the CFDC access control table subtable types
+ * (indexed by subtable code; presumably matches zfcp_act_subtable_type
+ * usage in the control file handler — confirm against callers) */
+static const char zfcp_act_subtable_type[5][8] = {
+	"unknown", "OS", "WWPN", "DID", "LUN"
+};
+
+/****************************************************************/
+/*************** FSF related Functions *************************/
+/****************************************************************/
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
+
+/*
+ * zfcp_fsf_req_alloc - obtain memory for an fsf_req, optionally with a QTCB
+ * @pool:	mempool to draw from; NULL means plain kmalloc
+ * @req_flags:	ZFCP_REQ_* flags; ZFCP_REQ_NO_QTCB requests a bare fsf_req
+ *		(used for unsolicited status reads, which carry no QTCB)
+ *
+ * Allocates (GFP_ATOMIC) and zeroes either a bare struct zfcp_fsf_req or
+ * a pool element embedding both the fsf_req and its QTCB; in the latter
+ * case fsf_req->qtcb is pointed at the embedded QTCB.  The originating
+ * pool (possibly NULL) is stored in fsf_req->pool so that
+ * zfcp_fsf_req_free() can release the memory the same way it was
+ * obtained.
+ *
+ * returns:	pointer to the initialized fsf_req if successful
+ *		NULL otherwise
+ *
+ * locks:	none
+ */
+static struct zfcp_fsf_req *
+zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
+{
+	size_t size;
+	void *ptr;
+	struct zfcp_fsf_req *fsf_req = NULL;
+
+	/* bare request vs. request + embedded QTCB */
+	if (req_flags & ZFCP_REQ_NO_QTCB)
+		size = sizeof(struct zfcp_fsf_req);
+	else
+		size = sizeof(struct zfcp_fsf_req_pool_element);
+
+	if (likely(pool != NULL))
+		ptr = mempool_alloc(pool, GFP_ATOMIC);
+	else
+		ptr = kmalloc(size, GFP_ATOMIC);
+
+	if (unlikely(NULL == ptr))
+		goto out;
+
+	memset(ptr, 0, size);
+
+	if (req_flags & ZFCP_REQ_NO_QTCB) {
+		fsf_req = (struct zfcp_fsf_req *) ptr;
+	} else {
+		fsf_req = &((struct zfcp_fsf_req_pool_element *) ptr)->fsf_req;
+		fsf_req->qtcb =
+			&((struct zfcp_fsf_req_pool_element *) ptr)->qtcb;
+	}
+
+	/* remember origin so zfcp_fsf_req_free() frees accordingly */
+	fsf_req->pool = pool;
+
+ out:
+	return fsf_req;
+}
+
+/*
+ * zfcp_fsf_req_free - release fsf_req memory
+ * @fsf_req:	request previously obtained via zfcp_fsf_req_alloc()
+ *
+ * Returns the memory of an fsf_req (and its embedded QTCB, if any) to
+ * the mempool it came from, or kfree()s it when it was kmalloc'ed
+ * (fsf_req->pool == NULL).
+ *
+ * returns:	nothing
+ *
+ * locks:	none
+ */
+static void
+zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
+{
+	if (likely(fsf_req->pool != NULL))
+		mempool_free(fsf_req, fsf_req->pool);
+	else
+		kfree(fsf_req);
+}
+
+/*
+ * zfcp_fsf_req_dismiss_all - dismiss all requests pending on an adapter
+ * @adapter:	adapter whose fsf_req list is to be drained
+ *
+ * Dismisses every request still on the adapter's fsf_req list (each is
+ * completed with ZFCP_STATUS_FSFREQ_DISMISSED set), then busy-waits in
+ * ZFCP_FSFREQ_CLEANUP_TIMEOUT steps until the woken initiators have
+ * removed their requests from the list.  Finally resets the active
+ * request counter if it is (unexpectedly) non-zero.
+ *
+ * returns:	always 0
+ *
+ * note:	qdio queues shall be down (no ongoing inbound processing)
+ */
+int
+zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
+{
+	int retval = 0;
+	struct zfcp_fsf_req *fsf_req, *tmp;
+
+	list_for_each_entry_safe(fsf_req, tmp, &adapter->fsf_req_list_head,
+				 list)
+		zfcp_fsf_req_dismiss(fsf_req);
+	/* wait_event_timeout? */
+	while (!list_empty(&adapter->fsf_req_list_head)) {
+		ZFCP_LOG_DEBUG("fsf req list of adapter %s not yet empty\n",
+			       zfcp_get_busid_by_adapter(adapter));
+		/* wait for woken initiators to clean up their requests */
+		msleep(jiffies_to_msecs(ZFCP_FSFREQ_CLEANUP_TIMEOUT));
+	}
+
+	/* consistency check */
+	if (atomic_read(&adapter->fsf_reqs_active)) {
+		ZFCP_LOG_NORMAL("bug: There are still %d FSF requests pending "
+				"on adapter %s after cleanup.\n",
+				atomic_read(&adapter->fsf_reqs_active),
+				zfcp_get_busid_by_adapter(adapter));
+		atomic_set(&adapter->fsf_reqs_active, 0);
+	}
+
+	return retval;
+}
+
+/*
+ * zfcp_fsf_req_dismiss - dismiss a single outstanding request
+ * @fsf_req:	request to be dismissed
+ *
+ * Marks the request ZFCP_STATUS_FSFREQ_DISMISSED and pushes it through
+ * normal completion processing, so initiators waiting for it are woken
+ * and cleanup happens via the usual path.
+ *
+ * returns:	nothing
+ */
+static void
+zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req)
+{
+	fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+	zfcp_fsf_req_complete(fsf_req);
+}
+
+/*
+ * zfcp_fsf_req_complete - process completion of an FSF request
+ * @fsf_req:	request that has completed (or was dismissed)
+ *
+ * Decrements the adapter's active request count and then either hands
+ * unsolicited status reads to zfcp_fsf_status_read_handler() (which does
+ * all of its own cleanup) or evaluates the QTCB via
+ * zfcp_fsf_protstatus_eval().  Afterwards the request is either cleaned
+ * up directly (initiator asked for ZFCP_STATUS_FSFREQ_CLEANUP) or the
+ * initiator waiting on completion_wq is woken.
+ *
+ * returns:	always 0 (retval is never set to anything else here)
+ *
+ * context:	caller must not hold the fsf_req list lock
+ */
+int
+zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = 0;
+	int cleanup;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+
+	/* do some statistics */
+	atomic_dec(&adapter->fsf_reqs_active);
+
+	if (unlikely(fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+		ZFCP_LOG_DEBUG("Status read response received\n");
+		/*
+		 * Note: all cleanup handling is done in the callchain of
+		 * the function call-chain below.
+		 */
+		zfcp_fsf_status_read_handler(fsf_req);
+		goto out;
+	} else
+		zfcp_fsf_protstatus_eval(fsf_req);
+
+	/*
+	 * fsf_req may be deleted due to waking up functions, so
+	 * cleanup is saved here and used later
+	 */
+	if (likely(fsf_req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
+		cleanup = 1;
+	else
+		cleanup = 0;
+
+	fsf_req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
+
+	/* cleanup request if requested by initiator */
+	if (likely(cleanup)) {
+		ZFCP_LOG_TRACE("removing FSF request %p\n", fsf_req);
+		/*
+		 * lock must not be held here since it will be
+		 * grabbed by the called routine, too
+		 */
+		zfcp_fsf_req_cleanup(fsf_req);
+	} else {
+		/* notify initiator waiting for the requests completion */
+		ZFCP_LOG_TRACE("waking initiator of FSF request %p\n",fsf_req);
+		/*
+		 * FIXME: Race! We must not access fsf_req here as it might have been
+		 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
+		 * flag. It's an improbable case. But, we have the same paranoia for
+		 * the cleanup flag already.
+		 * Might better be handled using complete()?
+		 * (setting the flag and doing wakeup ought to be atomic
+		 * with regard to checking the flag as long as waitqueue is
+		 * part of the to be released structure)
+		 */
+		wake_up(&fsf_req->completion_wq);
+	}
+
+ out:
+	return retval;
+}
+
+/*
+ * zfcp_fsf_protstatus_eval - evaluate protocol status of a finished request
+ * @fsf_req:	completed FSF request whose QTCB is to be evaluated
+ *
+ * Examines the QTCB protocol status (qtcb->prefix.prot_status) of the
+ * finished request, dumps any FSF-provided log data (after sanity
+ * checking its bounds against the QTCB size), and reacts to protocol
+ * level errors, typically by reopening or shutting down the adapter via
+ * the erp module and flagging the request ZFCP_STATUS_FSFREQ_ERROR
+ * (plus _RETRY where a retry makes sense).  Dismissed requests skip the
+ * evaluation entirely and are flagged ERROR|RETRY directly.
+ *
+ * Always falls through to zfcp_fsf_fsfstatus_eval() so that the command
+ * specific handlers get a chance to run even in error cases.
+ *
+ * returns:	always 0
+ *
+ * context:	response queue handler (calls serialized by the qdio module)
+ */
+static int
+zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = 0;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+
+	ZFCP_LOG_DEBUG("QTCB is at %p\n", fsf_req->qtcb);
+
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+		ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
+			       (unsigned long) fsf_req);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
+		zfcp_cmd_dbf_event_fsf("dismiss", fsf_req, NULL, 0);
+		goto skip_protstatus;
+	}
+
+	/* log additional information provided by FSF (if any) */
+	if (unlikely(fsf_req->qtcb->header.log_length)) {
+		/* do not trust them ;-) */
+		if (fsf_req->qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
+			ZFCP_LOG_NORMAL
+			    ("bug: ULP (FSF logging) log data starts "
+			     "beyond end of packet header. Ignored. "
+			     "(start=%i, size=%li)\n",
+			     fsf_req->qtcb->header.log_start,
+			     sizeof(struct fsf_qtcb));
+			goto forget_log;
+		}
+		if ((size_t) (fsf_req->qtcb->header.log_start +
+		     fsf_req->qtcb->header.log_length)
+		    > sizeof(struct fsf_qtcb)) {
+			ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
+					"beyond end of packet header. Ignored. "
+					"(start=%i, length=%i, size=%li)\n",
+					fsf_req->qtcb->header.log_start,
+					fsf_req->qtcb->header.log_length,
+					sizeof(struct fsf_qtcb));
+			goto forget_log;
+		}
+		ZFCP_LOG_TRACE("ULP log data: \n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
+			      (char *) fsf_req->qtcb +
+			      fsf_req->qtcb->header.log_start,
+			      fsf_req->qtcb->header.log_length);
+	}
+ forget_log:
+
+	/* evaluate FSF Protocol Status */
+	switch (fsf_req->qtcb->prefix.prot_status) {
+
+	case FSF_PROT_GOOD:
+		ZFCP_LOG_TRACE("FSF_PROT_GOOD\n");
+		break;
+
+	case FSF_PROT_FSF_STATUS_PRESENTED:
+		ZFCP_LOG_TRACE("FSF_PROT_FSF_STATUS_PRESENTED\n");
+		break;
+
+	case FSF_PROT_QTCB_VERSION_ERROR:
+		ZFCP_LOG_FLAGS(0, "FSF_PROT_QTCB_VERSION_ERROR\n");
+		ZFCP_LOG_NORMAL("error: The adapter %s contains "
+				"microcode of version 0x%x, the device driver "
+				"only supports 0x%x. Aborting.\n",
+				zfcp_get_busid_by_adapter(adapter),
+				fsf_req->qtcb->prefix.prot_status_qual.
+				version_error.fsf_version, ZFCP_QTCB_VERSION);
+		/* stop operation for this adapter */
+		debug_text_exception(adapter->erp_dbf, 0, "prot_ver_err");
+		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_cmd_dbf_event_fsf("qverserr", fsf_req,
+				       &fsf_req->qtcb->prefix.prot_status_qual,
+				       sizeof (union fsf_prot_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PROT_SEQ_NUMB_ERROR:
+		ZFCP_LOG_FLAGS(0, "FSF_PROT_SEQ_NUMB_ERROR\n");
+		ZFCP_LOG_NORMAL("bug: Sequence number mismatch between "
+				"driver (0x%x) and adapter %s (0x%x). "
+				"Restarting all operations on this adapter.\n",
+				fsf_req->qtcb->prefix.req_seq_no,
+				zfcp_get_busid_by_adapter(adapter),
+				fsf_req->qtcb->prefix.prot_status_qual.
+				sequence_error.exp_req_seq_no);
+		debug_text_exception(adapter->erp_dbf, 0, "prot_seq_err");
+		/* restart operation on this adapter */
+		zfcp_erp_adapter_reopen(adapter, 0);
+		zfcp_cmd_dbf_event_fsf("seqnoerr", fsf_req,
+				       &fsf_req->qtcb->prefix.prot_status_qual,
+				       sizeof (union fsf_prot_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PROT_UNSUPP_QTCB_TYPE:
+		ZFCP_LOG_FLAGS(0, "FSF_PROT_UNSUP_QTCB_TYPE\n");
+		ZFCP_LOG_NORMAL("error: Packet header type used by the "
+				"device driver is incompatible with "
+				"that used on adapter %s. "
+				"Stopping all operations on this adapter.\n",
+				zfcp_get_busid_by_adapter(adapter));
+		debug_text_exception(adapter->erp_dbf, 0, "prot_unsup_qtcb");
+		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_cmd_dbf_event_fsf("unsqtcbt", fsf_req,
+				       &fsf_req->qtcb->prefix.prot_status_qual,
+				       sizeof (union fsf_prot_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
+		ZFCP_LOG_FLAGS(1, "FSF_PROT_HOST_CONNECTION_INITIALIZING\n");
+		zfcp_cmd_dbf_event_fsf("hconinit", fsf_req,
+				       &fsf_req->qtcb->prefix.prot_status_qual,
+				       sizeof (union fsf_prot_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				&(adapter->status));
+		debug_text_event(adapter->erp_dbf, 3, "prot_con_init");
+		break;
+
+	case FSF_PROT_DUPLICATE_REQUEST_ID:
+		ZFCP_LOG_FLAGS(0, "FSF_PROT_DUPLICATE_REQUEST_IDS\n");
+		if (fsf_req->qtcb) {
+			ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx "
+					"to the adapter %s is ambiguous. "
+					"Stopping all operations on this "
+					"adapter.\n",
+					*(unsigned long long *)
+					(&fsf_req->qtcb->bottom.support.
+					 req_handle),
+					zfcp_get_busid_by_adapter(adapter));
+		} else {
+			ZFCP_LOG_NORMAL("bug: The request identifier %p "
+					"to the adapter %s is ambiguous. "
+					"Stopping all operations on this "
+					"adapter. "
+					"(bug: got this for an unsolicited "
+					"status read request)\n",
+					fsf_req,
+					zfcp_get_busid_by_adapter(adapter));
+		}
+		debug_text_exception(adapter->erp_dbf, 0, "prot_dup_id");
+		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_cmd_dbf_event_fsf("dupreqid", fsf_req,
+				       &fsf_req->qtcb->prefix.prot_status_qual,
+				       sizeof (union fsf_prot_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PROT_LINK_DOWN:
+		ZFCP_LOG_FLAGS(1, "FSF_PROT_LINK_DOWN\n");
+		/*
+		 * 'test and set' is not atomic here -
+		 * it's ok as long as calls to our response queue handler
+		 * (and thus execution of this code here) are serialized
+		 * by the qdio module
+		 */
+		if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+				      &adapter->status)) {
+			switch (fsf_req->qtcb->prefix.prot_status_qual.
+				locallink_error.code) {
+			case FSF_PSQ_LINK_NOLIGHT:
+				ZFCP_LOG_INFO("The local link to adapter %s "
+					      "is down (no light detected).\n",
+					      zfcp_get_busid_by_adapter(
+						      adapter));
+				break;
+			case FSF_PSQ_LINK_WRAPPLUG:
+				ZFCP_LOG_INFO("The local link to adapter %s "
+					      "is down (wrap plug detected).\n",
+					      zfcp_get_busid_by_adapter(
+						      adapter));
+				break;
+			case FSF_PSQ_LINK_NOFCP:
+				ZFCP_LOG_INFO("The local link to adapter %s "
+					      "is down (adjacent node on "
+					      "link does not support FCP).\n",
+					      zfcp_get_busid_by_adapter(
+						      adapter));
+				break;
+			default:
+				ZFCP_LOG_INFO("The local link to adapter %s "
+					      "is down "
+					      "(warning: unknown reason "
+					      "code).\n",
+					      zfcp_get_busid_by_adapter(
+						      adapter));
+				break;
+
+			}
+			/*
+			 * Due to the 'erp failed' flag the adapter won't
+			 * be recovered but will be just set to 'blocked'
+			 * state. All subordinary devices will have state
+			 * 'blocked' and 'erp failed', too.
+			 * Thus the adapter is still able to provide
+			 * 'link up' status without being flooded with
+			 * requests.
+			 * (note: even 'close port' is not permitted)
+			 */
+			ZFCP_LOG_INFO("Stopping all operations for adapter "
+				      "%s.\n",
+				      zfcp_get_busid_by_adapter(adapter));
+			atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+					ZFCP_STATUS_COMMON_ERP_FAILED,
+					&adapter->status);
+			zfcp_erp_adapter_reopen(adapter, 0);
+			debug_text_event(adapter->erp_dbf, 1, "prot_link_down");
+		}
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PROT_REEST_QUEUE:
+		ZFCP_LOG_FLAGS(1, "FSF_PROT_REEST_QUEUE\n");
+		debug_text_event(adapter->erp_dbf, 1, "prot_reest_queue");
+		ZFCP_LOG_INFO("The local link to adapter with "
+			      "%s was re-plugged. "
+			      "Re-starting operations on this adapter.\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		/* All ports should be marked as ready to run again */
+		zfcp_erp_modify_adapter_status(adapter,
+					       ZFCP_STATUS_COMMON_RUNNING,
+					       ZFCP_SET);
+		zfcp_erp_adapter_reopen(adapter,
+					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
+					| ZFCP_STATUS_COMMON_ERP_FAILED);
+		zfcp_cmd_dbf_event_fsf("reestque", fsf_req,
+				       &fsf_req->qtcb->prefix.prot_status_qual,
+				       sizeof (union fsf_prot_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PROT_ERROR_STATE:
+		ZFCP_LOG_FLAGS(0, "FSF_PROT_ERROR_STATE\n");
+		ZFCP_LOG_NORMAL("error: The adapter %s "
+				"has entered the error state. "
+				"Restarting all operations on this "
+				"adapter.\n",
+				zfcp_get_busid_by_adapter(adapter));
+		debug_text_event(adapter->erp_dbf, 0, "prot_err_sta");
+		/* restart operation on this adapter */
+		zfcp_erp_adapter_reopen(adapter, 0);
+		zfcp_cmd_dbf_event_fsf("proterrs", fsf_req,
+				       &fsf_req->qtcb->prefix.prot_status_qual,
+				       sizeof (union fsf_prot_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	default:
+		ZFCP_LOG_NORMAL("bug: Transfer protocol status information "
+				"provided by the adapter %s "
+				"is not compatible with the device driver. "
+				"Stopping all operations on this adapter. "
+				"(debug info 0x%x).\n",
+				zfcp_get_busid_by_adapter(adapter),
+				fsf_req->qtcb->prefix.prot_status);
+		debug_text_event(adapter->erp_dbf, 0, "prot_inval:");
+		debug_exception(adapter->erp_dbf, 0,
+				&fsf_req->qtcb->prefix.prot_status,
+				sizeof (u32));
+		zfcp_erp_adapter_shutdown(adapter, 0);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+	}
+
+ skip_protstatus:
+	/*
+	 * always call specific handlers to give them a chance to do
+	 * something meaningful even in error cases
+	 */
+	zfcp_fsf_fsfstatus_eval(fsf_req);
+	return retval;
+}
+
+/*
+ * zfcp_fsf_fsfstatus_eval - evaluate FSF status of a completed request
+ * @fsf_req:	completed FSF request
+ *
+ * Inspects qtcb->header.fsf_status for conditions that are common to all
+ * commands: unknown command (adapter shutdown), FCP response available,
+ * and adapter status available (delegated to
+ * zfcp_fsf_fsfstatus_qual_eval()).  Requests already flagged
+ * ZFCP_STATUS_FSFREQ_ERROR skip the evaluation.  Always falls through to
+ * zfcp_fsf_req_dispatch() so the command specific handler runs even in
+ * error cases.
+ *
+ * returns:	always 0
+ */
+static int
+zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = 0;
+
+	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		goto skip_fsfstatus;
+	}
+
+	/* evaluate FSF Status */
+	switch (fsf_req->qtcb->header.fsf_status) {
+	case FSF_UNKNOWN_COMMAND:
+		ZFCP_LOG_FLAGS(0, "FSF_UNKNOWN_COMMAND\n");
+		ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
+				"not known by the adapter %s "
+				"Stopping all operations on this adapter. "
+				"(debug info 0x%x).\n",
+				zfcp_get_busid_by_adapter(fsf_req->adapter),
+				fsf_req->qtcb->header.fsf_command);
+		debug_text_exception(fsf_req->adapter->erp_dbf, 0,
+				     "fsf_s_unknown");
+		zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
+		zfcp_cmd_dbf_event_fsf("unknownc", fsf_req,
+				       &fsf_req->qtcb->header.fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_FCP_RSP_AVAILABLE:
+		ZFCP_LOG_FLAGS(2, "FSF_FCP_RSP_AVAILABLE\n");
+		ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
+			       "SCSI stack.\n");
+		debug_text_event(fsf_req->adapter->erp_dbf, 3, "fsf_s_rsp");
+		break;
+
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+		debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_astatus");
+		zfcp_fsf_fsfstatus_qual_eval(fsf_req);
+		break;
+
+	default:
+		break;
+	}
+
+ skip_fsfstatus:
+	/*
+	 * always call specific handlers to give them a chance to do
+	 * something meaningful even in error cases
+	 */
+	zfcp_fsf_req_dispatch(fsf_req);
+
+	return retval;
+}
+
+/*
+ * zfcp_fsf_fsfstatus_qual_eval - evaluate FSF status qualifier
+ * @fsf_req:	completed FSF request with FSF_ADAPTER_STATUS_AVAILABLE
+ *
+ * Interprets qtcb->header.fsf_status_qual.word[0] and sets the
+ * appropriate request status bits: retry possible, command aborted, or
+ * error; FSF_SQ_NO_RECOM additionally shuts the adapter down.  The
+ * qualifiers handled by the command specific handlers (link test,
+ * no-retry, ULP-dependent erp) are deliberately passed through
+ * unchanged.
+ *
+ * returns:	always 0
+ */
+static int
+zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = 0;
+
+	switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
+	case FSF_SQ_FCP_RSP_AVAILABLE:
+		ZFCP_LOG_FLAGS(2, "FSF_SQ_FCP_RSP_AVAILABLE\n");
+		debug_text_event(fsf_req->adapter->erp_dbf, 4, "fsf_sq_rsp");
+		break;
+	case FSF_SQ_RETRY_IF_POSSIBLE:
+		ZFCP_LOG_FLAGS(2, "FSF_SQ_RETRY_IF_POSSIBLE\n");
+		/* The SCSI-stack may now issue retries or escalate */
+		debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_retry");
+		zfcp_cmd_dbf_event_fsf("sqretry", fsf_req,
+				       &fsf_req->qtcb->header.fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_SQ_COMMAND_ABORTED:
+		ZFCP_LOG_FLAGS(2, "FSF_SQ_COMMAND_ABORTED\n");
+		/* Carry the aborted state on to upper layer */
+		debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_abort");
+		zfcp_cmd_dbf_event_fsf("sqabort", fsf_req,
+				       &fsf_req->qtcb->header.fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_SQ_NO_RECOM:
+		ZFCP_LOG_FLAGS(0, "FSF_SQ_NO_RECOM\n");
+		debug_text_exception(fsf_req->adapter->erp_dbf, 0,
+				     "fsf_sq_no_rec");
+		ZFCP_LOG_NORMAL("bug: No recommendation could be given for a"
+				"problem on the adapter %s "
+				"Stopping all operations on this adapter. ",
+				zfcp_get_busid_by_adapter(fsf_req->adapter));
+		zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
+		zfcp_cmd_dbf_event_fsf("sqnrecom", fsf_req,
+				       &fsf_req->qtcb->header.fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_SQ_ULP_PROGRAMMING_ERROR:
+		ZFCP_LOG_FLAGS(0, "FSF_SQ_ULP_PROGRAMMING_ERROR\n");
+		ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
+				"(adapter %s)\n",
+				zfcp_get_busid_by_adapter(fsf_req->adapter));
+		debug_text_exception(fsf_req->adapter->erp_dbf, 0,
+				     "fsf_sq_ulp_err");
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+	case FSF_SQ_NO_RETRY_POSSIBLE:
+	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+		/* dealt with in the respective functions */
+		break;
+	default:
+		ZFCP_LOG_NORMAL("bug: Additional status info could "
+				"not be interpreted properly.\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
+			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval:");
+		debug_exception(fsf_req->adapter->erp_dbf, 0,
+				&fsf_req->qtcb->header.fsf_status_qual.word[0],
+				sizeof (u32));
+		zfcp_cmd_dbf_event_fsf("squndef", fsf_req,
+				       &fsf_req->qtcb->header.fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+
+	return retval;
+}
+
+/*
+ * zfcp_fsf_req_dispatch - call the command specific completion handler
+ * @fsf_req:	completed FSF request
+ *
+ * Dispatches on fsf_req->fsf_command to the matching handler (FCP
+ * command, abort, CT/ELS, open/close port/LUN, exchange config/port
+ * data, control file up/download).  Unknown commands are flagged
+ * ZFCP_STATUS_FSFREQ_ERROR and logged, including a mismatch between the
+ * command the driver issued and the one the adapter returned.  If the
+ * request belongs to an erp action, the erp async handler is notified
+ * afterwards.
+ *
+ * returns:	always 0
+ */
+static int
+zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
+{
+	struct zfcp_erp_action *erp_action = fsf_req->erp_action;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	int retval = 0;
+
+	/* dump the QTCB of failed requests for later analysis */
+	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		ZFCP_LOG_TRACE("fsf_req=%p, QTCB=%p\n", fsf_req, fsf_req->qtcb);
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
+			      (char *) fsf_req->qtcb, sizeof(struct fsf_qtcb));
+	}
+
+	switch (fsf_req->fsf_command) {
+
+	case FSF_QTCB_FCP_CMND:
+		ZFCP_LOG_FLAGS(3, "FSF_QTCB_FCP_CMND\n");
+		zfcp_fsf_send_fcp_command_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_ABORT_FCP_CMND:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_ABORT_FCP_CMND\n");
+		zfcp_fsf_abort_fcp_command_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_SEND_GENERIC:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_SEND_GENERIC\n");
+		zfcp_fsf_send_ct_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_OPEN_PORT_WITH_DID:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_OPEN_PORT_WITH_DID\n");
+		zfcp_fsf_open_port_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_OPEN_LUN:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_OPEN_LUN\n");
+		zfcp_fsf_open_unit_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_CLOSE_LUN:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_LUN\n");
+		zfcp_fsf_close_unit_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_CLOSE_PORT:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_PORT\n");
+		zfcp_fsf_close_port_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_CLOSE_PHYSICAL_PORT:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_PHYSICAL_PORT\n");
+		zfcp_fsf_close_physical_port_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_EXCHANGE_CONFIG_DATA:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_EXCHANGE_CONFIG_DATA\n");
+		zfcp_fsf_exchange_config_data_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_EXCHANGE_PORT_DATA:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_EXCHANGE_PORT_DATA\n");
+		zfcp_fsf_exchange_port_data_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_SEND_ELS:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_SEND_ELS\n");
+		zfcp_fsf_send_els_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_DOWNLOAD_CONTROL_FILE\n");
+		zfcp_fsf_control_file_handler(fsf_req);
+		break;
+
+	case FSF_QTCB_UPLOAD_CONTROL_FILE:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_UPLOAD_CONTROL_FILE\n");
+		zfcp_fsf_control_file_handler(fsf_req);
+		break;
+
+	default:
+		ZFCP_LOG_FLAGS(2, "FSF_QTCB_UNKNOWN\n");
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
+				"not supported by the adapter %s\n",
+				zfcp_get_busid_by_adapter(fsf_req->adapter));
+		if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
+			ZFCP_LOG_NORMAL
+			    ("bug: Command issued by the device driver differs "
+			     "from the command returned by the adapter %s "
+			     "(debug info 0x%x, 0x%x).\n",
+			     zfcp_get_busid_by_adapter(fsf_req->adapter),
+			     fsf_req->fsf_command,
+			     fsf_req->qtcb->header.fsf_command);
+	}
+
+	if (!erp_action)
+		return retval;
+
+	/* tell the erp module that its request has completed */
+	debug_text_event(adapter->erp_dbf, 3, "a_frh");
+	debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
+	zfcp_erp_async_handler(erp_action, 0);
+
+	return retval;
+}
+
+/*
+ * zfcp_fsf_status_read - issue an unsolicited Status Read to the adapter
+ * @adapter:	adapter to receive the status read buffer
+ * @req_flags:	additional ZFCP_REQ_* flags (ZFCP_REQ_NO_QTCB is added here)
+ *
+ * Creates a QTCB-less FSF request from the status-read mempool, attaches
+ * a zeroed fsf_status_read_buffer (also mempool-backed) via the request's
+ * SBALs, and sends it.  On any failure the buffer and/or request are
+ * released again.
+ *
+ * Note: zfcp_fsf_req_create() returns with the request queue lock held;
+ * it is released here on all paths (including errors) via
+ * write_unlock_irqrestore().
+ *
+ * returns:	0 on success, negative value otherwise
+ */
+int
+zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
+{
+	struct zfcp_fsf_req *fsf_req;
+	struct fsf_status_read_buffer *status_buffer;
+	unsigned long lock_flags;
+	volatile struct qdio_buffer_element *sbale;
+	int retval = 0;
+
+	/* setup new FSF request */
+	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
+				     req_flags | ZFCP_REQ_NO_QTCB,
+				     adapter->pool.fsf_req_status_read,
+				     &lock_flags, &fsf_req);
+	if (retval < 0) {
+		ZFCP_LOG_INFO("error: Could not create unsolicited status "
+			      "buffer for adapter %s.\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		goto failed_req_create;
+	}
+
+	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
+	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
+	fsf_req->sbale_curr = 2;
+
+	status_buffer =
+		mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
+	if (!status_buffer) {
+		ZFCP_LOG_NORMAL("bug: could not get some buffer\n");
+		goto failed_buf;
+	}
+	memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
+	fsf_req->data.status_read.buffer = status_buffer;
+
+	/* insert pointer to respective buffer */
+	sbale = zfcp_qdio_sbale_curr(fsf_req);
+	sbale->addr = (void *) status_buffer;
+	sbale->length = sizeof(struct fsf_status_read_buffer);
+
+	/* start QDIO request for this FSF request */
+	retval = zfcp_fsf_req_send(fsf_req, NULL);
+	if (retval) {
+		ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status "
+			       "environment.\n");
+		goto failed_req_send;
+	}
+
+	ZFCP_LOG_TRACE("Status Read request initiated (adapter%s)\n",
+		       zfcp_get_busid_by_adapter(adapter));
+	goto out;
+
+ failed_req_send:
+	mempool_free(status_buffer, adapter->pool.data_status_read);
+
+ failed_buf:
+	zfcp_fsf_req_free(fsf_req);
+ failed_req_create:
+ out:
+	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+	return retval;
+}
+
+/*
+ * zfcp_fsf_status_read_port_closed - handle a "port closed" status read
+ * @fsf_req:	unsolicited status read request carrying the notification
+ *
+ * Looks up the port matching the d_id reported in the status read buffer
+ * and, depending on the status subtype, reopens it (physical port was
+ * closed) or shuts it down (port error).  Unknown subtypes and unknown
+ * d_ids are logged and ignored.
+ *
+ * NOTE(review): if no port matches, list_for_each_entry() leaves 'port'
+ * pointing at the list-head sentinel rather than NULL, so the '!port'
+ * test below can never fire; the d_id recheck is the effective guard.
+ * An empty port list would make the d_id read touch memory inside the
+ * adapter struct — harmless in practice, but worth confirming.
+ *
+ * returns:	always 0
+ */
+static int
+zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
+{
+	struct fsf_status_read_buffer *status_buffer;
+	struct zfcp_adapter *adapter;
+	struct zfcp_port *port;
+	unsigned long flags;
+
+	status_buffer = fsf_req->data.status_read.buffer;
+	adapter = fsf_req->adapter;
+
+	/* find the port the notification refers to */
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	list_for_each_entry(port, &adapter->port_list_head, list)
+	    if (port->d_id == (status_buffer->d_id & ZFCP_DID_MASK))
+		break;
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+
+	if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) {
+		ZFCP_LOG_NORMAL("bug: Reopen port indication received for"
+				"nonexisting port with d_id 0x%08x on "
+				"adapter %s. Ignored.\n",
+				status_buffer->d_id & ZFCP_DID_MASK,
+				zfcp_get_busid_by_adapter(adapter));
+		goto out;
+	}
+
+	switch (status_buffer->status_subtype) {
+
+	case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
+		ZFCP_LOG_FLAGS(2, "FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT\n");
+		debug_text_event(adapter->erp_dbf, 3, "unsol_pc_phys:");
+		zfcp_erp_port_reopen(port, 0);
+		break;
+
+	case FSF_STATUS_READ_SUB_ERROR_PORT:
+		ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_SUB_ERROR_PORT\n");
+		debug_text_event(adapter->erp_dbf, 1, "unsol_pc_err:");
+		zfcp_erp_port_shutdown(port, 0);
+		break;
+
+	default:
+		debug_text_event(adapter->erp_dbf, 0, "unsol_unk_sub:");
+		debug_exception(adapter->erp_dbf, 0,
+				&status_buffer->status_subtype, sizeof (u32));
+		ZFCP_LOG_NORMAL("bug: Undefined status subtype received "
+				"for a reopen indication on port with "
+				"d_id 0x%08x on the adapter %s. "
+				"Ignored. (debug info 0x%x)\n",
+				status_buffer->d_id,
+				zfcp_get_busid_by_adapter(adapter),
+				status_buffer->status_subtype);
+	}
+ out:
+	return 0;
+}
+
+/*
+ * function: zfcp_fsf_status_read_handler
+ *
+ * purpose: is called for finished Open Port command
+ *
+ * returns:
+ */
+static int
+zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
+{
+ int retval = 0;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fsf_status_read_buffer *status_buffer =
+ fsf_req->data.status_read.buffer;
+
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+ mempool_free(status_buffer, adapter->pool.data_status_read);
+ zfcp_fsf_req_cleanup(fsf_req);
+ goto out;
+ }
+
+ switch (status_buffer->status_type) {
+
+ case FSF_STATUS_READ_PORT_CLOSED:
+ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_PORT_CLOSED\n");
+ debug_text_event(adapter->erp_dbf, 3, "unsol_pclosed:");
+ debug_event(adapter->erp_dbf, 3,
+ &status_buffer->d_id, sizeof (u32));
+ zfcp_fsf_status_read_port_closed(fsf_req);
+ break;
+
+ case FSF_STATUS_READ_INCOMING_ELS:
+ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_INCOMING_ELS\n");
+ debug_text_event(adapter->erp_dbf, 3, "unsol_els:");
+ zfcp_fsf_incoming_els(fsf_req);
+ break;
+
+ case FSF_STATUS_READ_SENSE_DATA_AVAIL:
+ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_SENSE_DATA_AVAIL\n");
+ debug_text_event(adapter->erp_dbf, 3, "unsol_sense:");
+ ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, (char *) status_buffer,
+ sizeof(struct fsf_status_read_buffer));
+ break;
+
+ case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
+ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_BIT_ERROR_THRESHOLD\n");
+ debug_text_event(adapter->erp_dbf, 3, "unsol_bit_err:");
+ ZFCP_LOG_NORMAL("Bit error threshold data received:\n");
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
+ (char *) status_buffer,
+ sizeof (struct fsf_status_read_buffer));
+ break;
+
+ case FSF_STATUS_READ_LINK_DOWN:
+ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_LINK_DOWN\n");
+ debug_text_event(adapter->erp_dbf, 0, "unsol_link_down:");
+ ZFCP_LOG_INFO("Local link to adapter %s is down\n",
+ zfcp_get_busid_by_adapter(adapter));
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status);
+ zfcp_erp_adapter_failed(adapter);
+ break;
+
+ case FSF_STATUS_READ_LINK_UP:
+ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_LINK_UP\n");
+ debug_text_event(adapter->erp_dbf, 2, "unsol_link_up:");
+ ZFCP_LOG_INFO("Local link to adapter %s was replugged. "
+ "Restarting operations on this adapter\n",
+ zfcp_get_busid_by_adapter(adapter));
+ /* All ports should be marked as ready to run again */
+ zfcp_erp_modify_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_RUNNING,
+ ZFCP_SET);
+ zfcp_erp_adapter_reopen(adapter,
+ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
+ | ZFCP_STATUS_COMMON_ERP_FAILED);
+ break;
+
+ case FSF_STATUS_READ_CFDC_UPDATED:
+ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_CFDC_UPDATED\n");
+ debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_update:");
+ ZFCP_LOG_INFO("CFDC has been updated on the adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ zfcp_erp_adapter_access_changed(adapter);
+ break;
+
+ case FSF_STATUS_READ_CFDC_HARDENED:
+ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_CFDC_HARDENED\n");
+ debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_harden:");
+ switch (status_buffer->status_subtype) {
+ case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
+ ZFCP_LOG_INFO("CFDC of adapter %s saved on SE\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
+ ZFCP_LOG_INFO("CFDC of adapter %s has been copied "
+ "to the secondary SE\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ default:
+ ZFCP_LOG_INFO("CFDC of adapter %s has been hardened\n",
+ zfcp_get_busid_by_adapter(adapter));
+ }
+ break;
+
+ default:
+ debug_text_event(adapter->erp_dbf, 0, "unsol_unknown:");
+ debug_exception(adapter->erp_dbf, 0,
+ &status_buffer->status_type, sizeof (u32));
+ ZFCP_LOG_NORMAL("bug: An unsolicited status packet of unknown "
+ "type was received (debug info 0x%x)\n",
+ status_buffer->status_type);
+ ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
+ status_buffer);
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+ (char *) status_buffer,
+ sizeof (struct fsf_status_read_buffer));
+ break;
+ }
+ mempool_free(status_buffer, adapter->pool.data_status_read);
+ zfcp_fsf_req_cleanup(fsf_req);
+ /*
+ * recycle buffer and start new request repeat until outbound
+ * queue is empty or adapter shutdown is requested
+ */
+ /*
+ * FIXME(qdio):
+ * we may wait in the req_create for 5s during shutdown, so
+ * qdio_cleanup will have to wait at least that long before returning
+ * with failure to allow us a proper cleanup under all circumstances
+ */
+ /*
+ * FIXME:
+ * allocation failure possible? (Is this code needed?)
+ */
+ retval = zfcp_fsf_status_read(adapter, 0);
+ if (retval < 0) {
+ ZFCP_LOG_INFO("Failed to create unsolicited status read "
+ "request for the adapter %s.\n",
+ zfcp_get_busid_by_adapter(adapter));
+ /* temporary fix to avoid status read buffer shortage */
+ adapter->status_read_failed++;
+ if ((ZFCP_STATUS_READS_RECOM - adapter->status_read_failed)
+ < ZFCP_STATUS_READ_FAILED_THRESHOLD) {
+ ZFCP_LOG_INFO("restart adapter %s due to status read "
+ "buffer shortage\n",
+ zfcp_get_busid_by_adapter(adapter));
+ zfcp_erp_adapter_reopen(adapter, 0);
+ }
+ }
+ out:
+ return retval;
+}
+
+/*
+ * function: zfcp_fsf_abort_fcp_command
+ *
+ * purpose: tells FSF to abort a running SCSI command
+ *
+ * parameters: old_req_id - handle (QTCB req_handle) of the request to abort
+ * adapter - adapter the original command was issued on
+ * unit - unit (LUN) the original command was issued to
+ * req_flags - flags passed through to zfcp_fsf_req_create
+ *
+ * returns: address of initiated FSF request
+ * NULL - request could not be initiated
+ *
+ * locks: adapter->request_queue.queue_lock is taken inside
+ * zfcp_fsf_req_create (which fills lock_flags) and is released
+ * at the "out" label on every exit path
+ *
+ * FIXME(design): should be watched by a timeout !!!
+ * FIXME(design) shouldn't this be modified to return an int
+ * also...don't know how though
+ */
+struct zfcp_fsf_req *
+zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
+ struct zfcp_adapter *adapter,
+ struct zfcp_unit *unit, int req_flags)
+{
+ volatile struct qdio_buffer_element *sbale;
+ unsigned long lock_flags;
+ struct zfcp_fsf_req *fsf_req = NULL;
+ int retval = 0;
+
+ /* setup new FSF request (takes the request queue lock) */
+ retval = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
+ req_flags, adapter->pool.fsf_req_abort,
+ &lock_flags, &fsf_req);
+ if (retval < 0) {
+ ZFCP_LOG_INFO("error: Failed to create an abort command "
+ "request for lun 0x%016Lx on port 0x%016Lx "
+ "on adapter %s.\n",
+ unit->fcp_lun,
+ unit->port->wwpn,
+ zfcp_get_busid_by_adapter(adapter));
+ goto out;
+ }
+
+ /* single read-type SBALE pair; the abort carries no data payload */
+ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ fsf_req->data.abort_fcp_command.unit = unit;
+
+ /* set handles of unit and its parent port in QTCB */
+ fsf_req->qtcb->header.lun_handle = unit->handle;
+ fsf_req->qtcb->header.port_handle = unit->port->handle;
+
+ /* set handle of request which should be aborted */
+ fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id;
+
+ /* start QDIO request for this FSF request */
+
+ /* timer is stopped again by the completion handler, or below on error */
+ zfcp_fsf_start_scsi_er_timer(adapter);
+ retval = zfcp_fsf_req_send(fsf_req, NULL);
+ if (retval) {
+ del_timer(&adapter->scsi_er_timer);
+ ZFCP_LOG_INFO("error: Failed to send abort command request "
+ "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
+ zfcp_get_busid_by_adapter(adapter),
+ unit->port->wwpn, unit->fcp_lun);
+ zfcp_fsf_req_free(fsf_req);
+ fsf_req = NULL;
+ goto out;
+ }
+
+ ZFCP_LOG_DEBUG("Abort FCP Command request initiated "
+ "(adapter%s, port d_id=0x%08x, "
+ "unit x%016Lx, old_req_id=0x%lx)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ unit->port->d_id,
+ unit->fcp_lun, old_req_id);
+ out:
+ write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+ return fsf_req;
+}
+
+/*
+ * function: zfcp_fsf_abort_fcp_command_handler
+ *
+ * purpose: is called for finished Abort FCP Command request
+ *
+ * returns: 0 - abort succeeded (FSF_GOOD) or was not needed
+ * (FSF_FCP_COMMAND_DOES_NOT_EXIST)
+ * -EINVAL - any other outcome; error/retry flags are set
+ * in new_fsf_req->status as a side effect
+ */
+static int
+zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
+{
+ int retval = -EINVAL;
+ struct zfcp_unit *unit = new_fsf_req->data.abort_fcp_command.unit;
+ /* low byte of the first status qualifier word */
+ unsigned char status_qual =
+ new_fsf_req->qtcb->header.fsf_status_qual.word[0];
+
+ /* stop the SCSI ER timer started when the abort request was sent */
+ del_timer(&new_fsf_req->adapter->scsi_er_timer);
+
+ if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+ /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
+ goto skip_fsfstatus;
+ }
+
+ /* evaluate FSF status in QTCB */
+ switch (new_fsf_req->qtcb->header.fsf_status) {
+
+ case FSF_PORT_HANDLE_NOT_VALID:
+ /*
+ * NOTE(review): "status_qual % 0xf" is modulo 15, not a
+ * low-nibble mask ("status_qual & 0xf"); if a nibble
+ * comparison was intended this takes the wrong branch for
+ * some values (e.g. 0x22) - verify against the FSF status
+ * qualifier definition before changing.
+ */
+ if (status_qual >> 4 != status_qual % 0xf) {
+ ZFCP_LOG_FLAGS(2, "FSF_PORT_HANDLE_NOT_VALID\n");
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
+ "fsf_s_phand_nv0");
+ /*
+ * In this case a command that was sent prior to a port
+ * reopen was aborted (handles are different). This is
+ * fine.
+ */
+ } else {
+ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
+ ZFCP_LOG_INFO("Temporary port identifier 0x%x for "
+ "port 0x%016Lx on adapter %s invalid. "
+ "This may happen occasionally.\n",
+ unit->port->handle,
+ unit->port->wwpn,
+ zfcp_get_busid_by_unit(unit));
+ ZFCP_LOG_INFO("status qualifier:\n");
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
+ (char *) &new_fsf_req->qtcb->header.
+ fsf_status_qual,
+ sizeof (union fsf_status_qual));
+ /* Let's hope this sorts out the mess */
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
+ "fsf_s_phand_nv1");
+ zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ }
+ break;
+
+ case FSF_LUN_HANDLE_NOT_VALID:
+ /* NOTE(review): same suspicious "% 0xf" as in the port case */
+ if (status_qual >> 4 != status_qual % 0xf) {
+ /* 2 */
+ ZFCP_LOG_FLAGS(0, "FSF_LUN_HANDLE_NOT_VALID\n");
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
+ "fsf_s_lhand_nv0");
+ /*
+ * In this case a command that was sent prior to a unit
+ * reopen was aborted (handles are different).
+ * This is fine.
+ */
+ } else {
+ ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
+ ZFCP_LOG_INFO
+ ("Warning: Temporary LUN identifier 0x%x of LUN "
+ "0x%016Lx on port 0x%016Lx on adapter %s is "
+ "invalid. This may happen in rare cases. "
+ "Trying to re-establish link.\n",
+ unit->handle,
+ unit->fcp_lun,
+ unit->port->wwpn,
+ zfcp_get_busid_by_unit(unit));
+ ZFCP_LOG_DEBUG("Status qualifier data:\n");
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+ (char *) &new_fsf_req->qtcb->header.
+ fsf_status_qual,
+ sizeof (union fsf_status_qual));
+ /* Let's hope this sorts out the mess */
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
+ "fsf_s_lhand_nv1");
+ zfcp_erp_port_reopen(unit->port, 0);
+ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ }
+ break;
+
+ case FSF_FCP_COMMAND_DOES_NOT_EXIST:
+ /* nothing to abort - success from the caller's point of view */
+ ZFCP_LOG_FLAGS(2, "FSF_FCP_COMMAND_DOES_NOT_EXIST\n");
+ retval = 0;
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
+ "fsf_s_no_exist");
+ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
+ break;
+
+ case FSF_PORT_BOXED:
+ /* 2 */
+ ZFCP_LOG_FLAGS(0, "FSF_PORT_BOXED\n");
+ ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to "
+ "be reopened\n", unit->port->wwpn,
+ zfcp_get_busid_by_unit(unit));
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 2,
+ "fsf_s_pboxed");
+ zfcp_erp_port_reopen(unit->port, 0);
+ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
+ | ZFCP_STATUS_FSFREQ_RETRY;
+ break;
+
+ case FSF_LUN_BOXED:
+ ZFCP_LOG_FLAGS(0, "FSF_LUN_BOXED\n");
+ ZFCP_LOG_INFO(
+ "unit 0x%016Lx on port 0x%016Lx on adapter %s needs "
+ "to be reopened\n",
+ unit->fcp_lun, unit->port->wwpn,
+ zfcp_get_busid_by_unit(unit));
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed");
+ zfcp_erp_unit_reopen(unit, 0);
+ zfcp_cmd_dbf_event_fsf("unitbox", new_fsf_req,
+ &new_fsf_req->qtcb->header.fsf_status_qual,
+ sizeof(union fsf_status_qual));
+ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
+ | ZFCP_STATUS_FSFREQ_RETRY;
+ break;
+
+ case FSF_ADAPTER_STATUS_AVAILABLE:
+ /* 2 */
+ ZFCP_LOG_FLAGS(0, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+ /* first status qualifier word selects the recovery action */
+ switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) {
+ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+ ZFCP_LOG_FLAGS(2,
+ "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
+ "fsf_sq_ltest");
+ /* reopening link to port */
+ zfcp_erp_port_reopen(unit->port, 0);
+ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+ ZFCP_LOG_FLAGS(2,
+ "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
+ /* SCSI stack will escalate */
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
+ "fsf_sq_ulp");
+ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ default:
+ ZFCP_LOG_NORMAL
+ ("bug: Wrong status qualifier 0x%x arrived.\n",
+ new_fsf_req->qtcb->header.fsf_status_qual.word[0]);
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 0,
+ "fsf_sq_inval:");
+ debug_exception(new_fsf_req->adapter->erp_dbf, 0,
+ &new_fsf_req->qtcb->header.
+ fsf_status_qual.word[0], sizeof (u32));
+ break;
+ }
+ break;
+
+ case FSF_GOOD:
+ /* 3 */
+ ZFCP_LOG_FLAGS(0, "FSF_GOOD\n");
+ retval = 0;
+ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
+ break;
+
+ default:
+ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
+ "(debug info 0x%x)\n",
+ new_fsf_req->qtcb->header.fsf_status);
+ debug_text_event(new_fsf_req->adapter->erp_dbf, 0,
+ "fsf_s_inval:");
+ debug_exception(new_fsf_req->adapter->erp_dbf, 0,
+ &new_fsf_req->qtcb->header.fsf_status,
+ sizeof (u32));
+ break;
+ }
+ skip_fsfstatus:
+ return retval;
+}
+
+/**
+ * zfcp_use_one_sbal - check whether the request buffer and the response
+ * buffer each fit into a single SBALE
+ * Two scatter-gather lists are passed, one for the request and one for the
+ * response. Each must consist of exactly one element that does not cross
+ * a page boundary for this to return non-zero.
+ */
+static inline int
+zfcp_use_one_sbal(struct scatterlist *req, int req_count,
+ struct scatterlist *resp, int resp_count)
+{
+ unsigned long req_start, req_end, resp_start, resp_end;
+
+ /* more than one element per list can never fit a single SBALE */
+ if ((req_count != 1) || (resp_count != 1))
+ return 0;
+
+ req_start = (unsigned long) zfcp_sg_to_address(&req[0]);
+ req_end = (unsigned long) (zfcp_sg_to_address(&req[0]) +
+ req[0].length - 1);
+ resp_start = (unsigned long) zfcp_sg_to_address(&resp[0]);
+ resp_end = (unsigned long) (zfcp_sg_to_address(&resp[0]) +
+ resp[0].length - 1);
+
+ /* first and last byte of each buffer must lie in the same page */
+ return ((req_start & PAGE_MASK) == (req_end & PAGE_MASK)) &&
+ ((resp_start & PAGE_MASK) == (resp_end & PAGE_MASK));
+}
+
+/**
+ * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @ct: pointer to struct zfcp_send_ct which contains all needed data for
+ * the request
+ * @pool: pointer to memory pool, if non-null this pool is used to allocate
+ * a struct zfcp_fsf_req
+ * @erp_action: pointer to erp_action, if non-null the Generic Service request
+ * is sent within error recovery
+ *
+ * Returns: 0 on success; -ENOMEM, -EOPNOTSUPP or another negative error
+ * code if the request could not be created or sent. The request queue
+ * lock taken by zfcp_fsf_req_create is released on every exit path.
+ */
+int
+zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
+ struct zfcp_erp_action *erp_action)
+{
+ volatile struct qdio_buffer_element *sbale;
+ struct zfcp_port *port;
+ struct zfcp_adapter *adapter;
+ struct zfcp_fsf_req *fsf_req;
+ unsigned long lock_flags;
+ int bytes;
+ int ret = 0;
+
+ port = ct->port;
+ adapter = port->adapter;
+
+ /* create FSF request (takes the request queue lock) */
+ ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
+ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
+ pool, &lock_flags, &fsf_req);
+ if (ret < 0) {
+ ZFCP_LOG_INFO("error: Could not create CT request (FC-GS) for "
+ "adapter: %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ goto failed_req;
+ }
+
+ /* link request and erp action both ways; undone in failed_send */
+ if (erp_action != NULL) {
+ erp_action->fsf_req = fsf_req;
+ fsf_req->erp_action = erp_action;
+ }
+
+ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+ if (zfcp_use_one_sbal(ct->req, ct->req_count,
+ ct->resp, ct->resp_count)){
+ /* both request buffer and response buffer
+ fit into one sbale each */
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
+ sbale[2].addr = zfcp_sg_to_address(&ct->req[0]);
+ sbale[2].length = ct->req[0].length;
+ sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
+ sbale[3].length = ct->resp[0].length;
+ sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
+ } else if (adapter->supported_features &
+ FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
+ /* try to use chained SBALs */
+ bytes = zfcp_qdio_sbals_from_sg(fsf_req,
+ SBAL_FLAGS0_TYPE_WRITE_READ,
+ ct->req, ct->req_count,
+ ZFCP_MAX_SBALS_PER_CT_REQ);
+ if (bytes <= 0) {
+ ZFCP_LOG_INFO("error: creation of CT request failed "
+ "on adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ if (bytes == 0)
+ ret = -ENOMEM;
+ else
+ ret = bytes;
+
+ goto failed_send;
+ }
+ fsf_req->qtcb->bottom.support.req_buf_length = bytes;
+ /* presumably forces the response SG list to start in a
+ fresh SBAL - TODO confirm against qdio SBALE layout */
+ fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ bytes = zfcp_qdio_sbals_from_sg(fsf_req,
+ SBAL_FLAGS0_TYPE_WRITE_READ,
+ ct->resp, ct->resp_count,
+ ZFCP_MAX_SBALS_PER_CT_REQ);
+ if (bytes <= 0) {
+ ZFCP_LOG_INFO("error: creation of CT request failed "
+ "on adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ if (bytes == 0)
+ ret = -ENOMEM;
+ else
+ ret = bytes;
+
+ goto failed_send;
+ }
+ fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
+ } else {
+ /* reject send generic request */
+ ZFCP_LOG_INFO(
+ "error: microcode does not support chained SBALs,"
+ "CT request too big (adapter %s)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ ret = -EOPNOTSUPP;
+ goto failed_send;
+ }
+
+ /* settings in QTCB */
+ fsf_req->qtcb->header.port_handle = port->handle;
+ fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
+ fsf_req->qtcb->bottom.support.timeout = ct->timeout;
+ fsf_req->data.send_ct = ct;
+
+ /* start QDIO request for this FSF request */
+ ret = zfcp_fsf_req_send(fsf_req, ct->timer);
+ if (ret) {
+ ZFCP_LOG_DEBUG("error: initiation of CT request failed "
+ "(adapter %s, port 0x%016Lx)\n",
+ zfcp_get_busid_by_adapter(adapter), port->wwpn);
+ goto failed_send;
+ }
+
+ ZFCP_LOG_DEBUG("CT request initiated (adapter %s, port 0x%016Lx)\n",
+ zfcp_get_busid_by_adapter(adapter), port->wwpn);
+ goto out;
+
+ failed_send:
+ zfcp_fsf_req_free(fsf_req);
+ if (erp_action != NULL) {
+ erp_action->fsf_req = NULL;
+ }
+ failed_req:
+ out:
+ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
+ lock_flags);
+ return ret;
+}
+
+/**
+ * zfcp_fsf_send_ct_handler - handler for Generic Service requests
+ * @fsf_req: pointer to struct zfcp_fsf_req
+ *
+ * Data specific for the Generic Service request is passed by
+ * fsf_req->data.send_ct
+ * Usually a specific handler for the request is called via
+ * fsf_req->data.send_ct->handler at end of this function.
+ *
+ * Returns: 0 for FSF_GOOD, -EINVAL otherwise; the result is also stored
+ * in send_ct->status before the optional handler callback is invoked.
+ */
+static int
+zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_port *port;
+ struct zfcp_adapter *adapter;
+ struct zfcp_send_ct *send_ct;
+ struct fsf_qtcb_header *header;
+ struct fsf_qtcb_bottom_support *bottom;
+ int retval = -EINVAL;
+ u16 subtable, rule, counter;
+
+ adapter = fsf_req->adapter;
+ send_ct = fsf_req->data.send_ct;
+ port = send_ct->port;
+ header = &fsf_req->qtcb->header;
+ bottom = &fsf_req->qtcb->bottom.support;
+
+ /* a request already flagged as failed keeps retval == -EINVAL */
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ goto skip_fsfstatus;
+
+ /* evaluate FSF status in QTCB */
+ switch (header->fsf_status) {
+
+ case FSF_GOOD:
+ ZFCP_LOG_FLAGS(2,"FSF_GOOD\n");
+ retval = 0;
+ break;
+
+ case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+ ZFCP_LOG_FLAGS(2, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
+ if (adapter->fc_service_class <= 3) {
+ ZFCP_LOG_INFO("error: adapter %s does not support fc "
+ "class %d.\n",
+ zfcp_get_busid_by_port(port),
+ adapter->fc_service_class);
+ } else {
+ ZFCP_LOG_INFO("bug: The fibre channel class at the "
+ "adapter %s is invalid. "
+ "(debug info %d)\n",
+ zfcp_get_busid_by_port(port),
+ adapter->fc_service_class);
+ }
+ /* stop operation for this adapter */
+ debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup");
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_ADAPTER_STATUS_AVAILABLE:
+ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+ /* first status qualifier word selects the recovery action */
+ switch (header->fsf_status_qual.word[0]){
+ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+ ZFCP_LOG_FLAGS(2,"FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
+ /* reopening link to port */
+ debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest");
+ zfcp_test_link(port);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+ ZFCP_LOG_FLAGS(2,"FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
+ /* ERP strategy will escalate */
+ debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp");
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ default:
+ ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x "
+ "arrived.\n",
+ header->fsf_status_qual.word[0]);
+ break;
+ }
+ break;
+
+ case FSF_ACCESS_DENIED:
+ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
+ ZFCP_LOG_NORMAL("access denied, cannot send generic service "
+ "command (adapter %s, port d_id=0x%08x)\n",
+ zfcp_get_busid_by_port(port), port->d_id);
+ /* decode up to two (subtable, rule) pairs from the
+ status qualifier halfwords */
+ for (counter = 0; counter < 2; counter++) {
+ subtable = header->fsf_status_qual.halfword[counter * 2];
+ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
+ switch (subtable) {
+ case FSF_SQ_CFDC_SUBTABLE_OS:
+ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
+ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
+ case FSF_SQ_CFDC_SUBTABLE_LUN:
+ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
+ zfcp_act_subtable_type[subtable], rule);
+ break;
+ }
+ }
+ debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
+ zfcp_erp_port_access_denied(port);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_GENERIC_COMMAND_REJECTED:
+ ZFCP_LOG_FLAGS(2, "FSF_GENERIC_COMMAND_REJECTED\n");
+ ZFCP_LOG_INFO("generic service command rejected "
+ "(adapter %s, port d_id=0x%08x)\n",
+ zfcp_get_busid_by_port(port), port->d_id);
+ ZFCP_LOG_INFO("status qualifier:\n");
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
+ (char *) &header->fsf_status_qual,
+ sizeof (union fsf_status_qual));
+ debug_text_event(adapter->erp_dbf, 1, "fsf_s_gcom_rej");
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_PORT_HANDLE_NOT_VALID:
+ ZFCP_LOG_FLAGS(2, "FSF_PORT_HANDLE_NOT_VALID\n");
+ ZFCP_LOG_DEBUG("Temporary port identifier 0x%x for port "
+ "0x%016Lx on adapter %s invalid. This may "
+ "happen occasionally.\n", port->handle,
+ port->wwpn, zfcp_get_busid_by_port(port));
+ ZFCP_LOG_INFO("status qualifier:\n");
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
+ (char *) &header->fsf_status_qual,
+ sizeof (union fsf_status_qual));
+ debug_text_event(adapter->erp_dbf, 1, "fsf_s_phandle_nv");
+ zfcp_erp_adapter_reopen(adapter, 0);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_PORT_BOXED:
+ ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
+ ZFCP_LOG_INFO("port needs to be reopened "
+ "(adapter %s, port d_id=0x%08x)\n",
+ zfcp_get_busid_by_port(port), port->d_id);
+ debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed");
+ zfcp_erp_port_reopen(port, 0);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
+ | ZFCP_STATUS_FSFREQ_RETRY;
+ break;
+
+ /* following states should never occure, all cases avoided
+ in zfcp_fsf_send_ct - but who knows ... */
+ case FSF_PAYLOAD_SIZE_MISMATCH:
+ ZFCP_LOG_FLAGS(2, "FSF_PAYLOAD_SIZE_MISMATCH\n");
+ ZFCP_LOG_INFO("payload size mismatch (adapter: %s, "
+ "req_buf_length=%d, resp_buf_length=%d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ bottom->req_buf_length, bottom->resp_buf_length);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_REQUEST_SIZE_TOO_LARGE:
+ ZFCP_LOG_FLAGS(2, "FSF_REQUEST_SIZE_TOO_LARGE\n");
+ ZFCP_LOG_INFO("request size too large (adapter: %s, "
+ "req_buf_length=%d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ bottom->req_buf_length);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_RESPONSE_SIZE_TOO_LARGE:
+ ZFCP_LOG_FLAGS(2, "FSF_RESPONSE_SIZE_TOO_LARGE\n");
+ ZFCP_LOG_INFO("response size too large (adapter: %s, "
+ "resp_buf_length=%d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ bottom->resp_buf_length);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_SBAL_MISMATCH:
+ ZFCP_LOG_FLAGS(2, "FSF_SBAL_MISMATCH\n");
+ ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
+ "resp_buf_length=%d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ bottom->req_buf_length, bottom->resp_buf_length);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ default:
+ /*
+ * NOTE(review): the log reports header->fsf_status but the
+ * dbf exception dumps fsf_status_qual.word[0] (cf. the abort
+ * handler, which dumps fsf_status here) - confirm intent.
+ */
+ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
+ "(debug info 0x%x)\n", header->fsf_status);
+ debug_text_event(adapter->erp_dbf, 0, "fsf_sq_inval:");
+ debug_exception(adapter->erp_dbf, 0,
+ &header->fsf_status_qual.word[0], sizeof (u32));
+ break;
+ }
+
+skip_fsfstatus:
+ send_ct->status = retval;
+
+ /* invoke caller-provided completion callback, if any */
+ if (send_ct->handler != NULL)
+ send_ct->handler(send_ct->handler_data);
+
+ return retval;
+}
+
+/**
+ * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @els: pointer to struct zfcp_send_els which contains all needed data for
+ * the command.
+ *
+ * Returns: 0 on success; -ENOMEM, -EOPNOTSUPP or another negative error
+ * code if the request could not be created or sent. The request queue
+ * lock taken by zfcp_fsf_req_create is released on every exit path.
+ */
+int
+zfcp_fsf_send_els(struct zfcp_send_els *els)
+{
+ volatile struct qdio_buffer_element *sbale;
+ struct zfcp_fsf_req *fsf_req;
+ fc_id_t d_id;
+ struct zfcp_adapter *adapter;
+ unsigned long lock_flags;
+ int bytes;
+ int ret = 0;
+
+ d_id = els->d_id;
+ adapter = els->adapter;
+
+ /* create FSF request (takes the request queue lock) */
+ ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
+ ZFCP_REQ_AUTO_CLEANUP,
+ NULL, &lock_flags, &fsf_req);
+ if (ret < 0) {
+ ZFCP_LOG_INFO("error: creation of ELS request failed "
+ "(adapter %s, port d_id: 0x%08x)\n",
+ zfcp_get_busid_by_adapter(adapter), d_id);
+ goto failed_req;
+ }
+
+ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+ if (zfcp_use_one_sbal(els->req, els->req_count,
+ els->resp, els->resp_count)){
+ /* both request buffer and response buffer
+ fit into one sbale each */
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
+ sbale[2].addr = zfcp_sg_to_address(&els->req[0]);
+ sbale[2].length = els->req[0].length;
+ sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
+ sbale[3].length = els->resp[0].length;
+ sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
+ } else if (adapter->supported_features &
+ FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
+ /* try to use chained SBALs */
+ bytes = zfcp_qdio_sbals_from_sg(fsf_req,
+ SBAL_FLAGS0_TYPE_WRITE_READ,
+ els->req, els->req_count,
+ ZFCP_MAX_SBALS_PER_ELS_REQ);
+ if (bytes <= 0) {
+ ZFCP_LOG_INFO("error: creation of ELS request failed "
+ "(adapter %s, port d_id: 0x%08x)\n",
+ zfcp_get_busid_by_adapter(adapter), d_id);
+ if (bytes == 0) {
+ ret = -ENOMEM;
+ } else {
+ ret = bytes;
+ }
+ goto failed_send;
+ }
+ fsf_req->qtcb->bottom.support.req_buf_length = bytes;
+ fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ bytes = zfcp_qdio_sbals_from_sg(fsf_req,
+ SBAL_FLAGS0_TYPE_WRITE_READ,
+ els->resp, els->resp_count,
+ ZFCP_MAX_SBALS_PER_ELS_REQ);
+ if (bytes <= 0) {
+ ZFCP_LOG_INFO("error: creation of ELS request failed "
+ "(adapter %s, port d_id: 0x%08x)\n",
+ zfcp_get_busid_by_adapter(adapter), d_id);
+ if (bytes == 0) {
+ ret = -ENOMEM;
+ } else {
+ ret = bytes;
+ }
+ goto failed_send;
+ }
+ fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
+ } else {
+ /* reject request */
+ ZFCP_LOG_INFO("error: microcode does not support chained SBALs"
+ ", ELS request too big (adapter %s, "
+ "port d_id: 0x%08x)\n",
+ zfcp_get_busid_by_adapter(adapter), d_id);
+ ret = -EOPNOTSUPP;
+ goto failed_send;
+ }
+
+ /* settings in QTCB */
+ fsf_req->qtcb->bottom.support.d_id = d_id;
+ fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
+ fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
+ fsf_req->data.send_els = els;
+
+ /* start QDIO request for this FSF request */
+ ret = zfcp_fsf_req_send(fsf_req, els->timer);
+ if (ret) {
+ ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
+ "(adapter %s, port d_id: 0x%08x)\n",
+ zfcp_get_busid_by_adapter(adapter), d_id);
+ goto failed_send;
+ }
+
+ ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: "
+ "0x%08x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
+ goto out;
+
+ failed_send:
+ zfcp_fsf_req_free(fsf_req);
+
+ failed_req:
+ out:
+ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
+ lock_flags);
+
+ return ret;
+}
+
+/**
+ * zfcp_fsf_send_els_handler - handler for ELS commands
+ * @fsf_req: pointer to struct zfcp_fsf_req
+ *
+ * Data specific for the ELS command is passed by
+ * fsf_req->data.send_els
+ * Usually a specific handler for the command is called via
+ * fsf_req->data.send_els->handler at end of this function.
+ *
+ * Returns: 0 for FSF_GOOD (or the zfcp_handle_els_rjt() result in the
+ * ULP-dependent ERP case), -EINVAL otherwise; the result is also stored
+ * in send_els->status before the optional handler callback is invoked.
+ */
+static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
+{
+ struct zfcp_adapter *adapter;
+ fc_id_t d_id;
+ struct zfcp_port *port;
+ struct fsf_qtcb_header *header;
+ struct fsf_qtcb_bottom_support *bottom;
+ struct zfcp_send_els *send_els;
+ int retval = -EINVAL;
+ u16 subtable, rule, counter;
+
+ send_els = fsf_req->data.send_els;
+ adapter = send_els->adapter;
+ d_id = send_els->d_id;
+ header = &fsf_req->qtcb->header;
+ bottom = &fsf_req->qtcb->bottom.support;
+
+ /* a request already flagged as failed keeps retval == -EINVAL */
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ goto skip_fsfstatus;
+
+ switch (header->fsf_status) {
+
+ case FSF_GOOD:
+ ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
+ retval = 0;
+ break;
+
+ case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+ ZFCP_LOG_FLAGS(2, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
+ if (adapter->fc_service_class <= 3) {
+ ZFCP_LOG_INFO("error: adapter %s does "
+ "not support fibrechannel class %d.\n",
+ zfcp_get_busid_by_adapter(adapter),
+ adapter->fc_service_class);
+ } else {
+ ZFCP_LOG_INFO("bug: The fibrechannel class at "
+ "adapter %s is invalid. "
+ "(debug info %d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ adapter->fc_service_class);
+ }
+ /* stop operation for this adapter */
+ debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup");
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_ADAPTER_STATUS_AVAILABLE:
+ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+ /* first status qualifier word selects the recovery action */
+ switch (header->fsf_status_qual.word[0]){
+ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+ ZFCP_LOG_FLAGS(2,"FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
+ debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest");
+ /* for any ELS but ADISC, test the link to the
+ destination port (lookup under config_lock) */
+ if (send_els->ls_code != ZFCP_LS_ADISC) {
+ read_lock(&zfcp_data.config_lock);
+ port = zfcp_get_port_by_did(adapter, d_id);
+ if (port)
+ zfcp_test_link(port);
+ read_unlock(&zfcp_data.config_lock);
+ }
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+ ZFCP_LOG_FLAGS(2,"FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
+ debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp");
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ /* evaluate LS_RJT reason/explanation codes carried
+ in the remaining status qualifier words */
+ retval =
+ zfcp_handle_els_rjt(header->fsf_status_qual.word[1],
+ (struct zfcp_ls_rjt_par *)
+ &header->fsf_status_qual.word[2]);
+ break;
+ case FSF_SQ_RETRY_IF_POSSIBLE:
+ ZFCP_LOG_FLAGS(2, "FSF_SQ_RETRY_IF_POSSIBLE\n");
+ debug_text_event(adapter->erp_dbf, 1, "fsf_sq_retry");
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ default:
+ ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x\n",
+ header->fsf_status_qual.word[0]);
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
+ (char*)header->fsf_status_qual.word, 16);
+ }
+ break;
+
+ case FSF_ELS_COMMAND_REJECTED:
+ ZFCP_LOG_FLAGS(2, "FSF_ELS_COMMAND_REJECTED\n");
+ ZFCP_LOG_INFO("ELS has been rejected because command filter "
+ "prohibited sending "
+ "(adapter: %s, port d_id: 0x%08x)\n",
+ zfcp_get_busid_by_adapter(adapter), d_id);
+
+ break;
+
+ case FSF_PAYLOAD_SIZE_MISMATCH:
+ ZFCP_LOG_FLAGS(2, "FSF_PAYLOAD_SIZE_MISMATCH\n");
+ ZFCP_LOG_INFO(
+ "ELS request size and ELS response size must be either "
+ "both 0, or both greater than 0 "
+ "(adapter: %s, req_buf_length=%d resp_buf_length=%d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ bottom->req_buf_length,
+ bottom->resp_buf_length);
+ break;
+
+ case FSF_REQUEST_SIZE_TOO_LARGE:
+ ZFCP_LOG_FLAGS(2, "FSF_REQUEST_SIZE_TOO_LARGE\n");
+ ZFCP_LOG_INFO(
+ "Length of the ELS request buffer, "
+ "specified in QTCB bottom, "
+ "exceeds the size of the buffers "
+ "that have been allocated for ELS request data "
+ "(adapter: %s, req_buf_length=%d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ bottom->req_buf_length);
+ break;
+
+ case FSF_RESPONSE_SIZE_TOO_LARGE:
+ ZFCP_LOG_FLAGS(2, "FSF_RESPONSE_SIZE_TOO_LARGE\n");
+ ZFCP_LOG_INFO(
+ "Length of the ELS response buffer, "
+ "specified in QTCB bottom, "
+ "exceeds the size of the buffers "
+ "that have been allocated for ELS response data "
+ "(adapter: %s, resp_buf_length=%d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ bottom->resp_buf_length);
+ break;
+
+ case FSF_SBAL_MISMATCH:
+ /* should never occure, avoided in zfcp_fsf_send_els */
+ ZFCP_LOG_FLAGS(2, "FSF_SBAL_MISMATCH\n");
+ ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
+ "resp_buf_length=%d)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ bottom->req_buf_length, bottom->resp_buf_length);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_ACCESS_DENIED:
+ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
+ ZFCP_LOG_NORMAL("access denied, cannot send ELS command "
+ "(adapter %s, port d_id=0x%08x)\n",
+ zfcp_get_busid_by_adapter(adapter), d_id);
+ /* decode up to two (subtable, rule) pairs from the
+ status qualifier halfwords */
+ for (counter = 0; counter < 2; counter++) {
+ subtable = header->fsf_status_qual.halfword[counter * 2];
+ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
+ switch (subtable) {
+ case FSF_SQ_CFDC_SUBTABLE_OS:
+ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
+ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
+ case FSF_SQ_CFDC_SUBTABLE_LUN:
+ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
+ zfcp_act_subtable_type[subtable], rule);
+ break;
+ }
+ }
+ debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
+ read_lock(&zfcp_data.config_lock);
+ port = zfcp_get_port_by_did(adapter, d_id);
+ if (port != NULL)
+ zfcp_erp_port_access_denied(port);
+ read_unlock(&zfcp_data.config_lock);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ default:
+ ZFCP_LOG_NORMAL(
+ "bug: An unknown FSF Status was presented "
+ "(adapter: %s, fsf_status=0x%08x)\n",
+ zfcp_get_busid_by_adapter(adapter),
+ header->fsf_status);
+ debug_text_event(adapter->erp_dbf, 0, "fsf_sq_inval");
+ debug_exception(adapter->erp_dbf, 0,
+ &header->fsf_status_qual.word[0], sizeof(u32));
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ }
+
+skip_fsfstatus:
+ send_els->status = retval;
+
+ /* invoke caller-provided completion callback, if any */
+ if (send_els->handler != 0)
+ send_els->handler(send_els->handler_data);
+
+ return retval;
+}
+
+/*
+ * zfcp_fsf_exchange_config_data - send an Exchange Configuration Data request
+ * @erp_action: erp action on whose behalf the request is issued; its
+ * fsf_req pointer is set on success and reset to NULL on send failure
+ *
+ * returns: 0 on success, the error code from request creation or
+ * sending otherwise. The request queue lock taken by
+ * zfcp_fsf_req_create is released on every exit path.
+ */
+int
+zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
+{
+ struct zfcp_adapter *adapter = erp_action->adapter;
+ volatile struct qdio_buffer_element *sbale;
+ struct zfcp_fsf_req *fsf_req;
+ unsigned long lock_flags;
+ int ret;
+
+ /* set up a new FSF request (takes the request queue lock) */
+ ret = zfcp_fsf_req_create(adapter,
+ FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ ZFCP_REQ_AUTO_CLEANUP,
+ adapter->pool.fsf_req_erp,
+ &lock_flags, &(erp_action->fsf_req));
+ if (ret < 0) {
+ ZFCP_LOG_INFO("error: Could not create exchange configuration "
+ "data request for adapter %s.\n",
+ zfcp_get_busid_by_adapter(adapter));
+ goto out;
+ }
+
+ fsf_req = erp_action->fsf_req;
+
+ /* single read-type SBALE pair; no data payload */
+ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ fsf_req->erp_action = erp_action;
+ fsf_req->qtcb->bottom.config.feature_selection =
+ (FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING);
+
+ /* start QDIO request for this FSF request */
+ ret = zfcp_fsf_req_send(fsf_req, &erp_action->timer);
+ if (ret) {
+ ZFCP_LOG_INFO
+ ("error: Could not send exchange configuration data "
+ "command on the adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ zfcp_fsf_req_free(fsf_req);
+ erp_action->fsf_req = NULL;
+ goto out;
+ }
+
+ ZFCP_LOG_DEBUG("exchange configuration data request initiated "
+ "(adapter %s)\n",
+ zfcp_get_busid_by_adapter(adapter));
+
+ out:
+ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
+ lock_flags);
+ return ret;
+}
+
+/**
+ * zfcp_fsf_exchange_config_evaluate
+ * @fsf_req: fsf_req which belongs to xchg config data request
+ * @xchg_ok: specifies if xchg config data was incomplete or complete (0/1)
+ *
+ * Copies the configuration data reported by the adapter into the
+ * zfcp_adapter structure and validates QTCB version compatibility.
+ *
+ * returns: -EIO on error, 0 otherwise
+ */
+static int
+zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
+{
+ struct fsf_qtcb_bottom_config *bottom;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+
+ bottom = &fsf_req->qtcb->bottom.config;
+ ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
+ bottom->low_qtcb_version, bottom->high_qtcb_version);
+ adapter->fsf_lic_version = bottom->lic_version;
+ adapter->supported_features = bottom->supported_features;
+
+ if (xchg_ok) {
+ /* complete data: cache the N_Port service parameters */
+ adapter->wwnn = bottom->nport_serv_param.wwnn;
+ adapter->wwpn = bottom->nport_serv_param.wwpn;
+ adapter->s_id = bottom->s_id & ZFCP_DID_MASK;
+ adapter->fc_topology = bottom->fc_topology;
+ adapter->fc_link_speed = bottom->fc_link_speed;
+ adapter->hydra_version = bottom->adapter_type;
+ } else {
+ /* incomplete data: invalidate cached N_Port parameters */
+ adapter->wwnn = 0;
+ adapter->wwpn = 0;
+ adapter->s_id = 0;
+ adapter->fc_topology = 0;
+ adapter->fc_link_speed = 0;
+ adapter->hydra_version = 0;
+ }
+
+ /* hardware version/serial number are only valid with HBA API support;
+ * serial number is reported in EBCDIC, convert to ASCII */
+ if(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT){
+ adapter->hardware_version = bottom->hardware_version;
+ memcpy(adapter->serial_number, bottom->serial_number, 17);
+ EBCASC(adapter->serial_number, sizeof(adapter->serial_number));
+ }
+
+ ZFCP_LOG_INFO("The adapter %s reported the following characteristics:\n"
+ "WWNN 0x%016Lx, "
+ "WWPN 0x%016Lx, "
+ "S_ID 0x%08x,\n"
+ "adapter version 0x%x, "
+ "LIC version 0x%x, "
+ "FC link speed %d Gb/s\n",
+ zfcp_get_busid_by_adapter(adapter),
+ adapter->wwnn,
+ adapter->wwpn,
+ (unsigned int) adapter->s_id,
+ adapter->hydra_version,
+ adapter->fsf_lic_version,
+ adapter->fc_link_speed);
+ /* driver's QTCB version must lie within the adapter's supported
+ * [low, high] range; otherwise the adapter is shut down */
+ if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
+ ZFCP_LOG_NORMAL("error: the adapter %s "
+ "only supports newer control block "
+ "versions in comparison to this device "
+ "driver (try updated device driver)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ debug_text_event(adapter->erp_dbf, 0, "low_qtcb_ver");
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ return -EIO;
+ }
+ if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) {
+ ZFCP_LOG_NORMAL("error: the adapter %s "
+ "only supports older control block "
+ "versions than this device driver uses"
+ "(consider a microcode upgrade)\n",
+ zfcp_get_busid_by_adapter(adapter));
+ debug_text_event(adapter->erp_dbf, 0, "high_qtcb_ver");
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ return -EIO;
+ }
+ return 0;
+}
+
+/*
+ * function: zfcp_fsf_exchange_config_data_handler
+ *
+ * purpose: is called for finished Exchange Configuration Data command
+ *
+ * returns: 0 on success, -EIO if the response is unusable (in which
+ * case the adapter has been shut down or marked failed)
+ */
+static int
+zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
+{
+ struct fsf_qtcb_bottom_config *bottom;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return -EIO;
+
+ switch (fsf_req->qtcb->header.fsf_status) {
+
+ case FSF_GOOD:
+ ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
+
+ /* cache complete configuration data in the adapter struct */
+ if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
+ return -EIO;
+
+ /* only switched-fabric topology is supported by this driver;
+ * anything else shuts the adapter down */
+ switch (adapter->fc_topology) {
+ case FSF_TOPO_P2P:
+ ZFCP_LOG_FLAGS(1, "FSF_TOPO_P2P\n");
+ ZFCP_LOG_NORMAL("error: Point-to-point fibrechannel "
+ "configuration detected at adapter %s "
+ "unsupported, shutting down adapter\n",
+ zfcp_get_busid_by_adapter(adapter));
+ debug_text_event(fsf_req->adapter->erp_dbf, 0,
+ "top-p-to-p");
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ return -EIO;
+ case FSF_TOPO_AL:
+ ZFCP_LOG_FLAGS(1, "FSF_TOPO_AL\n");
+ ZFCP_LOG_NORMAL("error: Arbitrated loop fibrechannel "
+ "topology detected at adapter %s "
+ "unsupported, shutting down adapter\n",
+ zfcp_get_busid_by_adapter(adapter));
+ debug_text_event(fsf_req->adapter->erp_dbf, 0,
+ "top-al");
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ return -EIO;
+ case FSF_TOPO_FABRIC:
+ ZFCP_LOG_FLAGS(1, "FSF_TOPO_FABRIC\n");
+ ZFCP_LOG_INFO("Switched fabric fibrechannel "
+ "network detected at adapter %s.\n",
+ zfcp_get_busid_by_adapter(adapter));
+ break;
+ default:
+ ZFCP_LOG_NORMAL("bug: The fibrechannel topology "
+ "reported by the exchange "
+ "configuration command for "
+ "the adapter %s is not "
+ "of a type known to the zfcp "
+ "driver, shutting down adapter\n",
+ zfcp_get_busid_by_adapter(adapter));
+ debug_text_exception(fsf_req->adapter->erp_dbf, 0,
+ "unknown-topo");
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ return -EIO;
+ }
+ /* adapter must accept at least a QTCB of our size */
+ bottom = &fsf_req->qtcb->bottom.config;
+ if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
+ ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
+ "allowed by the adapter %s "
+ "is lower than the minimum "
+ "required by the driver (%ld bytes).\n",
+ bottom->max_qtcb_size,
+ zfcp_get_busid_by_adapter(adapter),
+ sizeof(struct fsf_qtcb));
+ debug_text_event(fsf_req->adapter->erp_dbf, 0,
+ "qtcb-size");
+ debug_event(fsf_req->adapter->erp_dbf, 0,
+ &bottom->max_qtcb_size, sizeof (u32));
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ return -EIO;
+ }
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+ &adapter->status);
+ break;
+ case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ debug_text_event(adapter->erp_dbf, 0, "xchg-inco");
+
+ /* incomplete data: clear cached N_Port parameters */
+ if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
+ return -EIO;
+
+ /* local link is down; mark adapter failed so ERP gives up */
+ ZFCP_LOG_INFO("Local link to adapter %s is down\n",
+ zfcp_get_busid_by_adapter(adapter));
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+ &adapter->status);
+ zfcp_erp_adapter_failed(adapter);
+ break;
+ default:
+ debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng");
+ debug_event(fsf_req->adapter->erp_dbf, 0,
+ &fsf_req->qtcb->header.fsf_status, sizeof (u32));
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * zfcp_fsf_exchange_port_data - request information about local port
+ * @adapter: for which port data is requested
+ * @data: response to exchange port data request
+ *
+ * Synchronous: sends an "exchange port data" FSF request and sleeps
+ * until the request is completed (a kmalloc'ed timer guards against a
+ * request that never completes; it is freed on all exit paths).
+ *
+ * Returns 0 on success, -EOPNOTSUPP if the adapter does not support
+ * HBA API management, -ENOMEM on allocation failure, or a negative
+ * error code if the request could not be created or sent.
+ */
+int
+zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
+ struct fsf_qtcb_bottom_port *data)
+{
+ volatile struct qdio_buffer_element *sbale;
+ int retval = 0;
+ unsigned long lock_flags;
+ struct zfcp_fsf_req *fsf_req;
+ struct timer_list *timer;
+
+ if(!(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT)){
+ ZFCP_LOG_INFO("error: exchange port data "
+ "command not supported by adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ return -EOPNOTSUPP;
+ }
+
+ timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
+ if (!timer)
+ return -ENOMEM;
+
+ /* setup new FSF request */
+ retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
+ 0, 0, &lock_flags, &fsf_req);
+ if (retval < 0) {
+ /* fixed missing space between "for" and "the" in message */
+ ZFCP_LOG_INFO("error: Out of resources. Could not create an "
+ "exchange port data request for "
+ "the adapter %s.\n",
+ zfcp_get_busid_by_adapter(adapter));
+ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
+ lock_flags);
+ goto out;
+ }
+
+ /* single read buffer entry, marked as last entry of the SBAL */
+ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ fsf_req->data.port_data = data;
+
+ init_timer(timer);
+ timer->function = zfcp_fsf_request_timeout_handler;
+ timer->data = (unsigned long) adapter;
+ timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
+
+ retval = zfcp_fsf_req_send(fsf_req, timer);
+ if (retval) {
+ ZFCP_LOG_INFO("error: Could not send an exchange port data "
+ "command on the adapter %s\n",
+ zfcp_get_busid_by_adapter(adapter));
+ zfcp_fsf_req_free(fsf_req);
+ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
+ lock_flags);
+ goto out;
+ }
+
+ ZFCP_LOG_DEBUG("Exchange Port Data request initiated (adapter %s)\n",
+ zfcp_get_busid_by_adapter(adapter));
+
+ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
+ lock_flags);
+
+ /* wait for the request handler to signal completion */
+ wait_event(fsf_req->completion_wq,
+ fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+ del_timer_sync(timer);
+ zfcp_fsf_req_cleanup(fsf_req);
+ out:
+ kfree(timer);
+ return retval;
+}
+
+
+/**
+ * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request
+ * @fsf_req: pointer to struct zfcp_fsf_req
+ *
+ * On FSF_GOOD the port data reported in the QTCB bottom is copied into
+ * the caller-supplied buffer; on any other status a trace entry is
+ * recorded. A request already flagged as failed is ignored entirely.
+ */
+static void
+zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
+{
+ struct fsf_qtcb_bottom_port *data = fsf_req->data.port_data;
+
+ /* request failed: leave caller's buffer untouched */
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
+ if (fsf_req->qtcb->header.fsf_status == FSF_GOOD) {
+ ZFCP_LOG_FLAGS(2,"FSF_GOOD\n");
+ memcpy(data, &fsf_req->qtcb->bottom.port, sizeof(*data));
+ } else {
+ debug_text_event(fsf_req->adapter->erp_dbf, 0, "xchg-port-ng");
+ debug_event(fsf_req->adapter->erp_dbf, 0,
+ &fsf_req->qtcb->header.fsf_status, sizeof(u32));
+ }
+}
+
+
+/*
+ * function: zfcp_fsf_open_port
+ *
+ * purpose: creates and starts an "open port with DID" FSF request for
+ * the port of the given ERP action; marks the port as OPENING
+ *
+ * returns: 0 - request has been initiated
+ * <0 - request could not be created or sent
+ */
+int
+zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
+{
+ volatile struct qdio_buffer_element *sbale;
+ unsigned long lock_flags;
+ int retval = 0;
+
+ /* setup new FSF request */
+ retval = zfcp_fsf_req_create(erp_action->adapter,
+ FSF_QTCB_OPEN_PORT_WITH_DID,
+ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
+ erp_action->adapter->pool.fsf_req_erp,
+ &lock_flags, &(erp_action->fsf_req));
+ if (retval < 0) {
+ ZFCP_LOG_INFO("error: Could not create open port request "
+ "for port 0x%016Lx on adapter %s.\n",
+ erp_action->port->wwpn,
+ zfcp_get_busid_by_adapter(erp_action->adapter));
+ goto out;
+ }
+
+ /* single read buffer entry, marked as last entry of the SBAL */
+ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
+ erp_action->fsf_req->sbal_curr, 0);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ /* address the port by its D_ID and flag it as being opened */
+ erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
+ atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
+ erp_action->fsf_req->data.open_port.port = erp_action->port;
+ erp_action->fsf_req->erp_action = erp_action;
+
+ /* start QDIO request for this FSF request */
+ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
+ if (retval) {
+ ZFCP_LOG_INFO("error: Could not send open port request for "
+ "port 0x%016Lx on adapter %s.\n",
+ erp_action->port->wwpn,
+ zfcp_get_busid_by_adapter(erp_action->adapter));
+ zfcp_fsf_req_free(erp_action->fsf_req);
+ erp_action->fsf_req = NULL;
+ goto out;
+ }
+
+ ZFCP_LOG_DEBUG("open port request initiated "
+ "(adapter %s, port 0x%016Lx)\n",
+ zfcp_get_busid_by_adapter(erp_action->adapter),
+ erp_action->port->wwpn);
+ out:
+ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
+ lock_flags);
+ return retval;
+}
+
+/*
+ * function: zfcp_fsf_open_port_handler
+ *
+ * purpose: is called for finished Open Port command; evaluates the
+ * FSF status, saves the port handle on success and drives
+ * port/ERP bookkeeping on failure
+ *
+ * returns: 0 - port was opened successfully
+ * -EINVAL - otherwise (request failed or status not good)
+ */
+static int
+zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
+{
+ int retval = -EINVAL;
+ struct zfcp_port *port;
+ struct fsf_plogi *plogi;
+ struct fsf_qtcb_header *header;
+ u16 subtable, rule, counter;
+
+ port = fsf_req->data.open_port.port;
+ header = &fsf_req->qtcb->header;
+
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+ /* don't change port status in our bookkeeping */
+ goto skip_fsfstatus;
+ }
+
+ /* evaluate FSF status in QTCB */
+ switch (header->fsf_status) {
+
+ case FSF_PORT_ALREADY_OPEN:
+ ZFCP_LOG_FLAGS(0, "FSF_PORT_ALREADY_OPEN\n");
+ ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s "
+ "is already open.\n",
+ port->wwpn, zfcp_get_busid_by_port(port));
+ debug_text_exception(fsf_req->adapter->erp_dbf, 0,
+ "fsf_s_popen");
+ /*
+ * This is a bug, however operation should continue normally
+ * if it is simply ignored
+ */
+ break;
+
+ case FSF_ACCESS_DENIED:
+ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
+ ZFCP_LOG_NORMAL("Access denied, cannot open port 0x%016Lx "
+ "on adapter %s\n",
+ port->wwpn, zfcp_get_busid_by_port(port));
+ /* report the two CFDC access rules found in the status
+ * qualifier (subtable/rule halfword pairs) */
+ for (counter = 0; counter < 2; counter++) {
+ subtable = header->fsf_status_qual.halfword[counter * 2];
+ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
+ switch (subtable) {
+ case FSF_SQ_CFDC_SUBTABLE_OS:
+ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
+ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
+ case FSF_SQ_CFDC_SUBTABLE_LUN:
+ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
+ zfcp_act_subtable_type[subtable], rule);
+ break;
+ }
+ }
+ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
+ zfcp_erp_port_access_denied(port);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
+ ZFCP_LOG_FLAGS(1, "FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED\n");
+ ZFCP_LOG_INFO("error: The FSF adapter is out of resources. "
+ "The remote port 0x%016Lx on adapter %s "
+ "could not be opened. Disabling it.\n",
+ port->wwpn, zfcp_get_busid_by_port(port));
+ debug_text_event(fsf_req->adapter->erp_dbf, 1,
+ "fsf_s_max_ports");
+ zfcp_erp_port_failed(port);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_ADAPTER_STATUS_AVAILABLE:
+ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+ switch (header->fsf_status_qual.word[0]) {
+ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+ ZFCP_LOG_FLAGS(2,
+ "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
+ debug_text_event(fsf_req->adapter->erp_dbf, 1,
+ "fsf_sq_ltest");
+ /* ERP strategy will escalate */
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+ /* ERP strategy will escalate */
+ debug_text_event(fsf_req->adapter->erp_dbf, 1,
+ "fsf_sq_ulp");
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_SQ_NO_RETRY_POSSIBLE:
+ ZFCP_LOG_FLAGS(0, "FSF_SQ_NO_RETRY_POSSIBLE\n");
+ ZFCP_LOG_NORMAL("The remote port 0x%016Lx on "
+ "adapter %s could not be opened. "
+ "Disabling it.\n",
+ port->wwpn,
+ zfcp_get_busid_by_port(port));
+ debug_text_exception(fsf_req->adapter->erp_dbf, 0,
+ "fsf_sq_no_retry");
+ zfcp_erp_port_failed(port);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ default:
+ ZFCP_LOG_NORMAL
+ ("bug: Wrong status qualifier 0x%x arrived.\n",
+ header->fsf_status_qual.word[0]);
+ debug_text_event(fsf_req->adapter->erp_dbf, 0,
+ "fsf_sq_inval:");
+ debug_exception(
+ fsf_req->adapter->erp_dbf, 0,
+ &header->fsf_status_qual.word[0],
+ sizeof (u32));
+ break;
+ }
+ break;
+
+ case FSF_GOOD:
+ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
+ /* save port handle assigned by FSF */
+ port->handle = header->port_handle;
+ ZFCP_LOG_INFO("The remote port 0x%016Lx via adapter %s "
+ "was opened, it's port handle is 0x%x\n",
+ port->wwpn, zfcp_get_busid_by_port(port),
+ port->handle);
+ /* mark port as open */
+ atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
+ ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+ retval = 0;
+ /* check whether D_ID has changed during open */
+ /*
+ * FIXME: This check is not airtight, as the FCP channel does
+ * not monitor closures of target port connections caused on
+ * the remote side. Thus, they might miss out on invalidating
+ * locally cached WWPNs (and other N_Port parameters) of gone
+ * target ports. So, our heroic attempt to make things safe
+ * could be undermined by 'open port' response data tagged with
+ * obsolete WWPNs. Another reason to monitor potential
+ * connection closures ourself at least (by interpreting
+ * incoming ELS' and unsolicited status). It just crosses my
+ * mind that one should be able to cross-check by means of
+ * another GID_PN straight after a port has been opened.
+ * Alternately, an ADISC/PDISC ELS should suffice, as well.
+ */
+ plogi = (struct fsf_plogi *) fsf_req->qtcb->bottom.support.els;
+ if (!atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, &port->status))
+ {
+ /* sanity check only possible if PLOGI payload is long
+ * enough to contain the WWPN service parameter */
+ if (fsf_req->qtcb->bottom.support.els1_length <
+ ((((unsigned long) &plogi->serv_param.wwpn) -
+ ((unsigned long) plogi)) + sizeof (u64))) {
+ ZFCP_LOG_INFO(
+ "warning: insufficient length of "
+ "PLOGI payload (%i)\n",
+ fsf_req->qtcb->bottom.support.els1_length);
+ debug_text_event(fsf_req->adapter->erp_dbf, 0,
+ "fsf_s_short_plogi:");
+ /* skip sanity check and assume wwpn is ok */
+ } else {
+ if (plogi->serv_param.wwpn != port->wwpn) {
+ ZFCP_LOG_INFO("warning: d_id of port "
+ "0x%016Lx changed during "
+ "open\n", port->wwpn);
+ debug_text_event(
+ fsf_req->adapter->erp_dbf, 0,
+ "fsf_s_did_change:");
+ atomic_clear_mask(
+ ZFCP_STATUS_PORT_DID_DID,
+ &port->status);
+ } else
+ port->wwnn = plogi->serv_param.wwnn;
+ }
+ }
+ break;
+
+ case FSF_UNKNOWN_OP_SUBTYPE:
+ /* should never occur, subtype not set in zfcp_fsf_open_port */
+ ZFCP_LOG_FLAGS(2, "FSF_UNKNOWN_OP_SUBTYPE\n");
+ ZFCP_LOG_INFO("unknown operation subtype (adapter: %s, "
+ "op_subtype=0x%x)\n",
+ zfcp_get_busid_by_port(port),
+ fsf_req->qtcb->bottom.support.operation_subtype);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ default:
+ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
+ "(debug info 0x%x)\n",
+ header->fsf_status);
+ debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
+ debug_exception(fsf_req->adapter->erp_dbf, 0,
+ &header->fsf_status, sizeof (u32));
+ break;
+ }
+
+ skip_fsfstatus:
+ /* OPENING was set in zfcp_fsf_open_port; clear it in any case */
+ atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
+ return retval;
+}
+
+/*
+ * function: zfcp_fsf_close_port
+ *
+ * purpose: submit FSF command "close port"
+ *
+ * returns: 0 - request has been initiated
+ * <0 - request could not be created or sent
+ */
+int
+zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
+{
+ volatile struct qdio_buffer_element *sbale;
+ unsigned long lock_flags;
+ int retval = 0;
+
+ /* setup new FSF request */
+ retval = zfcp_fsf_req_create(erp_action->adapter,
+ FSF_QTCB_CLOSE_PORT,
+ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
+ erp_action->adapter->pool.fsf_req_erp,
+ &lock_flags, &(erp_action->fsf_req));
+ if (retval < 0) {
+ ZFCP_LOG_INFO("error: Could not create a close port request "
+ "for port 0x%016Lx on adapter %s.\n",
+ erp_action->port->wwpn,
+ zfcp_get_busid_by_adapter(erp_action->adapter));
+ goto out;
+ }
+
+ /* single read buffer entry, marked as last entry of the SBAL */
+ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
+ erp_action->fsf_req->sbal_curr, 0);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ /* flag port as being closed and address it by its FSF handle */
+ atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
+ erp_action->fsf_req->data.close_port.port = erp_action->port;
+ erp_action->fsf_req->erp_action = erp_action;
+ erp_action->fsf_req->qtcb->header.port_handle =
+ erp_action->port->handle;
+
+ /* start QDIO request for this FSF request */
+ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
+ if (retval) {
+ ZFCP_LOG_INFO("error: Could not send a close port request for "
+ "port 0x%016Lx on adapter %s.\n",
+ erp_action->port->wwpn,
+ zfcp_get_busid_by_adapter(erp_action->adapter));
+ zfcp_fsf_req_free(erp_action->fsf_req);
+ erp_action->fsf_req = NULL;
+ goto out;
+ }
+
+ ZFCP_LOG_TRACE("close port request initiated "
+ "(adapter %s, port 0x%016Lx)\n",
+ zfcp_get_busid_by_adapter(erp_action->adapter),
+ erp_action->port->wwpn);
+ out:
+ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
+ lock_flags);
+ return retval;
+}
+
+/*
+ * function: zfcp_fsf_close_port_handler
+ *
+ * purpose: is called for finished Close Port FSF command; updates the
+ * port status in our bookkeeping according to the FSF status
+ *
+ * returns: 0 - port was closed (or FSF reports it as available)
+ * -EINVAL - otherwise
+ */
+static int
+zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
+{
+ int retval = -EINVAL;
+ struct zfcp_port *port;
+
+ port = fsf_req->data.close_port.port;
+
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+ /* don't change port status in our bookkeeping */
+ goto skip_fsfstatus;
+ }
+
+ /* evaluate FSF status in QTCB */
+ switch (fsf_req->qtcb->header.fsf_status) {
+
+ case FSF_PORT_HANDLE_NOT_VALID:
+ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
+ ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
+ "0x%016Lx on adapter %s invalid. This may happen "
+ "occasionally.\n", port->handle,
+ port->wwpn, zfcp_get_busid_by_port(port));
+ ZFCP_LOG_DEBUG("status qualifier:\n");
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+ (char *) &fsf_req->qtcb->header.fsf_status_qual,
+ sizeof (union fsf_status_qual));
+ debug_text_event(fsf_req->adapter->erp_dbf, 1,
+ "fsf_s_phand_nv");
+ /* stale handle: reopen the whole adapter to resync handles */
+ zfcp_erp_adapter_reopen(port->adapter, 0);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_ADAPTER_STATUS_AVAILABLE:
+ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+ /* Note: FSF has actually closed the port in this case.
+ * The status code is just daft. Fingers crossed for a change
+ */
+ retval = 0;
+ break;
+
+ case FSF_GOOD:
+ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
+ /* fixed broken format specifier: was "0x016%Lx" */
+ ZFCP_LOG_TRACE("remote port 0x%016Lx on adapter %s closed, "
+ "port handle 0x%x\n", port->wwpn,
+ zfcp_get_busid_by_port(port), port->handle);
+ zfcp_erp_modify_port_status(port,
+ ZFCP_STATUS_COMMON_OPEN,
+ ZFCP_CLEAR);
+ retval = 0;
+ break;
+
+ default:
+ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
+ "(debug info 0x%x)\n",
+ fsf_req->qtcb->header.fsf_status);
+ debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
+ debug_exception(fsf_req->adapter->erp_dbf, 0,
+ &fsf_req->qtcb->header.fsf_status,
+ sizeof (u32));
+ break;
+ }
+
+ skip_fsfstatus:
+ /* CLOSING was set in zfcp_fsf_close_port; clear it in any case */
+ atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
+ return retval;
+}
+
+/*
+ * function: zfcp_fsf_close_physical_port
+ *
+ * purpose: submit FSF command "close physical port"
+ *
+ * returns: 0 - request has been initiated
+ * <0 - request could not be created or sent
+ */
+int
+zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
+{
+ int retval = 0;
+ unsigned long lock_flags;
+ volatile struct qdio_buffer_element *sbale;
+
+ /* setup new FSF request */
+ retval = zfcp_fsf_req_create(erp_action->adapter,
+ FSF_QTCB_CLOSE_PHYSICAL_PORT,
+ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
+ erp_action->adapter->pool.fsf_req_erp,
+ &lock_flags, &erp_action->fsf_req);
+ if (retval < 0) {
+ ZFCP_LOG_INFO("error: Could not create close physical port "
+ "request (adapter %s, port 0x%016Lx)\n",
+ zfcp_get_busid_by_adapter(erp_action->adapter),
+ erp_action->port->wwpn);
+
+ goto out;
+ }
+
+ /* single read buffer entry, marked as last entry of the SBAL */
+ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
+ erp_action->fsf_req->sbal_curr, 0);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ /* mark port as being closed */
+ atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
+ &erp_action->port->status);
+ /* save a pointer to this port */
+ erp_action->fsf_req->data.close_physical_port.port = erp_action->port;
+ /* port to be closed */
+ erp_action->fsf_req->qtcb->header.port_handle =
+ erp_action->port->handle;
+ erp_action->fsf_req->erp_action = erp_action;
+
+ /* start QDIO request for this FSF request */
+ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
+ if (retval) {
+ ZFCP_LOG_INFO("error: Could not send close physical port "
+ "request (adapter %s, port 0x%016Lx)\n",
+ zfcp_get_busid_by_adapter(erp_action->adapter),
+ erp_action->port->wwpn);
+ zfcp_fsf_req_free(erp_action->fsf_req);
+ erp_action->fsf_req = NULL;
+ goto out;
+ }
+
+ ZFCP_LOG_TRACE("close physical port request initiated "
+ "(adapter %s, port 0x%016Lx)\n",
+ zfcp_get_busid_by_adapter(erp_action->adapter),
+ erp_action->port->wwpn);
+ out:
+ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
+ lock_flags);
+ return retval;
+}
+
+/*
+ * function: zfcp_fsf_close_physical_port_handler
+ *
+ * purpose: is called for finished Close Physical Port FSF command;
+ * on success clears PHYS_OPEN for the port and OPEN for all
+ * of its units
+ *
+ * returns: 0 - physical port was closed
+ * -EINVAL - otherwise
+ */
+static int
+zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
+{
+ int retval = -EINVAL;
+ struct zfcp_port *port;
+ struct zfcp_unit *unit;
+ struct fsf_qtcb_header *header;
+ u16 subtable, rule, counter;
+
+ port = fsf_req->data.close_physical_port.port;
+ header = &fsf_req->qtcb->header;
+
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+ /* don't change port status in our bookkeeping */
+ goto skip_fsfstatus;
+ }
+
+ /* evaluate FSF status in QTCB */
+ switch (header->fsf_status) {
+
+ case FSF_PORT_HANDLE_NOT_VALID:
+ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
+ ZFCP_LOG_INFO("Temporary port identifier 0x%x invalid"
+ "(adapter %s, port 0x%016Lx). "
+ "This may happen occasionally.\n",
+ port->handle,
+ zfcp_get_busid_by_port(port),
+ port->wwpn);
+ ZFCP_LOG_DEBUG("status qualifier:\n");
+ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+ (char *) &header->fsf_status_qual,
+ sizeof (union fsf_status_qual));
+ debug_text_event(fsf_req->adapter->erp_dbf, 1,
+ "fsf_s_phand_nv");
+ /* stale handle: reopen the whole adapter to resync handles */
+ zfcp_erp_adapter_reopen(port->adapter, 0);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_ACCESS_DENIED:
+ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
+ ZFCP_LOG_NORMAL("Access denied, cannot close "
+ "physical port 0x%016Lx on adapter %s\n",
+ port->wwpn, zfcp_get_busid_by_port(port));
+ /* report the two CFDC access rules found in the status
+ * qualifier (subtable/rule halfword pairs) */
+ for (counter = 0; counter < 2; counter++) {
+ subtable = header->fsf_status_qual.halfword[counter * 2];
+ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
+ switch (subtable) {
+ case FSF_SQ_CFDC_SUBTABLE_OS:
+ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
+ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
+ case FSF_SQ_CFDC_SUBTABLE_LUN:
+ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
+ zfcp_act_subtable_type[subtable], rule);
+ break;
+ }
+ }
+ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
+ zfcp_erp_port_access_denied(port);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+
+ case FSF_PORT_BOXED:
+ ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
+ ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter "
+ "%s needs to be reopened but it was attempted "
+ "to close it physically.\n",
+ port->wwpn,
+ zfcp_get_busid_by_port(port));
+ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_pboxed");
+ zfcp_erp_port_reopen(port, 0);
+ /* flag as retryable so ERP repeats the request */
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+ ZFCP_STATUS_FSFREQ_RETRY;
+ break;
+
+ case FSF_ADAPTER_STATUS_AVAILABLE:
+ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+ switch (header->fsf_status_qual.word[0]) {
+ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+ ZFCP_LOG_FLAGS(2,
+ "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
+ debug_text_event(fsf_req->adapter->erp_dbf, 1,
+ "fsf_sq_ltest");
+ /* This will now be escalated by ERP */
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+ ZFCP_LOG_FLAGS(2,
+ "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
+ /* ERP strategy will escalate */
+ debug_text_event(fsf_req->adapter->erp_dbf, 1,
+ "fsf_sq_ulp");
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ default:
+ ZFCP_LOG_NORMAL
+ ("bug: Wrong status qualifier 0x%x arrived.\n",
+ header->fsf_status_qual.word[0]);
+ debug_text_event(fsf_req->adapter->erp_dbf, 0,
+ "fsf_sq_inval:");
+ debug_exception(
+ fsf_req->adapter->erp_dbf, 0,
+ &header->fsf_status_qual.word[0], sizeof (u32));
+ break;
+ }
+ break;
+
+ case FSF_GOOD:
+ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
+ ZFCP_LOG_DEBUG("Remote port 0x%016Lx via adapter %s "
+ "physically closed, port handle 0x%x\n",
+ port->wwpn,
+ zfcp_get_busid_by_port(port), port->handle);
+ /* can't use generic zfcp_erp_modify_port_status because
+ * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
+ */
+ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+ list_for_each_entry(unit, &port->unit_list_head, list)
+ atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
+ retval = 0;
+ break;
+
+ default:
+ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
+ "(debug info 0x%x)\n",
+ header->fsf_status);
+ debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
+ debug_exception(fsf_req->adapter->erp_dbf, 0,
+ &header->fsf_status, sizeof (u32));
+ break;
+ }
+
+ skip_fsfstatus:
+ /* PHYS_CLOSING was set in zfcp_fsf_close_physical_port */
+ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
+ return retval;
+}
+
+/*
+ * function: zfcp_fsf_open_unit
+ *
+ * purpose: creates and starts an "open LUN" FSF request for the unit
+ * of the given ERP action; marks the unit as OPENING
+ *
+ * returns: 0 - request has been initiated
+ * <0 - request could not be created or sent
+ *
+ * assumptions: This routine does not check whether the associated
+ * remote port has already been opened. This should be
+ * done by calling routines. Otherwise some status
+ * may be presented by FSF
+ */
+int
+zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
+{
+ volatile struct qdio_buffer_element *sbale;
+ unsigned long lock_flags;
+ int retval = 0;
+
+ /* setup new FSF request */
+ retval = zfcp_fsf_req_create(erp_action->adapter,
+ FSF_QTCB_OPEN_LUN,
+ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
+ erp_action->adapter->pool.fsf_req_erp,
+ &lock_flags, &(erp_action->fsf_req));
+ if (retval < 0) {
+ ZFCP_LOG_INFO("error: Could not create open unit request for "
+ "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
+ erp_action->unit->fcp_lun,
+ erp_action->unit->port->wwpn,
+ zfcp_get_busid_by_adapter(erp_action->adapter));
+ goto out;
+ }
+
+ /* single read buffer entry, marked as last entry of the SBAL */
+ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
+ erp_action->fsf_req->sbal_curr, 0);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ /* address the LUN via port handle + FCP LUN; suppress boxing */
+ erp_action->fsf_req->qtcb->header.port_handle =
+ erp_action->port->handle;
+ erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
+ erp_action->unit->fcp_lun;
+ erp_action->fsf_req->qtcb->bottom.support.option =
+ FSF_OPEN_LUN_SUPPRESS_BOXING;
+ atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
+ erp_action->fsf_req->data.open_unit.unit = erp_action->unit;
+ erp_action->fsf_req->erp_action = erp_action;
+
+ /* start QDIO request for this FSF request */
+ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
+ if (retval) {
+ ZFCP_LOG_INFO("error: Could not send an open unit request "
+ "on the adapter %s, port 0x%016Lx for "
+ "unit 0x%016Lx\n",
+ zfcp_get_busid_by_adapter(erp_action->adapter),
+ erp_action->port->wwpn,
+ erp_action->unit->fcp_lun);
+ zfcp_fsf_req_free(erp_action->fsf_req);
+ erp_action->fsf_req = NULL;
+ goto out;
+ }
+
+ ZFCP_LOG_TRACE("Open LUN request initiated (adapter %s, "
+ "port 0x%016Lx, unit 0x%016Lx)\n",
+ zfcp_get_busid_by_adapter(erp_action->adapter),
+ erp_action->port->wwpn, erp_action->unit->fcp_lun);
+ out:
+ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
+ lock_flags);
+ return retval;
+}
+
+/*
+ * function:    zfcp_fsf_open_unit_handler
+ *
+ * purpose:	is called for finished Open LUN command; evaluates the FSF
+ *		status in the QTCB, updates the unit status bookkeeping
+ *		(OPEN/SHARED/READONLY/ACCESS_DENIED) and triggers error
+ *		recovery (adapter/port reopen, unit shutdown) where required
+ *
+ * returns:	0 for FSF_GOOD, -EINVAL otherwise
+ */
+static int
+zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = -EINVAL;
+	struct zfcp_adapter *adapter;
+	struct zfcp_unit *unit;
+	struct fsf_qtcb_header *header;
+	struct fsf_qtcb_bottom_support *bottom;
+	struct fsf_queue_designator *queue_designator;
+	u16 subtable, rule, counter;
+	u32 allowed, exclusive, readwrite;
+
+	unit = fsf_req->data.open_unit.unit;
+
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+		/* don't change unit status in our bookkeeping */
+		goto skip_fsfstatus;
+	}
+
+	adapter = fsf_req->adapter;
+	header = &fsf_req->qtcb->header;
+	bottom = &fsf_req->qtcb->bottom.support;
+	queue_designator = &header->fsf_status_qual.fsf_queue_designator;
+
+	/* LUN access flags as reported by the adapter in the QTCB bottom */
+	allowed = bottom->lun_access_info & FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED;
+	exclusive = bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE;
+	readwrite = bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER;
+
+	/* reset access bookkeeping before re-evaluating the FSF status */
+	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+			  ZFCP_STATUS_UNIT_SHARED |
+			  ZFCP_STATUS_UNIT_READONLY,
+			  &unit->status);
+
+	/* evaluate FSF status in QTCB */
+	switch (header->fsf_status) {
+
+	case FSF_PORT_HANDLE_NOT_VALID:
+		ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
+		ZFCP_LOG_INFO("Temporary port identifier 0x%x "
+			      "for port 0x%016Lx on adapter %s invalid "
+			      "This may happen occasionally\n",
+			      unit->port->handle,
+			      unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+		ZFCP_LOG_DEBUG("status qualifier:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      (char *) &header->fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(adapter->erp_dbf, 1, "fsf_s_ph_nv");
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_LUN_ALREADY_OPEN:
+		ZFCP_LOG_FLAGS(0, "FSF_LUN_ALREADY_OPEN\n");
+		ZFCP_LOG_NORMAL("bug: Attempted to open unit 0x%016Lx on "
+				"remote port 0x%016Lx on adapter %s twice.\n",
+				unit->fcp_lun,
+				unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+		debug_text_exception(adapter->erp_dbf, 0,
+				     "fsf_s_uopen");
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_ACCESS_DENIED:
+		ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
+		ZFCP_LOG_NORMAL("Access denied, cannot open unit 0x%016Lx on "
+				"remote port 0x%016Lx on adapter %s\n",
+				unit->fcp_lun, unit->port->wwpn,
+				zfcp_get_busid_by_unit(unit));
+		/* the status qualifier carries up to two CFDC subtable/rule
+		 * pairs identifying the access control rule that denied us */
+		for (counter = 0; counter < 2; counter++) {
+			subtable = header->fsf_status_qual.halfword[counter * 2];
+			rule = header->fsf_status_qual.halfword[counter * 2 + 1];
+			switch (subtable) {
+			case FSF_SQ_CFDC_SUBTABLE_OS:
+			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
+			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
+			case FSF_SQ_CFDC_SUBTABLE_LUN:
+				ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
+					zfcp_act_subtable_type[subtable], rule);
+				break;
+			}
+		}
+		debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
+		zfcp_erp_unit_access_denied(unit);
+		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
+		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PORT_BOXED:
+		ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
+		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
+			       "needs to be reopened\n",
+			       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+		debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed");
+		zfcp_erp_port_reopen(unit->port, 0);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			ZFCP_STATUS_FSFREQ_RETRY;
+		break;
+
+	case FSF_LUN_SHARING_VIOLATION:
+		ZFCP_LOG_FLAGS(2, "FSF_LUN_SHARING_VIOLATION\n");
+		/* word[0] != 0 means the LUN is held by another LPAR;
+		 * otherwise an access control rule denied shared access */
+		if (header->fsf_status_qual.word[0] != 0) {
+			ZFCP_LOG_NORMAL("FCP-LUN 0x%Lx at the remote port "
+					"with WWPN 0x%Lx "
+					"connected to the adapter %s "
+					"is already in use in LPAR%d, CSS%d\n",
+					unit->fcp_lun,
+					unit->port->wwpn,
+					zfcp_get_busid_by_unit(unit),
+					queue_designator->hla,
+					queue_designator->cssid);
+		} else {
+			subtable = header->fsf_status_qual.halfword[4];
+			rule = header->fsf_status_qual.halfword[5];
+			switch (subtable) {
+			case FSF_SQ_CFDC_SUBTABLE_OS:
+			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
+			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
+			case FSF_SQ_CFDC_SUBTABLE_LUN:
+				ZFCP_LOG_NORMAL("Access to FCP-LUN 0x%Lx at the "
+						"remote port with WWPN 0x%Lx "
+						"connected to the adapter %s "
+						"is denied (%s rule %d)\n",
+						unit->fcp_lun,
+						unit->port->wwpn,
+						zfcp_get_busid_by_unit(unit),
+						zfcp_act_subtable_type[subtable],
+						rule);
+				break;
+			}
+		}
+		ZFCP_LOG_DEBUG("status qualifier:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      (char *) &header->fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(adapter->erp_dbf, 2,
+				 "fsf_s_l_sh_vio");
+		zfcp_erp_unit_access_denied(unit);
+		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
+		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
+		ZFCP_LOG_FLAGS(1, "FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED\n");
+		ZFCP_LOG_INFO("error: The adapter ran out of resources. "
+			      "There is no handle (temporary port identifier) "
+			      "available for unit 0x%016Lx on port 0x%016Lx "
+			      "on adapter %s\n",
+			      unit->fcp_lun,
+			      unit->port->wwpn,
+			      zfcp_get_busid_by_unit(unit));
+		debug_text_event(adapter->erp_dbf, 1,
+				 "fsf_s_max_units");
+		zfcp_erp_unit_failed(unit);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			ZFCP_LOG_FLAGS(2,
+				       "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
+			/* Re-establish link to port */
+			debug_text_event(adapter->erp_dbf, 1,
+					 "fsf_sq_ltest");
+			zfcp_erp_port_reopen(unit->port, 0);
+			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			ZFCP_LOG_FLAGS(2,
+				       "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
+			/* ERP strategy will escalate */
+			debug_text_event(adapter->erp_dbf, 1,
+					 "fsf_sq_ulp");
+			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		default:
+			ZFCP_LOG_NORMAL
+			    ("bug: Wrong status qualifier 0x%x arrived.\n",
+			     header->fsf_status_qual.word[0]);
+			debug_text_event(adapter->erp_dbf, 0,
+					 "fsf_sq_inval:");
+			debug_exception(adapter->erp_dbf, 0,
+					&header->fsf_status_qual.word[0],
+					sizeof (u32));
+		}
+		break;
+
+	case FSF_INVALID_COMMAND_OPTION:
+		ZFCP_LOG_FLAGS(2, "FSF_INVALID_COMMAND_OPTION\n");
+		ZFCP_LOG_NORMAL(
+			"Invalid option 0x%x has been specified "
+			"in QTCB bottom sent to the adapter %s\n",
+			bottom->option,
+			zfcp_get_busid_by_adapter(adapter));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EINVAL;
+		break;
+
+	case FSF_GOOD:
+		ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
+		/* save LUN handle assigned by FSF */
+		unit->handle = header->lun_handle;
+		ZFCP_LOG_TRACE("unit 0x%016Lx on remote port 0x%016Lx on "
+			       "adapter %s opened, port handle 0x%x\n",
+			       unit->fcp_lun,
+			       unit->port->wwpn,
+			       zfcp_get_busid_by_unit(unit),
+			       unit->handle);
+		/* mark unit as open */
+		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
+
+		/* only evaluate LUN sharing flags when the adapter
+		 * advertises the LUN sharing feature */
+		if (adapter->supported_features & FSF_FEATURE_LUN_SHARING){
+			if (!exclusive)
+				atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
+						&unit->status);
+
+			if (!readwrite) {
+				atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
+						&unit->status);
+				ZFCP_LOG_NORMAL("read-only access for unit "
+						"(adapter %s, wwpn=0x%016Lx, "
+						"fcp_lun=0x%016Lx)\n",
+						zfcp_get_busid_by_unit(unit),
+						unit->port->wwpn,
+						unit->fcp_lun);
+			}
+
+			/* exclusive+read-only and shared+read-write are
+			 * unsupported combinations -> fail and shut down */
+			if (exclusive && !readwrite) {
+				ZFCP_LOG_NORMAL("exclusive access of read-only "
+						"unit not supported\n");
+				zfcp_erp_unit_failed(unit);
+				fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+				zfcp_erp_unit_shutdown(unit, 0);
+			} else if (!exclusive && readwrite) {
+				ZFCP_LOG_NORMAL("shared access of read-write "
+						"unit not supported\n");
+				zfcp_erp_unit_failed(unit);
+				fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+				zfcp_erp_unit_shutdown(unit, 0);
+			}
+		}
+
+		retval = 0;
+		break;
+
+	default:
+		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
+				"(debug info 0x%x)\n",
+				header->fsf_status);
+		debug_text_event(adapter->erp_dbf, 0, "fsf_s_inval:");
+		debug_exception(adapter->erp_dbf, 0,
+				&header->fsf_status, sizeof (u32));
+		break;
+	}
+
+ skip_fsfstatus:
+	/* OPENING is cleared even if the request failed */
+	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
+	return retval;
+}
+
+/*
+ * function:    zfcp_fsf_close_unit
+ *
+ * purpose:	initiates a Close LUN (FSF_QTCB_CLOSE_LUN) request for the
+ *		unit referenced by the given ERP action
+ *
+ * returns:	0	- request successfully initiated
+ *		<0	- request could not be created or sent
+ *
+ * assumptions: This routine does not check whether the associated
+ *		remote port/lun has already been opened. This should be
+ *		done by calling routines. Otherwise some status
+ *		may be presented by FSF
+ */
+int
+zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
+{
+	volatile struct qdio_buffer_element *sbale;
+	unsigned long lock_flags;
+	int retval = 0;
+
+	/* setup new FSF request (takes the request queue lock, released
+	 * at the "out" label below) */
+	retval = zfcp_fsf_req_create(erp_action->adapter,
+				     FSF_QTCB_CLOSE_LUN,
+				     ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
+				     erp_action->adapter->pool.fsf_req_erp,
+				     &lock_flags, &(erp_action->fsf_req));
+	if (retval < 0) {
+		ZFCP_LOG_INFO("error: Could not create close unit request for "
+			      "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
+			      erp_action->unit->fcp_lun,
+			      erp_action->port->wwpn,
+			      zfcp_get_busid_by_adapter(erp_action->adapter));
+		goto out;
+	}
+
+	sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
+				    erp_action->fsf_req->sbal_curr, 0);
+	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+	erp_action->fsf_req->qtcb->header.port_handle =
+	    erp_action->port->handle;
+	erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
+	atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
+	erp_action->fsf_req->data.close_unit.unit = erp_action->unit;
+	erp_action->fsf_req->erp_action = erp_action;
+
+	/* start QDIO request for this FSF request */
+	retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
+	if (retval) {
+		/* log message fixed: was "onadapter" (missing space) */
+		ZFCP_LOG_INFO("error: Could not send a close unit request for "
+			      "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
+			      erp_action->unit->fcp_lun,
+			      erp_action->port->wwpn,
+			      zfcp_get_busid_by_adapter(erp_action->adapter));
+		zfcp_fsf_req_free(erp_action->fsf_req);
+		erp_action->fsf_req = NULL;
+		goto out;
+	}
+
+	ZFCP_LOG_TRACE("Close LUN request initiated (adapter %s, "
+		       "port 0x%016Lx, unit 0x%016Lx)\n",
+		       zfcp_get_busid_by_adapter(erp_action->adapter),
+		       erp_action->port->wwpn, erp_action->unit->fcp_lun);
+ out:
+	write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
+				lock_flags);
+	return retval;
+}
+
+/*
+ * function:    zfcp_fsf_close_unit_handler
+ *
+ * purpose:	is called for finished Close LUN FSF command; evaluates the
+ *		FSF status in the QTCB, clears the unit's OPEN flag on
+ *		success and triggers adapter/port reopen on handle errors
+ *
+ * returns:	0 for FSF_GOOD, -EINVAL otherwise
+ */
+static int
+zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = -EINVAL;
+	struct zfcp_unit *unit;
+
+	unit = fsf_req->data.close_unit.unit;	/* restore unit */
+
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+		/* don't change unit status in our bookkeeping */
+		goto skip_fsfstatus;
+	}
+
+	/* evaluate FSF status in QTCB */
+	switch (fsf_req->qtcb->header.fsf_status) {
+
+	case FSF_PORT_HANDLE_NOT_VALID:
+		ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
+		ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
+			      "0x%016Lx on adapter %s invalid. This may "
+			      "happen in rare circumstances\n",
+			      unit->port->handle,
+			      unit->port->wwpn,
+			      zfcp_get_busid_by_unit(unit));
+		ZFCP_LOG_DEBUG("status qualifier:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(fsf_req->adapter->erp_dbf, 1,
+				 "fsf_s_phand_nv");
+		/* stale port handle -> recover the whole adapter */
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+		zfcp_cmd_dbf_event_fsf("porthinv", fsf_req,
+				       &fsf_req->qtcb->header.fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_LUN_HANDLE_NOT_VALID:
+		ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
+		ZFCP_LOG_INFO("Temporary LUN identifier 0x%x of unit "
+			      "0x%016Lx on port 0x%016Lx on adapter %s is "
+			      "invalid. This may happen occasionally.\n",
+			      unit->handle,
+			      unit->fcp_lun,
+			      unit->port->wwpn,
+			      zfcp_get_busid_by_unit(unit));
+		ZFCP_LOG_DEBUG("Status qualifier data:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(fsf_req->adapter->erp_dbf, 1,
+				 "fsf_s_lhand_nv");
+		/* stale LUN handle -> reopening the port is sufficient */
+		zfcp_erp_port_reopen(unit->port, 0);
+		zfcp_cmd_dbf_event_fsf("lunhinv", fsf_req,
+				       &fsf_req->qtcb->header.fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PORT_BOXED:
+		ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
+		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
+			       "needs to be reopened\n",
+			       unit->port->wwpn,
+			       zfcp_get_busid_by_unit(unit));
+		debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_pboxed");
+		zfcp_erp_port_reopen(unit->port, 0);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			ZFCP_STATUS_FSFREQ_RETRY;
+		break;
+
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+		switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			ZFCP_LOG_FLAGS(2,
+				       "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
+			/* re-establish link to port */
+			debug_text_event(fsf_req->adapter->erp_dbf, 1,
+					 "fsf_sq_ltest");
+			zfcp_erp_port_reopen(unit->port, 0);
+			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			ZFCP_LOG_FLAGS(2,
+				       "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
+			/* ERP strategy will escalate */
+			debug_text_event(fsf_req->adapter->erp_dbf, 1,
+					 "fsf_sq_ulp");
+			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		default:
+			ZFCP_LOG_NORMAL
+			    ("bug: Wrong status qualifier 0x%x arrived.\n",
+			     fsf_req->qtcb->header.fsf_status_qual.word[0]);
+			debug_text_event(fsf_req->adapter->erp_dbf, 0,
+					 "fsf_sq_inval:");
+			debug_exception(
+				fsf_req->adapter->erp_dbf, 0,
+				&fsf_req->qtcb->header.fsf_status_qual.word[0],
+				sizeof (u32));
+			break;
+		}
+		break;
+
+	case FSF_GOOD:
+		ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
+		ZFCP_LOG_TRACE("unit 0x%016Lx on port 0x%016Lx on adapter %s "
+			       "closed, port handle 0x%x\n",
+			       unit->fcp_lun,
+			       unit->port->wwpn,
+			       zfcp_get_busid_by_unit(unit),
+			       unit->handle);
+		/* mark unit as closed */
+		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
+		retval = 0;
+		break;
+
+	default:
+		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
+				"(debug info 0x%x)\n",
+				fsf_req->qtcb->header.fsf_status);
+		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
+		debug_exception(fsf_req->adapter->erp_dbf, 0,
+				&fsf_req->qtcb->header.fsf_status,
+				sizeof (u32));
+		break;
+	}
+
+ skip_fsfstatus:
+	/* CLOSING is cleared even if the request failed */
+	atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
+	return retval;
+}
+
+/**
+ * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
+ * @adapter: adapter where scsi command is issued
+ * @unit: unit where command is sent to
+ * @scsi_cmnd: scsi command to be sent
+ * @timer: timer to be started when request is initiated
+ * @req_flags: flags for fsf_request
+ *
+ * Builds an FSF_QTCB_FCP_CMND request for @scsi_cmnd (data direction,
+ * FCP_CMND IU, SBALEs from the scatter-gather list) and sends it.
+ * Returns 0 on success, a negative value otherwise.
+ *
+ * NOTE: zfcp_fsf_req_create() acquires the adapter request queue lock;
+ * it is released at the end of this function via the goto labels, on
+ * every exit path.
+ */
+int
+zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
+			       struct zfcp_unit *unit,
+			       struct scsi_cmnd * scsi_cmnd,
+			       struct timer_list *timer, int req_flags)
+{
+	struct zfcp_fsf_req *fsf_req = NULL;
+	struct fcp_cmnd_iu *fcp_cmnd_iu;
+	unsigned int sbtype;
+	unsigned long lock_flags;
+	int real_bytes = 0;
+	int retval = 0;
+	int mask;
+
+	/* setup new FSF request */
+	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
+				     adapter->pool.fsf_req_scsi,
+				     &lock_flags, &fsf_req);
+	if (unlikely(retval < 0)) {
+		ZFCP_LOG_DEBUG("error: Could not create FCP command request "
+			       "for unit 0x%016Lx on port 0x%016Lx on "
+			       "adapter %s\n",
+			       unit->fcp_lun,
+			       unit->port->wwpn,
+			       zfcp_get_busid_by_adapter(adapter));
+		goto failed_req_create;
+	}
+
+	/*
+	 * associate FSF request with SCSI request
+	 * (need this for look up on abort)
+	 */
+	fsf_req->data.send_fcp_command_task.fsf_req = fsf_req;
+	scsi_cmnd->host_scribble = (char *) &(fsf_req->data);
+
+	/*
+	 * associate SCSI command with FSF request
+	 * (need this for look up on normal command completion)
+	 */
+	fsf_req->data.send_fcp_command_task.scsi_cmnd = scsi_cmnd;
+	fsf_req->data.send_fcp_command_task.start_jiffies = jiffies;
+	fsf_req->data.send_fcp_command_task.unit = unit;
+	ZFCP_LOG_DEBUG("unit=%p, fcp_lun=0x%016Lx\n", unit, unit->fcp_lun);
+
+	/* set handles of unit and its parent port in QTCB */
+	fsf_req->qtcb->header.lun_handle = unit->handle;
+	fsf_req->qtcb->header.port_handle = unit->port->handle;
+
+	/* FSF does not define the structure of the FCP_CMND IU */
+	fcp_cmnd_iu = (struct fcp_cmnd_iu *)
+	    &(fsf_req->qtcb->bottom.io.fcp_cmnd);
+
+	/*
+	 * set depending on data direction:
+	 *      data direction bits in SBALE (SB Type)
+	 *      data direction bits in QTCB
+	 *      data direction bits in FCP_CMND IU
+	 */
+	switch (scsi_cmnd->sc_data_direction) {
+	case DMA_NONE:
+		ZFCP_LOG_FLAGS(3, "DMA_NONE\n");
+		fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
+		/*
+		 * FIXME(qdio):
+		 * what is the correct type for commands
+		 * without 'real' data buffers?
+		 */
+		sbtype = SBAL_FLAGS0_TYPE_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		ZFCP_LOG_FLAGS(3, "DMA_FROM_DEVICE\n");
+		fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
+		sbtype = SBAL_FLAGS0_TYPE_READ;
+		fcp_cmnd_iu->rddata = 1;
+		break;
+	case DMA_TO_DEVICE:
+		ZFCP_LOG_FLAGS(3, "DMA_TO_DEVICE\n");
+		fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
+		sbtype = SBAL_FLAGS0_TYPE_WRITE;
+		fcp_cmnd_iu->wddata = 1;
+		break;
+	case DMA_BIDIRECTIONAL:
+		ZFCP_LOG_FLAGS(0, "DMA_BIDIRECTIONAL not supported\n");
+		/* fallthrough: treated like the default (unsupported) case */
+	default:
+		/*
+		 * dummy, catch this condition earlier
+		 * in zfcp_scsi_queuecommand
+		 */
+		goto failed_scsi_cmnd;
+	}
+
+	/* set FC service class in QTCB (3 per default) */
+	fsf_req->qtcb->bottom.io.service_class = adapter->fc_service_class;
+
+	/* set FCP_LUN in FCP_CMND IU in QTCB */
+	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
+
+	mask = ZFCP_STATUS_UNIT_READONLY | ZFCP_STATUS_UNIT_SHARED;
+
+	/* set task attributes in FCP_CMND IU in QTCB */
+	if (likely((scsi_cmnd->device->simple_tags) ||
+		   (atomic_test_mask(mask, &unit->status))))
+		fcp_cmnd_iu->task_attribute = SIMPLE_Q;
+	else
+		fcp_cmnd_iu->task_attribute = UNTAGGED;
+
+	/* set additional length of FCP_CDB in FCP_CMND IU in QTCB, if needed */
+	if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) {
+		fcp_cmnd_iu->add_fcp_cdb_length
+		    = (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
+		ZFCP_LOG_TRACE("SCSI CDB length is 0x%x, "
+			       "additional FCP_CDB length is 0x%x "
+			       "(shifted right 2 bits)\n",
+			       scsi_cmnd->cmd_len,
+			       fcp_cmnd_iu->add_fcp_cdb_length);
+	}
+	/*
+	 * copy SCSI CDB (including additional length, if any) to
+	 * FCP_CDB in FCP_CMND IU in QTCB
+	 */
+	memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+
+	/* FCP CMND IU length in QTCB */
+	fsf_req->qtcb->bottom.io.fcp_cmnd_length =
+		sizeof (struct fcp_cmnd_iu) +
+		fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t);
+
+	/* generate SBALEs from data buffer */
+	real_bytes = zfcp_qdio_sbals_from_scsicmnd(fsf_req, sbtype, scsi_cmnd);
+	if (unlikely(real_bytes < 0)) {
+		/* -EIO means "try again later" (SBALs exhausted for now),
+		 * otherwise data cannot fit at all -> shut unit down */
+		if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) {
+			ZFCP_LOG_DEBUG(
+				"Data did not fit into available buffer(s), "
+			       "waiting for more...\n");
+			retval = -EIO;
+		} else {
+			ZFCP_LOG_NORMAL("error: No truncation implemented but "
+					"required. Shutting down unit "
+					"(adapter %s, port 0x%016Lx, "
+					"unit 0x%016Lx)\n",
+					zfcp_get_busid_by_unit(unit),
+					unit->port->wwpn,
+					unit->fcp_lun);
+			zfcp_erp_unit_shutdown(unit, 0);
+			retval = -EINVAL;
+		}
+		goto no_fit;
+	}
+
+	/* set length of FCP data length in FCP_CMND IU in QTCB */
+	zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
+
+	ZFCP_LOG_DEBUG("Sending SCSI command:\n");
+	ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+		      (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+
+	/*
+	 * start QDIO request for this FSF request
+	 *  covered by an SBALE)
+	 */
+	retval = zfcp_fsf_req_send(fsf_req, timer);
+	if (unlikely(retval < 0)) {
+		ZFCP_LOG_INFO("error: Could not send FCP command request "
+			      "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
+			      zfcp_get_busid_by_adapter(adapter),
+			      unit->port->wwpn,
+			      unit->fcp_lun);
+		goto send_failed;
+	}
+
+	ZFCP_LOG_TRACE("Send FCP Command initiated (adapter %s, "
+		       "port 0x%016Lx, unit 0x%016Lx)\n",
+		       zfcp_get_busid_by_adapter(adapter),
+		       unit->port->wwpn,
+		       unit->fcp_lun);
+	goto success;
+
+ send_failed:
+ no_fit:
+ failed_scsi_cmnd:
+	/* undo the SCSI<->FSF association before freeing the request */
+	zfcp_fsf_req_free(fsf_req);
+	fsf_req = NULL;
+	scsi_cmnd->host_scribble = NULL;
+ success:
+ failed_req_create:
+	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+	return retval;
+}
+
+/*
+ * function:    zfcp_fsf_send_fcp_command_task_management
+ *
+ * purpose:	sends an FCP command carrying a task management function
+ *		(tm_flags) to the given unit; starts the SCSI ER timer
+ *		before sending and cancels it again if the send fails
+ *
+ * returns:	address of fsf_req - request successfully initiated
+ *		NULL               - request could not be created or sent
+ *
+ * FIXME(design): should be watched by a timeout!!!
+ * FIXME(design) shouldn't this be modified to return an int
+ * also...don't know how though
+ *
+ */
+struct zfcp_fsf_req *
+zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
+					  struct zfcp_unit *unit,
+					  u8 tm_flags, int req_flags)
+{
+	struct zfcp_fsf_req *fsf_req = NULL;
+	int retval = 0;
+	struct fcp_cmnd_iu *fcp_cmnd_iu;
+	unsigned long lock_flags;
+	volatile struct qdio_buffer_element *sbale;
+
+	/* setup new FSF request (takes the request queue lock, released
+	 * at the "out" label below) */
+	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
+				     adapter->pool.fsf_req_scsi,
+				     &lock_flags, &fsf_req);
+	if (retval < 0) {
+		ZFCP_LOG_INFO("error: Could not create FCP command (task "
+			      "management) request for adapter %s, port "
+			      " 0x%016Lx, unit 0x%016Lx.\n",
+			      zfcp_get_busid_by_adapter(adapter),
+			      unit->port->wwpn, unit->fcp_lun);
+		goto out;
+	}
+
+	/*
+	 * Used to decide on proper handler in the return path,
+	 * could be either zfcp_fsf_send_fcp_command_task_handler or
+	 * zfcp_fsf_send_fcp_command_task_management_handler */
+
+	fsf_req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
+
+	/*
+	 * hold a pointer to the unit being target of this
+	 * task management request
+	 */
+	fsf_req->data.send_fcp_command_task_management.unit = unit;
+
+	/* set FSF related fields in QTCB */
+	fsf_req->qtcb->header.lun_handle = unit->handle;
+	fsf_req->qtcb->header.port_handle = unit->port->handle;
+	fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
+	fsf_req->qtcb->bottom.io.service_class = adapter->fc_service_class;
+	fsf_req->qtcb->bottom.io.fcp_cmnd_length =
+		sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t);
+
+	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
+	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+	/* set FCP related fields in FCP_CMND IU in QTCB */
+	fcp_cmnd_iu = (struct fcp_cmnd_iu *)
+		&(fsf_req->qtcb->bottom.io.fcp_cmnd);
+	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
+	fcp_cmnd_iu->task_management_flags = tm_flags;
+
+	/* start QDIO request for this FSF request */
+	zfcp_fsf_start_scsi_er_timer(adapter);
+	retval = zfcp_fsf_req_send(fsf_req, NULL);
+	if (retval) {
+		/* send failed: cancel the ER timer started above */
+		del_timer(&adapter->scsi_er_timer);
+		ZFCP_LOG_INFO("error: Could not send an FCP-command (task "
+			      "management) on adapter %s, port 0x%016Lx for "
+			      "unit LUN 0x%016Lx\n",
+			      zfcp_get_busid_by_adapter(adapter),
+			      unit->port->wwpn,
+			      unit->fcp_lun);
+		zfcp_fsf_req_free(fsf_req);
+		fsf_req = NULL;
+		goto out;
+	}
+
+	ZFCP_LOG_TRACE("Send FCP Command (task management function) initiated "
+		       "(adapter %s, port 0x%016Lx, unit 0x%016Lx, "
+		       "tm_flags=0x%x)\n",
+		       zfcp_get_busid_by_adapter(adapter),
+		       unit->port->wwpn,
+		       unit->fcp_lun,
+		       tm_flags);
+ out:
+	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+	return fsf_req;
+}
+
+/*
+ * function:    zfcp_fsf_send_fcp_command_handler
+ *
+ * purpose:	is called for finished Send FCP Command; evaluates the FSF
+ *		status in the QTCB, triggers error recovery where needed,
+ *		then dispatches to the task or task-management sub-handler
+ *		depending on ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT
+ *
+ * returns:	return value of the invoked sub-handler
+ */
+static int
+zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = -EINVAL;
+	struct zfcp_unit *unit;
+	struct fsf_qtcb_header *header;
+	u16 subtable, rule, counter;
+
+	header = &fsf_req->qtcb->header;
+
+	/* the unit pointer lives in a different union member depending on
+	 * whether this was a task management request */
+	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
+		unit = fsf_req->data.send_fcp_command_task_management.unit;
+	else
+		unit = fsf_req->data.send_fcp_command_task.unit;
+
+	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		/* go directly to calls of special handlers */
+		goto skip_fsfstatus;
+	}
+
+	/* evaluate FSF status in QTCB */
+	switch (header->fsf_status) {
+
+	case FSF_PORT_HANDLE_NOT_VALID:
+		ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
+		ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
+			      "0x%016Lx on adapter %s invalid\n",
+			      unit->port->handle,
+			      unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      (char *) &header->fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(fsf_req->adapter->erp_dbf, 1,
+				 "fsf_s_phand_nv");
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_LUN_HANDLE_NOT_VALID:
+		ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
+		ZFCP_LOG_INFO("Temporary LUN identifier 0x%x for unit "
+			      "0x%016Lx on port 0x%016Lx on adapter %s is "
+			      "invalid. This may happen occasionally.\n",
+			      unit->handle,
+			      unit->fcp_lun,
+			      unit->port->wwpn,
+			      zfcp_get_busid_by_unit(unit));
+		ZFCP_LOG_NORMAL("Status qualifier data:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
+			      (char *) &header->fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(fsf_req->adapter->erp_dbf, 1,
+				 "fsf_s_uhand_nv");
+		zfcp_erp_port_reopen(unit->port, 0);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_HANDLE_MISMATCH:
+		ZFCP_LOG_FLAGS(0, "FSF_HANDLE_MISMATCH\n");
+		ZFCP_LOG_NORMAL("bug: The port handle 0x%x has changed "
+				"unexpectedly. (adapter %s, port 0x%016Lx, "
+				"unit 0x%016Lx)\n",
+				unit->port->handle,
+				zfcp_get_busid_by_unit(unit),
+				unit->port->wwpn,
+				unit->fcp_lun);
+		ZFCP_LOG_NORMAL("status qualifier:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
+			      (char *) &header->fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(fsf_req->adapter->erp_dbf, 1,
+				 "fsf_s_hand_mis");
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+		zfcp_cmd_dbf_event_fsf("handmism",
+				       fsf_req,
+				       &header->fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+		ZFCP_LOG_FLAGS(0, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
+		if (fsf_req->adapter->fc_service_class <= 3) {
+			ZFCP_LOG_NORMAL("error: The adapter %s does "
+					"not support fibrechannel class %d.\n",
+					zfcp_get_busid_by_unit(unit),
+					fsf_req->adapter->fc_service_class);
+		} else {
+			ZFCP_LOG_NORMAL("bug: The fibrechannel class at "
+					"adapter %s is invalid. "
+					"(debug info %d)\n",
+					zfcp_get_busid_by_unit(unit),
+					fsf_req->adapter->fc_service_class);
+		}
+		/* stop operation for this adapter */
+		debug_text_exception(fsf_req->adapter->erp_dbf, 0,
+				     "fsf_s_class_nsup");
+		zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
+		zfcp_cmd_dbf_event_fsf("unsclass",
+				       fsf_req,
+				       &header->fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_FCPLUN_NOT_VALID:
+		ZFCP_LOG_FLAGS(0, "FSF_FCPLUN_NOT_VALID\n");
+		ZFCP_LOG_NORMAL("bug: unit 0x%016Lx on port 0x%016Lx on "
+				"adapter %s does not have correct unit "
+				"handle 0x%x\n",
+				unit->fcp_lun,
+				unit->port->wwpn,
+				zfcp_get_busid_by_unit(unit),
+				unit->handle);
+		ZFCP_LOG_DEBUG("status qualifier:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      (char *) &header->fsf_status_qual,
+			      sizeof (union fsf_status_qual));
+		debug_text_event(fsf_req->adapter->erp_dbf, 1,
+				 "fsf_s_fcp_lun_nv");
+		zfcp_erp_port_reopen(unit->port, 0);
+		zfcp_cmd_dbf_event_fsf("fluninv",
+				       fsf_req,
+				       &header->fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_ACCESS_DENIED:
+		ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
+		ZFCP_LOG_NORMAL("Access denied, cannot send FCP command to "
+				"unit 0x%016Lx on port 0x%016Lx on "
+				"adapter %s\n",	unit->fcp_lun, unit->port->wwpn,
+				zfcp_get_busid_by_unit(unit));
+		/* the status qualifier carries up to two CFDC subtable/rule
+		 * pairs identifying the access control rule that denied us */
+		for (counter = 0; counter < 2; counter++) {
+			subtable = header->fsf_status_qual.halfword[counter * 2];
+			rule = header->fsf_status_qual.halfword[counter * 2 + 1];
+			switch (subtable) {
+			case FSF_SQ_CFDC_SUBTABLE_OS:
+			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
+			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
+			case FSF_SQ_CFDC_SUBTABLE_LUN:
+				ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
+					zfcp_act_subtable_type[subtable], rule);
+				break;
+			}
+		}
+		debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
+		zfcp_erp_unit_access_denied(unit);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_DIRECTION_INDICATOR_NOT_VALID:
+		ZFCP_LOG_FLAGS(0, "FSF_DIRECTION_INDICATOR_NOT_VALID\n");
+		ZFCP_LOG_INFO("bug: Invalid data direction given for unit "
+			      "0x%016Lx on port 0x%016Lx on adapter %s "
+			      "(debug info %d)\n",
+			      unit->fcp_lun,
+			      unit->port->wwpn,
+			      zfcp_get_busid_by_unit(unit),
+			      fsf_req->qtcb->bottom.io.data_direction);
+		/* stop operation for this adapter */
+		debug_text_event(fsf_req->adapter->erp_dbf, 0,
+				 "fsf_s_dir_ind_nv");
+		zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
+		zfcp_cmd_dbf_event_fsf("dirinv",
+				       fsf_req,
+				       &header->fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_CMND_LENGTH_NOT_VALID:
+		ZFCP_LOG_FLAGS(0, "FSF_CMND_LENGTH_NOT_VALID\n");
+		ZFCP_LOG_NORMAL
+		    ("bug: An invalid control-data-block length field "
+		     "was found in a command for unit 0x%016Lx on port "
+		     "0x%016Lx on adapter %s " "(debug info %d)\n",
+		     unit->fcp_lun, unit->port->wwpn,
+		     zfcp_get_busid_by_unit(unit),
+		     fsf_req->qtcb->bottom.io.fcp_cmnd_length);
+		/* stop operation for this adapter */
+		debug_text_event(fsf_req->adapter->erp_dbf, 0,
+				 "fsf_s_cmd_len_nv");
+		zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
+		zfcp_cmd_dbf_event_fsf("cleninv",
+				       fsf_req,
+				       &header->fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+
+	case FSF_PORT_BOXED:
+		ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
+		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
+			       "needs to be reopened\n",
+			       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
+		debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_pboxed");
+		zfcp_erp_port_reopen(unit->port, 0);
+		zfcp_cmd_dbf_event_fsf("portbox", fsf_req,
+				       &header->fsf_status_qual,
+				       sizeof (union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			ZFCP_STATUS_FSFREQ_RETRY;
+		break;
+
+	case FSF_LUN_BOXED:
+		ZFCP_LOG_FLAGS(0, "FSF_LUN_BOXED\n");
+		ZFCP_LOG_NORMAL("unit needs to be reopened (adapter %s, "
+				"wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
+				zfcp_get_busid_by_unit(unit),
+				unit->port->wwpn, unit->fcp_lun);
+		debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed");
+		zfcp_erp_unit_reopen(unit, 0);
+		zfcp_cmd_dbf_event_fsf("unitbox", fsf_req,
+				       &header->fsf_status_qual,
+				       sizeof(union fsf_status_qual));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
+			| ZFCP_STATUS_FSFREQ_RETRY;
+		break;
+
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			ZFCP_LOG_FLAGS(2,
+				       "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
+			/* re-establish link to port */
+			debug_text_event(fsf_req->adapter->erp_dbf, 1,
+					 "fsf_sq_ltest");
+			zfcp_erp_port_reopen(unit->port, 0);
+			zfcp_cmd_dbf_event_fsf(
+				"sqltest",
+				fsf_req,
+				&header->fsf_status_qual,
+				sizeof (union fsf_status_qual));
+			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			ZFCP_LOG_FLAGS(3,
+				       "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
+			/* FIXME(hw) need proper specs for proper action */
+			/* let scsi stack deal with retries and escalation */
+			debug_text_event(fsf_req->adapter->erp_dbf, 1,
+					 "fsf_sq_ulp");
+			zfcp_cmd_dbf_event_fsf(
+				"sqdeperp",
+				fsf_req,
+				&header->fsf_status_qual,
+				sizeof (union fsf_status_qual));
+			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		default:
+			/* FIXME: shall we consider this a successful transfer? */
+			ZFCP_LOG_NORMAL
+			    ("bug: Wrong status qualifier 0x%x arrived.\n",
+			     header->fsf_status_qual.word[0]);
+			debug_text_event(fsf_req->adapter->erp_dbf, 0,
+					 "fsf_sq_inval:");
+			debug_exception(fsf_req->adapter->erp_dbf, 0,
+					&header->fsf_status_qual.word[0],
+					sizeof(u32));
+			break;
+		}
+		break;
+
+	case FSF_GOOD:
+		ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
+		break;
+
+	case FSF_FCP_RSP_AVAILABLE:
+		ZFCP_LOG_FLAGS(2, "FSF_FCP_RSP_AVAILABLE\n");
+		break;
+
+	default:
+		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
+		debug_exception(fsf_req->adapter->erp_dbf, 0,
+				&header->fsf_status, sizeof(u32));
+		break;
+	}
+
+ skip_fsfstatus:
+	/* dispatch to the matching sub-handler; it produces the return
+	 * value (FSF_GOOD/FSF_FCP_RSP_AVAILABLE fall through here too) */
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) {
+		retval =
+		    zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
+	} else {
+		retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
+	}
+	return retval;
+}
+
+/*
+ * function: zfcp_fsf_send_fcp_command_task_handler
+ *
+ * purpose: evaluates FCP_RSP IU
+ *
+ * returns:
+ */
+static int
+zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = 0;
+	struct scsi_cmnd *scpnt;
+	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
+	    &(fsf_req->qtcb->bottom.io.fcp_rsp);
+	struct fcp_cmnd_iu *fcp_cmnd_iu = (struct fcp_cmnd_iu *)
+	    &(fsf_req->qtcb->bottom.io.fcp_cmnd);
+	u32 sns_len;
+	char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
+	unsigned long flags;
+	struct zfcp_unit *unit = fsf_req->data.send_fcp_command_task.unit;
+
+	/* hold abort_lock until scsi_done has been called so that an
+	 * abort cannot complete this command concurrently */
+	read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
+	scpnt = fsf_req->data.send_fcp_command_task.scsi_cmnd;
+	if (unlikely(!scpnt)) {
+		ZFCP_LOG_DEBUG
+		    ("Command with fsf_req %p is not associated to "
+		     "a scsi command anymore. Aborted?\n", fsf_req);
+		goto out;
+	}
+	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
+		/* FIXME: (design) mid-layer should handle DID_ABORT like
+		 * DID_SOFT_ERROR by retrying the request for devices
+		 * that allow retries.
+		 */
+		ZFCP_LOG_DEBUG("Setting DID_SOFT_ERROR and SUGGEST_RETRY\n");
+		set_host_byte(&scpnt->result, DID_SOFT_ERROR);
+		set_driver_byte(&scpnt->result, SUGGEST_RETRY);
+		goto skip_fsfstatus;
+	}
+
+	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		ZFCP_LOG_DEBUG("Setting DID_ERROR\n");
+		set_host_byte(&scpnt->result, DID_ERROR);
+		goto skip_fsfstatus;
+	}
+
+	/* set message byte of result in SCSI command */
+	scpnt->result |= COMMAND_COMPLETE << 8;
+
+	/*
+	 * copy SCSI status code of FCP_STATUS of FCP_RSP IU to status byte
+	 * of result in SCSI command
+	 */
+	scpnt->result |= fcp_rsp_iu->scsi_status;
+	if (unlikely(fcp_rsp_iu->scsi_status)) {
+		/* DEBUG */
+		ZFCP_LOG_DEBUG("status for SCSI Command:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      scpnt->cmnd, scpnt->cmd_len);
+		ZFCP_LOG_DEBUG("SCSI status code 0x%x\n",
+				fcp_rsp_iu->scsi_status);
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      (void *) fcp_rsp_iu, sizeof (struct fcp_rsp_iu));
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+			      zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu),
+			      fcp_rsp_iu->fcp_sns_len);
+	}
+
+	/* check FCP_RSP_INFO */
+	if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
+		ZFCP_LOG_DEBUG("rsp_len is valid\n");
+		switch (fcp_rsp_info[3]) {
+		case RSP_CODE_GOOD:
+			ZFCP_LOG_FLAGS(3, "RSP_CODE_GOOD\n");
+			/* ok, continue */
+			ZFCP_LOG_TRACE("no failure or Task Management "
+				       "Function complete\n");
+			set_host_byte(&scpnt->result, DID_OK);
+			break;
+		case RSP_CODE_LENGTH_MISMATCH:
+			ZFCP_LOG_FLAGS(0, "RSP_CODE_LENGTH_MISMATCH\n");
+			/* hardware bug */
+			ZFCP_LOG_NORMAL("bug: FCP response code indicates "
+					"that the fibrechannel protocol data "
+					"length differs from the burst length. "
+					"The problem occurred on unit 0x%016Lx "
+					"on port 0x%016Lx on adapter %s",
+					unit->fcp_lun,
+					unit->port->wwpn,
+					zfcp_get_busid_by_unit(unit));
+			/* dump SCSI CDB as prepared by zfcp */
+			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+				      (char *) &fsf_req->qtcb->
+				      bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
+			zfcp_cmd_dbf_event_fsf("clenmis", fsf_req, NULL, 0);
+			set_host_byte(&scpnt->result, DID_ERROR);
+			goto skip_fsfstatus;
+		case RSP_CODE_FIELD_INVALID:
+			ZFCP_LOG_FLAGS(0, "RSP_CODE_FIELD_INVALID\n");
+			/* driver or hardware bug */
+			ZFCP_LOG_NORMAL("bug: FCP response code indicates "
+					"that the fibrechannel protocol data "
+					"fields were incorrectly set up. "
+					"The problem occurred on the unit "
+					"0x%016Lx on port 0x%016Lx on "
+					"adapter %s",
+					unit->fcp_lun,
+					unit->port->wwpn,
+					zfcp_get_busid_by_unit(unit));
+			/* dump SCSI CDB as prepared by zfcp */
+			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+				      (char *) &fsf_req->qtcb->
+				      bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
+			set_host_byte(&scpnt->result, DID_ERROR);
+			zfcp_cmd_dbf_event_fsf("codeinv", fsf_req, NULL, 0);
+			goto skip_fsfstatus;
+		case RSP_CODE_RO_MISMATCH:
+			ZFCP_LOG_FLAGS(0, "RSP_CODE_RO_MISMATCH\n");
+			/* hardware bug */
+			ZFCP_LOG_NORMAL("bug: The FCP response code indicates "
+					"that conflicting values for the "
+					"fibrechannel payload offset from the "
+					"header were found. "
+					"The problem occurred on unit 0x%016Lx "
+					"on port 0x%016Lx on adapter %s.\n",
+					unit->fcp_lun,
+					unit->port->wwpn,
+					zfcp_get_busid_by_unit(unit));
+			/* dump SCSI CDB as prepared by zfcp */
+			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+				      (char *) &fsf_req->qtcb->
+				      bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
+			zfcp_cmd_dbf_event_fsf("codemism", fsf_req, NULL, 0);
+			set_host_byte(&scpnt->result, DID_ERROR);
+			goto skip_fsfstatus;
+		default:
+			ZFCP_LOG_NORMAL("bug: An invalid FCP response "
+					"code was detected for a command. "
+					"The problem occurred on the unit "
+					"0x%016Lx on port 0x%016Lx on "
+					"adapter %s (debug info 0x%x)\n",
+					unit->fcp_lun,
+					unit->port->wwpn,
+					zfcp_get_busid_by_unit(unit),
+					fcp_rsp_info[3]);
+			/* dump SCSI CDB as prepared by zfcp */
+			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
+				      (char *) &fsf_req->qtcb->
+				      bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
+			zfcp_cmd_dbf_event_fsf("undeffcp", fsf_req, NULL, 0);
+			set_host_byte(&scpnt->result, DID_ERROR);
+		}
+	}
+
+	/* check for sense data */
+	if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
+		/* clamp sense length to what fits in both the QTCB and the
+		 * mid-layer sense buffer */
+		sns_len = FSF_FCP_RSP_SIZE -
+		    sizeof (struct fcp_rsp_iu) + fcp_rsp_iu->fcp_rsp_len;
+		ZFCP_LOG_TRACE("room for %i bytes sense data in QTCB\n",
+			       sns_len);
+		sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
+		ZFCP_LOG_TRACE("room for %i bytes sense data in SCSI command\n",
+			       SCSI_SENSE_BUFFERSIZE);
+		sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
+		ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n",
+			       scpnt->result);
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
+			      (void *) &scpnt->cmnd, scpnt->cmd_len);
+
+		ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
+			       fcp_rsp_iu->fcp_sns_len);
+		memcpy(&scpnt->sense_buffer,
+		       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
+			      (void *) &scpnt->sense_buffer, sns_len);
+	}
+
+	/* check for overrun */
+	if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_over)) {
+		ZFCP_LOG_INFO("A data overrun was detected for a command. "
+			      "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
+			      "The response data length is "
+			      "%d, the original length was %d.\n",
+			      unit->fcp_lun,
+			      unit->port->wwpn,
+			      zfcp_get_busid_by_unit(unit),
+			      fcp_rsp_iu->fcp_resid,
+			      (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
+	}
+
+	/* check for underrun */
+	if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
+		ZFCP_LOG_INFO("A data underrun was detected for a command. "
+			      "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
+			      "The response data length is "
+			      "%d, the original length was %d.\n",
+			      unit->fcp_lun,
+			      unit->port->wwpn,
+			      zfcp_get_busid_by_unit(unit),
+			      fcp_rsp_iu->fcp_resid,
+			      (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
+
+		scpnt->resid = fcp_rsp_iu->fcp_resid;
+		if (scpnt->request_bufflen - scpnt->resid < scpnt->underflow)
+			scpnt->result |= DID_ERROR << 16;
+	}
+
+ skip_fsfstatus:
+	ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
+
+	zfcp_cmd_dbf_event_scsi("response", scpnt);
+
+	/* cleanup pointer (need this especially for abort) */
+	scpnt->host_scribble = NULL;
+
+	/*
+	 * NOTE:
+	 * according to the outcome of a discussion on linux-scsi we
+	 * don't need to grab the io_request_lock here since we use
+	 * the new eh
+	 */
+	/* always call back */
+
+	(scpnt->scsi_done) (scpnt);
+
+	/*
+	 * We must hold this lock until scsi_done has been called.
+	 * Otherwise we may call scsi_done after abort regarding this
+	 * command has completed.
+	 * Note: scsi_done must not block!
+	 */
+ out:
+	read_unlock_irqrestore(&fsf_req->adapter->abort_lock, flags);
+	return retval;
+}
+
+/*
+ * function: zfcp_fsf_send_fcp_command_task_management_handler
+ *
+ * purpose: evaluates FCP_RSP IU
+ *
+ * returns:
+ */
+static int
+zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
+{
+	int retval = 0;
+	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
+	    &(fsf_req->qtcb->bottom.io.fcp_rsp);
+	char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
+	struct zfcp_unit *unit =
+	    fsf_req->data.send_fcp_command_task_management.unit;
+
+	/* task management function has completed, stop error recovery timer */
+	del_timer(&fsf_req->adapter->scsi_er_timer);
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
+		goto skip_fsfstatus;
+	}
+
+	/* check FCP_RSP_INFO */
+	switch (fcp_rsp_info[3]) {
+	case RSP_CODE_GOOD:
+		ZFCP_LOG_FLAGS(3, "RSP_CODE_GOOD\n");
+		/* ok, continue */
+		ZFCP_LOG_DEBUG("no failure or Task Management "
+			       "Function complete\n");
+		break;
+	case RSP_CODE_TASKMAN_UNSUPP:
+		ZFCP_LOG_FLAGS(0, "RSP_CODE_TASKMAN_UNSUPP\n");
+		ZFCP_LOG_NORMAL("bug: A requested task management function "
+				"is not supported on the target device "
+				"unit 0x%016Lx, port 0x%016Lx, adapter %s\n ",
+				unit->fcp_lun,
+				unit->port->wwpn,
+				zfcp_get_busid_by_unit(unit));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP;
+		break;
+	case RSP_CODE_TASKMAN_FAILED:
+		ZFCP_LOG_FLAGS(0, "RSP_CODE_TASKMAN_FAILED\n");
+		ZFCP_LOG_NORMAL("bug: A requested task management function "
+				"failed to complete successfully. "
+				"unit 0x%016Lx, port 0x%016Lx, adapter %s.\n",
+				unit->fcp_lun,
+				unit->port->wwpn,
+				zfcp_get_busid_by_unit(unit));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
+		break;
+	default:
+		ZFCP_LOG_NORMAL("bug: An invalid FCP response "
+				"code was detected for a command. "
+				"unit 0x%016Lx, port 0x%016Lx, adapter %s "
+				"(debug info 0x%x)\n",
+				unit->fcp_lun,
+				unit->port->wwpn,
+				zfcp_get_busid_by_unit(unit),
+				fcp_rsp_info[3]);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
+	}
+
+      skip_fsfstatus:
+	return retval;
+}
+
+
+/*
+ * function: zfcp_fsf_control_file
+ *
+ * purpose: Initiator of the control file upload/download FSF requests
+ *
+ * returns:     0           - FSF request is successfully created and queued
+ * -EOPNOTSUPP - The FCP adapter does not have Control File support
+ * -EINVAL - Invalid direction specified
+ * -ENOMEM - Insufficient memory
+ * -EPERM - Cannot create FSF request or place it in QDIO queue
+ */
+int
+zfcp_fsf_control_file(struct zfcp_adapter *adapter,
+                      struct zfcp_fsf_req **fsf_req_ptr,
+                      u32 fsf_command,
+                      u32 option,
+                      struct zfcp_sg_list *sg_list)
+{
+	struct zfcp_fsf_req *fsf_req;
+	struct fsf_qtcb_bottom_support *bottom;
+	volatile struct qdio_buffer_element *sbale;
+	struct timer_list *timer;
+	unsigned long lock_flags;
+	int req_flags = 0;
+	int direction;
+	int retval = 0;
+
+	/* CFDC operations require adapter support for this feature */
+	if (!(adapter->supported_features & FSF_FEATURE_CFDC)) {
+		ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		retval = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/* derive SBAL data direction from the FSF command code */
+	switch (fsf_command) {
+
+	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+		direction = SBAL_FLAGS0_TYPE_WRITE;
+		if ((option != FSF_CFDC_OPTION_FULL_ACCESS) &&
+		    (option != FSF_CFDC_OPTION_RESTRICTED_ACCESS))
+			req_flags = ZFCP_WAIT_FOR_SBAL;
+		break;
+
+	case FSF_QTCB_UPLOAD_CONTROL_FILE:
+		direction = SBAL_FLAGS0_TYPE_READ;
+		break;
+
+	default:
+		ZFCP_LOG_INFO("Invalid FSF command code 0x%08x\n", fsf_command);
+		retval = -EINVAL;
+		goto out;
+	}
+
+	timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
+	if (!timer) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	/* on success zfcp_fsf_req_create returns with queue_lock held */
+	retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags,
+				     NULL, &lock_flags, &fsf_req);
+	if (retval < 0) {
+		ZFCP_LOG_INFO("error: Could not create FSF request for the "
+			      "adapter %s\n",
+			zfcp_get_busid_by_adapter(adapter));
+		retval = -EPERM;
+		goto unlock_queue_lock;
+	}
+
+	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale[0].flags |= direction;
+
+	bottom = &fsf_req->qtcb->bottom.support;
+	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
+	bottom->option = option;
+
+	if (sg_list->count > 0) {
+		int bytes;
+
+		/* map user buffer into SBALs; a full-size control file
+		 * is expected, anything less is treated as failure */
+		bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction,
+						sg_list->sg, sg_list->count,
+						ZFCP_MAX_SBALS_PER_REQ);
+		if (bytes != ZFCP_CFDC_MAX_CONTROL_FILE_SIZE) {
+			ZFCP_LOG_INFO(
+				"error: Could not create sufficient number of "
+				"SBALS for an FSF request to the adapter %s\n",
+				zfcp_get_busid_by_adapter(adapter));
+			retval = -ENOMEM;
+			goto free_fsf_req;
+		}
+	} else
+		sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+	init_timer(timer);
+	timer->function = zfcp_fsf_request_timeout_handler;
+	timer->data = (unsigned long) adapter;
+	timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
+
+	retval = zfcp_fsf_req_send(fsf_req, timer);
+	if (retval < 0) {
+		ZFCP_LOG_INFO("initiation of cfdc up/download failed"
+			      "(adapter %s)\n",
+			      zfcp_get_busid_by_adapter(adapter));
+		retval = -EPERM;
+		goto free_fsf_req;
+	}
+	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+
+	ZFCP_LOG_NORMAL("Control file %s FSF request has been sent to the "
+			"adapter %s\n",
+			fsf_command == FSF_QTCB_DOWNLOAD_CONTROL_FILE ?
+			"download" : "upload",
+			zfcp_get_busid_by_adapter(adapter));
+
+	/* synchronous operation: block until the request completed */
+	wait_event(fsf_req->completion_wq,
+	           fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+
+	/* caller owns the completed fsf_req and must clean it up */
+	*fsf_req_ptr = fsf_req;
+	del_timer_sync(timer);
+	goto free_timer;
+
+ free_fsf_req:
+	zfcp_fsf_req_free(fsf_req);
+ unlock_queue_lock:
+	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+ free_timer:
+	kfree(timer);
+ out:
+	return retval;
+}
+
+
+/*
+ * function: zfcp_fsf_control_file_handler
+ *
+ * purpose: Handler of the control file upload/download FSF requests
+ *
+ * returns:     0       - FSF request successfully processed
+ * -EAGAIN - Operation has to be repeated because of a temporary problem
+ * -EACCES - There is no permission to execute an operation
+ * -EPERM - The control file is not in a right format
+ * -EIO - There is a problem with the FCP adapter
+ * -EINVAL - Invalid operation
+ * -EFAULT - User space memory I/O operation fault
+ */
+static int
+zfcp_fsf_control_file_handler(struct zfcp_fsf_req *fsf_req)
+{
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	struct fsf_qtcb_header *header = &fsf_req->qtcb->header;
+	struct fsf_qtcb_bottom_support *bottom = &fsf_req->qtcb->bottom.support;
+	int retval = 0;
+
+	/* request already failed on a lower level, nothing more to evaluate */
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+		retval = -EINVAL;
+		goto skip_fsfstatus;
+	}
+
+	/* map each FSF status to a log message and an errno-style retval */
+	switch (header->fsf_status) {
+
+	case FSF_GOOD:
+		ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
+		ZFCP_LOG_NORMAL(
+			"The FSF request has been successfully completed "
+			"on the adapter %s\n",
+			zfcp_get_busid_by_adapter(adapter));
+		break;
+
+	case FSF_OPERATION_PARTIALLY_SUCCESSFUL:
+		ZFCP_LOG_FLAGS(2, "FSF_OPERATION_PARTIALLY_SUCCESSFUL\n");
+		if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) {
+			switch (header->fsf_status_qual.word[0]) {
+
+			case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE:
+				ZFCP_LOG_NORMAL(
+					"CFDC of the adapter %s could not "
+					"be saved on the SE\n",
+					zfcp_get_busid_by_adapter(adapter));
+				break;
+
+			case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2:
+				ZFCP_LOG_NORMAL(
+					"CFDC of the adapter %s could not "
+					"be copied to the secondary SE\n",
+					zfcp_get_busid_by_adapter(adapter));
+				break;
+
+			default:
+				ZFCP_LOG_NORMAL(
+					"CFDC could not be hardened "
+					"on the adapter %s\n",
+					zfcp_get_busid_by_adapter(adapter));
+			}
+		}
+		/* partial success is reported as retryable failure */
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EAGAIN;
+		break;
+
+	case FSF_AUTHORIZATION_FAILURE:
+		ZFCP_LOG_FLAGS(2, "FSF_AUTHORIZATION_FAILURE\n");
+		ZFCP_LOG_NORMAL(
+			"Adapter %s does not accept privileged commands\n",
+			zfcp_get_busid_by_adapter(adapter));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EACCES;
+		break;
+
+	case FSF_CFDC_ERROR_DETECTED:
+		ZFCP_LOG_FLAGS(2, "FSF_CFDC_ERROR_DETECTED\n");
+		ZFCP_LOG_NORMAL(
+			"Error at position %d in the CFDC, "
+			"CFDC is discarded by the adapter %s\n",
+			header->fsf_status_qual.word[0],
+			zfcp_get_busid_by_adapter(adapter));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EPERM;
+		break;
+
+	case FSF_CONTROL_FILE_UPDATE_ERROR:
+		ZFCP_LOG_FLAGS(2, "FSF_CONTROL_FILE_UPDATE_ERROR\n");
+		ZFCP_LOG_NORMAL(
+			"Adapter %s cannot harden the control file, "
+			"file is discarded\n",
+			zfcp_get_busid_by_adapter(adapter));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EIO;
+		break;
+
+	case FSF_CONTROL_FILE_TOO_LARGE:
+		ZFCP_LOG_FLAGS(2, "FSF_CONTROL_FILE_TOO_LARGE\n");
+		ZFCP_LOG_NORMAL(
+			"Control file is too large, file is discarded "
+			"by the adapter %s\n",
+			zfcp_get_busid_by_adapter(adapter));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EIO;
+		break;
+
+	case FSF_ACCESS_CONFLICT_DETECTED:
+		ZFCP_LOG_FLAGS(2, "FSF_ACCESS_CONFLICT_DETECTED\n");
+		if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE)
+			ZFCP_LOG_NORMAL(
+				"CFDC has been discarded by the adapter %s, "
+				"because activation would impact "
+				"%d active connection(s)\n",
+				zfcp_get_busid_by_adapter(adapter),
+				header->fsf_status_qual.word[0]);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EIO;
+		break;
+
+	case FSF_CONFLICTS_OVERRULED:
+		ZFCP_LOG_FLAGS(2, "FSF_CONFLICTS_OVERRULED\n");
+		if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE)
+			ZFCP_LOG_NORMAL(
+				"CFDC has been activated on the adapter %s, "
+				"but activation has impacted "
+				"%d active connection(s)\n",
+				zfcp_get_busid_by_adapter(adapter),
+				header->fsf_status_qual.word[0]);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EIO;
+		break;
+
+	case FSF_UNKNOWN_OP_SUBTYPE:
+		ZFCP_LOG_FLAGS(2, "FSF_UNKNOWN_OP_SUBTYPE\n");
+		ZFCP_LOG_NORMAL("unknown operation subtype (adapter: %s, "
+				"op_subtype=0x%x)\n",
+				zfcp_get_busid_by_adapter(adapter),
+				bottom->operation_subtype);
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EINVAL;
+		break;
+
+	case FSF_INVALID_COMMAND_OPTION:
+		ZFCP_LOG_FLAGS(2, "FSF_INVALID_COMMAND_OPTION\n");
+		ZFCP_LOG_NORMAL(
+			"Invalid option 0x%x has been specified "
+			"in QTCB bottom sent to the adapter %s\n",
+			bottom->option,
+			zfcp_get_busid_by_adapter(adapter));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EINVAL;
+		break;
+
+	default:
+		ZFCP_LOG_NORMAL(
+			"bug: An unknown/unexpected FSF status 0x%08x "
+			"was presented on the adapter %s\n",
+			header->fsf_status,
+			zfcp_get_busid_by_adapter(adapter));
+		debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval");
+		debug_exception(fsf_req->adapter->erp_dbf, 0,
+			&header->fsf_status_qual.word[0], sizeof(u32));
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		retval = -EINVAL;
+		break;
+	}
+
+skip_fsfstatus:
+	return retval;
+}
+
+
+/*
+ * function: zfcp_fsf_req_wait_and_cleanup
+ *
+ * purpose:
+ *
+ * FIXME(design): signal seems to be <0 !!!
+ * returns: 0 - request completed (*status is valid), cleanup succ.
+ * <0 - request completed (*status is valid), cleanup failed
+ * >0 - signal which interrupted waiting (*status invalid),
+ * request not completed, no cleanup
+ *
+ * *status is a copy of status of completed fsf_req
+ */
+int
+zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req,
+			      int interruptible, u32 * status)
+{
+	int retval = 0;
+	int signal = 0;
+
+	if (interruptible) {
+		__wait_event_interruptible(fsf_req->completion_wq,
+					   fsf_req->status &
+					   ZFCP_STATUS_FSFREQ_COMPLETED,
+					   signal);
+		if (signal) {
+			/* interrupted: return signal value to caller;
+			 * the request is NOT completed and NOT cleaned up,
+			 * *status is not written in this case */
+			ZFCP_LOG_DEBUG("Caught signal %i while waiting for the "
+				       "completion of the request at %p\n",
+				       signal, fsf_req);
+			retval = signal;
+			goto out;
+		}
+	} else {
+		__wait_event(fsf_req->completion_wq,
+			     fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+	}
+
+	/* snapshot final request status for the caller */
+	*status = fsf_req->status;
+
+	/* cleanup request */
+	zfcp_fsf_req_cleanup(fsf_req);
+ out:
+	return retval;
+}
+
+/*
+ * Check whether 'needed' SBALs are free in the request queue.
+ * Returns 1 with queue->queue_lock held (write, irqsave) on success;
+ * returns 0 with the lock released otherwise.
+ */
+static inline int
+zfcp_fsf_req_sbal_check(unsigned long *flags,
+			struct zfcp_qdio_queue *queue, int needed)
+{
+	write_lock_irqsave(&queue->queue_lock, *flags);
+	if (likely(atomic_read(&queue->free_count) >= needed))
+		return 1;
+	write_unlock_irqrestore(&queue->queue_lock, *flags);
+	return 0;
+}
+
+/*
+ * set qtcb pointer in fsf_req and initialize QTCB
+ */
+static inline void
+zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req, u32 fsf_cmd)
+{
+	/* requests without a QTCB (e.g. unsolicited status) are skipped */
+	if (likely(fsf_req->qtcb != NULL)) {
+		/* the fsf_req address doubles as request id/handle so the
+		 * response can be matched back to this request */
+		fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
+		fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
+		fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
+		fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
+		fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
+		fsf_req->qtcb->header.fsf_command = fsf_cmd;
+	}
+}
+
+/**
+ * zfcp_fsf_req_sbal_get - try to get one SBAL in the request queue
+ * @adapter: adapter for which request queue is examined
+ * @req_flags: flags indicating whether to wait for needed SBAL or not
+ * @lock_flags: lock_flags if queue_lock is taken
+ * Return: 0 on success, otherwise -EIO, or -ERESTARTSYS
+ * Locks: lock adapter->request_queue->queue_lock on success
+ */
+static int
+zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags,
+		      unsigned long *lock_flags)
+{
+	long ret;
+	struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
+
+	if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
+		/* sleep until an SBAL becomes free, a signal arrives
+		 * (ret < 0) or the timeout expires (ret == 0) */
+		ret = wait_event_interruptible_timeout(adapter->request_wq,
+			zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1),
+						       ZFCP_SBAL_TIMEOUT);
+		if (ret < 0)
+			return ret;
+		if (!ret)
+			return -EIO;
+	} else if (!zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1))
+		return -EIO;
+
+	/* success: queue_lock is held (taken by zfcp_fsf_req_sbal_check) */
+	return 0;
+}
+
+/*
+ * function: zfcp_fsf_req_create
+ *
+ * purpose: create an FSF request at the specified adapter and
+ * setup common fields
+ *
+ * returns: -ENOMEM if there was insufficient memory for a request
+ *		-EIO if no qdio buffers could be allocated to the request
+ * -EINVAL/-EPERM on bug conditions in req_dequeue
+ * 0 in success
+ *
+ * note: The created request is returned by reference.
+ *
+ * locks: lock of concerned request queue must not be held,
+ * but is held on completion (write, irqsave)
+ */
+int
+zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
+		    mempool_t *pool, unsigned long *lock_flags,
+		    struct zfcp_fsf_req **fsf_req_p)
+{
+	volatile struct qdio_buffer_element *sbale;
+	struct zfcp_fsf_req *fsf_req = NULL;
+	int ret = 0;
+	struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
+
+	/* allocate new FSF request */
+	fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
+	if (unlikely(NULL == fsf_req)) {
+		ZFCP_LOG_DEBUG("error: Could not put an FSF request into"
+			       "the outbound (send) queue.\n");
+		ret = -ENOMEM;
+		goto failed_fsf_req;
+	}
+
+	zfcp_fsf_req_qtcb_init(fsf_req, fsf_cmd);
+
+	/* initialize waitqueue which may be used to wait on
+	   this request completion */
+	init_waitqueue_head(&fsf_req->completion_wq);
+
+	/* acquires queue_lock on success (released again on error paths) */
+	ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags);
+	if(ret < 0) {
+		goto failed_sbals;
+	}
+
+	/*
+	 * We hold queue_lock here. Check if QDIOUP is set and let request fail
+	 * if it is not set (see also *_open_qdio and *_close_qdio).
+	 */
+
+	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
+		write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
+		ret = -EIO;
+		goto failed_sbals;
+	}
+
+	fsf_req->adapter = adapter;	/* pointer to "parent" adapter */
+	fsf_req->fsf_command = fsf_cmd;
+	fsf_req->sbal_number = 1;
+	fsf_req->sbal_first = req_queue->free_index;
+	fsf_req->sbal_curr = req_queue->free_index;
+	/* SBALE 0 is used for the command itself, payload starts at 1 */
+	fsf_req->sbale_curr = 1;
+
+	if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) {
+		fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	}
+
+	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+
+	/* setup common SBALE fields */
+	sbale[0].addr = fsf_req;
+	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
+	if (likely(fsf_req->qtcb != NULL)) {
+		sbale[1].addr = (void *) fsf_req->qtcb;
+		sbale[1].length = sizeof(struct fsf_qtcb);
+	}
+
+	ZFCP_LOG_TRACE("got %i free BUFFERs starting at index %i\n",
+		       fsf_req->sbal_number, fsf_req->sbal_first);
+
+	goto success;
+
+ failed_sbals:
+/* dequeue new FSF request previously enqueued */
+	zfcp_fsf_req_free(fsf_req);
+	fsf_req = NULL;
+
+ failed_fsf_req:
+	/* re-take the lock so that this function ALWAYS returns with
+	 * queue_lock held, regardless of success or failure */
+	write_lock_irqsave(&req_queue->queue_lock, *lock_flags);
+ success:
+	*fsf_req_p = fsf_req;
+	return ret;
+}
+
+/*
+ * function: zfcp_fsf_req_send
+ *
+ * purpose: start transfer of FSF request via QDIO
+ *
+ * returns:	0 - request transfer successfully started
+ * !0 - start of request transfer failed
+ */
+static int
+zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
+{
+	struct zfcp_adapter *adapter;
+	struct zfcp_qdio_queue *req_queue;
+	volatile struct qdio_buffer_element *sbale;
+	int new_distance_from_int;
+	unsigned long flags;
+	int inc_seq_no = 1;
+	int retval = 0;
+
+	adapter = fsf_req->adapter;
+	/* fixed: was terminated by a stray ',' (comma operator), which made
+	 * this statement silently continue into the sbale assignment below */
+	req_queue = &adapter->request_queue;
+
+	/* FIXME(debug): remove it later */
+	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_first, 0);
+	ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags);
+	ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n");
+	ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
+		      sbale[1].length);
+
+	/* set sequence counter in QTCB */
+	if (likely(fsf_req->qtcb)) {
+		fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
+		fsf_req->seq_no = adapter->fsf_req_seq_no;
+		ZFCP_LOG_TRACE("FSF request %p of adapter %s gets "
+			       "FSF sequence counter value of %i\n",
+			       fsf_req,
+			       zfcp_get_busid_by_adapter(adapter),
+			       fsf_req->qtcb->prefix.req_seq_no);
+	} else
+		inc_seq_no = 0;
+
+	/* put allocated FSF request at list tail */
+	write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
+	list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
+	write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
+
+	/* figure out expiration time of timeout and start timeout */
+	if (unlikely(timer)) {
+		timer->expires += jiffies;
+		add_timer(timer);
+	}
+
+	ZFCP_LOG_TRACE("request queue of adapter %s: "
+		       "next free SBAL is %i, %i free SBALs\n",
+		       zfcp_get_busid_by_adapter(adapter),
+		       req_queue->free_index,
+		       atomic_read(&req_queue->free_count));
+
+	ZFCP_LOG_DEBUG("calling do_QDIO adapter %s, flags=0x%x, queue_no=%i, "
+		       "index_in_queue=%i, count=%i, buffers=%p\n",
+		       zfcp_get_busid_by_adapter(adapter),
+		       QDIO_FLAG_SYNC_OUTPUT,
+		       0, fsf_req->sbal_first, fsf_req->sbal_number,
+		       &req_queue->buffer[fsf_req->sbal_first]);
+
+	/*
+	 * adjust the number of free SBALs in request queue as well as
+	 * position of first one
+	 */
+	atomic_sub(fsf_req->sbal_number, &req_queue->free_count);
+	ZFCP_LOG_TRACE("free_count=%d\n", atomic_read(&req_queue->free_count));
+	req_queue->free_index += fsf_req->sbal_number;	  /* increase */
+	req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;  /* wrap if needed */
+	new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
+
+	retval = do_QDIO(adapter->ccw_device,
+			 QDIO_FLAG_SYNC_OUTPUT,
+			 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
+
+	if (unlikely(retval)) {
+		/* Queues are down..... */
+		retval = -EIO;
+		/*
+		 * FIXME(potential race):
+		 * timer might be expired (absolutely unlikely)
+		 */
+		if (timer)
+			del_timer(timer);
+		write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
+		list_del(&fsf_req->list);
+		write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
+		/*
+		 * adjust the number of free SBALs in request queue as well as
+		 * position of first one
+		 */
+		zfcp_qdio_zero_sbals(req_queue->buffer,
+				     fsf_req->sbal_first, fsf_req->sbal_number);
+		atomic_add(fsf_req->sbal_number, &req_queue->free_count);
+		req_queue->free_index -= fsf_req->sbal_number;	 /* decrease */
+		req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
+		req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
+		ZFCP_LOG_DEBUG
+		    ("error: do_QDIO failed. Buffers could not be enqueued "
+		     "to request queue.\n");
+	} else {
+		req_queue->distance_from_int = new_distance_from_int;
+		/*
+		 * increase FSF sequence counter -
+		 * this must only be done for request successfully enqueued to
+		 * QDIO this rejected requests may be cleaned up by calling
+		 * routines resulting in missing sequence counter values
+		 * otherwise,
+		 */
+		/* Don't increase for unsolicited status */
+		if (likely(inc_seq_no)) {
+			adapter->fsf_req_seq_no++;
+			ZFCP_LOG_TRACE
+			    ("FSF sequence counter value of adapter %s "
+			     "increased to %i\n",
+			     zfcp_get_busid_by_adapter(adapter),
+			     adapter->fsf_req_seq_no);
+		}
+		/* count FSF requests pending */
+		atomic_inc(&adapter->fsf_reqs_active);
+	}
+	return retval;
+}
+
+/*
+ * function: zfcp_fsf_req_cleanup
+ *
+ * purpose: cleans up an FSF request and removes it from the specified list
+ *
+ * returns:
+ *
+ * assumption: no pending SB in SBALEs other than QTCB
+ */
+void
+zfcp_fsf_req_cleanup(struct zfcp_fsf_req *fsf_req)
+{
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	unsigned long flags;
+
+	/* unlink from the adapter's pending-request list under lock,
+	 * then release the request memory */
+	write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
+	list_del(&fsf_req->list);
+	write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
+	zfcp_fsf_req_free(fsf_req);
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
new file mode 100644
index 000000000000..5889956bbf08
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -0,0 +1,472 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_fsf.h
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * (C) Copyright IBM Corp. 2002, 2004
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Raimund Schroeder <raimund.schroeder@de.ibm.com>
+ * Aron Zeh
+ * Wolfgang Taphorn
+ * Stefan Bader <stefan.bader@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ * Volker Sameske <sameske@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef FSF_H
+#define FSF_H
+
+/* supported QTCB (queue transfer control block) versions */
+#define FSF_QTCB_VERSION1 0x00000001
+#define FSF_QTCB_CURRENT_VERSION FSF_QTCB_VERSION1
+
+/* FSF commands */
+#define FSF_QTCB_FCP_CMND 0x00000001
+#define FSF_QTCB_ABORT_FCP_CMND 0x00000002
+#define FSF_QTCB_OPEN_PORT_WITH_DID 0x00000005
+#define FSF_QTCB_OPEN_LUN 0x00000006
+#define FSF_QTCB_CLOSE_LUN 0x00000007
+#define FSF_QTCB_CLOSE_PORT 0x00000008
+#define FSF_QTCB_CLOSE_PHYSICAL_PORT 0x00000009
+#define FSF_QTCB_SEND_ELS 0x0000000B
+#define FSF_QTCB_SEND_GENERIC 0x0000000C
+#define FSF_QTCB_EXCHANGE_CONFIG_DATA 0x0000000D
+#define FSF_QTCB_EXCHANGE_PORT_DATA 0x0000000E
+#define FSF_QTCB_DOWNLOAD_CONTROL_FILE 0x00000012
+#define FSF_QTCB_UPLOAD_CONTROL_FILE 0x00000013
+
+/* FSF QTCB types */
+#define FSF_IO_COMMAND 0x00000001
+#define FSF_SUPPORT_COMMAND 0x00000002
+#define FSF_CONFIG_COMMAND 0x00000003
+#define FSF_PORT_COMMAND 0x00000004
+
+/* FSF control file upload/download operations' subtype and options */
+#define FSF_CFDC_OPERATION_SUBTYPE 0x00020001
+#define FSF_CFDC_OPTION_NORMAL_MODE 0x00000000
+#define FSF_CFDC_OPTION_FORCE 0x00000001
+#define FSF_CFDC_OPTION_FULL_ACCESS 0x00000002
+#define FSF_CFDC_OPTION_RESTRICTED_ACCESS 0x00000004
+
+/* FSF protocol status codes */
+#define FSF_PROT_GOOD 0x00000001
+#define FSF_PROT_QTCB_VERSION_ERROR 0x00000010
+#define FSF_PROT_SEQ_NUMB_ERROR 0x00000020
+#define FSF_PROT_UNSUPP_QTCB_TYPE 0x00000040
+#define FSF_PROT_HOST_CONNECTION_INITIALIZING 0x00000080
+#define FSF_PROT_FSF_STATUS_PRESENTED 0x00000100
+#define FSF_PROT_DUPLICATE_REQUEST_ID 0x00000200
+#define FSF_PROT_LINK_DOWN 0x00000400
+#define FSF_PROT_REEST_QUEUE 0x00000800
+#define FSF_PROT_ERROR_STATE 0x01000000
+
+/* FSF status codes */
+#define FSF_GOOD 0x00000000
+#define FSF_PORT_ALREADY_OPEN 0x00000001
+#define FSF_LUN_ALREADY_OPEN 0x00000002
+#define FSF_PORT_HANDLE_NOT_VALID 0x00000003
+#define FSF_LUN_HANDLE_NOT_VALID 0x00000004
+#define FSF_HANDLE_MISMATCH 0x00000005
+#define FSF_SERVICE_CLASS_NOT_SUPPORTED 0x00000006
+#define FSF_FCPLUN_NOT_VALID 0x00000009
+#define FSF_ACCESS_DENIED 0x00000010
+#define FSF_LUN_SHARING_VIOLATION 0x00000012
+#define FSF_FCP_COMMAND_DOES_NOT_EXIST 0x00000022
+#define FSF_DIRECTION_INDICATOR_NOT_VALID 0x00000030
+#define FSF_CMND_LENGTH_NOT_VALID 0x00000033
+#define FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED 0x00000040
+#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
+#define FSF_ELS_COMMAND_REJECTED 0x00000050
+#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
+#define FSF_OPERATION_PARTIALLY_SUCCESSFUL 0x00000052
+#define FSF_AUTHORIZATION_FAILURE 0x00000053
+#define FSF_CFDC_ERROR_DETECTED 0x00000054
+#define FSF_CONTROL_FILE_UPDATE_ERROR 0x00000055
+#define FSF_CONTROL_FILE_TOO_LARGE 0x00000056
+#define FSF_ACCESS_CONFLICT_DETECTED 0x00000057
+#define FSF_CONFLICTS_OVERRULED 0x00000058
+#define FSF_PORT_BOXED 0x00000059
+#define FSF_LUN_BOXED 0x0000005A
+#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
+#define FSF_PAYLOAD_SIZE_MISMATCH 0x00000060
+#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
+#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
+#define FSF_SBAL_MISMATCH 0x00000063
+#define FSF_OPEN_PORT_WITHOUT_PRLI 0x00000064
+#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
+#define FSF_FCP_RSP_AVAILABLE 0x000000AF
+#define FSF_UNKNOWN_COMMAND 0x000000E2
+#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
+#define FSF_INVALID_COMMAND_OPTION 0x000000E5
+/* #define FSF_ERROR 0x000000FF */
+
+#define FSF_STATUS_QUALIFIER_SIZE 16
+
+/* FSF status qualifier, recommendations */
+#define FSF_SQ_NO_RECOM 0x00
+#define FSF_SQ_FCP_RSP_AVAILABLE 0x01
+#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
+#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
+#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
+#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
+#define FSF_SQ_COMMAND_ABORTED 0x06
+#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
+
+/* FSF status qualifier for CFDC commands */
+#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE 0x00000001
+#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2 0x00000002
+/* CFDC subtable codes */
+#define FSF_SQ_CFDC_SUBTABLE_OS 0x0001
+#define FSF_SQ_CFDC_SUBTABLE_PORT_WWPN 0x0002
+#define FSF_SQ_CFDC_SUBTABLE_PORT_DID 0x0003
+#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
+
+/* FSF status qualifier (most significant 4 bytes), local link down */
+#define FSF_PSQ_LINK_NOLIGHT 0x00000004
+#define FSF_PSQ_LINK_WRAPPLUG 0x00000008
+#define FSF_PSQ_LINK_NOFCP 0x00000010
+
+/* payload size in status read buffer */
+#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
+
+/* number of status read buffers that should be sent by ULP */
+#define FSF_STATUS_READS_RECOM 16
+
+/* status types in status read buffer */
+#define FSF_STATUS_READ_PORT_CLOSED 0x00000001
+#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
+#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003
+#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
+#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */
+#define FSF_STATUS_READ_LINK_UP 0x00000006
+#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
+#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
+
+/* status subtypes in status read buffer */
+#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
+#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
+
+/* status subtypes for CFDC */
+#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
+#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
+
+/* topology that is detected by the adapter */
+#define FSF_TOPO_ERROR 0x00000000
+#define FSF_TOPO_P2P 0x00000001
+#define FSF_TOPO_FABRIC 0x00000002
+#define FSF_TOPO_AL 0x00000003
+#define FSF_TOPO_FABRIC_VIRT 0x00000004
+
+/* data direction for FCP commands */
+#define FSF_DATADIR_WRITE 0x00000001
+#define FSF_DATADIR_READ 0x00000002
+#define FSF_DATADIR_READ_WRITE 0x00000003
+#define FSF_DATADIR_CMND 0x00000004
+
+/* fc service class */
+#define FSF_CLASS_1 0x00000001
+#define FSF_CLASS_2 0x00000002
+#define FSF_CLASS_3 0x00000003
+
+/* SBAL chaining */
+#define FSF_MAX_SBALS_PER_REQ 36
+#define FSF_MAX_SBALS_PER_ELS_REQ 2
+
+/* logging space behind QTCB */
+#define FSF_QTCB_LOG_SIZE 1024
+
+/* channel features */
+#define FSF_FEATURE_QTCB_SUPPRESSION 0x00000001
+#define FSF_FEATURE_CFDC 0x00000002
+#define FSF_FEATURE_LUN_SHARING 0x00000004
+#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
+#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
+
+/* option */
+#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
+#define FSF_OPEN_LUN_REPLICATE_SENSE 0x00000002
+
+/* adapter types */
+#define FSF_ADAPTER_TYPE_FICON 0x00000001
+#define FSF_ADAPTER_TYPE_FICON_EXPRESS 0x00000002
+
+/* port types */
+#define FSF_HBA_PORTTYPE_UNKNOWN 0x00000001
+#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
+#define FSF_HBA_PORTTYPE_NPORT 0x00000005
+#define FSF_HBA_PORTTYPE_PTP 0x00000021
+/* following are not defined and used by FSF Spec
+   but are additionally defined by FC-HBA */
+#define FSF_HBA_PORTTYPE_OTHER 0x00000002
+/* NOTE(review): FSF_HBA_PORTTYPE_NOTPRESENT is defined twice (here and
+ * above) with the identical value 0x00000003; the redefinition is legal
+ * but redundant and one of the two lines could be dropped. */
+#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
+#define FSF_HBA_PORTTYPE_NLPORT 0x00000006
+#define FSF_HBA_PORTTYPE_FLPORT 0x00000007
+#define FSF_HBA_PORTTYPE_FPORT 0x00000008
+#define FSF_HBA_PORTTYPE_LPORT 0x00000020
+
+/* port states */
+#define FSF_HBA_PORTSTATE_UNKNOWN 0x00000001
+#define FSF_HBA_PORTSTATE_ONLINE 0x00000002
+#define FSF_HBA_PORTSTATE_OFFLINE 0x00000003
+#define FSF_HBA_PORTSTATE_LINKDOWN 0x00000006
+#define FSF_HBA_PORTSTATE_ERROR 0x00000007
+
+/* IO states of adapter */
+#define FSF_IOSTAT_NPORT_RJT 0x00000004
+#define FSF_IOSTAT_FABRIC_RJT 0x00000005
+#define FSF_IOSTAT_LS_RJT 0x00000009
+
+/* open LUN access flags*/
+#define FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED 0x01000000
+#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
+#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
+
+/* forward declarations of the QTCB-related structures defined below */
+struct fsf_queue_designator;
+struct fsf_status_read_buffer;
+struct fsf_port_closed_payload;
+struct fsf_bit_error_payload;
+union fsf_prot_status_qual;
+struct fsf_qual_version_error;
+struct fsf_qual_sequence_error;
+struct fsf_qtcb_prefix;
+struct fsf_qtcb_header;
+struct fsf_qtcb_bottom_config;
+struct fsf_qtcb_bottom_support;
+struct fsf_qtcb_bottom_io;
+union fsf_qtcb_bottom;
+
+/* identifies a queue within the channel subsystem */
+struct fsf_queue_designator {
+	u8 cssid;
+	u8 chpid;
+	u8 hla;
+	u8 ua;
+	u32 res1;	/* reserved */
+} __attribute__ ((packed));
+
+/* payload of an unsolicited "port closed" status read */
+struct fsf_port_closed_payload {
+	struct fsf_queue_designator queue_designator;
+	u32 port_handle;
+} __attribute__ ((packed));
+
+/* payload of an unsolicited "bit error threshold" status read */
+struct fsf_bit_error_payload {
+	u32 res1;	/* reserved */
+	u32 link_failure_error_count;
+	u32 loss_of_sync_error_count;
+	u32 loss_of_signal_error_count;
+	u32 primitive_sequence_error_count;
+	u32 invalid_transmission_word_error_count;
+	u32 crc_error_count;
+	u32 primitive_sequence_event_timeout_count;
+	u32 elastic_buffer_overrun_error_count;
+	u32 fcal_arbitration_timeout_count;
+	u32 advertised_receive_b2b_credit;
+	u32 current_receive_b2b_credit;
+	u32 advertised_transmit_b2b_credit;
+	u32 current_transmit_b2b_credit;
+} __attribute__ ((packed));
+
+/*
+ * buffer for unsolicited status information from the adapter;
+ * interpretation of payload depends on status_type/status_subtype
+ * (FSF_STATUS_READ_* constants above)
+ */
+struct fsf_status_read_buffer {
+	u32 status_type;
+	u32 status_subtype;
+	u32 length;
+	u32 res1;	/* reserved */
+	struct fsf_queue_designator queue_designator;
+	u32 d_id;
+	u32 class;
+	u64 fcp_lun;
+	u8 res3[24];	/* reserved */
+	u8 payload[FSF_STATUS_READ_PAYLOAD_SIZE];
+} __attribute__ ((packed));
+
+/* protocol status qualifier for FSF_PROT_QTCB_VERSION_ERROR */
+struct fsf_qual_version_error {
+	u32 fsf_version;
+	u32 res1[3];	/* reserved */
+} __attribute__ ((packed));
+
+/* protocol status qualifier for FSF_PROT_SEQ_NUMB_ERROR */
+struct fsf_qual_sequence_error {
+	u32 exp_req_seq_no;
+	u32 res1[3];	/* reserved */
+} __attribute__ ((packed));
+
+/* protocol status qualifier for local link errors (FSF_PSQ_LINK_*) */
+struct fsf_qual_locallink_error {
+	u32 code;
+	u32 res1[3];	/* reserved */
+} __attribute__ ((packed));
+
+/* union of all protocol status qualifier variants */
+union fsf_prot_status_qual {
+	struct fsf_qual_version_error version_error;
+	struct fsf_qual_sequence_error sequence_error;
+	struct fsf_qual_locallink_error locallink_error;
+} __attribute__ ((packed));
+
+/* fixed leading part of every QTCB, carries protocol-level status */
+struct fsf_qtcb_prefix {
+	u64 req_id;
+	u32 qtcb_version;
+	u32 ulp_info;
+	u32 qtcb_type;
+	u32 req_seq_no;
+	u32 prot_status;
+	union fsf_prot_status_qual prot_status_qual;
+	u8 res1[20];	/* reserved */
+} __attribute__ ((packed));
+
+/* FSF status qualifier, accessible as raw bytes/halfwords/words */
+union fsf_status_qual {
+	u8 byte[FSF_STATUS_QUALIFIER_SIZE];
+	u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
+	u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
+	struct fsf_queue_designator fsf_queue_designator;
+} __attribute__ ((packed));
+
+/* command-independent part of the QTCB, carries FSF-level status */
+struct fsf_qtcb_header {
+	u64 req_handle;
+	u32 fsf_command;
+	u32 res1;	/* reserved */
+	u32 port_handle;
+	u32 lun_handle;
+	u32 res2;	/* reserved */
+	u32 fsf_status;
+	union fsf_status_qual fsf_status_qual;
+	u8 res3[28];	/* reserved */
+	u16 log_start;
+	u16 log_length;
+	u8 res4[16];	/* reserved */
+} __attribute__ ((packed));
+
+/* N_Port service parameters as exchanged during login */
+struct fsf_nport_serv_param {
+	u8 common_serv_param[16];
+	u64 wwpn;
+	u64 wwnn;
+	u8 class1_serv_param[16];
+	u8 class2_serv_param[16];
+	u8 class3_serv_param[16];
+	u8 class4_serv_param[16];
+	u8 vendor_version_level[16];
+	u8 res1[16];	/* reserved */
+} __attribute__ ((packed));
+
+/* PLOGI payload: command code plus service parameters */
+struct fsf_plogi {
+	u32 code;
+	struct fsf_nport_serv_param serv_param;
+} __attribute__ ((packed));
+
+#define FSF_FCP_CMND_SIZE	288
+#define FSF_FCP_RSP_SIZE	128
+
+/* QTCB bottom for FCP I/O commands (FSF_QTCB_FCP_CMND) */
+struct fsf_qtcb_bottom_io {
+	u32 data_direction;
+	u32 service_class;
+	u8 res1[8];	/* reserved */
+	u32 fcp_cmnd_length;
+	u8 res2[12];	/* reserved */
+	u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
+	u8 fcp_rsp[FSF_FCP_RSP_SIZE];
+	u8 res3[64];	/* reserved */
+} __attribute__ ((packed));
+
+/* QTCB bottom for support commands (ELS, generic, CFDC, ...) */
+struct fsf_qtcb_bottom_support {
+	u32 operation_subtype;
+	u8 res1[12];	/* reserved */
+	u32 d_id;
+	u32 option;
+	u64 fcp_lun;
+	u64 res2;	/* reserved */
+	u64 req_handle;
+	u32 service_class;
+	u8 res3[3];	/* reserved */
+	u8 timeout;
+	u32 lun_access_info;
+	u8 res4[180];	/* reserved */
+	u32 els1_length;
+	u32 els2_length;
+	u32 req_buf_length;
+	u32 resp_buf_length;
+	u8 els[256];
+} __attribute__ ((packed));
+
+/* QTCB bottom for FSF_QTCB_EXCHANGE_CONFIG_DATA */
+struct fsf_qtcb_bottom_config {
+	u32 lic_version;
+	u32 feature_selection;
+	u32 high_qtcb_version;
+	u32 low_qtcb_version;
+	u32 max_qtcb_size;
+	u32 max_data_transfer_size;
+	u32 supported_features;
+	u8 res1[4];	/* reserved */
+	u32 fc_topology;
+	u32 fc_link_speed;
+	u32 adapter_type;
+	u32 peer_d_id;
+	u8 res2[12];	/* reserved */
+	u32 s_id;
+	struct fsf_nport_serv_param nport_serv_param;
+	u8 res3[8];	/* reserved */
+	u32 adapter_ports;
+	u32 hardware_version;
+	u8 serial_number[32];
+	u8 res4[272];	/* reserved */
+} __attribute__ ((packed));
+
+/* QTCB bottom for FSF_QTCB_EXCHANGE_PORT_DATA (port info/statistics) */
+struct fsf_qtcb_bottom_port {
+	u8 res1[8];	/* reserved */
+	u32 fc_port_id;
+	u32 port_type;
+	u32 port_state;
+	u32 class_of_service;	/* should be 0x00000006 for class 2 and 3 */
+	u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
+	u8 active_fc4_types[32];
+	u32 supported_speed;	/* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */
+	u32 maximum_frame_size;	/* fixed value of 2112 */
+	u64 seconds_since_last_reset;
+	u64 tx_frames;
+	u64 tx_words;
+	u64 rx_frames;
+	u64 rx_words;
+	u64 lip;		/* 0 */
+	u64 nos;		/* currently 0 */
+	u64 error_frames;	/* currently 0 */
+	u64 dumped_frames;	/* currently 0 */
+	u64 link_failure;
+	u64 loss_of_sync;
+	u64 loss_of_signal;
+	u64 psp_error_counts;
+	u64 invalid_tx_words;
+	u64 invalid_crcs;
+	u64 input_requests;
+	u64 output_requests;
+	u64 control_requests;
+	u64 input_mb;		/* where 1 MByte == 1.000.000 Bytes */
+	u64 output_mb;		/* where 1 MByte == 1.000.000 Bytes */
+	u8 res2[256];	/* reserved */
+} __attribute__ ((packed));
+
+/* command-specific part of the QTCB; selected by the FSF command */
+union fsf_qtcb_bottom {
+	struct fsf_qtcb_bottom_io io;
+	struct fsf_qtcb_bottom_support support;
+	struct fsf_qtcb_bottom_config config;
+	struct fsf_qtcb_bottom_port port;
+};
+
+/* complete QTCB layout: prefix + header + command-specific bottom + log */
+struct fsf_qtcb {
+	struct fsf_qtcb_prefix prefix;
+	struct fsf_qtcb_header header;
+	union fsf_qtcb_bottom bottom;
+	u8 log[FSF_QTCB_LOG_SIZE];
+} __attribute__ ((packed));
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
new file mode 100644
index 000000000000..06e862d7bc90
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -0,0 +1,868 @@
+/*
+ * linux/drivers/s390/scsi/zfcp_qdio.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * QDIO related routines
+ *
+ * (C) Copyright IBM Corp. 2002, 2004
+ *
+ * Authors:
+ * Martin Peschke <mpeschke@de.ibm.com>
+ * Raimund Schroeder <raimund.schroeder@de.ibm.com>
+ * Wolfgang Taphorn
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_QDIO_C_REVISION "$Revision: 1.20 $"
+
+#include "zfcp_ext.h"
+
+static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
+static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
+ (struct zfcp_qdio_queue *, int, int);
+static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
+ (struct zfcp_fsf_req *, int, int);
+static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
+ (struct zfcp_fsf_req *, unsigned long);
+static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
+ (struct zfcp_fsf_req *, unsigned long);
+static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
+static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
+static inline void zfcp_qdio_sbale_fill
+ (struct zfcp_fsf_req *, unsigned long, void *, int);
+static inline int zfcp_qdio_sbals_from_segment
+ (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
+static inline int zfcp_qdio_sbals_from_buffer
+ (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
+
+static qdio_handler_t zfcp_qdio_request_handler;
+static qdio_handler_t zfcp_qdio_response_handler;
+static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
+ unsigned int,
+ unsigned int, unsigned int);
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
+
+/*
+ * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
+ * array in the adapter struct.
+ * Cur_buf is the pointer array and count can be any number of required
+ * buffers, the page-fitting arithmetic is done entirely within this function.
+ *
+ * returns:	number of buffers allocated
+ * locks:	must only be called with zfcp_data.config_sema taken
+ */
+static int
+zfcp_qdio_buffers_enqueue(struct qdio_buffer **cur_buf, int count)
+{
+	int buf_pos;
+	int qdio_buffers_per_page;
+	int page_pos = 0;
+	struct qdio_buffer *first_in_page = NULL;
+
+	/* several struct qdio_buffer fit into a single page */
+	qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
+	ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
+
+	for (buf_pos = 0; buf_pos < count; buf_pos++) {
+		if (page_pos == 0) {
+			/* first buffer of a page: allocate the page itself */
+			cur_buf[buf_pos] = (struct qdio_buffer *)
+			    get_zeroed_page(GFP_KERNEL);
+			if (cur_buf[buf_pos] == NULL) {
+				ZFCP_LOG_INFO("error: allocation of "
+					      "QDIO buffer failed \n");
+				goto out;
+			}
+			first_in_page = cur_buf[buf_pos];
+		} else {
+			/* subsequent buffers point into the current page */
+			cur_buf[buf_pos] = first_in_page + page_pos;
+
+		}
+		/* was initialised to zero */
+		page_pos++;
+		page_pos %= qdio_buffers_per_page;
+	}
+ out:
+	/* on allocation failure, buf_pos tells the caller how many
+	 * buffers were set up so they can be rolled back */
+	return buf_pos;
+}
+
+/*
+ * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
+ * in the adapter struct. cur_buf is the pointer array and count can be any
+ * number of buffers in the array that should be freed starting from buffer 0
+ *
+ * locks:	must only be called with zfcp_data.config_sema taken
+ */
+static void
+zfcp_qdio_buffers_dequeue(struct qdio_buffer **cur_buf, int count)
+{
+	int buf_pos;
+	int qdio_buffers_per_page;
+
+	qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
+	ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
+
+	/* only every qdio_buffers_per_page-th pointer is the start of a
+	 * page allocated by zfcp_qdio_buffers_enqueue(); free just those */
+	for (buf_pos = 0; buf_pos < count; buf_pos += qdio_buffers_per_page)
+		free_page((unsigned long) cur_buf[buf_pos]);
+	return;
+}
+
+/*
+ * Allocates all SBALs of both the request and the response queue of an
+ * adapter; rolls back any partial allocation on failure.
+ *
+ * returns:	0 on success, -ENOMEM if either queue could not be
+ *		allocated completely
+ * locks:	must only be called with zfcp_data.config_sema taken
+ */
+int
+zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
+{
+	int buffer_count;
+	int retval = 0;
+
+	buffer_count =
+	    zfcp_qdio_buffers_enqueue(&(adapter->request_queue.buffer[0]),
+				      QDIO_MAX_BUFFERS_PER_Q);
+	if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
+		/* partial allocation: release what we got and bail out */
+		ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for request "
+			       "queue\n", buffer_count);
+		zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
+					  buffer_count);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	buffer_count =
+	    zfcp_qdio_buffers_enqueue(&(adapter->response_queue.buffer[0]),
+				      QDIO_MAX_BUFFERS_PER_Q);
+	if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
+		/* release the partial response queue AND the complete
+		 * request queue allocated above */
+		ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for response "
+			       "queue", buffer_count);
+		zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
+					  buffer_count);
+		ZFCP_LOG_TRACE("freeing request_queue buffers\n");
+		zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
+					  QDIO_MAX_BUFFERS_PER_Q);
+		retval = -ENOMEM;
+		goto out;
+	}
+ out:
+	return retval;
+}
+
+/*
+ * Frees all SBALs of both the request and the response queue of an
+ * adapter (counterpart of zfcp_qdio_allocate_queues).
+ *
+ * locks:	must only be called with zfcp_data.config_sema taken
+ */
+void
+zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
+{
+	ZFCP_LOG_TRACE("freeing request_queue buffers\n");
+	zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
+				  QDIO_MAX_BUFFERS_PER_Q);
+
+	ZFCP_LOG_TRACE("freeing response_queue buffers\n");
+	zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
+				  QDIO_MAX_BUFFERS_PER_Q);
+}
+
+/*
+ * Fills in the adapter's qdio_initialize structure (one input and one
+ * output queue, handlers, thresholds, 0-copy SBALs) and registers it
+ * with the QDIO layer.
+ *
+ * returns:	return value of qdio_allocate()
+ */
+int
+zfcp_qdio_allocate(struct zfcp_adapter *adapter)
+{
+	struct qdio_initialize *init_data;
+
+	init_data = &adapter->qdio_init_data;
+
+	init_data->cdev = adapter->ccw_device;
+	init_data->q_format = QDIO_SCSI_QFMT;
+	memcpy(init_data->adapter_name, &adapter->name, 8);
+	init_data->qib_param_field_format = 0;
+	init_data->qib_param_field = NULL;
+	init_data->input_slib_elements = NULL;
+	init_data->output_slib_elements = NULL;
+	init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD;
+	init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD;
+	init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD;
+	init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD;
+	init_data->no_input_qs = 1;
+	init_data->no_output_qs = 1;
+	/* response queue is QDIO input, request queue is QDIO output */
+	init_data->input_handler = zfcp_qdio_response_handler;
+	init_data->output_handler = zfcp_qdio_request_handler;
+	/* handlers recover the adapter pointer from int_parm */
+	init_data->int_parm = (unsigned long) adapter;
+	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
+	    QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
+	init_data->input_sbal_addr_array =
+	    (void **) (adapter->response_queue.buffer);
+	init_data->output_sbal_addr_array =
+	    (void **) (adapter->request_queue.buffer);
+
+	return qdio_allocate(init_data);
+}
+
+/*
+ * function:	zfcp_qdio_handler_error_check
+ *
+ * purpose:	called by the request/response handlers to determine and log
+ *		error conditions reported by the QDIO layer; triggers adapter
+ *		recovery (reopen) when an error is detected
+ *
+ * returns:	0 if no error, -EIO on a QDIO error condition
+ */
+static inline int
+zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
+			      unsigned int status,
+			      unsigned int qdio_error, unsigned int siga_error)
+{
+	int retval = 0;
+
+	/* purely informational trace output of the interrupt type */
+	if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) {
+		if (status & QDIO_STATUS_INBOUND_INT) {
+			ZFCP_LOG_TRACE("status is"
+				       " QDIO_STATUS_INBOUND_INT \n");
+		}
+		if (status & QDIO_STATUS_OUTBOUND_INT) {
+			ZFCP_LOG_TRACE("status is"
+				       " QDIO_STATUS_OUTBOUND_INT \n");
+		}
+	}			// if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE))
+	if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
+		retval = -EIO;
+
+		ZFCP_LOG_FLAGS(1, "QDIO_STATUS_LOOK_FOR_ERROR \n");
+
+		ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, "
+			      "qdio_error=0x%x, siga_error=0x%x)\n",
+			      status, qdio_error, siga_error);
+
+		/* decode and log the individual status flags */
+		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
+			ZFCP_LOG_FLAGS(2,
+				       "QDIO_STATUS_ACTIVATE_CHECK_CONDITION\n");
+		}
+		if (status & QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR) {
+			ZFCP_LOG_FLAGS(2,
+				       "QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR\n");
+		}
+		if (status & QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR) {
+			ZFCP_LOG_FLAGS(2,
+				       "QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR\n");
+		}
+
+		if (siga_error & QDIO_SIGA_ERROR_ACCESS_EXCEPTION) {
+			ZFCP_LOG_FLAGS(2, "QDIO_SIGA_ERROR_ACCESS_EXCEPTION\n");
+		}
+
+		if (siga_error & QDIO_SIGA_ERROR_B_BIT_SET) {
+			ZFCP_LOG_FLAGS(2, "QDIO_SIGA_ERROR_B_BIT_SET\n");
+		}
+
+		switch (qdio_error) {
+		case 0:
+			ZFCP_LOG_FLAGS(3, "QDIO_OK");
+			break;
+		case SLSB_P_INPUT_ERROR:
+			ZFCP_LOG_FLAGS(1, "SLSB_P_INPUT_ERROR\n");
+			break;
+		case SLSB_P_OUTPUT_ERROR:
+			ZFCP_LOG_FLAGS(1, "SLSB_P_OUTPUT_ERROR\n");
+			break;
+		default:
+			ZFCP_LOG_NORMAL("bug: unknown QDIO error 0x%x\n",
+					qdio_error);
+			break;
+		}
+		/* Restarting IO on the failed adapter from scratch */
+		debug_text_event(adapter->erp_dbf, 1, "qdio_err");
+		/*
+		 * Since we have been using this adapter, it is safe to assume
+		 * that it is not failed but recoverable. The card seems to
+		 * report link-up events by self-initiated queue shutdown.
+		 * That is why we need to clear the link-down flag
+		 * which is set again in case we have missed by a mile.
+		 */
+		zfcp_erp_adapter_reopen(
+			adapter,
+			ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+			ZFCP_STATUS_COMMON_ERP_FAILED);
+	}
+	return retval;
+}
+
+/*
+ * function:	zfcp_qdio_request_handler
+ *
+ * purpose:	is called by QDIO layer for completed SBALs in request queue;
+ *		clears the processed SBALs and returns them to the pool of
+ *		free outbound buffers
+ *
+ * returns:	(void)
+ */
+static void
+zfcp_qdio_request_handler(struct ccw_device *ccw_device,
+			  unsigned int status,
+			  unsigned int qdio_error,
+			  unsigned int siga_error,
+			  unsigned int queue_number,
+			  int first_element,
+			  int elements_processed,
+			  unsigned long int_parm)
+{
+	struct zfcp_adapter *adapter;
+	struct zfcp_qdio_queue *queue;
+
+	/*
+	 * we stored the address of the struct zfcp_adapter data structure
+	 * associated with this irq in int_parm (see zfcp_qdio_allocate)
+	 */
+	adapter = (struct zfcp_adapter *) int_parm;
+	queue = &adapter->request_queue;
+
+	ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n",
+		       zfcp_get_busid_by_adapter(adapter),
+		       first_element, elements_processed);
+
+	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
+						   siga_error)))
+		goto out;
+
+	/* cleanup all SBALs being program-owned now */
+	zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed);
+
+	/* increase free space in outbound queue */
+	atomic_add(elements_processed, &queue->free_count);
+	ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count));
+	/* wake up requesters that wait for free request queue SBALs */
+	wake_up(&adapter->request_wq);
+	ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n",
+		       elements_processed, atomic_read(&queue->free_count));
+ out:
+	return;
+}
+
+/*
+ * function:	zfcp_qdio_response_handler
+ *
+ * purpose:	is called by QDIO layer for completed SBALs in response queue;
+ *		dispatches each returned SBALE to zfcp_qdio_reqid_check and
+ *		hands the processed SBALs back to the adapter via do_QDIO
+ *
+ * returns:	(void)
+ */
+static void
+zfcp_qdio_response_handler(struct ccw_device *ccw_device,
+			   unsigned int status,
+			   unsigned int qdio_error,
+			   unsigned int siga_error,
+			   unsigned int queue_number,
+			   int first_element,
+			   int elements_processed,
+			   unsigned long int_parm)
+{
+	struct zfcp_adapter *adapter;
+	struct zfcp_qdio_queue *queue;
+	int buffer_index;
+	int i;
+	struct qdio_buffer *buffer;
+	int retval = 0;
+	u8 count;
+	u8 start;
+	volatile struct qdio_buffer_element *buffere = NULL;
+	int buffere_index;
+
+	/*
+	 * we stored the address of the struct zfcp_adapter data structure
+	 * associated with this irq in int_parm (see zfcp_qdio_allocate)
+	 */
+	adapter = (struct zfcp_adapter *) int_parm;
+	queue = &adapter->response_queue;
+
+	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
+						   siga_error)))
+		goto out;
+
+	buffere = &(queue->buffer[first_element]->element[0]);
+	ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags);
+	/*
+	 * go through all SBALs from input queue currently
+	 * returned by QDIO layer
+	 */
+
+	for (i = 0; i < elements_processed; i++) {
+
+		/* index may wrap around the circular queue */
+		buffer_index = first_element + i;
+		buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
+		buffer = queue->buffer[buffer_index];
+
+		/* go through all SBALEs of SBAL */
+		for (buffere_index = 0;
+		     buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER;
+		     buffere_index++) {
+
+			/* look for QDIO request identifiers in SB */
+			buffere = &buffer->element[buffere_index];
+			retval = zfcp_qdio_reqid_check(adapter,
+						       (void *) buffere->addr);
+
+			if (retval) {
+				ZFCP_LOG_NORMAL("bug: unexpected inbound "
+						"packet on adapter %s "
+						"(reqid=0x%lx, "
+						"first_element=%d, "
+						"elements_processed=%d)\n",
+						zfcp_get_busid_by_adapter(adapter),
+						(unsigned long) buffere->addr,
+						first_element,
+						elements_processed);
+				ZFCP_LOG_NORMAL("hex dump of inbound buffer "
+						"at address %p "
+						"(buffer_index=%d, "
+						"buffere_index=%d)\n", buffer,
+						buffer_index, buffere_index);
+				ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
+					      (char *) buffer, SBAL_SIZE);
+			}
+			/*
+			 * A single used SBALE per inbound SBALE has been
+			 * implemented by QDIO so far. Hope they will
+			 * do some optimisation. Will need to change to
+			 * unlikely() then.
+			 */
+			if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY))
+				break;
+		};	/* NOTE(review): stray ';' — harmless empty statement */
+
+		if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) {
+			ZFCP_LOG_NORMAL("bug: End of inbound data "
+					"not marked!\n");
+		}
+	}
+
+	/*
+	 * put range of SBALs back to response queue
+	 * (including SBALs which have already been free before)
+	 */
+	count = atomic_read(&queue->free_count) + elements_processed;
+	start = queue->free_index;
+
+	ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
+		       "queue_no=%i, index_in_queue=%i, count=%i, "
+		       "buffers=0x%lx\n",
+		       zfcp_get_busid_by_adapter(adapter),
+		       QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
+		       0, start, count, (unsigned long) &queue->buffer[start]);
+
+	retval = do_QDIO(ccw_device,
+			 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
+			 0, start, count, NULL);
+
+	if (unlikely(retval)) {
+		/* keep the count so a later attempt can retry the range */
+		atomic_set(&queue->free_count, count);
+		ZFCP_LOG_DEBUG("clearing of inbound data regions failed, "
+			       "queues may be down "
+			       "(count=%d, start=%d, retval=%d)\n",
+			       count, start, retval);
+	} else {
+		/* advance free_index past the returned SBALs (wraps) */
+		queue->free_index += count;
+		queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
+		atomic_set(&queue->free_count, 0);
+		ZFCP_LOG_TRACE("%i buffers enqueued to response "
+			       "queue at position %i\n", count, start);
+	}
+ out:
+	return;
+}
+
+/*
+ * function:	zfcp_qdio_reqid_check
+ *
+ * purpose:	checks for valid reqids or unsolicited status; on success the
+ *		SBALE address is interpreted as the originating fsf_req and
+ *		that request is completed
+ *
+ * returns:	0 - valid request id or unsolicited status
+ *		!0 - otherwise
+ */
+int
+zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
+{
+	struct zfcp_fsf_req *fsf_req;
+	int retval = 0;
+
+	/* invalid (per convention used in this driver) */
+	if (unlikely(!sbale_addr)) {
+		ZFCP_LOG_NORMAL("bug: invalid reqid\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	/* valid request id and thus (hopefully :) valid fsf_req address */
+	fsf_req = (struct zfcp_fsf_req *) sbale_addr;
+
+	/* cross-check: the request must belong to this adapter */
+	if (unlikely(adapter != fsf_req->adapter)) {
+		ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
+				"fsf_req->adapter=%p, adapter=%p)\n",
+				fsf_req, fsf_req->adapter, adapter);
+		retval = -EINVAL;
+		goto out;
+	}
+
+	ZFCP_LOG_TRACE("fsf_req at %p, QTCB at %p\n", fsf_req, fsf_req->qtcb);
+	if (likely(fsf_req->qtcb)) {
+		ZFCP_LOG_TRACE("hex dump of QTCB:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) fsf_req->qtcb,
+			      sizeof(struct fsf_qtcb));
+	}
+
+	/* finish the FSF request */
+	zfcp_fsf_req_complete(fsf_req);
+ out:
+	return retval;
+}
+
+/**
+ * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
+ * @queue: queue from which SBALE should be returned
+ * @sbal: specifies number of SBAL in queue
+ * @sbale: specifies number of SBALE in SBAL
+ */
+static inline volatile struct qdio_buffer_element *
+zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
+{
+	return &queue->buffer[sbal]->element[sbale];
+}
+
+/**
+ * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
+ *	a struct zfcp_fsf_req
+ * @fsf_req: request whose adapter's request queue is addressed
+ * @sbal: number of SBAL in the request queue
+ * @sbale: number of SBALE in the SBAL
+ */
+inline volatile struct qdio_buffer_element *
+zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
+{
+	return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
+				   sbal, sbale);
+}
+
+/**
+ * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for
+ *	a struct zfcp_fsf_req
+ * @fsf_req: request whose adapter's response queue is addressed
+ * @sbal: number of SBAL in the response queue
+ * @sbale: number of SBALE in the SBAL
+ */
+static inline volatile struct qdio_buffer_element *
+zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
+{
+	return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue,
+				   sbal, sbale);
+}
+
+/**
+ * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
+ *	a struct zfcp_fsf_req
+ * @fsf_req: request whose sbal_curr/sbale_curr cursor is dereferenced
+ */
+inline volatile struct qdio_buffer_element *
+zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
+{
+	return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
+				   fsf_req->sbale_curr);
+}
+
+/**
+ * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used
+ *	on the request_queue for a struct zfcp_fsf_req
+ * @fsf_req: the number of the last SBAL that can be used is stored herein
+ * @max_sbals: used to pass an upper limit for the number of SBALs
+ *
+ * Note: We can assume at least one free SBAL in the request_queue when called.
+ */
+static inline void
+zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
+{
+	/* limited by both free SBALs and the caller-supplied maximum */
+	int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
+	count = min(count, max_sbals);
+	/* sbal_last = sbal_first + (count - 1), modulo queue size */
+	fsf_req->sbal_last = fsf_req->sbal_first;
+	fsf_req->sbal_last += (count - 1);
+	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
+}
+
+/**
+ * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a
+ *	request
+ * @fsf_req: zfcp_fsf_req to be processed
+ * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL
+ *
+ * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
+ *
+ * Returns a pointer to the first SBALE of the newly chained SBAL, or
+ * NULL if the limit computed by zfcp_qdio_sbal_limit() would be exceeded.
+ */
+static inline volatile struct qdio_buffer_element *
+zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
+{
+	volatile struct qdio_buffer_element *sbale;
+
+	/* set last entry flag in current SBALE of current SBAL */
+	sbale = zfcp_qdio_sbale_curr(fsf_req);
+	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+
+	/* don't exceed last allowed SBAL */
+	if (fsf_req->sbal_curr == fsf_req->sbal_last)
+		return NULL;
+
+	/* set chaining flag in first SBALE of current SBAL */
+	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
+
+	/* calculate index of next SBAL (circular queue wrap-around) */
+	fsf_req->sbal_curr++;
+	fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q;
+
+	/* keep this requests number of SBALs up-to-date */
+	fsf_req->sbal_number++;
+
+	/* start at first SBALE of new SBAL */
+	fsf_req->sbale_curr = 0;
+
+	/* set storage-block type for new SBAL */
+	sbale = zfcp_qdio_sbale_curr(fsf_req);
+	sbale->flags |= sbtype;
+
+	return sbale;
+}
+
+/**
+ * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
+ * @fsf_req: request whose SBALE cursor is advanced
+ * @sbtype: SBAL flags for the first SBALE of a newly chained SBAL
+ *
+ * Returns the next usable SBALE, or NULL if no further SBAL may be chained.
+ */
+static inline volatile struct qdio_buffer_element *
+zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
+{
+	/* current SBAL exhausted? then chain a new one */
+	if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
+		return zfcp_qdio_sbal_chain(fsf_req, sbtype);
+
+	fsf_req->sbale_curr++;
+
+	return zfcp_qdio_sbale_curr(fsf_req);
+}
+
+/**
+ * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
+ *	with zeros
+ * @queue: queue whose SBALs are cleared
+ * @first: index of the first SBAL to clear
+ * @last: index of the last SBAL to clear (range may wrap around)
+ *
+ * Returns the number of SBALs that were cleared.
+ */
+static inline int
+zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
+{
+	struct qdio_buffer **buf = queue->buffer;
+	int curr = first;
+	int count = 0;
+
+	for(;;) {
+		curr %= QDIO_MAX_BUFFERS_PER_Q;
+		count++;
+		memset(buf[curr], 0, sizeof(struct qdio_buffer));
+		if (curr == last)
+			break;
+		curr++;
+	}
+	return count;
+}
+
+
+/**
+ * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req
+ * @fsf_req: request whose SBALs (sbal_first..sbal_curr) are zeroed
+ *
+ * Returns the number of SBALs cleared (see zfcp_qdio_sbals_zero).
+ */
+static inline int
+zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
+{
+	return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue,
+				    fsf_req->sbal_first, fsf_req->sbal_curr);
+}
+
+
+/**
+ * zfcp_qdio_sbale_fill - set address and length in current SBALE
+ *	on request_queue
+ * @fsf_req: request whose current SBALE is filled
+ * @sbtype: SBAL flags; NOTE(review): currently unused here — the type flag
+ *	is set on the SBAL level by the callers; confirm this is intended
+ * @addr: buffer address to store in the SBALE
+ * @length: buffer length to store in the SBALE
+ */
+static inline void
+zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
+		     void *addr, int length)
+{
+	volatile struct qdio_buffer_element *sbale;
+
+	sbale = zfcp_qdio_sbale_curr(fsf_req);
+	sbale->addr = addr;
+	sbale->length = length;
+}
+
+/**
+ * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s)
+ * @fsf_req: request to be processed
+ * @sbtype: SBALE flags
+ * @start_addr: address of memory segment
+ * @total_length: length of memory segment
+ *
+ * Alignment and length of the segment determine how many SBALEs are needed
+ * for the memory segment.
+ *
+ * Returns @total_length on success, -EINVAL if the request ran out of
+ * SBALs (in which case all SBALs used so far are wiped).
+ */
+static inline int
+zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
+			     void *start_addr, unsigned long total_length)
+{
+	unsigned long remaining, length;
+	void *addr;
+
+	/* split segment up heeding page boundaries */
+	for (addr = start_addr, remaining = total_length; remaining > 0;
+	     addr += length, remaining -= length) {
+		/* get next free SBALE for new piece */
+		if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) {
+			/* no SBALE left, clean up and leave */
+			zfcp_qdio_sbals_wipe(fsf_req);
+			return -EINVAL;
+		}
+		/* calculate length of new piece: do not cross a page border */
+		length = min(remaining,
+			     (PAGE_SIZE - ((unsigned long) addr &
+					   (PAGE_SIZE - 1))));
+		/* fill current SBALE with calculated piece */
+		zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length);
+	}
+	return total_length;
+}
+
+
+/**
+ * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
+ * @fsf_req: request to be processed
+ * @sbtype: SBALE flags
+ * @sg: scatter-gather list
+ * @sg_count: number of elements in scatter-gather list
+ * @max_sbals: upper bound for number of SBALs to be used
+ *
+ * Returns the total number of bytes mapped, or a negative error code
+ * from zfcp_qdio_sbals_from_segment on failure.
+ */
+inline int
+zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
+			struct scatterlist *sg,	int sg_count, int max_sbals)
+{
+	int sg_index;
+	struct scatterlist *sg_segment;
+	int retval;
+	volatile struct qdio_buffer_element *sbale;
+	int bytes = 0;
+
+	/* figure out last allowed SBAL */
+	zfcp_qdio_sbal_limit(fsf_req, max_sbals);
+
+	/* set storage-block type for current SBAL */
+	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale->flags |= sbtype;
+
+	/* process all segments of scatter-gather list */
+	for (sg_index = 0, sg_segment = sg, bytes = 0;
+	     sg_index < sg_count;
+	     sg_index++, sg_segment++) {
+		retval = zfcp_qdio_sbals_from_segment(
+				fsf_req,
+				sbtype,
+				zfcp_sg_to_address(sg_segment),
+				sg_segment->length);
+		if (retval < 0) {
+			/* propagate mapping failure to caller */
+			bytes = retval;
+			goto out;
+		} else
+                        bytes += retval;
+	}
+	/* assume that no other SBALEs are to follow in the same SBAL */
+	sbale = zfcp_qdio_sbale_curr(fsf_req);
+	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+out:
+	return bytes;
+}
+
+
+/**
+ * zfcp_qdio_sbals_from_buffer - fill SBALs from buffer
+ * @fsf_req: request to be processed
+ * @sbtype: SBALE flags
+ * @buffer: data buffer
+ * @length: length of buffer
+ * @max_sbals: upper bound for number of SBALs to be used
+ *
+ * Wraps the buffer in a single-entry scatter-gather list and delegates
+ * to zfcp_qdio_sbals_from_sg; returns its result.
+ */
+static inline int
+zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
+			    void *buffer, unsigned long length, int max_sbals)
+{
+	struct scatterlist sg_segment;
+
+	zfcp_address_to_sg(buffer, &sg_segment);
+	sg_segment.length = length;
+
+	return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, &sg_segment, 1,
+				       max_sbals);
+}
+
+
+/**
+ * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
+ * @fsf_req: request to be processed
+ * @sbtype: SBALE flags
+ * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
+ *	to fill SBALs
+ *
+ * Returns the number of bytes mapped (or a negative error code), as
+ * reported by zfcp_qdio_sbals_from_sg/_from_buffer.
+ */
+inline int
+zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
+			      unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
+{
+	/* use_sg != 0 means request_buffer is a scatter-gather list */
+	if (scsi_cmnd->use_sg) {
+		return zfcp_qdio_sbals_from_sg(fsf_req,	sbtype,
+					       (struct scatterlist *)
+					       scsi_cmnd->request_buffer,
+					       scsi_cmnd->use_sg,
+					       ZFCP_MAX_SBALS_PER_REQ);
+	} else {
+		return zfcp_qdio_sbals_from_buffer(fsf_req, sbtype,
+						   scsi_cmnd->request_buffer,
+						   scsi_cmnd->request_bufflen,
+						   ZFCP_MAX_SBALS_PER_REQ);
+	}
+}
+
+/**
+ * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed
+ * @req_queue: request queue whose interrupt distance is tracked
+ * @fsf_req: request whose SBALs may carry the PCI flag
+ *
+ * Sets SBAL_FLAGS0_PCI once every ZFCP_QDIO_PCI_INTERVAL SBALs so the
+ * adapter raises an interrupt periodically.  Returns the new distance
+ * from the last interrupt (callers store it back into the queue).
+ */
+int
+zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue,
+			struct zfcp_fsf_req *fsf_req)
+{
+	int new_distance_from_int;
+	int pci_pos;
+	volatile struct qdio_buffer_element *sbale;
+
+	new_distance_from_int = req_queue->distance_from_int +
+                fsf_req->sbal_number;
+
+	if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) {
+		new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL;
+		/* locate the SBAL of this request that crosses the interval */
+		pci_pos  = fsf_req->sbal_first;
+		pci_pos += fsf_req->sbal_number;
+		pci_pos -= new_distance_from_int;
+		pci_pos -= 1;
+		pci_pos %= QDIO_MAX_BUFFERS_PER_Q;
+		sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0);
+		sbale->flags |= SBAL_FLAGS0_PCI;
+	}
+	return new_distance_from_int;
+}
+
+/*
+ * function:	zfcp_zero_sbals
+ *
+ * purpose:	zeros specified range of SBALs
+ *		(range may wrap around the circular queue)
+ *
+ * returns:	nothing
+ */
+void
+zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count)
+{
+	int cur_pos;
+	int index;
+
+	for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) {
+		index = cur_pos % QDIO_MAX_BUFFERS_PER_Q;
+		memset(buf[index], 0, sizeof (struct qdio_buffer));
+		ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n",
+			       index, buf[index]);
+	}
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
new file mode 100644
index 000000000000..e21b547fd427
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -0,0 +1,949 @@
+/*
+ *
+ * linux/drivers/s390/scsi/zfcp_scsi.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * (C) Copyright IBM Corp. 2002, 2004
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Raimund Schroeder <raimund.schroeder@de.ibm.com>
+ * Aron Zeh
+ * Wolfgang Taphorn
+ * Stefan Bader <stefan.bader@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
+
+#define ZFCP_SCSI_REVISION "$Revision: 1.74 $"
+
+#include "zfcp_ext.h"
+
+static void zfcp_scsi_slave_destroy(struct scsi_device *sdp);
+static int zfcp_scsi_slave_alloc(struct scsi_device *sdp);
+static int zfcp_scsi_slave_configure(struct scsi_device *sdp);
+static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
+ void (*done) (struct scsi_cmnd *));
+static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
+static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
+static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
+static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
+static int zfcp_task_management_function(struct zfcp_unit *, u8);
+
+static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t,
+ scsi_lun_t);
+static struct zfcp_port *zfcp_port_lookup(struct zfcp_adapter *, int,
+ scsi_id_t);
+
+static struct device_attribute *zfcp_sysfs_sdev_attrs[];
+
+struct scsi_transport_template *zfcp_transport_template;
+
+/*
+ * Global driver data; the embedded scsi_host_template registers the zfcp
+ * entry points with the SCSI midlayer.
+ * NOTE(review): uses GNU-style "label:" initializers; C99 designated
+ * initializers (".name =") are the preferred modern form.
+ */
+struct zfcp_data zfcp_data = {
+	.scsi_host_template = {
+	      name:	               ZFCP_NAME,
+	      proc_name:               "zfcp",
+	      proc_info:               NULL,
+	      detect:	               NULL,
+	      slave_alloc:             zfcp_scsi_slave_alloc,
+	      slave_configure:         zfcp_scsi_slave_configure,
+	      slave_destroy:           zfcp_scsi_slave_destroy,
+	      queuecommand:            zfcp_scsi_queuecommand,
+	      eh_abort_handler:        zfcp_scsi_eh_abort_handler,
+	      eh_device_reset_handler: zfcp_scsi_eh_device_reset_handler,
+	      eh_bus_reset_handler:    zfcp_scsi_eh_bus_reset_handler,
+	      eh_host_reset_handler:   zfcp_scsi_eh_host_reset_handler,
+			               /* FIXME(openfcp): Tune */
+	      can_queue:               4096,
+	      this_id:	               0,
+	      /*
+	       * FIXME:
+	       * one less? can zfcp_create_sbale cope with it?
+	       */
+	      sg_tablesize:            ZFCP_MAX_SBALES_PER_REQ,
+	      cmd_per_lun:             1,
+	      unchecked_isa_dma:       0,
+	      use_clustering:          1,
+	      sdev_attrs:              zfcp_sysfs_sdev_attrs,
+	},
+	.driver_version = ZFCP_VERSION,
+	/* rest initialised with zeros */
+};
+
+/* Find start of Response Information in FCP response unit:
+ * it immediately follows the fixed part of the fcp_rsp_iu. */
+char *
+zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
+{
+	char *fcp_rsp_info_ptr;
+
+	fcp_rsp_info_ptr =
+		(unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
+
+	return fcp_rsp_info_ptr;
+}
+
+/* Find start of Sense Information in FCP response unit:
+ * the fixed part, optionally followed by response info of
+ * fcp_rsp_len bytes, precedes the sense data. */
+char *
+zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
+{
+	char *fcp_sns_info_ptr;
+
+	fcp_sns_info_ptr =
+		(unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
+	if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
+		fcp_sns_info_ptr = (char *) fcp_sns_info_ptr +
+		    fcp_rsp_iu->fcp_rsp_len;
+
+	return fcp_sns_info_ptr;
+}
+
+/* Return address of the FCP_DL field within an FCP_CMND IU,
+ * accounting for the variable-length additional CDB. */
+fcp_dl_t *
+zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
+{
+	/* add_fcp_cdb_length is given in 4-byte words */
+	int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
+	fcp_dl_t *fcp_dl_addr;
+
+	fcp_dl_addr = (fcp_dl_t *)
+		((unsigned char *) fcp_cmd +
+		 sizeof (struct fcp_cmnd_iu) + additional_length);
+	/*
+	 * fcp_dl_addr = start address of fcp_cmnd structure +
+	 * size of fixed part + size of dynamically sized add_dcp_cdb field
+	 * SEE FCP-2 documentation
+	 */
+	return fcp_dl_addr;
+}
+
+/* Read the FCP_DL (data length) field of an FCP_CMND IU. */
+fcp_dl_t
+zfcp_get_fcp_dl(struct fcp_cmnd_iu * fcp_cmd)
+{
+	return *zfcp_get_fcp_dl_ptr(fcp_cmd);
+}
+
+/* Write the FCP_DL (data length) field of an FCP_CMND IU. */
+void
+zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
+{
+	*zfcp_get_fcp_dl_ptr(fcp_cmd) = fcp_dl;
+}
+
+/*
+ * OR @status into byte @pos (0 = least significant) of the SCSI
+ * result word.
+ * note: it's a bit-or operation not an assignment
+ * regarding the specified byte
+ */
+static inline void
+set_byte(u32 * result, char status, char pos)
+{
+	*result |= status << (pos * 8);
+}
+
+/* OR @status into the host byte (byte 2) of a SCSI result word. */
+void
+set_host_byte(u32 * result, char status)
+{
+	set_byte(result, status, 2);
+}
+
+/* OR @status into the driver byte (byte 3) of a SCSI result word. */
+void
+set_driver_byte(u32 * result, char status)
+{
+	set_byte(result, status, 3);
+}
+
+/*
+ * function:	zfcp_scsi_slave_alloc
+ *
+ * purpose:	called by the SCSI midlayer for each new scsi_device;
+ *		binds the device to the matching zfcp_unit (and vice
+ *		versa) and takes a reference on the unit
+ *
+ * returns:	0 on success, -ENODEV if adapter or unit is not found
+ */
+
+static int
+zfcp_scsi_slave_alloc(struct scsi_device *sdp)
+{
+	struct zfcp_adapter *adapter;
+	struct zfcp_unit *unit;
+	unsigned long flags;
+	int retval = -ENODEV;
+
+	/* adapter pointer was stashed in hostdata[0] at registration */
+	adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
+	if (!adapter)
+		goto out;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
+	if (unit) {
+		sdp->hostdata = unit;
+		unit->device = sdp;
+		zfcp_unit_get(unit);
+		retval = 0;
+	}
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_scsi_slave_destroy
+ *
+ * purpose:	undoes zfcp_scsi_slave_alloc: unlinks the scsi_device
+ *		from its zfcp_unit and drops the unit reference
+ *
+ * returns:	nothing
+ */
+
+static void
+zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
+{
+	struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
+
+	if (unit) {
+		sdpnt->hostdata = NULL;
+		unit->device = NULL;
+		zfcp_unit_put(unit);
+	} else {
+		ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
+				"address %p\n", sdpnt);
+	}
+}
+
+/*
+ * called from scsi midlayer to allow finetuning of a device.
+ * Enables tagged queueing (depth ZFCP_CMND_PER_LUN) if the device
+ * supports it, otherwise falls back to untagged, depth 1.
+ */
+static int
+zfcp_scsi_slave_configure(struct scsi_device *sdp)
+{
+	if (sdp->tagged_supported)
+		scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, ZFCP_CMND_PER_LUN);
+	else
+		scsi_adjust_queue_depth(sdp, 0, 1);
+	return 0;
+}
+
+/**
+ * zfcp_scsi_command_fail - set result in scsi_cmnd and call scsi_done function
+ * @scpnt: pointer to struct scsi_cmnd where result is set
+ * @result: result to be set in scpnt (e.g. DID_ERROR)
+ */
+static void
+zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
+{
+	set_host_byte(&scpnt->result, result);
+	zfcp_cmd_dbf_event_scsi("failing", scpnt);
+	/* return directly */
+	scpnt->scsi_done(scpnt);
+}
+
+/**
+ * zfcp_scsi_command_async - worker for zfcp_scsi_queuecommand and
+ *	zfcp_scsi_command_sync
+ * @adapter: adapter where scsi command is issued
+ * @unit: unit to which scsi command is sent
+ * @scpnt: scsi command to be sent
+ * @timer: timer to be started if request is successfully initiated
+ *
+ * Note: scsi_done function must be set in scpnt.
+ *
+ * Returns 0 (also when the command is completed with an error via
+ * zfcp_scsi_command_fail), or SCSI_MLQUEUE_*_BUSY to ask the midlayer
+ * to retry.
+ */
+int
+zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
+			struct scsi_cmnd *scpnt, struct timer_list *timer)
+{
+	int tmp;
+	int retval;
+
+	retval = 0;
+
+	/*
+	 * NOTE(review): this BUG_ON dereferences unit->port before the
+	 * NULL == unit check below; if unit can really be NULL this is a
+	 * NULL dereference — confirm intended invariant.
+	 */
+	BUG_ON((adapter == NULL) || (adapter != unit->port->adapter));
+	BUG_ON(scpnt->scsi_done == NULL);
+
+	if (unlikely(NULL == unit)) {
+		zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
+		goto out;
+	}
+
+	/* fail fast if error recovery gave up or the unit is not running */
+	if (unlikely(
+	      atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status) ||
+	     !atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status))) {
+		ZFCP_LOG_DEBUG("stopping SCSI I/O on unit 0x%016Lx on port "
+			       "0x%016Lx on adapter %s\n",
+			       unit->fcp_lun, unit->port->wwpn,
+			       zfcp_get_busid_by_adapter(adapter));
+		zfcp_scsi_command_fail(scpnt, DID_ERROR);
+		goto out;
+	}
+
+	/* unit blocked (recovery in progress): let the midlayer retry */
+	if (unlikely(
+	     !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))) {
+		ZFCP_LOG_DEBUG("adapter %s not ready or unit 0x%016Lx "
+			       "on port 0x%016Lx in recovery\n",
+			       zfcp_get_busid_by_unit(unit),
+			       unit->fcp_lun, unit->port->wwpn);
+		retval = SCSI_MLQUEUE_DEVICE_BUSY;
+		goto out;
+	}
+
+	tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, timer,
+					     ZFCP_REQ_AUTO_CLEANUP);
+
+	if (unlikely(tmp < 0)) {
+		ZFCP_LOG_DEBUG("error: initiation of Send FCP Cmnd failed\n");
+		retval = SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+out:
+	return retval;
+}
+
+/* scsi_done callback used by zfcp_scsi_command_sync: wakes the waiter
+ * whose completion was stashed in scpnt->SCp.ptr. */
+void
+zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
+{
+	struct completion *wait = (struct completion *) scpnt->SCp.ptr;
+	complete(wait);
+}
+
+
+/**
+ * zfcp_scsi_command_sync - send a SCSI command and wait for completion
+ * @unit: unit where command is sent to
+ * @scpnt: scsi command to be sent
+ * @timer: timer to be started if request is successfully initiated
+ * Return: 0
+ *
+ * Errors are indicated in scpnt->result
+ */
+int
+zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
+ struct timer_list *timer)
+{
+ int ret;
+ DECLARE_COMPLETION(wait);
+
+ scpnt->SCp.ptr = (void *) &wait; /* silent re-use */
+ scpnt->scsi_done = zfcp_scsi_command_sync_handler;
+ ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt, timer);
+ if (ret == 0)
+ wait_for_completion(&wait);
+
+ scpnt->SCp.ptr = NULL;
+
+ return 0;
+}
+
+/*
+ * function:	zfcp_scsi_queuecommand
+ *
+ * purpose:	enqueues a SCSI command to the specified target device
+ *
+ * returns:	0 - success, SCSI command enqueued
+ *		!0 - failure
+ */
+int
+zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
+		       void (*done) (struct scsi_cmnd *))
+{
+	struct zfcp_unit *unit;
+	struct zfcp_adapter *adapter;
+
+	/* reset the status for this request */
+	scpnt->result = 0;
+	scpnt->host_scribble = NULL;
+	scpnt->scsi_done = done;
+
+	/*
+	 * figure out adapter and target device
+	 * (stored there by zfcp_scsi_slave_alloc)
+	 */
+	adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
+	unit = (struct zfcp_unit *) scpnt->device->hostdata;
+
+	return zfcp_scsi_command_async(adapter, unit, scpnt, NULL);
+}
+
+/*
+ * function:	zfcp_unit_lookup
+ *
+ * purpose:	find the zfcp_unit with the given SCSI id/lun on an adapter
+ *		(note: the channel parameter is currently unused)
+ *
+ * returns:	pointer to matching unit, or NULL if not found
+ *
+ * context:	caller must hold zfcp_data.config_lock (read)
+ */
+static struct zfcp_unit *
+zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id,
+		 scsi_lun_t lun)
+{
+	struct zfcp_port *port;
+	struct zfcp_unit *unit, *retval = NULL;
+
+	list_for_each_entry(port, &adapter->port_list_head, list) {
+		if (id != port->scsi_id)
+			continue;
+		list_for_each_entry(unit, &port->unit_list_head, list) {
+			if (lun == unit->scsi_lun) {
+				retval = unit;
+				goto out;
+			}
+		}
+	}
+ out:
+	return retval;
+}
+
+/* Find the zfcp_port with the given SCSI id on an adapter; returns NULL
+ * if not found (channel parameter is currently unused).  Caller must
+ * hold zfcp_data.config_lock (read). */
+static struct zfcp_port *
+zfcp_port_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id)
+{
+	struct zfcp_port *port;
+
+	list_for_each_entry(port, &adapter->port_list_head, list) {
+		if (id == port->scsi_id)
+			return port;
+	}
+	return (struct zfcp_port *) NULL;
+}
+
+/*
+ * function:	zfcp_scsi_eh_abort_handler
+ *
+ * purpose:	tries to abort the specified (timed out) SCSI command
+ *
+ * note:	We do not need to care for a SCSI command which completes
+ *		normally but late during this abort routine runs.
+ *		We are allowed to return late commands to the SCSI stack.
+ *		It tracks the state of commands and will handle late commands.
+ *		(Usually, the normal completion of late commands is ignored with
+ *		respect to the running abort operation. Grep for 'done_late'
+ *		in the SCSI stacks sources.)
+ *
+ * returns:	SUCCESS	- command has been aborted and cleaned up in internal
+ *			  bookkeeping,
+ *			  SCSI stack won't be called for aborted command
+ *		FAILED	- otherwise
+ */
+int
+zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+{
+	int retval = SUCCESS;
+	struct zfcp_fsf_req *new_fsf_req, *old_fsf_req;
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
+	struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
+	struct zfcp_port *port = unit->port;
+	struct Scsi_Host *scsi_host = scpnt->device->host;
+	union zfcp_req_data *req_data = NULL;
+	unsigned long flags;
+	u32 status = 0;
+
+	/* the components of a abort_dbf record (fixed size record) */
+	u64 dbf_scsi_cmnd = (unsigned long) scpnt;
+	char dbf_opcode[ZFCP_ABORT_DBF_LENGTH];
+	wwn_t dbf_wwn = port->wwpn;
+	fcp_lun_t dbf_fcp_lun = unit->fcp_lun;
+	u64 dbf_retries = scpnt->retries;
+	u64 dbf_allowed = scpnt->allowed;
+	u64 dbf_timeout = 0;
+	u64 dbf_fsf_req = 0;
+	u64 dbf_fsf_status = 0;
+	u64 dbf_fsf_qual[2] = { 0, 0 };
+	char dbf_result[ZFCP_ABORT_DBF_LENGTH] = "##undef";
+
+	memset(dbf_opcode, 0, ZFCP_ABORT_DBF_LENGTH);
+	memcpy(dbf_opcode,
+	       scpnt->cmnd,
+	       min(scpnt->cmd_len, (unsigned char) ZFCP_ABORT_DBF_LENGTH));
+
+	ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
+		      scpnt, zfcp_get_busid_by_adapter(adapter));
+
+	/* midlayer calls eh handlers with host_lock held; drop it while
+	 * we sleep and re-take it before returning */
+	spin_unlock_irq(scsi_host->host_lock);
+
+	/*
+	 * Race condition between normal (late) completion and abort has
+	 * to be avoided.
+	 * The entirety of all accesses to scsi_req have to be atomic.
+	 * scsi_req is usually part of the fsf_req and thus we block the
+	 * release of fsf_req as long as we need to access scsi_req.
+	 */
+	write_lock_irqsave(&adapter->abort_lock, flags);
+
+	/*
+	 * Check whether command has just completed and can not be aborted.
+	 * Even if the command has just been completed late, we can access
+	 * scpnt since the SCSI stack does not release it at least until
+	 * this routine returns. (scpnt is parameter passed to this routine
+	 * and must not disappear during abort even on late completion.)
+	 */
+	req_data = (union zfcp_req_data *) scpnt->host_scribble;
+	/* DEBUG */
+	ZFCP_LOG_DEBUG("req_data=%p\n", req_data);
+	if (!req_data) {
+		ZFCP_LOG_DEBUG("late command completion overtook abort\n");
+		/*
+		 * That's it.
+		 * Do not initiate abort but return SUCCESS.
+		 */
+		write_unlock_irqrestore(&adapter->abort_lock, flags);
+		retval = SUCCESS;
+		strncpy(dbf_result, "##late1", ZFCP_ABORT_DBF_LENGTH);
+		goto out;
+	}
+
+	/* Figure out which fsf_req needs to be aborted. */
+	old_fsf_req = req_data->send_fcp_command_task.fsf_req;
+
+	dbf_fsf_req = (unsigned long) old_fsf_req;
+	dbf_timeout =
+	    (jiffies - req_data->send_fcp_command_task.start_jiffies) / HZ;
+
+	ZFCP_LOG_DEBUG("old_fsf_req=%p\n", old_fsf_req);
+	if (!old_fsf_req) {
+		write_unlock_irqrestore(&adapter->abort_lock, flags);
+		ZFCP_LOG_NORMAL("bug: no old fsf request found\n");
+		ZFCP_LOG_NORMAL("req_data:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
+			      (char *) req_data, sizeof (union zfcp_req_data));
+		ZFCP_LOG_NORMAL("scsi_cmnd:\n");
+		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
+			      (char *) scpnt, sizeof (struct scsi_cmnd));
+		retval = FAILED;
+		strncpy(dbf_result, "##bug:r", ZFCP_ABORT_DBF_LENGTH);
+		goto out;
+	}
+	old_fsf_req->data.send_fcp_command_task.scsi_cmnd = NULL;
+	/* mark old request as being aborted */
+	old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
+	/*
+	 * We have to collect all information (e.g. unit) needed by
+	 * zfcp_fsf_abort_fcp_command before calling that routine
+	 * since that routine is not allowed to access
+	 * fsf_req which it is going to abort.
+	 * This is because of we need to release fsf_req_list_lock
+	 * before calling zfcp_fsf_abort_fcp_command.
+	 * Since this lock will not be held, fsf_req may complete
+	 * late and may be released meanwhile.
+	 */
+	ZFCP_LOG_DEBUG("unit 0x%016Lx (%p)\n", unit->fcp_lun, unit);
+
+	/*
+	 * We block (call schedule)
+	 * That's why we must release the lock and enable the
+	 * interrupts before.
+	 * On the other hand we do not need the lock anymore since
+	 * all critical accesses to scsi_req are done.
+	 */
+	write_unlock_irqrestore(&adapter->abort_lock, flags);
+	/* call FSF routine which does the abort */
+	new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req,
+						 adapter, unit, 0);
+	ZFCP_LOG_DEBUG("new_fsf_req=%p\n", new_fsf_req);
+	if (!new_fsf_req) {
+		retval = FAILED;
+		ZFCP_LOG_NORMAL("error: initiation of Abort FCP Cmnd "
+				"failed\n");
+		strncpy(dbf_result, "##nores", ZFCP_ABORT_DBF_LENGTH);
+		goto out;
+	}
+
+	/* wait for completion of abort */
+	ZFCP_LOG_DEBUG("waiting for cleanup...\n");
+#if 1
+	/*
+	 * FIXME:
+	 * copying zfcp_fsf_req_wait_and_cleanup code is not really nice
+	 */
+	__wait_event(new_fsf_req->completion_wq,
+		     new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+	status = new_fsf_req->status;
+	dbf_fsf_status = new_fsf_req->qtcb->header.fsf_status;
+	/*
+	 * Ralphs special debug load provides timestamps in the FSF
+	 * status qualifier. This might be specified later if being
+	 * useful for debugging aborts.
+	 */
+	dbf_fsf_qual[0] =
+	    *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0];
+	dbf_fsf_qual[1] =
+	    *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2];
+	zfcp_fsf_req_cleanup(new_fsf_req);
+#else
+	retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req,
+					       ZFCP_UNINTERRUPTIBLE, &status);
+#endif
+	ZFCP_LOG_DEBUG("Waiting for cleanup complete, status=0x%x\n", status);
+	/* status should be valid since signals were not permitted */
+	if (status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
+		retval = SUCCESS;
+		strncpy(dbf_result, "##succ", ZFCP_ABORT_DBF_LENGTH);
+	} else if (status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
+		retval = SUCCESS;
+		strncpy(dbf_result, "##late2", ZFCP_ABORT_DBF_LENGTH);
+	} else {
+		retval = FAILED;
+		strncpy(dbf_result, "##fail", ZFCP_ABORT_DBF_LENGTH);
+	}
+
+ out:
+	/* write the collected abort trace record to the debug feature */
+	debug_event(adapter->abort_dbf, 1, &dbf_scsi_cmnd, sizeof (u64));
+	debug_event(adapter->abort_dbf, 1, &dbf_opcode, ZFCP_ABORT_DBF_LENGTH);
+	debug_event(adapter->abort_dbf, 1, &dbf_wwn, sizeof (wwn_t));
+	debug_event(adapter->abort_dbf, 1, &dbf_fcp_lun, sizeof (fcp_lun_t));
+	debug_event(adapter->abort_dbf, 1, &dbf_retries, sizeof (u64));
+	debug_event(adapter->abort_dbf, 1, &dbf_allowed, sizeof (u64));
+	debug_event(adapter->abort_dbf, 1, &dbf_timeout, sizeof (u64));
+	debug_event(adapter->abort_dbf, 1, &dbf_fsf_req, sizeof (u64));
+	debug_event(adapter->abort_dbf, 1, &dbf_fsf_status, sizeof (u64));
+	debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[0], sizeof (u64));
+	debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[1], sizeof (u64));
+	debug_text_event(adapter->abort_dbf, 1, dbf_result);
+
+	spin_lock_irq(scsi_host->host_lock);
+	return retval;
+}
+
+/*
+ * function:	zfcp_scsi_eh_device_reset_handler
+ *
+ * purpose:	tries a 'logical unit reset' first (unless the unit is
+ *		known not to support it) and falls back to 'target reset'
+ *
+ * returns:	SUCCESS or FAILED
+ */
+int
+zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
+{
+	int retval;
+	struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
+	struct Scsi_Host *scsi_host = scpnt->device->host;
+
+	/* drop midlayer host_lock; task management functions sleep */
+	spin_unlock_irq(scsi_host->host_lock);
+
+	if (!unit) {
+		ZFCP_LOG_NORMAL("bug: Tried reset for nonexistent unit\n");
+		retval = SUCCESS;
+		goto out;
+	}
+	ZFCP_LOG_NORMAL("resetting unit 0x%016Lx\n", unit->fcp_lun);
+
+	/*
+	 * If we do not know whether the unit supports 'logical unit reset'
+	 * then try 'logical unit reset' and proceed with 'target reset'
+	 * if 'logical unit reset' fails.
+	 * If the unit is known not to support 'logical unit reset' then
+	 * skip 'logical unit reset' and try 'target reset' immediately.
+	 */
+	if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
+			      &unit->status)) {
+		retval =
+		    zfcp_task_management_function(unit, FCP_LOGICAL_UNIT_RESET);
+		if (retval) {
+			ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit);
+			/* remember lack of support to skip it next time */
+			if (retval == -ENOTSUPP)
+				atomic_set_mask
+				    (ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
+				     &unit->status);
+			/* fall through and try 'target reset' next */
+		} else {
+			ZFCP_LOG_DEBUG("unit reset succeeded (unit=%p)\n",
+				       unit);
+			/* avoid 'target reset' */
+			retval = SUCCESS;
+			goto out;
+		}
+	}
+	retval = zfcp_task_management_function(unit, FCP_TARGET_RESET);
+	if (retval) {
+		ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit);
+		retval = FAILED;
+	} else {
+		ZFCP_LOG_DEBUG("target reset succeeded (unit=%p)\n", unit);
+		retval = SUCCESS;
+	}
+ out:
+	spin_lock_irq(scsi_host->host_lock);
+	return retval;
+}
+
+/*
+ * Issue an FCP task management function (@tm_flags, e.g.
+ * FCP_LOGICAL_UNIT_RESET or FCP_TARGET_RESET) to @unit and wait
+ * uninterruptibly for its completion.
+ *
+ * Returns 0 on success, -ENOMEM if the request could not be created,
+ * -EIO if the TM function failed, -ENOTSUPP if it is not supported.
+ */
+static int
+zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
+{
+	struct zfcp_adapter *adapter = unit->port->adapter;
+	int retval;
+	int status;
+	struct zfcp_fsf_req *fsf_req;
+
+	/* issue task management function */
+	fsf_req = zfcp_fsf_send_fcp_command_task_management
+		(adapter, unit, tm_flags, 0);
+	if (!fsf_req) {
+		ZFCP_LOG_INFO("error: creation of task management request "
+			      "failed for unit 0x%016Lx on port 0x%016Lx on  "
+			      "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
+			      zfcp_get_busid_by_adapter(adapter));
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	retval = zfcp_fsf_req_wait_and_cleanup(fsf_req,
+					       ZFCP_UNINTERRUPTIBLE, &status);
+	/*
+	 * check completion status of task management function
+	 * (status should always be valid since no signals permitted)
+	 */
+	if (status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
+		retval = -EIO;
+	else if (status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP)
+		retval = -ENOTSUPP;
+	else
+		retval = 0;
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_scsi_eh_bus_reset_handler
+ *
+ * purpose:	reopens the adapter and waits for error recovery to finish
+ *
+ * returns:	SUCCESS
+ */
+int
+zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
+{
+	int retval = 0;
+	struct zfcp_unit *unit;
+	struct Scsi_Host *scsi_host = scpnt->device->host;
+
+	/* drop midlayer host_lock; zfcp_erp_wait sleeps */
+	spin_unlock_irq(scsi_host->host_lock);
+
+	/* NOTE(review): unit is not checked for NULL here, unlike in the
+	 * device reset handler — confirm hostdata is always set */
+	unit = (struct zfcp_unit *) scpnt->device->hostdata;
+	ZFCP_LOG_NORMAL("bus reset because of problems with "
+			"unit 0x%016Lx\n", unit->fcp_lun);
+	zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+	zfcp_erp_wait(unit->port->adapter);
+	retval = SUCCESS;
+
+	spin_lock_irq(scsi_host->host_lock);
+	return retval;
+}
+
+/*
+ * function:	zfcp_scsi_eh_host_reset_handler
+ *
+ * purpose:	reopens the adapter and waits for error recovery to finish
+ *		(same action as the bus reset handler)
+ *
+ * returns:	SUCCESS
+ */
+int
+zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
+{
+	int retval = 0;
+	struct zfcp_unit *unit;
+	struct Scsi_Host *scsi_host = scpnt->device->host;
+
+	/* drop midlayer host_lock; zfcp_erp_wait sleeps */
+	spin_unlock_irq(scsi_host->host_lock);
+
+	/* NOTE(review): unit is not checked for NULL here — confirm
+	 * hostdata is always set */
+	unit = (struct zfcp_unit *) scpnt->device->hostdata;
+	ZFCP_LOG_NORMAL("host reset because of problems with "
+			"unit 0x%016Lx\n", unit->fcp_lun);
+	zfcp_erp_adapter_reopen(unit->port->adapter, 0);
+	zfcp_erp_wait(unit->port->adapter);
+	retval = SUCCESS;
+
+	spin_lock_irq(scsi_host->host_lock);
+	return retval;
+}
+
+/*
+ * function:	zfcp_adapter_scsi_register
+ *
+ * purpose:	allocates a Scsi_Host for @adapter, configures its limits,
+ *		links adapter and host, and registers it with the SCSI
+ *		midlayer
+ *
+ * returns:	0 on success, -EIO on allocation/registration failure
+ */
+int
+zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+{
+	int retval = 0;
+	static unsigned int unique_id = 0;
+
+	/* register adapter as SCSI host with mid layer of SCSI stack */
+	adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
+					     sizeof (struct zfcp_adapter *));
+	if (!adapter->scsi_host) {
+		ZFCP_LOG_NORMAL("error: registration with SCSI stack failed "
+				"for adapter %s ",
+				zfcp_get_busid_by_adapter(adapter));
+		retval = -EIO;
+		goto out;
+	}
+	ZFCP_LOG_DEBUG("host registered, scsi_host=%p\n", adapter->scsi_host);
+
+	/* tell the SCSI stack some characteristics of this adapter */
+	adapter->scsi_host->max_id = 1;
+	adapter->scsi_host->max_lun = 1;
+	adapter->scsi_host->max_channel = 0;
+	adapter->scsi_host->unique_id = unique_id++;	/* FIXME */
+	adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH;
+	adapter->scsi_host->transportt = zfcp_transport_template;
+	/*
+	 * Reverse mapping of the host number to avoid race condition
+	 */
+	adapter->scsi_host_no = adapter->scsi_host->host_no;
+
+	/*
+	 * save a pointer to our own adapter data structure within
+	 * hostdata field of SCSI host data structure
+	 */
+	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
+
+	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
+		/* drop the reference taken by scsi_host_alloc */
+		scsi_host_put(adapter->scsi_host);
+		retval = -EIO;
+		goto out;
+	}
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
+ out:
+	return retval;
+}
+
+/*
+ * function:	zfcp_adapter_scsi_unregister
+ *
+ * purpose:	undoes zfcp_adapter_scsi_register: removes the Scsi_Host
+ *		from the midlayer, drops its reference and clears the
+ *		adapter's registration state; no-op if never registered
+ *
+ * returns:	nothing
+ */
+void
+zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+{
+	struct Scsi_Host *shost;
+
+	shost = adapter->scsi_host;
+	if (!shost)
+		return;
+	scsi_remove_host(shost);
+	scsi_host_put(shost);
+	adapter->scsi_host = NULL;
+	adapter->scsi_host_no = 0;
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
+
+	return;
+}
+
+
+/* Arm the per-adapter SCSI error-recovery timer to fire after
+ * ZFCP_SCSI_ER_TIMEOUT, invoking zfcp_fsf_scsi_er_timeout_handler. */
+void
+zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *adapter)
+{
+	adapter->scsi_er_timer.function = zfcp_fsf_scsi_er_timeout_handler;
+	adapter->scsi_er_timer.data = (unsigned long) adapter;
+	adapter->scsi_er_timer.expires = jiffies + ZFCP_SCSI_ER_TIMEOUT;
+	add_timer(&adapter->scsi_er_timer);
+}
+
+/*
+ * Support functions for FC transport class
+ */
+/* Report the D_ID of the port matching @starget via the FC transport
+ * class attribute; -1 if the port is not (or no longer) configured. */
+static void
+zfcp_get_port_id(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)shost->hostdata[0];
+	struct zfcp_port *port;
+	unsigned long flags;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	port = zfcp_port_lookup(adapter, starget->channel, starget->id);
+	if (port)
+		fc_starget_port_id(starget) = port->d_id;
+	else
+		fc_starget_port_id(starget) = -1;
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+}
+
+/* Report the WWPN of the port matching @starget via the FC transport
+ * class attribute; -1 if the port is not (or no longer) configured. */
+static void
+zfcp_get_port_name(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)shost->hostdata[0];
+	struct zfcp_port *port;
+	unsigned long flags;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	port = zfcp_port_lookup(adapter, starget->channel, starget->id);
+	if (port)
+		fc_starget_port_name(starget) = port->wwpn;
+	else
+		fc_starget_port_name(starget) = -1;
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+}
+
+/* Report the WWNN of the port matching @starget via the FC transport
+ * class attribute; -1 if the port is not (or no longer) configured. */
+static void
+zfcp_get_node_name(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)shost->hostdata[0];
+	struct zfcp_port *port;
+	unsigned long flags;
+
+	read_lock_irqsave(&zfcp_data.config_lock, flags);
+	port = zfcp_port_lookup(adapter, starget->channel, starget->id);
+	if (port)
+		fc_starget_node_name(starget) = port->wwnn;
+	else
+		fc_starget_node_name(starget) = -1;
+	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+}
+
+/* FC transport class hooks: expose target port id / port name /
+ * node name as sysfs attributes via the functions above. */
+struct fc_function_template zfcp_transport_functions = {
+	.get_starget_port_id = zfcp_get_port_id,
+	.get_starget_port_name = zfcp_get_port_name,
+	.get_starget_node_name = zfcp_get_node_name,
+	.show_starget_port_id = 1,
+	.show_starget_port_name = 1,
+	.show_starget_node_name = 1,
+};
+
+/**
+ * ZFCP_DEFINE_SCSI_ATTR
+ * @_name:   name of show attribute
+ * @_format: format string
+ * @_value:  value to print
+ *
+ * Generates a read-only sysfs show function plus DEVICE_ATTR for a unit.
+ * The generated function maps the scsi_device back to its zfcp_unit
+ * (stored in hostdata by zfcp_scsi_slave_alloc).
+ */
+#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value)                    \
+static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev,        \
+                                              char *buf)                 \
+{                                                                        \
+        struct scsi_device *sdev;                                        \
+        struct zfcp_unit *unit;                                          \
+                                                                         \
+        sdev = to_scsi_device(dev);                                      \
+        unit = sdev->hostdata;                                           \
+        return sprintf(buf, _format, _value);                            \
+}                                                                        \
+                                                                         \
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
+
+ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", zfcp_get_busid_by_unit(unit));
+ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
+ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
+
+/* per-scsi_device sysfs attributes, hooked into the host template */
+static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
+	&dev_attr_fcp_lun,
+	&dev_attr_wwpn,
+	&dev_attr_hba_id,
+	NULL
+};
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
new file mode 100644
index 000000000000..ff28ade1dfc7
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -0,0 +1,298 @@
+/*
+ * linux/drivers/s390/scsi/zfcp_sysfs_adapter.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * sysfs adapter related routines
+ *
+ * (C) Copyright IBM Corp. 2003, 2004
+ *
+ * Authors:
+ * Martin Peschke <mpeschke@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_SYSFS_ADAPTER_C_REVISION "$Revision: 1.38 $"
+
+#include "zfcp_ext.h"
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
+
+/* human-readable names, indexed by adapter->fc_topology */
+static const char fc_topologies[5][25] = {
+ "<error>",
+ "point-to-point",
+ "fabric",
+ "arbitrated loop",
+ "fabric (virt. adapter)"
+};
+
+/**
+ * ZFCP_DEFINE_ADAPTER_ATTR
+ * @_name: name of show attribute
+ * @_format: format string
+ * @_value: value to print
+ *
+ * Generates a read-only sysfs show attribute for an adapter; the
+ * adapter is recovered from the device's driver data.
+ */
+#define ZFCP_DEFINE_ADAPTER_ATTR(_name, _format, _value) \
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
+ char *buf) \
+{ \
+ struct zfcp_adapter *adapter; \
+ \
+ adapter = dev_get_drvdata(dev); \
+ return sprintf(buf, _format, _value); \
+} \
+ \
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
+
+/* read-only adapter attributes exported under the adapter's sysfs dir */
+ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
+ZFCP_DEFINE_ADAPTER_ATTR(wwnn, "0x%016llx\n", adapter->wwnn);
+ZFCP_DEFINE_ADAPTER_ATTR(wwpn, "0x%016llx\n", adapter->wwpn);
+ZFCP_DEFINE_ADAPTER_ATTR(s_id, "0x%06x\n", adapter->s_id);
+ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
+ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
+ZFCP_DEFINE_ADAPTER_ATTR(fc_link_speed, "%d Gb/s\n", adapter->fc_link_speed);
+ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class);
+ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n",
+ fc_topologies[adapter->fc_topology]);
+ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
+ adapter->hardware_version);
+ZFCP_DEFINE_ADAPTER_ATTR(serial_number, "%17s\n", adapter->serial_number);
+ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no);
+ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
+ (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
+
+/**
+ * zfcp_sysfs_port_add_store - add a port to sysfs tree
+ * @dev: pointer to belonging device
+ * @buf: pointer to input buffer
+ * @count: number of bytes in buffer
+ *
+ * Store function of the "port_add" attribute of an adapter.
+ * Parses a WWPN from @buf, enqueues the port and triggers error
+ * recovery to open it.  Returns @count on success, -EBUSY while the
+ * adapter is being removed, -EINVAL on a malformed WWPN.
+ */
+static ssize_t
+zfcp_sysfs_port_add_store(struct device *dev, const char *buf, size_t count)
+{
+ wwn_t wwpn;
+ char *endp;
+ struct zfcp_adapter *adapter;
+ struct zfcp_port *port;
+ int retval = -EINVAL;
+
+ /* config_sema serializes all configuration changes */
+ down(&zfcp_data.config_sema);
+
+ adapter = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ wwpn = simple_strtoull(buf, &endp, 0);
+ /* tolerate at most one trailing character (e.g. the newline) */
+ if ((endp + 1) < (buf + count))
+ goto out;
+
+ port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
+ if (!port)
+ goto out;
+
+ retval = 0;
+
+ /* open the new port and wait for recovery to complete */
+ zfcp_erp_port_reopen(port, 0);
+ zfcp_erp_wait(port->adapter);
+ zfcp_port_put(port);
+ out:
+ up(&zfcp_data.config_sema);
+ return retval ? retval : (ssize_t) count;
+}
+
+static DEVICE_ATTR(port_add, S_IWUSR, NULL, zfcp_sysfs_port_add_store);
+
+/**
+ * zfcp_sysfs_port_remove_store - remove a port from sysfs tree
+ * @dev: pointer to belonging device
+ * @buf: pointer to input buffer
+ * @count: number of bytes in buffer
+ *
+ * Store function of the "port_remove" attribute of an adapter.
+ * Parses a WWPN from @buf, shuts the matching port down and dequeues
+ * it.  Returns @count on success, -EBUSY while the adapter is being
+ * removed, -EINVAL on a malformed WWPN, -ENXIO when no idle port with
+ * that WWPN exists.
+ */
+static ssize_t
+zfcp_sysfs_port_remove_store(struct device *dev, const char *buf, size_t count)
+{
+ struct zfcp_adapter *adapter;
+ struct zfcp_port *port;
+ wwn_t wwpn;
+ char *endp;
+ int retval = 0;
+
+ down(&zfcp_data.config_sema);
+
+ adapter = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ wwpn = simple_strtoull(buf, &endp, 0);
+ /* tolerate at most one trailing character (e.g. the newline) */
+ if ((endp + 1) < (buf + count)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Only remove ports nobody else holds a reference to.  Marking
+ * REMOVE and moving the port to port_remove_lh under the write
+ * lock keeps new lookups from finding it.
+ */
+ write_lock_irq(&zfcp_data.config_lock);
+ port = zfcp_get_port_by_wwpn(adapter, wwpn);
+ if (port && (atomic_read(&port->refcount) == 0)) {
+ zfcp_port_get(port);
+ atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
+ list_move(&port->list, &adapter->port_remove_lh);
+ }
+ else {
+ port = NULL;
+ }
+ write_unlock_irq(&zfcp_data.config_lock);
+
+ if (!port) {
+ retval = -ENXIO;
+ goto out;
+ }
+
+ /* shut the port down, wait for recovery, then drop and dequeue */
+ zfcp_erp_port_shutdown(port, 0);
+ zfcp_erp_wait(adapter);
+ zfcp_port_put(port);
+ zfcp_port_dequeue(port);
+ out:
+ up(&zfcp_data.config_sema);
+ return retval ? retval : (ssize_t) count;
+}
+
+static DEVICE_ATTR(port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store);
+
+/**
+ * zfcp_sysfs_adapter_failed_store - failed state of adapter
+ * @dev: pointer to belonging device
+ * @buf: pointer to input buffer
+ * @count: number of bytes in buffer
+ *
+ * Store function of the "failed" attribute of an adapter.
+ * If a "0" gets written to "failed", error recovery will be
+ * started for the belonging adapter.  Any value other than "0"
+ * yields -EINVAL; -EBUSY is returned while the adapter is being
+ * removed.
+ */
+static ssize_t
+zfcp_sysfs_adapter_failed_store(struct device *dev,
+ const char *buf, size_t count)
+{
+ struct zfcp_adapter *adapter;
+ unsigned int val;
+ char *endp;
+ int retval = 0;
+
+ down(&zfcp_data.config_sema);
+
+ adapter = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ /* only "0" (plus optional trailing newline) is accepted */
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val != 0)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ /* clear the failed condition and rerun adapter recovery */
+ zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
+ ZFCP_SET);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+ zfcp_erp_wait(adapter);
+ out:
+ up(&zfcp_data.config_sema);
+ return retval ? retval : (ssize_t) count;
+}
+
+/**
+ * zfcp_sysfs_adapter_failed_show - failed state of adapter
+ * @dev: pointer to belonging device
+ * @buf: pointer to output buffer
+ *
+ * Show function of "failed" attribute of adapter. Will be
+ * "0" if adapter is working, otherwise "1".
+ */
+static ssize_t
+zfcp_sysfs_adapter_failed_show(struct device *dev, char *buf)
+{
+ struct zfcp_adapter *adapter;
+
+ adapter = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_adapter_failed_show,
+ zfcp_sysfs_adapter_failed_store);
+
+/* all adapter attributes, registered/removed as one sysfs group */
+static struct attribute *zfcp_adapter_attrs[] = {
+ &dev_attr_failed.attr,
+ &dev_attr_in_recovery.attr,
+ &dev_attr_port_remove.attr,
+ &dev_attr_port_add.attr,
+ &dev_attr_wwnn.attr,
+ &dev_attr_wwpn.attr,
+ &dev_attr_s_id.attr,
+ &dev_attr_card_version.attr,
+ &dev_attr_lic_version.attr,
+ &dev_attr_fc_link_speed.attr,
+ &dev_attr_fc_service_class.attr,
+ &dev_attr_fc_topology.attr,
+ &dev_attr_scsi_host_no.attr,
+ &dev_attr_status.attr,
+ &dev_attr_hardware_version.attr,
+ &dev_attr_serial_number.attr,
+ NULL
+};
+
+static struct attribute_group zfcp_adapter_attr_group = {
+ .attrs = zfcp_adapter_attrs,
+};
+
+/**
+ * zfcp_sysfs_adapter_create_files - create sysfs adapter files
+ * @dev: pointer to belonging device
+ *
+ * Create all attributes of the sysfs representation of an adapter.
+ * Returns the sysfs_create_group() result (0 on success).
+ */
+int
+zfcp_sysfs_adapter_create_files(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &zfcp_adapter_attr_group);
+}
+
+/**
+ * zfcp_sysfs_adapter_remove_files - remove sysfs adapter files
+ * @dev: pointer to belonging device
+ *
+ * Remove all attributes of the sysfs representation of an adapter.
+ */
+void
+zfcp_sysfs_adapter_remove_files(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &zfcp_adapter_attr_group);
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_driver.c b/drivers/s390/scsi/zfcp_sysfs_driver.c
new file mode 100644
index 000000000000..77a5e2dcc0ff
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs_driver.c
@@ -0,0 +1,135 @@
+/*
+ * linux/drivers/s390/scsi/zfcp_sysfs_driver.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * sysfs driver related routines
+ *
+ * (C) Copyright IBM Corp. 2003, 2004
+ *
+ * Authors:
+ * Martin Peschke <mpeschke@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_SYSFS_DRIVER_C_REVISION "$Revision: 1.17 $"
+
+#include "zfcp_ext.h"
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
+
+/**
+ * ZFCP_DEFINE_DRIVER_ATTR - define for all loglevels sysfs attributes
+ * @_name: name of attribute
+ * @_define: name of ZFCP loglevel define
+ *
+ * Generates store and show functions for a sysfs loglevel attribute
+ * of the zfcp driver.  Each log area occupies a 4-bit nibble of the
+ * global loglevel word; valid levels are 0..3.
+ */
+#define ZFCP_DEFINE_DRIVER_ATTR(_name, _define) \
+static ssize_t zfcp_sysfs_loglevel_##_name##_store(struct device_driver *drv, \
+ const char *buf, \
+ size_t count) \
+{ \
+ unsigned int loglevel; \
+ unsigned int new_loglevel; \
+ char *endp; \
+ \
+ new_loglevel = simple_strtoul(buf, &endp, 0); \
+ if ((endp + 1) < (buf + count)) \
+ return -EINVAL; \
+ if (new_loglevel > 3) \
+ return -EINVAL; \
+ down(&zfcp_data.config_sema); \
+ loglevel = atomic_read(&zfcp_data.loglevel); \
+ loglevel &= ~((unsigned int) 0xf << (ZFCP_LOG_AREA_##_define << 2)); \
+ loglevel |= new_loglevel << (ZFCP_LOG_AREA_##_define << 2); \
+ atomic_set(&zfcp_data.loglevel, loglevel); \
+ up(&zfcp_data.config_sema); \
+ return count; \
+} \
+ \
+static ssize_t zfcp_sysfs_loglevel_##_name##_show(struct device_driver *dev, \
+ char *buf) \
+{ \
+ return sprintf(buf,"%d\n", (unsigned int) \
+ ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_##_define)); \
+} \
+ \
+static DRIVER_ATTR(loglevel_##_name, S_IWUSR | S_IRUGO, \
+ zfcp_sysfs_loglevel_##_name##_show, \
+ zfcp_sysfs_loglevel_##_name##_store);
+
+/* one loglevel attribute per log area */
+ZFCP_DEFINE_DRIVER_ATTR(other, OTHER);
+ZFCP_DEFINE_DRIVER_ATTR(scsi, SCSI);
+ZFCP_DEFINE_DRIVER_ATTR(fsf, FSF);
+ZFCP_DEFINE_DRIVER_ATTR(config, CONFIG);
+ZFCP_DEFINE_DRIVER_ATTR(cio, CIO);
+ZFCP_DEFINE_DRIVER_ATTR(qdio, QDIO);
+ZFCP_DEFINE_DRIVER_ATTR(erp, ERP);
+ZFCP_DEFINE_DRIVER_ATTR(fc, FC);
+
+/* read-only "version" attribute reporting the driver version string */
+static ssize_t zfcp_sysfs_version_show(struct device_driver *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", zfcp_data.driver_version);
+}
+
+static DRIVER_ATTR(version, S_IRUGO, zfcp_sysfs_version_show, NULL);
+
+/* all driver-level attributes, registered/removed as one sysfs group */
+static struct attribute *zfcp_driver_attrs[] = {
+ &driver_attr_loglevel_other.attr,
+ &driver_attr_loglevel_scsi.attr,
+ &driver_attr_loglevel_fsf.attr,
+ &driver_attr_loglevel_config.attr,
+ &driver_attr_loglevel_cio.attr,
+ &driver_attr_loglevel_qdio.attr,
+ &driver_attr_loglevel_erp.attr,
+ &driver_attr_loglevel_fc.attr,
+ &driver_attr_version.attr,
+ NULL
+};
+
+static struct attribute_group zfcp_driver_attr_group = {
+ .attrs = zfcp_driver_attrs,
+};
+
+/**
+ * zfcp_sysfs_driver_create_files - create sysfs driver files
+ * @drv: pointer to belonging device driver
+ *
+ * Create all sysfs attributes of the zfcp device driver.
+ * Returns the sysfs_create_group() result (0 on success).
+ */
+int
+zfcp_sysfs_driver_create_files(struct device_driver *drv)
+{
+ return sysfs_create_group(&drv->kobj, &zfcp_driver_attr_group);
+}
+
+/**
+ * zfcp_sysfs_driver_remove_files - remove sysfs driver files
+ * @drv: pointer to belonging device driver
+ *
+ * Remove all sysfs attributes of the zfcp device driver.
+ */
+void
+zfcp_sysfs_driver_remove_files(struct device_driver *drv)
+{
+ sysfs_remove_group(&drv->kobj, &zfcp_driver_attr_group);
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c
new file mode 100644
index 000000000000..6aafb2abb4b5
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs_port.c
@@ -0,0 +1,311 @@
+/*
+ * linux/drivers/s390/scsi/zfcp_sysfs_port.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * sysfs port related routines
+ *
+ * (C) Copyright IBM Corp. 2003, 2004
+ *
+ * Authors:
+ * Martin Peschke <mpeschke@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ * Volker Sameske <sameske@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_SYSFS_PORT_C_REVISION "$Revision: 1.47 $"
+
+#include "zfcp_ext.h"
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
+
+/**
+ * zfcp_sysfs_port_release - gets called when a struct device port is released
+ * @dev: pointer to belonging device
+ *
+ * NOTE(review): frees the struct device itself; assumes it was
+ * allocated with kmalloc by the port setup code — confirm against
+ * the allocation site.
+ */
+void
+zfcp_sysfs_port_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+/**
+ * ZFCP_DEFINE_PORT_ATTR
+ * @_name: name of show attribute
+ * @_format: format string
+ * @_value: value to print
+ *
+ * Generates a read-only sysfs show attribute for a port; the port is
+ * recovered from the device's driver data.
+ */
+#define ZFCP_DEFINE_PORT_ATTR(_name, _format, _value) \
+static ssize_t zfcp_sysfs_port_##_name##_show(struct device *dev, \
+ char *buf) \
+{ \
+ struct zfcp_port *port; \
+ \
+ port = dev_get_drvdata(dev); \
+ return sprintf(buf, _format, _value); \
+} \
+ \
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_port_##_name##_show, NULL);
+
+/* read-only port attributes exported under the port's sysfs dir */
+ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status));
+ZFCP_DEFINE_PORT_ATTR(wwnn, "0x%016llx\n", port->wwnn);
+ZFCP_DEFINE_PORT_ATTR(d_id, "0x%06x\n", port->d_id);
+ZFCP_DEFINE_PORT_ATTR(scsi_id, "0x%x\n", port->scsi_id);
+ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask
+ (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status));
+ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask
+ (ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status));
+
+/**
+ * zfcp_sysfs_unit_add_store - add a unit to sysfs tree
+ * @dev: pointer to belonging device
+ * @buf: pointer to input buffer
+ * @count: number of bytes in buffer
+ *
+ * Store function of the "unit_add" attribute of a port.
+ * Parses an FCP LUN from @buf, enqueues the unit and triggers error
+ * recovery to open it.  Returns @count on success, -EBUSY while the
+ * port is being removed, -EINVAL on a malformed LUN.
+ */
+static ssize_t
+zfcp_sysfs_unit_add_store(struct device *dev, const char *buf, size_t count)
+{
+ fcp_lun_t fcp_lun;
+ char *endp;
+ struct zfcp_port *port;
+ struct zfcp_unit *unit;
+ int retval = -EINVAL;
+
+ /* config_sema serializes all configuration changes */
+ down(&zfcp_data.config_sema);
+
+ port = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ fcp_lun = simple_strtoull(buf, &endp, 0);
+ /* tolerate at most one trailing character (e.g. the newline) */
+ if ((endp + 1) < (buf + count))
+ goto out;
+
+ unit = zfcp_unit_enqueue(port, fcp_lun);
+ if (!unit)
+ goto out;
+
+ retval = 0;
+
+ /* open the new unit and wait for recovery to complete */
+ zfcp_erp_unit_reopen(unit, 0);
+ zfcp_erp_wait(unit->port->adapter);
+ zfcp_unit_put(unit);
+ out:
+ up(&zfcp_data.config_sema);
+ return retval ? retval : (ssize_t) count;
+}
+
+static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
+
+/**
+ * zfcp_sysfs_unit_remove_store - remove a unit from sysfs tree
+ * @dev: pointer to belonging device
+ * @buf: pointer to input buffer
+ * @count: number of bytes in buffer
+ *
+ * Store function of the "unit_remove" attribute of a port.
+ * Parses an FCP LUN from @buf, shuts the matching unit down and
+ * dequeues it.  Returns @count on success, -EBUSY while the port is
+ * being removed, -EINVAL on a malformed LUN, -ENXIO when no idle
+ * unit with that LUN exists.
+ */
+static ssize_t
+zfcp_sysfs_unit_remove_store(struct device *dev, const char *buf, size_t count)
+{
+ struct zfcp_port *port;
+ struct zfcp_unit *unit;
+ fcp_lun_t fcp_lun;
+ char *endp;
+ int retval = 0;
+
+ down(&zfcp_data.config_sema);
+
+ port = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ fcp_lun = simple_strtoull(buf, &endp, 0);
+ /* tolerate at most one trailing character (e.g. the newline) */
+ if ((endp + 1) < (buf + count)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Only remove units nobody else holds a reference to.  Marking
+ * REMOVE and moving the unit to unit_remove_lh under the write
+ * lock keeps new lookups from finding it.
+ */
+ write_lock_irq(&zfcp_data.config_lock);
+ unit = zfcp_get_unit_by_lun(port, fcp_lun);
+ if (unit && (atomic_read(&unit->refcount) == 0)) {
+ zfcp_unit_get(unit);
+ atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
+ list_move(&unit->list, &port->unit_remove_lh);
+ }
+ else {
+ unit = NULL;
+ }
+ write_unlock_irq(&zfcp_data.config_lock);
+
+ if (!unit) {
+ retval = -ENXIO;
+ goto out;
+ }
+
+ /* shut the unit down, wait for recovery, then drop and dequeue */
+ zfcp_erp_unit_shutdown(unit, 0);
+ zfcp_erp_wait(unit->port->adapter);
+ zfcp_unit_put(unit);
+ zfcp_unit_dequeue(unit);
+ out:
+ up(&zfcp_data.config_sema);
+ return retval ? retval : (ssize_t) count;
+}
+
+static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
+
+/**
+ * zfcp_sysfs_port_failed_store - failed state of port
+ * @dev: pointer to belonging device
+ * @buf: pointer to input buffer
+ * @count: number of bytes in buffer
+ *
+ * Store function of the "failed" attribute of a port.
+ * If a "0" gets written to "failed", error recovery will be
+ * started for the belonging port.  Any value other than "0" yields
+ * -EINVAL; -EBUSY is returned while the port is being removed.
+ */
+static ssize_t
+zfcp_sysfs_port_failed_store(struct device *dev, const char *buf, size_t count)
+{
+ struct zfcp_port *port;
+ unsigned int val;
+ char *endp;
+ int retval = 0;
+
+ down(&zfcp_data.config_sema);
+
+ port = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ /* only "0" (plus optional trailing newline) is accepted */
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val != 0)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ /* clear the failed condition and rerun port recovery */
+ zfcp_erp_modify_port_status(port, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+ zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+ zfcp_erp_wait(port->adapter);
+ out:
+ up(&zfcp_data.config_sema);
+ return retval ? retval : (ssize_t) count;
+}
+
+/**
+ * zfcp_sysfs_port_failed_show - failed state of port
+ * @dev: pointer to belonging device
+ * @buf: pointer to output buffer
+ *
+ * Show function of "failed" attribute of port. Will be
+ * "0" if port is working, otherwise "1".
+ */
+static ssize_t
+zfcp_sysfs_port_failed_show(struct device *dev, char *buf)
+{
+ struct zfcp_port *port;
+
+ port = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_port_failed_show,
+ zfcp_sysfs_port_failed_store);
+
+/**
+ * zfcp_port_common_attrs
+ * sysfs attributes that are common for all kind of fc ports.
+ */
+static struct attribute *zfcp_port_common_attrs[] = {
+ &dev_attr_failed.attr,
+ &dev_attr_in_recovery.attr,
+ &dev_attr_status.attr,
+ &dev_attr_wwnn.attr,
+ &dev_attr_d_id.attr,
+ &dev_attr_access_denied.attr,
+ NULL
+};
+
+static struct attribute_group zfcp_port_common_attr_group = {
+ .attrs = zfcp_port_common_attrs,
+};
+
+/**
+ * zfcp_port_no_ns_attrs
+ * sysfs attributes not to be used for nameserver (well-known
+ * address) ports; these only make sense for ports carrying units.
+ */
+static struct attribute *zfcp_port_no_ns_attrs[] = {
+ &dev_attr_unit_add.attr,
+ &dev_attr_unit_remove.attr,
+ &dev_attr_scsi_id.attr,
+ NULL
+};
+
+static struct attribute_group zfcp_port_no_ns_attr_group = {
+ .attrs = zfcp_port_no_ns_attrs,
+};
+
+/**
+ * zfcp_sysfs_port_create_files - create sysfs port files
+ * @dev: pointer to belonging device
+ * @flags: port status flags; ZFCP_STATUS_PORT_WKA suppresses the
+ *         unit-related attribute group
+ *
+ * Create all attributes of the sysfs representation of a port.
+ * Returns 0 on success; on failure of the second group the first
+ * group is rolled back.
+ */
+int
+zfcp_sysfs_port_create_files(struct device *dev, u32 flags)
+{
+ int retval;
+
+ retval = sysfs_create_group(&dev->kobj, &zfcp_port_common_attr_group);
+
+ /* well-known-address ports get only the common attributes */
+ if ((flags & ZFCP_STATUS_PORT_WKA) || retval)
+ return retval;
+
+ retval = sysfs_create_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
+ if (retval)
+ sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
+
+ return retval;
+}
+
+/**
+ * zfcp_sysfs_port_remove_files - remove sysfs port files
+ * @dev: pointer to belonging device
+ * @flags: port status flags; must match those used at creation time
+ *
+ * Remove all attributes of the sysfs representation of a port.
+ */
+void
+zfcp_sysfs_port_remove_files(struct device *dev, u32 flags)
+{
+ sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
+ if (!(flags & ZFCP_STATUS_PORT_WKA))
+ sysfs_remove_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_unit.c b/drivers/s390/scsi/zfcp_sysfs_unit.c
new file mode 100644
index 000000000000..87c0b461831f
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs_unit.c
@@ -0,0 +1,179 @@
+/*
+ * linux/drivers/s390/scsi/zfcp_sysfs_unit.c
+ *
+ * FCP adapter driver for IBM eServer zSeries
+ *
+ * sysfs unit related routines
+ *
+ * (C) Copyright IBM Corp. 2003, 2004
+ *
+ * Authors:
+ * Martin Peschke <mpeschke@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Andreas Herrmann <aherrman@de.ibm.com>
+ * Volker Sameske <sameske@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define ZFCP_SYSFS_UNIT_C_REVISION "$Revision: 1.30 $"
+
+#include "zfcp_ext.h"
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
+
+/**
+ * zfcp_sysfs_unit_release - gets called when a struct device unit is released
+ * @dev: pointer to belonging device
+ *
+ * NOTE(review): frees the struct device itself; assumes it was
+ * allocated with kmalloc by the unit setup code — confirm against
+ * the allocation site.
+ */
+void
+zfcp_sysfs_unit_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+/**
+ * ZFCP_DEFINE_UNIT_ATTR
+ * @_name: name of show attribute
+ * @_format: format string
+ * @_value: value to print
+ *
+ * Generates a read-only sysfs show attribute for a unit; the unit is
+ * recovered from the device's driver data.
+ */
+#define ZFCP_DEFINE_UNIT_ATTR(_name, _format, _value) \
+static ssize_t zfcp_sysfs_unit_##_name##_show(struct device *dev, \
+ char *buf) \
+{ \
+ struct zfcp_unit *unit; \
+ \
+ unit = dev_get_drvdata(dev); \
+ return sprintf(buf, _format, _value); \
+} \
+ \
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_unit_##_name##_show, NULL);
+
+/* read-only unit attributes exported under the unit's sysfs dir */
+ZFCP_DEFINE_UNIT_ATTR(status, "0x%08x\n", atomic_read(&unit->status));
+ZFCP_DEFINE_UNIT_ATTR(scsi_lun, "0x%x\n", unit->scsi_lun);
+ZFCP_DEFINE_UNIT_ATTR(in_recovery, "%d\n", atomic_test_mask
+ (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status));
+ZFCP_DEFINE_UNIT_ATTR(access_denied, "%d\n", atomic_test_mask
+ (ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status));
+ZFCP_DEFINE_UNIT_ATTR(access_shared, "%d\n", atomic_test_mask
+ (ZFCP_STATUS_UNIT_SHARED, &unit->status));
+ZFCP_DEFINE_UNIT_ATTR(access_readonly, "%d\n", atomic_test_mask
+ (ZFCP_STATUS_UNIT_READONLY, &unit->status));
+
+/**
+ * zfcp_sysfs_unit_failed_store - failed state of unit
+ * @dev: pointer to belonging device
+ * @buf: pointer to input buffer
+ * @count: number of bytes in buffer
+ *
+ * Store function of the "failed" attribute of a unit.
+ * If a "0" gets written to "failed", error recovery will be
+ * started for the belonging unit.  Any value other than "0" yields
+ * -EINVAL; -EBUSY is returned while the unit is being removed.
+ */
+static ssize_t
+zfcp_sysfs_unit_failed_store(struct device *dev, const char *buf, size_t count)
+{
+ struct zfcp_unit *unit;
+ unsigned int val;
+ char *endp;
+ int retval = 0;
+
+ down(&zfcp_data.config_sema);
+ unit = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ /* only "0" (plus optional trailing newline) is accepted */
+ val = simple_strtoul(buf, &endp, 0);
+ if (((endp + 1) < (buf + count)) || (val != 0)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ /* clear the failed condition and rerun unit recovery */
+ zfcp_erp_modify_unit_status(unit, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+ zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED);
+ zfcp_erp_wait(unit->port->adapter);
+ out:
+ up(&zfcp_data.config_sema);
+ return retval ? retval : (ssize_t) count;
+}
+
+/**
+ * zfcp_sysfs_unit_failed_show - failed state of unit
+ * @dev: pointer to belonging device
+ * @buf: pointer to output buffer
+ *
+ * Show function of "failed" attribute of unit. Will be
+ * "0" if unit is working, otherwise "1".
+ */
+static ssize_t
+zfcp_sysfs_unit_failed_show(struct device *dev, char *buf)
+{
+ struct zfcp_unit *unit;
+
+ unit = dev_get_drvdata(dev);
+ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_unit_failed_show,
+ zfcp_sysfs_unit_failed_store);
+
+/* all unit attributes, registered/removed as one sysfs group */
+static struct attribute *zfcp_unit_attrs[] = {
+ &dev_attr_scsi_lun.attr,
+ &dev_attr_failed.attr,
+ &dev_attr_in_recovery.attr,
+ &dev_attr_status.attr,
+ &dev_attr_access_denied.attr,
+ &dev_attr_access_shared.attr,
+ &dev_attr_access_readonly.attr,
+ NULL
+};
+
+static struct attribute_group zfcp_unit_attr_group = {
+ .attrs = zfcp_unit_attrs,
+};
+
+/**
+ * zfcp_sysfs_unit_create_files - create sysfs unit files
+ * @dev: pointer to belonging device
+ *
+ * Create all attributes of the sysfs representation of a unit.
+ * Returns the sysfs_create_group() result (0 on success).
+ */
+int
+zfcp_sysfs_unit_create_files(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &zfcp_unit_attr_group);
+}
+
+/**
+ * zfcp_sysfs_unit_remove_files - remove sysfs unit files
+ * @dev: pointer to belonging device
+ *
+ * Remove all attributes of the sysfs representation of a unit.
+ */
+void
+zfcp_sysfs_unit_remove_files(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &zfcp_unit_attr_group);
+}
+
+#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
new file mode 100644
index 000000000000..87c2db1bd4f5
--- /dev/null
+++ b/drivers/s390/sysinfo.c
@@ -0,0 +1,347 @@
+/*
+ * drivers/s390/sysinfo.c
+ *
+ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <asm/ebcdic.h>
+
+/*
+ * The following structures mirror the response blocks the STSI
+ * instruction stores for each (function code, selector 1, selector 2)
+ * combination -- struct sysinfo_F_S1_S2 corresponds to STSI F.S1.S2.
+ * Field offsets and sizes are dictated by the hardware interface and
+ * must not be changed; the reserved_* members are layout padding.
+ * All text fields arrive in EBCDIC and are converted in place by the
+ * stsi_* wrappers below.
+ */
+
+/* STSI 1.1.1: basic machine identification */
+struct sysinfo_1_1_1
+{
+ char reserved_0[32];
+ char manufacturer[16];
+ char type[4];
+ char reserved_1[12];
+ char model[16];
+ char sequence[16];
+ char plant[4];
+};
+
+/* STSI 1.2.1: description of one machine CPU */
+struct sysinfo_1_2_1
+{
+ char reserved_0[80];
+ char sequence[16];
+ char plant[4];
+ char reserved_1[2];
+ unsigned short cpu_address;
+};
+
+/* STSI 1.2.2: machine CPU counts and capability */
+struct sysinfo_1_2_2
+{
+ char reserved_0[32];
+ unsigned int capability;
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ /* trailing variable-length array (pre-C99 [0] idiom): one adjustment
+ * factor per n-way configuration from 2-way up to cpus_total-way */
+ unsigned short adjustment[0];
+};
+
+/* STSI 2.2.1: description of one LPAR CPU */
+struct sysinfo_2_2_1
+{
+ char reserved_0[80];
+ char sequence[16];
+ char plant[4];
+ unsigned short cpu_id;
+ unsigned short cpu_address;
+};
+
+/* STSI 2.2.2: description of the logical partition */
+struct sysinfo_2_2_2
+{
+ char reserved_0[32];
+ unsigned short lpar_number;
+ char reserved_1;
+ unsigned char characteristics;
+ /* bit masks for the characteristics byte above */
+ #define LPAR_CHAR_DEDICATED (1 << 7)
+ #define LPAR_CHAR_SHARED (1 << 6)
+ #define LPAR_CHAR_LIMITED (1 << 5)
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ char name[8];
+ unsigned int caf; /* capability adjustment factor */
+ char reserved_2[16];
+ unsigned short cpus_dedicated;
+ unsigned short cpus_shared;
+};
+
+/* STSI 3.2.2: up to 8 virtual machine descriptions; count says how
+ * many vm[] entries are valid */
+struct sysinfo_3_2_2
+{
+ char reserved_0[31];
+ unsigned char count;
+ struct
+ {
+ char reserved_0[4];
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ char name[8];
+ unsigned int caf;
+ char cpi[16]; /* control program identification */
+ char reserved_1[24];
+
+ } vm[8];
+};
+
+/* One buffer (a zeroed page, see proc_read_sysinfo) reused for every
+ * STSI variant */
+union s390_sysinfo
+{
+ struct sysinfo_1_1_1 sysinfo_1_1_1;
+ struct sysinfo_1_2_1 sysinfo_1_2_1;
+ struct sysinfo_1_2_2 sysinfo_1_2_2;
+ struct sysinfo_2_2_1 sysinfo_2_2_1;
+ struct sysinfo_2_2_2 sysinfo_2_2_2;
+ struct sysinfo_3_2_2 sysinfo_3_2_2;
+};
+
+/*
+ * stsi - issue the STSI (Store System Information) instruction.
+ * @sysinfo: buffer the hardware fills in (ignored for fc 0)
+ * @fc:      function code; 0 queries the highest supported level
+ * @sel1:    selector 1, chooses the info block within the level
+ * @sel2:    selector 2
+ *
+ * Registers 0 and 1 are loaded with (fc << 28) | sel1 and sel2 as the
+ * instruction interface requires.  The fixup/__ex_table sections turn
+ * a program check on STSI (instruction not available) into condition
+ * code 3.  Returns -1 when the condition code was non-zero, otherwise
+ * the value left in register 0 (for fc 0 this carries the highest
+ * supported function code in its high-order bits -- see stsi_0()).
+ */
+static inline int stsi (void *sysinfo,
+ int fc, int sel1, int sel2)
+{
+ int cc, retv;
+
+/* 31-bit variant: 4-byte exception-table entries, bras-based fixup */
+#ifndef CONFIG_ARCH_S390X
+ __asm__ __volatile__ ( "lr\t0,%2\n"
+ "\tlr\t1,%3\n"
+ "\tstsi\t0(%4)\n"
+ "0:\tipm\t%0\n"
+ "\tsrl\t%0,28\n"
+ "1:lr\t%1,0\n"
+ ".section .fixup,\"ax\"\n"
+ "2:\tlhi\t%0,3\n"
+ "\tbras\t1,3f\n"
+ "\t.long 1b\n"
+ "3:\tl\t1,0(1)\n"
+ "\tbr\t1\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ "\t.align 4\n"
+ "\t.long 0b,2b\n"
+ ".previous\n"
+ : "=d" (cc), "=d" (retv)
+ : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
+ : "cc", "memory", "0", "1" );
+#else
+/* 64-bit variant: 8-byte exception-table entries, jg-based fixup */
+ __asm__ __volatile__ ( "lr\t0,%2\n"
+ "lr\t1,%3\n"
+ "\tstsi\t0(%4)\n"
+ "0:\tipm\t%0\n"
+ "\tsrl\t%0,28\n"
+ "1:lr\t%1,0\n"
+ ".section .fixup,\"ax\"\n"
+ "2:\tlhi\t%0,3\n"
+ "\tjg\t1b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ "\t.align 8\n"
+ "\t.quad 0b,2b\n"
+ ".previous\n"
+ : "=d" (cc), "=d" (retv)
+ : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
+ : "cc", "memory", "0", "1" );
+#endif
+
+ return cc? -1 : retv;
+}
+
+/* Query the highest STSI function code the machine supports, or -1 if
+ * STSI is unavailable.  The level is delivered in the high-order
+ * nibble of the returned register value, hence the shift by 28. */
+static inline int stsi_0 (void)
+{
+ int rc = stsi (NULL, 0, 0, 0);
+ return rc == -1 ? rc : (((unsigned int)rc) >> 28);
+}
+
+/* Fetch STSI 1.1.1 (basic machine data) into *info and convert the
+ * EBCDIC text fields to ASCII in place.  Returns 0 on success, -1 on
+ * failure. */
+static inline int stsi_1_1_1 (struct sysinfo_1_1_1 *info)
+{
+ int rc = stsi (info, 1, 1, 1);
+ if (rc != -1)
+ {
+ EBCASC (info->manufacturer, sizeof(info->manufacturer));
+ EBCASC (info->type, sizeof(info->type));
+ EBCASC (info->model, sizeof(info->model));
+ EBCASC (info->sequence, sizeof(info->sequence));
+ EBCASC (info->plant, sizeof(info->plant));
+ }
+ return rc == -1 ? rc : 0;
+}
+
+/* Fetch STSI 1.2.1 (machine CPU data) into *info, converting EBCDIC
+ * text fields to ASCII.  Returns 0 on success, -1 on failure. */
+static inline int stsi_1_2_1 (struct sysinfo_1_2_1 *info)
+{
+ int rc = stsi (info, 1, 2, 1);
+ if (rc != -1)
+ {
+ EBCASC (info->sequence, sizeof(info->sequence));
+ EBCASC (info->plant, sizeof(info->plant));
+ }
+ return rc == -1 ? rc : 0;
+}
+
+/* Fetch STSI 1.2.2 (machine CPU counts/capability) into *info; this
+ * block has no text fields, so no EBCDIC conversion is needed.
+ * Returns 0 on success, -1 on failure. */
+static inline int stsi_1_2_2 (struct sysinfo_1_2_2 *info)
+{
+ int rc = stsi (info, 1, 2, 2);
+ return rc == -1 ? rc : 0;
+}
+
+/* Fetch STSI 2.2.1 (LPAR CPU data) into *info, converting EBCDIC text
+ * fields to ASCII.  Returns 0 on success, -1 on failure. */
+static inline int stsi_2_2_1 (struct sysinfo_2_2_1 *info)
+{
+ int rc = stsi (info, 2, 2, 1);
+ if (rc != -1)
+ {
+ EBCASC (info->sequence, sizeof(info->sequence));
+ EBCASC (info->plant, sizeof(info->plant));
+ }
+ return rc == -1 ? rc : 0;
+}
+
+/* Fetch STSI 2.2.2 (LPAR description) into *info, converting the LPAR
+ * name from EBCDIC to ASCII.  Returns 0 on success, -1 on failure. */
+static inline int stsi_2_2_2 (struct sysinfo_2_2_2 *info)
+{
+ int rc = stsi (info, 2, 2, 2);
+ if (rc != -1)
+ {
+ EBCASC (info->name, sizeof(info->name));
+ }
+ return rc == -1 ? rc : 0;
+}
+
+/* Fetch STSI 3.2.2 (virtual machine descriptions) into *info and
+ * convert name and control-program-id of each valid vm[] entry from
+ * EBCDIC to ASCII.  Returns 0 on success, -1 on failure. */
+static inline int stsi_3_2_2 (struct sysinfo_3_2_2 *info)
+{
+ int rc = stsi (info, 3, 2, 2);
+ if (rc != -1)
+ {
+ int i;
+ for (i = 0; i < info->count; i++)
+ {
+ EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
+ EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
+ }
+ }
+ return rc == -1 ? rc : 0;
+}
+
+
+/*
+ * /proc/sysinfo read handler: prints machine (level 1), LPAR (level 2)
+ * and VM (level 3) information gathered via STSI, up to the highest
+ * level the machine supports.
+ *
+ * NOTE(review): the off/count/eof procfs parameters are ignored, so
+ * this relies on the whole text fitting into the single page procfs
+ * hands in -- confirm the output stays below PAGE_SIZE even with the
+ * maximum of 8 vm[] entries.
+ * NOTE(review): capability and caf are unsigned int but printed with
+ * %d; %u would be the exact specifier.
+ */
+static int proc_read_sysinfo(char *page, char **start,
+ off_t off, int count,
+ int *eof, void *data)
+{
+ /* scratch page for the STSI response blocks; freed before return */
+ unsigned long info_page = get_zeroed_page (GFP_KERNEL);
+ union s390_sysinfo *info = (union s390_sysinfo *) info_page;
+ int len = 0;
+ int level;
+ int i;
+
+ if (!info)
+ return 0;
+
+ /* highest supported STSI function code, or -1 on failure (then all
+ * level checks below fall through and nothing is printed) */
+ level = stsi_0 ();
+
+ /* basic machine identification */
+ if (level >= 1 && stsi_1_1_1 (&info->sysinfo_1_1_1) == 0)
+ {
+ len += sprintf (page+len, "Manufacturer: %-16.16s\n",
+ info->sysinfo_1_1_1.manufacturer);
+ len += sprintf (page+len, "Type: %-4.4s\n",
+ info->sysinfo_1_1_1.type);
+ len += sprintf (page+len, "Model: %-16.16s\n",
+ info->sysinfo_1_1_1.model);
+ len += sprintf (page+len, "Sequence Code: %-16.16s\n",
+ info->sysinfo_1_1_1.sequence);
+ len += sprintf (page+len, "Plant: %-4.4s\n",
+ info->sysinfo_1_1_1.plant);
+ }
+
+ /* machine CPU counts, capability and n-way adjustment factors */
+ if (level >= 1 && stsi_1_2_2 (&info->sysinfo_1_2_2) == 0)
+ {
+ len += sprintf (page+len, "\n");
+ len += sprintf (page+len, "CPUs Total: %d\n",
+ info->sysinfo_1_2_2.cpus_total);
+ len += sprintf (page+len, "CPUs Configured: %d\n",
+ info->sysinfo_1_2_2.cpus_configured);
+ len += sprintf (page+len, "CPUs Standby: %d\n",
+ info->sysinfo_1_2_2.cpus_standby);
+ len += sprintf (page+len, "CPUs Reserved: %d\n",
+ info->sysinfo_1_2_2.cpus_reserved);
+
+ len += sprintf (page+len, "Capability: %d\n",
+ info->sysinfo_1_2_2.capability);
+
+ /* adjustment[] starts at the 2-way entry, hence i-2 */
+ for (i = 2; i <= info->sysinfo_1_2_2.cpus_total; i++)
+ len += sprintf (page+len, "Adjustment %02d-way: %d\n",
+ i, info->sysinfo_1_2_2.adjustment[i-2]);
+ }
+
+ /* logical partition description */
+ if (level >= 2 && stsi_2_2_2 (&info->sysinfo_2_2_2) == 0)
+ {
+ len += sprintf (page+len, "\n");
+ len += sprintf (page+len, "LPAR Number: %d\n",
+ info->sysinfo_2_2_2.lpar_number);
+
+ len += sprintf (page+len, "LPAR Characteristics: ");
+ if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_DEDICATED)
+ len += sprintf (page+len, "Dedicated ");
+ if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_SHARED)
+ len += sprintf (page+len, "Shared ");
+ if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_LIMITED)
+ len += sprintf (page+len, "Limited ");
+ len += sprintf (page+len, "\n");
+
+ len += sprintf (page+len, "LPAR Name: %-8.8s\n",
+ info->sysinfo_2_2_2.name);
+
+ len += sprintf (page+len, "LPAR Adjustment: %d\n",
+ info->sysinfo_2_2_2.caf);
+
+ len += sprintf (page+len, "LPAR CPUs Total: %d\n",
+ info->sysinfo_2_2_2.cpus_total);
+ len += sprintf (page+len, "LPAR CPUs Configured: %d\n",
+ info->sysinfo_2_2_2.cpus_configured);
+ len += sprintf (page+len, "LPAR CPUs Standby: %d\n",
+ info->sysinfo_2_2_2.cpus_standby);
+ len += sprintf (page+len, "LPAR CPUs Reserved: %d\n",
+ info->sysinfo_2_2_2.cpus_reserved);
+ len += sprintf (page+len, "LPAR CPUs Dedicated: %d\n",
+ info->sysinfo_2_2_2.cpus_dedicated);
+ len += sprintf (page+len, "LPAR CPUs Shared: %d\n",
+ info->sysinfo_2_2_2.cpus_shared);
+ }
+
+ /* one section per valid virtual machine description */
+ if (level >= 3 && stsi_3_2_2 (&info->sysinfo_3_2_2) == 0)
+ {
+ for (i = 0; i < info->sysinfo_3_2_2.count; i++)
+ {
+ len += sprintf (page+len, "\n");
+ len += sprintf (page+len, "VM%02d Name: %-8.8s\n",
+ i, info->sysinfo_3_2_2.vm[i].name);
+ len += sprintf (page+len, "VM%02d Control Program: %-16.16s\n",
+ i, info->sysinfo_3_2_2.vm[i].cpi);
+
+ len += sprintf (page+len, "VM%02d Adjustment: %d\n",
+ i, info->sysinfo_3_2_2.vm[i].caf);
+
+ len += sprintf (page+len, "VM%02d CPUs Total: %d\n",
+ i, info->sysinfo_3_2_2.vm[i].cpus_total);
+ len += sprintf (page+len, "VM%02d CPUs Configured: %d\n",
+ i, info->sysinfo_3_2_2.vm[i].cpus_configured);
+ len += sprintf (page+len, "VM%02d CPUs Standby: %d\n",
+ i, info->sysinfo_3_2_2.vm[i].cpus_standby);
+ len += sprintf (page+len, "VM%02d CPUs Reserved: %d\n",
+ i, info->sysinfo_3_2_2.vm[i].cpus_reserved);
+ }
+ }
+
+ free_page (info_page);
+ return len;
+}
+
+/* Register /proc/sysinfo at boot time.
+ * NOTE(review): the return value of create_proc_read_entry() (NULL on
+ * failure) is ignored, so a failed registration goes unnoticed --
+ * harmless, but worth confirming that is intended. */
+static __init int create_proc_sysinfo(void)
+{
+ create_proc_read_entry ("sysinfo", 0444, NULL,
+ proc_read_sysinfo, NULL);
+ return 0;
+}
+
+__initcall(create_proc_sysinfo);
+