Diffstat (limited to 'drivers/s390/char/sclp.c')
-rw-r--r-- | drivers/s390/char/sclp.c | 732
1 file changed, 429 insertions, 303 deletions
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 3e4fb4e858da..98e334724a62 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * core function to access sclp interface * @@ -8,24 +9,43 @@ */ #include <linux/kernel_stat.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/err.h> +#include <linux/panic_notifier.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/reboot.h> #include <linux/jiffies.h> #include <linux/init.h> -#include <linux/suspend.h> -#include <linux/completion.h> #include <linux/platform_device.h> #include <asm/types.h> #include <asm/irq.h> +#include <asm/debug.h> #include "sclp.h" #define SCLP_HEADER "sclp: " +struct sclp_trace_entry { + char id[4] __nonstring; + u32 a; + u64 b; +}; + +#define SCLP_TRACE_ENTRY_SIZE sizeof(struct sclp_trace_entry) +#define SCLP_TRACE_MAX_SIZE 128 +#define SCLP_TRACE_EVENT_MAX_SIZE 64 + +/* Debug trace area intended for all entries in abbreviated form. */ +DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE, + &debug_hex_ascii_view); + +/* Error trace area intended for full entries relating to failed requests. */ +DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1, + SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view); + /* Lock to protect internal data consistency. */ static DEFINE_SPINLOCK(sclp_lock); @@ -36,30 +56,137 @@ static sccb_mask_t sclp_receive_mask; static sccb_mask_t sclp_send_mask; /* List of registered event listeners and senders. */ -static struct list_head sclp_reg_list; +static LIST_HEAD(sclp_reg_list); /* List of queued requests. */ -static struct list_head sclp_req_queue; +static LIST_HEAD(sclp_req_queue); -/* Data for read and and init requests. */ +/* Data for read and init requests. */ static struct sclp_req sclp_read_req; static struct sclp_req sclp_init_req; -static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); -static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); - -/* Suspend request */ -static DECLARE_COMPLETION(sclp_request_queue_flushed); +static void *sclp_read_sccb; +static struct init_sccb *sclp_init_sccb; /* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */ int sclp_console_pages = SCLP_CONSOLE_PAGES; /* Flag to indicate if buffer pages are dropped on buffer full condition */ -int sclp_console_drop = 0; +bool sclp_console_drop = true; /* Number of times the console dropped buffer pages */ unsigned long sclp_console_full; -static void sclp_suspend_req_cb(struct sclp_req *req, void *data) +/* The currently active SCLP command word. */ +static sclp_cmdw_t active_cmd; + +static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int) { - complete(&sclp_request_queue_flushed); + if (sccb_int) + return __va(sccb_int); + return NULL; +} + +static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err) +{ + struct sclp_trace_entry e; + + memset(&e, 0, sizeof(e)); + strtomem(e.id, id); + e.a = a; + e.b = b; + debug_event(&sclp_debug, prio, &e, sizeof(e)); + if (err) + debug_event(&sclp_debug_err, 0, &e, sizeof(e)); +} + +static inline int no_zeroes_len(void *data, int len) +{ + char *d = data; + + /* Minimize trace area usage by not tracing trailing zeroes. 
*/ + while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0) + len--; + + return len; +} + +static inline void sclp_trace_bin(int prio, void *d, int len, int errlen) +{ + debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len)); + if (errlen) + debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen)); +} + +static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb) +{ + struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1); + int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE; + + /* Full SCCB tracing if debug level is set to max. */ + if (sclp_debug.level == DEBUG_MAX_LEVEL) + return len; + + /* Minimal tracing for console writes. */ + if (cmd == SCLP_CMDW_WRITE_EVENT_DATA && + (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG)) + limit = SCLP_TRACE_ENTRY_SIZE; + + return min(len, limit); +} + +static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b, + sclp_cmdw_t cmd, struct sccb_header *sccb, + bool err) +{ + sclp_trace(prio, id, a, b, err); + if (sccb) { + sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb), + err ? sccb->length : 0); + } +} + +static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b, + struct evbuf_header *evbuf, bool err) +{ + sclp_trace(prio, id, a, b, err); + sclp_trace_bin(prio + 1, evbuf, + min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE), + err ? evbuf->length : 0); +} + +static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req, + bool err) +{ + struct sccb_header *sccb = req->sccb; + union { + struct { + u16 status; + u16 response; + u16 timeout; + u16 start_count; + }; + u64 b; + } summary; + + summary.status = req->status; + summary.response = sccb ? sccb->response_code : 0; + summary.timeout = (u16)req->queue_timeout; + summary.start_count = (u16)req->start_count; + + sclp_trace(prio, id, __pa(sccb), summary.b, err); +} + +static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b, + struct sclp_register *reg) +{ + struct { + u64 receive; + u64 send; + } d; + + d.receive = reg->receive_mask; + d.send = reg->send_mask; + + sclp_trace(prio, id, a, b, false); + sclp_trace_bin(prio, &d, sizeof(d), 0); } static int __init sclp_setup_console_pages(char *str) @@ -76,27 +203,16 @@ __setup("sclp_con_pages=", sclp_setup_console_pages); static int __init sclp_setup_console_drop(char *str) { - int drop, rc; - - rc = kstrtoint(str, 0, &drop); - if (!rc && drop) - sclp_console_drop = 1; - return 1; + return kstrtobool(str, &sclp_console_drop) == 0; } __setup("sclp_con_drop=", sclp_setup_console_drop); -static struct sclp_req sclp_suspend_req; - /* Timer for request retries. */ static struct timer_list sclp_request_timer; -/* Internal state: is the driver initialized? */ -static volatile enum sclp_init_state_t { - sclp_init_state_uninitialized, - sclp_init_state_initializing, - sclp_init_state_initialized -} sclp_init_state = sclp_init_state_uninitialized; +/* Timer for queued requests. */ +static struct timer_list sclp_queue_timer; /* Internal state: is a request active at the sclp? */ static volatile enum sclp_running_state_t { @@ -125,12 +241,6 @@ static volatile enum sclp_mask_state_t { sclp_mask_state_initializing } sclp_mask_state = sclp_mask_state_idle; -/* Internal state: is the driver suspended? 
*/ -static enum sclp_suspend_state_t { - sclp_suspend_state_running, - sclp_suspend_state_suspended, -} sclp_suspend_state = sclp_suspend_state_running; - /* Maximum retry counts */ #define SCLP_INIT_RETRY 3 #define SCLP_MASK_RETRY 3 @@ -139,35 +249,10 @@ static enum sclp_suspend_state_t { #define SCLP_BUSY_INTERVAL 10 #define SCLP_RETRY_INTERVAL 30 +static void sclp_request_timeout(bool force_restart); static void sclp_process_queue(void); static void __sclp_make_read_req(void); static int sclp_init_mask(int calculate); -static int sclp_init(void); - -/* Perform service call. Return 0 on success, non-zero otherwise. */ -int -sclp_service_call(sclp_cmdw_t command, void *sccb) -{ - int cc = 4; /* Initialize for program check handling */ - - asm volatile( - "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ - "1: ipm %0\n" - " srl %0,28\n" - "2:\n" - EX_TABLE(0b, 2b) - EX_TABLE(1b, 2b) - : "+&d" (cc) : "d" (command), "a" (__pa(sccb)) - : "cc", "memory"); - if (cc == 4) - return -EINVAL; - if (cc == 3) - return -EIO; - if (cc == 2) - return -EBUSY; - return 0; -} - static void __sclp_queue_read_req(void) @@ -182,25 +267,35 @@ __sclp_queue_read_req(void) /* Set up request retry timer. Called while sclp_lock is locked. */ static inline void -__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long), - unsigned long data) +__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *)) { - del_timer(&sclp_request_timer); - sclp_request_timer.function = function; - sclp_request_timer.data = data; + timer_delete(&sclp_request_timer); + sclp_request_timer.function = cb; sclp_request_timer.expires = jiffies + time; add_timer(&sclp_request_timer); } -/* Request timeout handler. Restart the request queue. If DATA is non-zero, +static void sclp_request_timeout_restart(struct timer_list *unused) +{ + sclp_request_timeout(true); +} + +static void sclp_request_timeout_normal(struct timer_list *unused) +{ + sclp_request_timeout(false); +} + +/* Request timeout handler. Restart the request queue. If force_restart, * force restart of running request. */ -static void -sclp_request_timeout(unsigned long data) +static void sclp_request_timeout(bool force_restart) { unsigned long flags; + /* TMO: A timeout occurred (a=force_restart) */ + sclp_trace(2, "TMO", force_restart, 0, true); + spin_lock_irqsave(&sclp_lock, flags); - if (data) { + if (force_restart) { if (sclp_running_state == sclp_running_state_running) { /* Break running state and queue NOP read event request * to get a defined interface state. */ @@ -209,12 +304,107 @@ sclp_request_timeout(unsigned long data) } } else { __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, - sclp_request_timeout, 0); + sclp_request_timeout_normal); } spin_unlock_irqrestore(&sclp_lock, flags); sclp_process_queue(); } +/* + * Returns the expire value in jiffies of the next pending request timeout, + * if any. Needs to be called with sclp_lock. + */ +static unsigned long __sclp_req_queue_find_next_timeout(void) +{ + unsigned long expires_next = 0; + struct sclp_req *req; + + list_for_each_entry(req, &sclp_req_queue, list) { + if (!req->queue_expires) + continue; + if (!expires_next || + (time_before(req->queue_expires, expires_next))) + expires_next = req->queue_expires; + } + return expires_next; +} + +/* + * Returns expired request, if any, and removes it from the list. 
+ */ +static struct sclp_req *__sclp_req_queue_remove_expired_req(void) +{ + unsigned long flags, now; + struct sclp_req *req; + + spin_lock_irqsave(&sclp_lock, flags); + now = jiffies; + /* Don't need list_for_each_safe because we break out after list_del */ + list_for_each_entry(req, &sclp_req_queue, list) { + if (!req->queue_expires) + continue; + if (time_before_eq(req->queue_expires, now)) { + if (req->status == SCLP_REQ_QUEUED) { + req->status = SCLP_REQ_QUEUED_TIMEOUT; + list_del(&req->list); + goto out; + } + } + } + req = NULL; +out: + spin_unlock_irqrestore(&sclp_lock, flags); + return req; +} + +/* + * Timeout handler for queued requests. Removes request from list and + * invokes callback. This timer can be set per request in situations where + * waiting too long would be harmful to the system, e.g. during SE reboot. + */ +static void sclp_req_queue_timeout(struct timer_list *unused) +{ + unsigned long flags, expires_next; + struct sclp_req *req; + + do { + req = __sclp_req_queue_remove_expired_req(); + + if (req) { + /* RQTM: Request timed out (a=sccb, b=summary) */ + sclp_trace_req(2, "RQTM", req, true); + } + + if (req && req->callback) + req->callback(req, req->callback_data); + } while (req); + + spin_lock_irqsave(&sclp_lock, flags); + expires_next = __sclp_req_queue_find_next_timeout(); + if (expires_next) + mod_timer(&sclp_queue_timer, expires_next); + spin_unlock_irqrestore(&sclp_lock, flags); +} + +static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb) +{ + static u64 srvc_count; + int rc; + + /* SRV1: Service call about to be issued (a=command, b=sccb address) */ + sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false); + + rc = sclp_service_call(command, sccb); + + /* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */ + sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0); + + if (rc == 0) + active_cmd = command; + + return rc; +} + /* Try to start a request. Return zero if the request was successfully * started or if it will be started at a later time. Return non-zero otherwise. * Called while sclp_lock is locked. 
*/ @@ -225,8 +415,8 @@ __sclp_start_request(struct sclp_req *req) if (sclp_running_state != sclp_running_state_idle) return 0; - del_timer(&sclp_request_timer); - rc = sclp_service_call(req->command, req->sccb); + timer_delete(&sclp_request_timer); + rc = sclp_service_call_trace(req->command, req->sccb); req->start_count++; if (rc == 0) { @@ -234,12 +424,12 @@ __sclp_start_request(struct sclp_req *req) req->status = SCLP_REQ_RUNNING; sclp_running_state = sclp_running_state_running; __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ, - sclp_request_timeout, 1); + sclp_request_timeout_restart); return 0; } else if (rc == -EBUSY) { /* Try again later */ __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, - sclp_request_timeout, 0); + sclp_request_timeout_normal); return 0; } /* Request failed */ @@ -260,11 +450,9 @@ sclp_process_queue(void) spin_unlock_irqrestore(&sclp_lock, flags); return; } - del_timer(&sclp_request_timer); + timer_delete(&sclp_request_timer); while (!list_empty(&sclp_req_queue)) { req = list_entry(sclp_req_queue.next, struct sclp_req, list); - if (!req->sccb) - goto do_post; rc = __sclp_start_request(req); if (rc == 0) break; @@ -273,12 +461,15 @@ sclp_process_queue(void) /* Cannot abort already submitted request - could still * be active at the SCLP */ __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, - sclp_request_timeout, 0); + sclp_request_timeout_normal); break; } -do_post: /* Post-processing for aborted request */ list_del(&req->list); + + /* RQAB: Request aborted (a=sccb, b=summary) */ + sclp_trace_req(2, "RQAB", req, true); + if (req->callback) { spin_unlock_irqrestore(&sclp_lock, flags); req->callback(req, req->callback_data); @@ -290,10 +481,8 @@ do_post: static int __sclp_can_add_request(struct sclp_req *req) { - if (req == &sclp_suspend_req || req == &sclp_init_req) + if (req == &sclp_init_req) return 1; - if (sclp_suspend_state != sclp_suspend_state_running) - return 0; if (sclp_init_state != sclp_init_state_initialized) return 0; if (sclp_activation_state != sclp_activation_state_active) @@ -313,23 +502,28 @@ sclp_add_request(struct sclp_req *req) spin_unlock_irqrestore(&sclp_lock, flags); return -EIO; } + + /* RQAD: Request was added (a=sccb, b=caller) */ + sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false); + req->status = SCLP_REQ_QUEUED; req->start_count = 0; list_add_tail(&req->list, &sclp_req_queue); rc = 0; + if (req->queue_timeout) { + req->queue_expires = jiffies + req->queue_timeout * HZ; + if (!timer_pending(&sclp_queue_timer) || + time_after(sclp_queue_timer.expires, req->queue_expires)) + mod_timer(&sclp_queue_timer, req->queue_expires); + } else + req->queue_expires = 0; /* Start if request is first in list */ if (sclp_running_state == sclp_running_state_idle && req->list.prev == &sclp_req_queue) { - if (!req->sccb) { - list_del(&req->list); - rc = -ENODATA; - goto out; - } rc = __sclp_start_request(req); if (rc) list_del(&req->list); } -out: spin_unlock_irqrestore(&sclp_lock, flags); return rc; } @@ -360,11 +554,16 @@ sclp_dispatch_evbufs(struct sccb_header *sccb) reg = NULL; list_for_each(l, &sclp_reg_list) { reg = list_entry(l, struct sclp_register, list); - if (reg->receive_mask & (1 << (32 - evbuf->type))) + if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type)) break; else reg = NULL; } + + /* EVNT: Event callback (b=receiver) */ + sclp_trace_evbuf(2, "EVNT", 0, reg ? 
(u64)reg->receiver_fn : 0, + evbuf, !reg); + if (reg && reg->receiver_fn) { spin_unlock_irqrestore(&sclp_lock, flags); reg->receiver_fn(evbuf); @@ -420,12 +619,36 @@ __sclp_find_req(u32 sccb) list_for_each(l, &sclp_req_queue) { req = list_entry(l, struct sclp_req, list); - if (sccb == (u32) (addr_t) req->sccb) - return req; + if (sccb == __pa(req->sccb)) + return req; } return NULL; } +static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd) +{ + struct sccb_header *sccb = sclpint_to_sccb(sccb_int); + struct evbuf_header *evbuf; + u16 response; + + if (!sccb) + return true; + + /* Check SCCB response. */ + response = sccb->response_code & 0xff; + if (response != 0x10 && response != 0x20) + return false; + + /* Check event-processed flag on outgoing events. */ + if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) { + evbuf = (struct evbuf_header *)(sccb + 1); + if (!(evbuf->flags & 0x80)) + return false; + } + + return true; +} + /* Handler for external interruption. Perform request post-processing. * Prepare read event data request if necessary. Start processing of next * request on queue. */ @@ -440,21 +663,35 @@ static void sclp_interrupt_handler(struct ext_code ext_code, spin_lock(&sclp_lock); finished_sccb = param32 & 0xfffffff8; evbuf_pending = param32 & 0x3; + + /* INT: Interrupt received (a=intparm, b=cmd) */ + sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd, + sclpint_to_sccb(finished_sccb), + !ok_response(finished_sccb, active_cmd)); + if (finished_sccb) { - del_timer(&sclp_request_timer); + timer_delete(&sclp_request_timer); sclp_running_state = sclp_running_state_reset_pending; req = __sclp_find_req(finished_sccb); if (req) { /* Request post-processing */ list_del(&req->list); req->status = SCLP_REQ_DONE; + + /* RQOK: Request success (a=sccb, b=summary) */ + sclp_trace_req(2, "RQOK", req, false); + if (req->callback) { spin_unlock(&sclp_lock); req->callback(req, req->callback_data); spin_lock(&sclp_lock); } + } else { + /* UNEX: Unexpected SCCB completion (a=sccb address) */ + sclp_trace(0, "UNEX", finished_sccb, 0, true); } sclp_running_state = sclp_running_state_idle; + active_cmd = 0; } if (evbuf_pending && sclp_activation_state == sclp_activation_state_active) @@ -476,17 +713,21 @@ void sclp_sync_wait(void) { unsigned long long old_tick; + struct ctlreg cr0, cr0_sync; unsigned long flags; - unsigned long cr0, cr0_sync; + static u64 sync_count; u64 timeout; int irq_context; + /* SYN1: Synchronous wait start (a=runstate, b=sync count) */ + sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false); + /* We'll be disabling timer interrupts, so we need a custom timeout * mechanism */ timeout = 0; if (timer_pending(&sclp_request_timer)) { /* Get timeout TOD value */ - timeout = get_tod_clock() + + timeout = get_tod_clock_monotonic() + sclp_tod_from_jiffies(sclp_request_timer.expires - jiffies); } @@ -498,27 +739,27 @@ sclp_sync_wait(void) /* Enable service-signal interruption, disable timer interrupts */ old_tick = local_tick_disable(); trace_hardirqs_on(); - __ctl_store(cr0, 0, 0); - cr0_sync = cr0; - cr0_sync &= 0xffff00a0; - cr0_sync |= 0x00000200; - __ctl_load(cr0_sync, 0, 0); - __arch_local_irq_stosm(0x01); + local_ctl_store(0, &cr0); + cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK; + cr0_sync.val |= 1UL << (63 - 54); + local_ctl_load(0, &cr0_sync); + arch_local_irq_enable_external(); /* Loop until driver state indicates finished request */ while (sclp_running_state != sclp_running_state_idle) { /* Check for expired request timer */ - if (timer_pending(&sclp_request_timer) 
&& - get_tod_clock() > timeout && - del_timer(&sclp_request_timer)) - sclp_request_timer.function(sclp_request_timer.data); + if (get_tod_clock_monotonic() > timeout && timer_delete(&sclp_request_timer)) + sclp_request_timer.function(&sclp_request_timer); cpu_relax(); } local_irq_disable(); - __ctl_load(cr0, 0, 0); + local_ctl_load(0, &cr0); if (!irq_context) _local_bh_enable(); local_tick_enable(old_tick); local_irq_restore(flags); + + /* SYN2: Synchronous wait end (a=runstate, b=sync_count) */ + sclp_trace(4, "SYN2", sclp_running_state, sync_count, false); } EXPORT_SYMBOL(sclp_sync_wait); @@ -548,8 +789,13 @@ sclp_dispatch_state_change(void) reg = NULL; } spin_unlock_irqrestore(&sclp_lock, flags); - if (reg && reg->state_change_fn) + if (reg && reg->state_change_fn) { + /* STCG: State-change callback (b=callback) */ + sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn, + false); + reg->state_change_fn(reg); + } } while (reg); } @@ -562,9 +808,12 @@ struct sclp_statechangebuf { u16 _zeros : 12; u16 mask_length; u64 sclp_active_facility_mask; - sccb_mask_t sclp_receive_mask; - sccb_mask_t sclp_send_mask; - u32 read_data_function_mask; + u8 masks[2 * 1021 + 4]; /* variable length */ + /* + * u8 sclp_receive_mask[mask_length]; + * u8 sclp_send_mask[mask_length]; + * u32 read_data_function_mask; + */ } __attribute__((packed)); @@ -575,17 +824,17 @@ sclp_state_change_cb(struct evbuf_header *evbuf) unsigned long flags; struct sclp_statechangebuf *scbuf; + BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE); + scbuf = (struct sclp_statechangebuf *) evbuf; - if (scbuf->mask_length != sizeof(sccb_mask_t)) - return; spin_lock_irqsave(&sclp_lock, flags); if (scbuf->validity_sclp_receive_mask) - sclp_receive_mask = scbuf->sclp_receive_mask; + sclp_receive_mask = sccb_get_recv_mask(scbuf); if (scbuf->validity_sclp_send_mask) - sclp_send_mask = scbuf->sclp_send_mask; + sclp_send_mask = sccb_get_send_mask(scbuf); spin_unlock_irqrestore(&sclp_lock, flags); if (scbuf->validity_sclp_active_facility_mask) - sclp_facilities = scbuf->sclp_active_facility_mask; + sclp.facilities = scbuf->sclp_active_facility_mask; sclp_dispatch_state_change(); } @@ -620,6 +869,9 @@ sclp_register(struct sclp_register *reg) sccb_mask_t send_mask; int rc; + /* REG: Event listener registered (b=caller) */ + sclp_trace_register(2, "REG", 0, _RET_IP_, reg); + rc = sclp_init(); if (rc) return rc; @@ -633,7 +885,6 @@ sclp_register(struct sclp_register *reg) /* Trigger initial state change callback */ reg->sclp_receive_mask = 0; reg->sclp_send_mask = 0; - reg->pm_event_posted = 0; list_add(®->list, &sclp_reg_list); spin_unlock_irqrestore(&sclp_lock, flags); rc = sclp_init_mask(1); @@ -653,6 +904,9 @@ sclp_unregister(struct sclp_register *reg) { unsigned long flags; + /* UREG: Event listener unregistered (b=caller) */ + sclp_trace_register(2, "UREG", 0, _RET_IP_, reg); + spin_lock_irqsave(&sclp_lock, flags); list_del(®->list); spin_unlock_irqrestore(&sclp_lock, flags); @@ -692,11 +946,10 @@ EXPORT_SYMBOL(sclp_remove_processed); /* Prepare init mask request. Called while sclp_lock is locked. 
*/ static inline void -__sclp_make_init_req(u32 receive_mask, u32 send_mask) +__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask) { - struct init_sccb *sccb; + struct init_sccb *sccb = sclp_init_sccb; - sccb = (struct init_sccb *) sclp_init_sccb; clear_page(sccb); memset(&sclp_init_req, 0, sizeof(struct sclp_req)); sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK; @@ -705,12 +958,15 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask) sclp_init_req.callback = NULL; sclp_init_req.callback_data = NULL; sclp_init_req.sccb = sccb; - sccb->header.length = sizeof(struct init_sccb); - sccb->mask_length = sizeof(sccb_mask_t); - sccb->receive_mask = receive_mask; - sccb->send_mask = send_mask; - sccb->sclp_receive_mask = 0; - sccb->sclp_send_mask = 0; + sccb->header.length = sizeof(*sccb); + if (sclp_mask_compat_mode) + sccb->mask_length = SCLP_MASK_SIZE_COMPAT; + else + sccb->mask_length = sizeof(sccb_mask_t); + sccb_set_recv_mask(sccb, receive_mask); + sccb_set_send_mask(sccb, send_mask); + sccb_set_sclp_recv_mask(sccb, 0); + sccb_set_sclp_send_mask(sccb, 0); } /* Start init mask request. If calculate is non-zero, calculate the mask as @@ -720,7 +976,7 @@ static int sclp_init_mask(int calculate) { unsigned long flags; - struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb; + struct init_sccb *sccb = sclp_init_sccb; sccb_mask_t receive_mask; sccb_mask_t send_mask; int retry; @@ -766,8 +1022,8 @@ sclp_init_mask(int calculate) sccb->header.response_code == 0x20) { /* Successful request */ if (calculate) { - sclp_receive_mask = sccb->sclp_receive_mask; - sclp_send_mask = sccb->sclp_send_mask; + sclp_receive_mask = sccb_get_sclp_recv_mask(sccb); + sclp_send_mask = sccb_get_sclp_send_mask(sccb); } else { sclp_receive_mask = 0; sclp_send_mask = 0; @@ -854,7 +1110,7 @@ static void sclp_check_handler(struct ext_code ext_code, /* Is this the interrupt we are waiting for? */ if (finished_sccb == 0) return; - if (finished_sccb != (u32) (addr_t) sclp_init_sccb) + if (finished_sccb != __pa(sclp_init_sccb)) panic("sclp: unsolicited interrupt for buffer at 0x%x\n", finished_sccb); spin_lock(&sclp_lock); @@ -867,7 +1123,7 @@ static void sclp_check_handler(struct ext_code ext_code, /* Initial init mask request timed out. Modify request state to failed. */ static void -sclp_check_timeout(unsigned long data) +sclp_check_timeout(struct timer_list *unused) { unsigned long flags; @@ -892,7 +1148,7 @@ sclp_check_interface(void) spin_lock_irqsave(&sclp_lock, flags); /* Prepare init mask command */ - rc = register_external_interrupt(0x2401, sclp_check_handler); + rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler); if (rc) { spin_unlock_irqrestore(&sclp_lock, flags); return rc; @@ -900,32 +1156,38 @@ sclp_check_interface(void) for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { __sclp_make_init_req(0, 0); sccb = (struct init_sccb *) sclp_init_req.sccb; - rc = sclp_service_call(sclp_init_req.command, sccb); + rc = sclp_service_call_trace(sclp_init_req.command, sccb); if (rc == -EIO) break; sclp_init_req.status = SCLP_REQ_RUNNING; sclp_running_state = sclp_running_state_running; __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ, - sclp_check_timeout, 0); + sclp_check_timeout); spin_unlock_irqrestore(&sclp_lock, flags); /* Enable service-signal interruption - needs to happen * with IRQs enabled. 
*/ - service_subclass_irq_register(); + irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); /* Wait for signal from interrupt or timeout */ sclp_sync_wait(); /* Disable service-signal interruption - needs to happen * with IRQs enabled. */ - service_subclass_irq_unregister(); + irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); spin_lock_irqsave(&sclp_lock, flags); - del_timer(&sclp_request_timer); - if (sclp_init_req.status == SCLP_REQ_DONE && - sccb->header.response_code == 0x20) { - rc = 0; - break; - } else - rc = -EBUSY; + timer_delete(&sclp_request_timer); + rc = -EBUSY; + if (sclp_init_req.status == SCLP_REQ_DONE) { + if (sccb->header.response_code == 0x20) { + rc = 0; + break; + } else if (sccb->header.response_code == 0x74f0) { + if (!sclp_mask_compat_mode) { + sclp_mask_compat_mode = true; + retry = 0; + } + } + } } - unregister_external_interrupt(0x2401, sclp_check_handler); + unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler); spin_unlock_irqrestore(&sclp_lock, flags); return rc; } @@ -940,135 +1202,38 @@ sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) } static struct notifier_block sclp_reboot_notifier = { - .notifier_call = sclp_reboot_event + .notifier_call = sclp_reboot_event, + .priority = INT_MIN, }; -/* - * Suspend/resume SCLP notifier implementation - */ - -static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback) +static ssize_t con_pages_show(struct device_driver *dev, char *buf) { - struct sclp_register *reg; - unsigned long flags; - - if (!rollback) { - spin_lock_irqsave(&sclp_lock, flags); - list_for_each_entry(reg, &sclp_reg_list, list) - reg->pm_event_posted = 0; - spin_unlock_irqrestore(&sclp_lock, flags); - } - do { - spin_lock_irqsave(&sclp_lock, flags); - list_for_each_entry(reg, &sclp_reg_list, list) { - if (rollback && reg->pm_event_posted) - goto found; - if (!rollback && !reg->pm_event_posted) - goto found; - } - spin_unlock_irqrestore(&sclp_lock, flags); - return; -found: - spin_unlock_irqrestore(&sclp_lock, flags); - if (reg->pm_event_fn) - reg->pm_event_fn(reg, sclp_pm_event); - reg->pm_event_posted = rollback ? 
0 : 1; - } while (1); + return sysfs_emit(buf, "%i\n", sclp_console_pages); } -/* - * Susend/resume callbacks for platform device - */ - -static int sclp_freeze(struct device *dev) -{ - unsigned long flags; - int rc; - - sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0); - - spin_lock_irqsave(&sclp_lock, flags); - sclp_suspend_state = sclp_suspend_state_suspended; - spin_unlock_irqrestore(&sclp_lock, flags); +static DRIVER_ATTR_RO(con_pages); - /* Init supend data */ - memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req)); - sclp_suspend_req.callback = sclp_suspend_req_cb; - sclp_suspend_req.status = SCLP_REQ_FILLED; - init_completion(&sclp_request_queue_flushed); - - rc = sclp_add_request(&sclp_suspend_req); - if (rc == 0) - wait_for_completion(&sclp_request_queue_flushed); - else if (rc != -ENODATA) - goto fail_thaw; - - rc = sclp_deactivate(); - if (rc) - goto fail_thaw; - return 0; - -fail_thaw: - spin_lock_irqsave(&sclp_lock, flags); - sclp_suspend_state = sclp_suspend_state_running; - spin_unlock_irqrestore(&sclp_lock, flags); - sclp_pm_event(SCLP_PM_EVENT_THAW, 1); - return rc; -} - -static int sclp_undo_suspend(enum sclp_pm_event event) +static ssize_t con_drop_store(struct device_driver *dev, const char *buf, size_t count) { - unsigned long flags; int rc; - rc = sclp_reactivate(); - if (rc) - return rc; - - spin_lock_irqsave(&sclp_lock, flags); - sclp_suspend_state = sclp_suspend_state_running; - spin_unlock_irqrestore(&sclp_lock, flags); - - sclp_pm_event(event, 0); - return 0; + rc = kstrtobool(buf, &sclp_console_drop); + return rc ?: count; } -static int sclp_thaw(struct device *dev) +static ssize_t con_drop_show(struct device_driver *dev, char *buf) { - return sclp_undo_suspend(SCLP_PM_EVENT_THAW); + return sysfs_emit(buf, "%i\n", sclp_console_drop); } -static int sclp_restore(struct device *dev) -{ - return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); -} - -static const struct dev_pm_ops sclp_pm_ops = { - .freeze = sclp_freeze, - .thaw = sclp_thaw, - .restore = sclp_restore, -}; +static DRIVER_ATTR_RW(con_drop); -static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf) +static ssize_t con_full_show(struct device_driver *dev, char *buf) { - return sprintf(buf, "%i\n", sclp_console_pages); + return sysfs_emit(buf, "%lu\n", sclp_console_full); } -static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL); - -static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf) -{ - return sprintf(buf, "%i\n", sclp_console_drop); -} - -static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL); - -static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf) -{ - return sprintf(buf, "%lu\n", sclp_console_full); -} - -static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL); +static DRIVER_ATTR_RO(con_full); static struct attribute *sclp_drv_attrs[] = { &driver_attr_con_pages.attr, @@ -1087,18 +1252,13 @@ static const struct attribute_group *sclp_drv_attr_groups[] = { static struct platform_driver sclp_pdrv = { .driver = { .name = "sclp", - .owner = THIS_MODULE, - .pm = &sclp_pm_ops, .groups = sclp_drv_attr_groups, }, }; -static struct platform_device *sclp_pdev; - /* Initialize SCLP driver. Return zero if driver is operational, non-zero * otherwise. 
*/ -static int -sclp_init(void) +int sclp_init(void) { unsigned long flags; int rc = 0; @@ -1108,11 +1268,13 @@ sclp_init(void) if (sclp_init_state != sclp_init_state_uninitialized) goto fail_unlock; sclp_init_state = sclp_init_state_initializing; + sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA); + sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA); + BUG_ON(!sclp_read_sccb || !sclp_init_sccb); /* Set up variables */ - INIT_LIST_HEAD(&sclp_req_queue); - INIT_LIST_HEAD(&sclp_reg_list); list_add(&sclp_state_change_event.list, &sclp_reg_list); - init_timer(&sclp_request_timer); + timer_setup(&sclp_request_timer, NULL, 0); + timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0); /* Check interface */ spin_unlock_irqrestore(&sclp_lock, flags); rc = sclp_check_interface(); @@ -1124,68 +1286,32 @@ sclp_init(void) if (rc) goto fail_init_state_uninitialized; /* Register interrupt handler */ - rc = register_external_interrupt(0x2401, sclp_interrupt_handler); + rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler); if (rc) goto fail_unregister_reboot_notifier; sclp_init_state = sclp_init_state_initialized; spin_unlock_irqrestore(&sclp_lock, flags); /* Enable service-signal external interruption - needs to happen with * IRQs enabled. */ - service_subclass_irq_register(); + irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); sclp_init_mask(1); return 0; fail_unregister_reboot_notifier: unregister_reboot_notifier(&sclp_reboot_notifier); fail_init_state_uninitialized: + list_del(&sclp_state_change_event.list); sclp_init_state = sclp_init_state_uninitialized; + free_page((unsigned long) sclp_read_sccb); + free_page((unsigned long) sclp_init_sccb); fail_unlock: spin_unlock_irqrestore(&sclp_lock, flags); return rc; } -/* - * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able - * to print the panic message. - */ -static int sclp_panic_notify(struct notifier_block *self, - unsigned long event, void *data) -{ - if (sclp_suspend_state == sclp_suspend_state_suspended) - sclp_undo_suspend(SCLP_PM_EVENT_THAW); - return NOTIFY_OK; -} - -static struct notifier_block sclp_on_panic_nb = { - .notifier_call = sclp_panic_notify, - .priority = SCLP_PANIC_PRIO, -}; - static __init int sclp_initcall(void) { - int rc; - - rc = platform_driver_register(&sclp_pdrv); - if (rc) - return rc; - - sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); - rc = PTR_RET(sclp_pdev); - if (rc) - goto fail_platform_driver_unregister; - - rc = atomic_notifier_chain_register(&panic_notifier_list, - &sclp_on_panic_nb); - if (rc) - goto fail_platform_device_unregister; - - return sclp_init(); - -fail_platform_device_unregister: - platform_device_unregister(sclp_pdev); -fail_platform_driver_unregister: - platform_driver_unregister(&sclp_pdrv); - return rc; + return platform_driver_register(&sclp_pdrv); } arch_initcall(sclp_initcall); |
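The tracing helpers added in this patch keep debug entries small by dropping trailing zero bytes from traced payloads before handing them to debug_event(), never trimming below one 16-byte sclp_trace_entry. Below is a minimal stand-alone sketch of that trimming idea; it is plain user-space C for illustration only, and the 16-byte TRACE_ENTRY_SIZE and the demo buffer are assumptions mirroring the patch, not the kernel code itself.

#include <stdio.h>
#include <string.h>

/* Illustrative minimum: never trim below one trace entry's worth of bytes
 * (char id[4] + u32 + u64 = 16 bytes in the patch above). */
#define TRACE_ENTRY_SIZE 16

/* Return the payload length with trailing zero bytes dropped, but never
 * shorter than TRACE_ENTRY_SIZE - mirrors the intent of no_zeroes_len(). */
static int no_zeroes_len(const void *data, int len)
{
        const char *d = data;

        while (len > TRACE_ENTRY_SIZE && d[len - 1] == 0)
                len--;
        return len;
}

int main(void)
{
        char buf[64];

        memset(buf, 0, sizeof(buf));
        memcpy(buf, "SCCB", 4);         /* only the first 4 bytes are non-zero */

        /* Prints 16: the 64-byte buffer is logged as a single trace entry. */
        printf("trimmed length: %d\n", no_zeroes_len(buf, sizeof(buf)));
        return 0;
}

The same space-versus-detail trade-off drives abbrev_len() in the patch: full SCCBs are only traced when the debug level is at maximum, and routine console writes are cut down to a single trace entry.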
