// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously
 *  in a normal kernel context and not an interrupt context.
 *  This pair of routines creates an event and queues it onto an
 *  event list, where the eehd kernel thread can drive recovery.
 */

static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);
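
/*
 * Illustrative sketch (not called anywhere; the function name is
 * hypothetical): how a detector running in exception context hands a
 * frozen PE off to the event queue. The real detection path is
 * eeh_dev_check_failure() in eeh.c.
 */
static void __maybe_unused example_report_frozen_pe(struct eeh_pe *pe)
{
	/*
	 * Safe in atomic context: the event is allocated with
	 * GFP_ATOMIC and the list is protected by an irqsave
	 * spinlock, so all heavy recovery work is deferred to the
	 * eehd kernel thread.
	 */
	eeh_send_failure_event(pe);
}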

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it.  The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;

	while (!kthread_should_stop()) {
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* The event may not have a PE bound to it */
		if (event->pe)
			eeh_handle_normal_event(event->pe);
		else
			eeh_handle_special_event();

		kfree(event);
	}

	return 0;
}

/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
			__func__, ret);
		return ret;
	}

	return 0;
}
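
/*
 * Usage sketch: core EEH setup is expected to call eeh_event_init()
 * exactly once during boot; in current trees that call is made from
 * eeh_init() in eeh.c, roughly:
 *
 *	ret = eeh_event_init();
 *	if (ret)
 *		return ret;
 */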

/**
 * __eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (from the eehd kernel thread).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/*
	 * Mark the PE as recovering before inserting it in the queue.
	 * This prevents the PE from being free()ed by a hotplug driver
	 * while the PE is sitting in the event queue.
	 */
	if (pe) {
#ifdef CONFIG_STACKTRACE
		/*
		 * Save the current stack trace so we can dump it from the
		 * event handler thread.
		 */
		pe->trace_entries = stack_trace_save(pe->stack_trace,
					 ARRAY_SIZE(pe->stack_trace), 0);
#endif /* CONFIG_STACKTRACE */

		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
	}

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* Kick the EEH daemon awake */
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop it on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}
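
/*
 * Usage sketch (knob name and path assumed from current trees; see
 * the debugfs setup in eeh.c): recovery can be suppressed from
 * userspace with something like:
 *
 *	echo 1 > /sys/kernel/debug/powerpc/eeh_disable_recovery
 */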

/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: PE whose queued events should be removed
 * @force: when true, remove events even if their PE is isolated
 *
 * On the PowerNV platform, we may see subsequent events that are
 * really part of an earlier one. Those later events are duplicates
 * and unnecessary, so they should be removed.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * A NULL PE means either the IOC is dead or the caller is
	 * sure it can report all existing errors itself.
	 *
	 * Unless "force" is set, events whose PE has already been
	 * isolated are left on the queue so that no event is lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
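
/*
 * Usage sketch (hypothetical call sites, derived from the function's
 * own semantics rather than from any specific caller):
 *
 *	eeh_remove_event(NULL, true);	// dead IOC: purge everything
 *	eeh_remove_event(phb_pe, true);	// purge all events for one PHB
 *	eeh_remove_event(pe, false);	// drop duplicates for one PE,
 *					// keeping isolated-PE events
 */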