path: root/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/string.h>

#include "adf_common_drv.h"
#include "adf_sysfs_ras_counters.h"

static ssize_t errors_correctable_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct adf_accel_dev *accel_dev;
	unsigned long counter;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR);
	return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
}

static ssize_t errors_nonfatal_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct adf_accel_dev *accel_dev;
	unsigned long counter;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR);
	return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
}

static ssize_t errors_fatal_show(struct device *dev,
				 struct device_attribute *dev_attr,
				 char *buf)
{
	struct adf_accel_dev *accel_dev;
	unsigned long counter;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL);
	return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
}

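/*
 * Clear all three RAS counters.  Only a two-byte write beginning with '1'
 * (e.g. "1\n" from `echo 1`) is accepted; anything else returns -EINVAL.
 */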
static ssize_t reset_error_counters_store(struct device *dev,
					  struct device_attribute *dev_attr,
					  const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;

	if (buf[0] != '1' || count != 2)
		return -EINVAL;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors);

	return count;
}

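/*
 * DEVICE_ATTR_RO()/DEVICE_ATTR_WO() create read-only and write-only
 * dev_attr_<name> attributes bound to the <name>_show()/<name>_store()
 * callbacks above by naming convention.
 */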
static DEVICE_ATTR_RO(errors_correctable);
static DEVICE_ATTR_RO(errors_nonfatal);
static DEVICE_ATTR_RO(errors_fatal);
static DEVICE_ATTR_WO(reset_error_counters);

static struct attribute *qat_ras_attrs[] = {
	&dev_attr_errors_correctable.attr,
	&dev_attr_errors_nonfatal.attr,
	&dev_attr_errors_fatal.attr,
	&dev_attr_reset_error_counters.attr,
	NULL,
};

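/*
 * Naming the group places the attributes in a "qat_ras" subdirectory of
 * the device's sysfs node rather than directly alongside its other files.
 */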
static struct attribute_group qat_ras_group = {
	.attrs = qat_ras_attrs,
	.name = "qat_ras",
};

void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->ras_errors.enabled)
		return;

	ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors);

	if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group))
		dev_err(&GET_DEV(accel_dev),
			"Failed to create qat_ras attribute group.\n");
}

void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->ras_errors.enabled)
		return;

	device_remove_group(&GET_DEV(accel_dev), &qat_ras_group);

	ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors);
}
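
For reference, a minimal userspace sketch of how the attributes registered above are typically consumed. It assumes the standard PCI sysfs layout, under which the named group appears as a qat_ras/ subdirectory of the device node; the address 0000:6b:00.0 is a placeholder BDF, not something taken from this file.

/*
 * Standalone userspace sketch (not part of the driver): read the three RAS
 * counters exposed by adf_sysfs_ras_counters.c and then clear them.
 * Replace the placeholder BDF with the QAT device address on the target.
 */
#include <stdio.h>

#define QAT_RAS_DIR "/sys/bus/pci/devices/0000:6b:00.0/qat_ras"

static long read_counter(const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), QAT_RAS_DIR "/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	FILE *f;

	printf("correctable: %ld\n", read_counter("errors_correctable"));
	printf("nonfatal:    %ld\n", read_counter("errors_nonfatal"));
	printf("fatal:       %ld\n", read_counter("errors_fatal"));

	/* A two-byte "1\n" write satisfies the count == 2 check in the store handler. */
	f = fopen(QAT_RAS_DIR "/reset_error_counters", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}

	return 0;
}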