/*
* edac_mc kernel module
* (C) 2005-2007 Linux Networx (http://lnxi.com)
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
*
*/
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/bug.h>
#include "edac_core.h"
#include "edac_module.h"
/* MC EDAC controls, settable by module parameter and sysfs */
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
static int edac_mc_poll_msec = 1000;
/* Getter functions for above */
int edac_mc_get_log_ue(void)
{
return edac_mc_log_ue;
}
int edac_mc_get_log_ce(void)
{
return edac_mc_log_ce;
}
int edac_mc_get_panic_on_ue(void)
{
return edac_mc_panic_on_ue;
}
/* this is temporary */
int edac_mc_get_poll_msec(void)
{
return edac_mc_poll_msec;
}
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
long l;
int ret;
if (!val)
return -EINVAL;
ret = strict_strtol(val, 0, &l);
if (ret == -EINVAL || ((int)l != l))
return -EINVAL;
*((int *)kp->arg) = l;
/* notify edac_mc engine to reset the poll period */
edac_mc_reset_delay_period(l);
return 0;
}
/* Parameter declarations for above */
module_param(edac_mc_panic_on_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
module_param(edac_mc_log_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ue,
"Log uncorrectable error to console: 0=off 1=on");
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
"Log correctable error to console: 0=off 1=on");
module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
&edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
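/* These logging/panic knobs and the poll period typically surface as module
* parameters of the module carrying the EDAC MC core (e.g.
* /sys/module/edac_core/parameters/edac_mc_poll_msec on kernels of this
* vintage -- the module name is illustrative) and can also be set at load
* time, for example:
*
*   modprobe edac_core edac_mc_panic_on_ue=1 edac_mc_poll_msec=5000
*
* Writes to edac_mc_poll_msec go through edac_set_poll_msec() above, which
* validates the value and tells the edac_mc engine to reset its poll period.
*/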
/*
* various constants for Memory Controllers
*/
static const char *mem_types[] = {
[MEM_EMPTY] = "Empty",
[MEM_RESERVED] = "Reserved",
[MEM_UNKNOWN] = "Unknown",
[MEM_FPM] = "FPM",
[MEM_EDO] = "EDO",
[MEM_BEDO] = "BEDO",
[MEM_SDR] = "Unbuffered-SDR",
[MEM_RDR] = "Registered-SDR",
[MEM_DDR] = "Unbuffered-DDR",
[MEM_RDDR] = "Registered-DDR",
[MEM_RMBS] = "RMBS",
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
[MEM_RDDR2] = "Registered-DDR2",
[MEM_XDR] = "XDR",
[MEM_DDR3] = "Unbuffered-DDR3",
[MEM_RDDR3] = "Registered-DDR3"
};
static const char *dev_types[] = {
[DEV_UNKNOWN] = "Unknown",
[DEV_X1] = "x1",
[DEV_X2] = "x2",
[DEV_X4] = "x4",
[DEV_X8] = "x8",
[DEV_X16] = "x16",
[DEV_X32] = "x32",
[DEV_X64] = "x64"
};
static const char *edac_caps[] = {
[EDAC_UNKNOWN] = "Unknown",
[EDAC_NONE] = "None",
[EDAC_RESERVED] = "Reserved",
[EDAC_PARITY] = "PARITY",
[EDAC_EC] = "EC",
[EDAC_SECDED] = "SECDED",
[EDAC_S2ECD2ED] = "S2ECD2ED",
[EDAC_S4ECD4ED] = "S4ECD4ED",
[EDAC_S8ECD8ED] = "S8ECD8ED",
[EDAC_S16ECD16ED] = "S16ECD16ED"
};
/* EDAC sysfs CSROW data structures and methods
*/
/* Set of default csrow<id> attribute show/store functions */
static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
int private)
{
return sprintf(data, "%u\n", csrow->ue_count);
}
static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
int private)
{
return sprintf(data, "%u\n", csrow->ce_count);
}
static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
int private)
{
return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
}
static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
int private)
{
return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
}
static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
int private)
{
return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
}
static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
int private)
{
return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
char *data, int channel)
{
/* if field has not been initialized, there is nothing to send */
if (!csrow->channels[channel].dimm->label[0])
return 0;
return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
csrow->channels[channel].dimm->label);
}
static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
const char *data,
size_t count, int channel)
{
ssize_t max_size = 0;
max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
strncpy(csrow->channels[channel].dimm->label, data, max_size);
csrow->channels[channel].dimm->label[max_size] = '\0';
return max_size;
}
/* show function for dynamic chX_ce_count attribute */
static ssize_t channel_ce_count_show(struct csrow_info *csrow,
char *data, int channel)
{
return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
}
/* csrow specific attribute structure */
struct csrowdev_attribute {
struct attribute attr;
ssize_t(*show) (struct csrow_info *, char *, int);
ssize_t(*store) (struct csrow_info *, const char *, size_t, int);
int private;
};
#define to_csrow(k) container_of(k, struct csrow_info, kobj)
#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
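/* Both helpers recover the enclosing object from the embedded kobject or
* attribute pointer that sysfs passes back into the show/store callbacks.
*/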
/* Set of show/store higher level functions for default csrow attributes */
static ssize_t csrowdev_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct csrow_info *csrow = to_csrow(kobj);
struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
if (csrowdev_attr->show)
return csrowdev_attr->show(csrow,
buffer, csrowdev_attr->private);
return -EIO;
}
static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
const char *buffer, size_t count)
{
struct csrow_info *csrow = to_csrow(kobj);
struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
if (csrowdev_attr->store)
return csrowdev_attr->store(csrow,
buffer,
count, csrowdev_attr->private);
return -EIO;
}
static const struct sysfs_ops csrowfs_ops = {
.show = csrowdev_show,
.store = csrowdev_store
};
#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \
static struct csrowdev_attribute attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
.private = _private, \
};
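/* For reference, an invocation such as
*
*   CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
*
* expands to roughly:
*
*   static struct csrowdev_attribute attr_size_mb = {
*           .attr = { .name = "size_mb", .mode = S_IRUGO },
*           .show = csrow_size_show,
*           .store = NULL,
*           .private = 0,
*   };
*
* so every attribute file under csrow<id> is backed by one statically
* defined descriptor.
*/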
/* default csrow<id> attribute files */
CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0);
CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0);
CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
/* default attributes of the CSROW<id> object */
static struct csrowdev_attribute *default_csrow_attr[] = {
&attr_dev_type,
&attr_mem_type,
&attr_edac_mode,
&attr_size_mb,
&attr_ue_count,
&attr_ce_count,
NULL,
};
/* possible dynamic channel DIMM Label attribute files */
CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 0);
CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 1);
CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 2);
CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 3);
CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 4);
CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 5);
/* Total possible dynamic DIMM Label attribute file table */
static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
&attr_ch0_dimm_label,
&attr_ch1_dimm_label,
&attr_ch2_dimm_label,
&attr_ch3_dimm_label,
&attr_ch4_dimm_label,
&attr_ch5_dimm_label
};
/* possible dynamic channel ce_count attribute files */
CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0);
CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1);
CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2);
CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3);
CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4);
CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5);
/* Total possible dynamic ce_count attribute file table */
static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
&attr_ch0_ce_count,
&attr_ch1_ce_count,
&attr_ch2_ce_count,
&attr_ch3_ce_count,
&attr_ch4_ce_count,
&attr_ch5_ce_count
};
#define EDAC_NR_CHANNELS 6
/* Create dynamic CHANNEL files, indexed by 'chan', under the specified CSROW */
static int edac_create_channel_files(struct kobject *kobj, int chan)
{
int err = -ENODEV;
if (chan >= EDAC_NR_CHANNELS)
return err;
/* create the DIMM label attribute file */
err = sysfs_create_file(kobj,
(struct attribute *)
dynamic_csrow_dimm_attr[chan]);
if (!err) {
/* create the CE Count attribute file */
err = sysfs_create_file(kobj,
(struct attribute *)
dynamic_csrow_ce_count_attr[chan]);
} else {
debugf1("%s() dimm labels and ce_count files created",
__func__);
}
return err;
}
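/* The net effect, sketched for controller 0 and channel 0 (the parent path
* comes from the 'mc' kset set up further below), is one pair of files per
* channel under each populated csrow directory:
*
*   /sys/devices/system/edac/mc/mc0/csrow0/ch0_dimm_label
*   /sys/devices/system/edac/mc/mc0/csrow0/ch0_ce_count
*
* up to EDAC_NR_CHANNELS channels.
*/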
/* No memory to release for this kobj */
static void edac_csrow_instance_release(struct kobject *kobj)
{
struct mem_ctl_info *mci;
struct csrow_info *cs;
debugf1("%s()\n", __func__);
cs = container_of(kobj, struct csrow_info, kobj);
mci = cs->mci;
kobject_put(&mci->edac_mci_kobj);
}
/* the kobj_type instance for a CSROW */
static struct kobj_type ktype_csrow = {
.release = edac_csrow_instance_release,
.sysfs_ops = &csrowfs_ops,
.default_attrs = (struct attribute **)default_csrow_attr,
};
/* Create a CSROW object under the specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
struct kobject *kobj_mci = &mci->edac_mci_kobj;
struct kobject *kobj;
int chan;
int err;
/* generate ..../edac/mc/mc<id>/csrow<index> */
memset(&csrow->kobj, 0, sizeof(csrow->kobj));
csrow->mci = mci; /* include container up link */
/* bump the mci instance's kobject's ref count */
kobj = kobject_get(&mci->edac_mci_kobj);
if (!kobj) {
err = -ENODEV;
goto err_out;
}
/* Instantiate the csrow object */
err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci,
"csrow%d", index);
if (err)
goto err_release_top_kobj;
/* At this point, to release a csrow kobj, one must
* call kobject_put() and let the resulting tear down
* perform the release
*/
/* Create the dynamic attribute files on this csrow,
* namely the DIMM labels and the channel ce_count
*/
for (chan = 0; chan < csrow->nr_channels; chan++) {
err = edac_create_channel_files(&csrow->kobj, chan);
if (err) {
/* special case the unregister here */
kobject_put(&csrow->kobj);
goto err_out;
}
}
kobject_uevent(&csrow->kobj, KOBJ_ADD);
return 0;
/* error unwind stack */
err_release_top_kobj:
kobject_put(&mci->edac_mci_kobj);
err_out:
return err;
}
/* default sysfs methods and data structures for the main MCI kobject */
static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
const char *data, size_t count)
{
int row, chan;
mci->ue_noinfo_count = 0;
mci->ce_noinfo_count = 0;
mci->ue_count = 0;
mci->ce_count = 0;
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *ri = &mci->csrows[row];
ri->ue_count = 0;
ri->ce_count = 0;
for (chan = 0; chan < ri->nr_channels; chan++)
ri->channels[chan].ce_count = 0;
}
mci->start_time = jiffies;
return count;
}
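/* Usage sketch: any write to this attribute clears the counters; the value
* written is ignored, e.g.
*
*   echo 1 > /sys/devices/system/edac/mc/mc0/reset_counters
*
* (path shown for controller 0; the file itself is created as the write-only
* 'reset_counters' attribute further below).
*/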
/* Memory scrubbing interface:
*
* An MC driver can limit the scrubbing bandwidth based on the CPU type.
* Therefore, ->set_sdram_scrub_rate should return the actual bandwidth that
* was accepted, or 0 when scrubbing is to be disabled.
*
* A negative value still means that an error occurred while setting
* the scrub rate.
*/
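/* A minimal, hypothetical driver-side sketch of that contract (helper names
* are made up; the bw argument is u32 in edac_core.h of this vintage):
*
*   static int foo_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
*   {
*           if (bw == 0) {
*                   foo_hw_disable_scrub(mci);      // hypothetical helper
*                   return 0;                       // 0 => scrubbing disabled
*           }
*           bw = foo_hw_round_scrub_rate(mci, bw);  // hw picks nearest rate
*           if (foo_hw_program_scrub(mci, bw))
*                   return -EINVAL;                 // error while setting
*           return bw;                              // actual accepted bandwidth
*   }
*/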
static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
const char *data, size_t count)
{
unsigned long bandwidth = 0;
int new_bw = 0;
if (!mci->set_sdram_scrub_rate)
return -ENODEV;
if (strict_strtoul(data, 10, &bandwidth) < 0)
return -EINVAL;
new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
if (new_bw < 0) {
edac_printk(KERN_WARNING, EDAC_MC,
"Error setting scrub rate to: %lu\n", bandwidth);
return -EINVAL;
}
return count;
}
/*
* ->get_sdram_scrub_rate() return value semantics same as above.
*/
static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
{
int bandwidth = 0;
if (!mci->get_sdram_scrub_rate)
return -ENODEV;
bandwidth = mci->get_sdram_scrub_rate(mci);
if (bandwidth < 0) {
edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
return bandwidth;
}
return sprintf(data, "%d\n", bandwidth);
}
/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
{
return sprintf(data, "%d\n", mci->ue_count);
}
static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
{
return sprintf(data, "%d\n", mci->ce_count);
}
static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
{
return sprintf(data, "%d\n", mci->ce_noinfo_count);
}
static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
{
return sprintf(data, "%d\n", mci->ue_noinfo_count);
}
static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
{
return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}
static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
{
return sprintf(data, "%s\n", mci->ctl_name);
}
static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
{
int total_pages, csrow_idx;
for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
csrow_idx++) {
struct csrow_info *csrow = &mci->csrows[csrow_idx];
if (!csrow->nr_pages)
continue;
total_pages += csrow->nr_pages;
}
return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
}
#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr)
/* MCI show/store functions for top most object */
static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
if (mcidev_attr->show)
return mcidev_attr->show(mem_ctl_info, buffer);
return -EIO;
}
static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
const char *buffer, size_t count)
{
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
if (mcidev_attr->store)
return mcidev_attr->store(mem_ctl_info, buffer, count);
return -EIO;
}
/* Intermediate show/store table */
static const struct sysfs_ops mci_ops = {
.show = mcidev_show,
.store = mcidev_store
};
#define MCIDEV_ATTR(_name,_mode,_show,_store) \
static struct mcidev_sysfs_attribute mci_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
};
/* default Control file */
MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
/* default Attribute files */
MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
/* memory scrubber attribute file */
MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
mci_sdram_scrub_rate_store);
static struct mcidev_sysfs_attribute *mci_attr[] = {
&mci_attr_reset_counters,
&mci_attr_mc_name,
&mci_attr_size_mb,
&mci_attr_seconds_since_reset,
&mci_attr_ue_noinfo_count,
&mci_attr_ce_noinfo_count,
&mci_attr_ue_count,
&mci_attr_ce_count,
&mci_attr_sdram_scrub_rate,
NULL
};
/*
* Release of a MC controlling instance
*
* each MC control instance has the following resources upon entry:
* a) a ref count on the top memctl kobj
* b) a ref count on this module
*
* this function must decrement those ref counts and then
* issue a free on the instance's memory
*/
static void edac_mci_control_release(struct kobject *kobj)
{
struct mem_ctl_info *mci;
mci = to_mci(kobj);
debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx);
/* decrement the module ref count */
module_put(mci->owner);
}
static struct kobj_type ktype_mci = {
.release = edac_mci_control_release,
.sysfs_ops = &mci_ops,
.default_attrs = (struct attribute **)mci_attr,
};
/* EDAC memory controller sysfs kset:
* /sys/devices/system/edac/mc
*/
static struct kset *mc_kset;
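/* Once a controller registers against this kset (below), it shows up as a
* child directory, e.g. for the first instance:
*
*   /sys/devices/system/edac/mc/mc0/
*       mc_name, size_mb, seconds_since_reset, ue_count, ce_count,
*       ue_noinfo_count, ce_noinfo_count, sdram_scrub_rate, reset_counters
*
* plus one csrow<N> subdirectory per populated chip-select row.
*/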
/*
* edac_mc_register_sysfs_main_kobj
*
* sets up and registers the main kobject for each mci
*/
int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
{
struct kobject *kobj_mci;
int err;
debugf1("%s()\n", __func__);
kobj_mci = &mci->edac_mci_kobj;
/* Init the mci's kobject */
memset(kobj_mci, 0, sizeof(*kobj_mci));
/* Record which module 'owns' this control structure
* and bump the ref count of the module
*/
mci->owner = THIS_MODULE;
/* bump ref count on this module */
if (!try_module_get(mci->owner)) {
err = -ENODEV;
goto fail_out;
}
/* this instance becomes part of the mc_kset */
kobj_mci->kset = mc_kset;
/* register the mc<id> kobject to the mc_kset */
err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
"mc%d", mci->mc_idx);
if (err) {
debugf1("%s()Failed to register '.../edac/mc%d'\n",
__func__, mci->mc_idx);
goto kobj_reg_fail;
}
kobject_uevent(kobj_mci, KOBJ_ADD);
/* At this point, to 'free' the control struct,
* edac_mc_unregister_sysfs_main_kobj() must be used
*/
debugf1("%s() Registered '.../edac/mc%d' kobject\n",
__func__, mci->mc_idx);
return 0;
/* Error exit stack */
kobj_reg_fail:
module_put(mci->owner);
fail_out:
return err;
}
/*
* edac_mc_unregister_sysfs_main_kobj
*
* tears down and removes the main mci kobject from the mc_kset
*/
void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
{
debugf1("%s()\n", __func__);
/* delete the kobj from the mc_kset */
kobject_put(&mci->edac_mci_kobj);
}
#define EDAC_DEVICE_SYMLINK "device"
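/* "device" is created in edac_create_sysfs_mci_device() below as a symlink
* from the mc<id> directory back to the kobject of the controller's
* underlying device (mci->dev).
*/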
#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
/* MCI show/store functions for top most object */
static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
if (mcidev_attr->show)
return mcidev_attr->show(mem_ctl_info, buffer);
return -EIO;
}
static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr,
const char *buffer, size_t count)
{
struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
if (mcidev_attr->store)
return mcidev_attr->store(mem_ctl_info, buffer, count);
return -EIO;
}
/* No memory to release for this kobj */
static void edac_inst_grp_release(struct kobject *kobj)
{
struct mcidev_sysfs_group_kobj *grp;
struct mem_ctl_info *mci;
debugf1("%s()\n", __func__);
grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
mci = grp->mci;
}
/* Intermediate show/store table */
static struct sysfs_ops inst_grp_ops = {
.show = inst_grp_show,
.store = inst_grp_store
};
/* the kobj_type instance for a instance group */
static struct kobj_type ktype_inst_grp = {
.release = edac_inst_grp_release,
.sysfs_ops = &inst_grp_ops,
};
/*
* edac_create_mci_instance_attributes
* create MC driver specific attributes below a specified kobj.
* This routine calls itself recursively, in order to create an entire
* object tree.
*/
static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
const struct mcidev_sysfs_attribute *sysfs_attrib,
struct kobject *kobj)
{
int err;
debugf4("%s()\n", __func__);
while (sysfs_attrib) {
debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
struct mcidev_sysfs_group_kobj *grp_kobj;
grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL);
if (!grp_kobj)
return -ENOMEM;
grp_kobj->grp = sysfs_attrib->grp;
grp_kobj->mci = mci;
list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
debugf0("%s() grp %s, mci %p\n", __func__,
sysfs_attrib->grp->name, mci);
err = kobject_init_and_add(&grp_kobj->kobj,
&ktype_inst_grp,
&mci->edac_mci_kobj,
sysfs_attrib->grp->name);
if (err < 0) {
printk(KERN_ERR "kobject_init_and_add failed: %d\n", err);
return err;
}
err = edac_create_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj);
if (err < 0)
return err;
} else if (sysfs_attrib->attr.name) {
debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
err = sysfs_create_file(kobj, &sysfs_attrib->attr);
if (err < 0) {
printk(KERN_ERR "sysfs_create_file failed: %d\n", err);
return err;
}
} else
break;
sysfs_attrib++;
}
return 0;
}
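/* A driver normally passes in a table terminated by an empty entry, which is
* what the final "else break;" above relies on (neither .grp nor .attr.name
* set). A minimal hedged sketch, with made-up names:
*
*   static struct mcidev_sysfs_attribute foo_mci_attrs[] = {
*           { .attr = { .name = "foo_mode", .mode = S_IRUGO | S_IWUSR },
*             .show = foo_mode_show, .store = foo_mode_store },
*           { .grp = &foo_addrmatch_grp },  // nested group -> subdirectory
*           { }                             // terminator
*   };
*/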
/*
* edac_remove_mci_instance_attributes
* remove MC driver specific attributes at the topmost level
* directory of this mci instance.
*/
static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
const struct mcidev_sysfs_attribute *sysfs_attrib,
struct kobject *kobj, int count)
{
struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
debugf1("%s()\n", __func__);
/*
* Loop while there are attributes, until we hit a NULL entry.
* First remove all the attributes.
*/
while (sysfs_attrib) {
debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
debugf4("%s() seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
list_for_each_entry(grp_kobj,
&mci->grp_kobj_list, list) {
debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
if (grp_kobj->grp == sysfs_attrib->grp) {
edac_remove_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj, count + 1);
debugf4("%s() group %s\n", __func__,
sysfs_attrib->grp->name);
kobject_put(&grp_kobj->kobj);
}
}
debugf4("%s() end of seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
} else if (sysfs_attrib->attr.name) {
debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
sysfs_remove_file(kobj, &sysfs_attrib->attr);
} else
break;
sysfs_attrib++;
}
/* Remove the group objects */
if (count)
return;
list_for_each_entry_safe(grp_kobj, tmp,
&mci->grp_kobj_list, list) {
list_del(&grp_kobj->list);
kfree(grp_kobj);
}
}
/*
* Create a new Memory Controller kobject instance,
* mc<id> under the 'mc' directory
*
* Return:
* 0 Success
* !0 Failure
*/
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
int i;
int err;
struct csrow_info *csrow;
struct kobject *kobj_mci = &mci->edac_mci_kobj;
debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
INIT_LIST_HEAD(&mci->grp_kobj_list);
/* create a symlink for the device */
err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
EDAC_DEVICE_SYMLINK);
if (err) {
debugf1("%s() failure to create symlink\n", __func__);
goto fail0;
}
/* If the low level driver desires some attributes,
* then create them now for the driver.
*/
if (mci->mc_driver_sysfs_attributes) {
err = edac_create_mci_instance_attributes(mci,
mci->mc_driver_sysfs_attributes,
&mci->edac_mci_kobj);
if (err) {
debugf1("%s() failure to create mci attributes\n",
__func__);
goto fail0;
}
}
/* Make directories for each CSROW object under the mc<id> kobject
*/
for (i = 0; i < mci->nr_csrows; i++) {
csrow = &mci->csrows[i];
/* Only expose populated CSROWs */
if (csrow->nr_pages > 0) {
err = edac_create_csrow_object(mci, csrow, i);
if (err) {
debugf1("%s() failure: create csrow %d obj\n",
__func__, i);
goto fail1;
}
}
}
return 0;
/* CSROW error: back out what has already been registered */
fail1:
for (i--; i >= 0; i--) {
if (mci->csrows[i].nr_pages > 0)
kobject_put(&mci->csrows[i].kobj);
}
/* remove the mci instance's attributes, if any */
edac_remove_mci_instance_attributes(mci,
mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0);
/* remove the symlink */
sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
fail0:
return err;
}
/*
* remove a Memory Controller instance
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
int i;
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
if (mci->csrows[i].nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
kobject_put(&mci->csrows[i].kobj);
}
}
/* remove this mci instance's attributes */
if (mci->mc_driver_sysfs_attributes) {
debugf4("%s() unregister mci private attributes\n", __func__);
edac_remove_mci_instance_attributes(mci,
mci->mc_driver_sysfs_attributes,
&mci->edac_mci_kobj, 0);
}
/* remove the symlink */
debugf4("%s() remove_link\n", __func__);
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
/* unregister this instance's kobject */
debugf4("%s() remove_mci_instance\n", __func__);
kobject_put(&mci->edac_mci_kobj);
}
/*
* edac_sysfs_setup_mc_kset(void)
*
* Initialize the mc_kset for the 'mc' entry
* This requires creating the top 'mc' directory with a kset
* and its controls/attributes.
*
* To this 'mc' kset, the 'mci' instances will be grouped as children.
*
* Return: 0 SUCCESS
* !0 FAILURE error code
*/
int edac_sysfs_setup_mc_kset(void)
{
int err = -EINVAL;
struct bus_type *edac_subsys;
debugf1("%s()\n", __func__);
/* get the /sys/devices/system/edac subsys reference */
edac_subsys = edac_get_sysfs_subsys();
if (edac_subsys == NULL) {
debugf1("%s() no edac_subsys error=%d\n", __func__, err);
goto fail_out;
}
/* Init the MC's kobject */
mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj);
if (!mc_kset) {
err = -ENOMEM;
debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
goto fail_kset;
}
debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
return 0;
fail_kset:
edac_put_sysfs_subsys();
fail_out:
return err;
}
/*
* edac_sysfs_teardown_mc_kset
*
* tear down the mc_kset for memory controllers
*/
void edac_sysfs_teardown_mc_kset(void)
{
kset_unregister(mc_kset);
edac_put_sysfs_subsys();
}