/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>

#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
#define edac_atomic_scrub(va, size) do { } while (0)
#endif

int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);

static int edac_report = EDAC_REPORTING_ENABLED;

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

/*
 * Used to lock EDAC MC to just one module, avoiding the use of two
 * drivers (e.g. apei/ghes and i7core_edac) at the same time.
 */
static const char *edac_mc_owner;

int edac_get_report_status(void)
{
        return edac_report;
}
EXPORT_SYMBOL_GPL(edac_get_report_status);

void edac_set_report_status(int new)
{
        if (new == EDAC_REPORTING_ENABLED ||
            new == EDAC_REPORTING_DISABLED ||
            new == EDAC_REPORTING_FORCE)
                edac_report = new;
}
EXPORT_SYMBOL_GPL(edac_set_report_status);

static int edac_report_set(const char *str, const struct kernel_param *kp)
{
        if (!str)
                return -EINVAL;

        if (!strncmp(str, "on", 2))
                edac_report = EDAC_REPORTING_ENABLED;
        else if (!strncmp(str, "off", 3))
                edac_report = EDAC_REPORTING_DISABLED;
        else if (!strncmp(str, "force", 5))
                edac_report = EDAC_REPORTING_FORCE;

        return 0;
}

static int edac_report_get(char *buffer, const struct kernel_param *kp)
{
        int ret = 0;

        switch (edac_report) {
        case EDAC_REPORTING_ENABLED:
                ret = sprintf(buffer, "on");
                break;
        case EDAC_REPORTING_DISABLED:
                ret = sprintf(buffer, "off");
                break;
        case EDAC_REPORTING_FORCE:
                ret = sprintf(buffer, "force");
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static const struct kernel_param_ops edac_report_ops = {
        .set = edac_report_set,
        .get = edac_report_get,
};

module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);

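/*
 * Usage notes (illustrative, assuming this file is built into the
 * edac_core module as in the mainline Makefile): the parameter can be
 * set at boot time ("edac_core.edac_report=off" on the kernel command
 * line) or at runtime via /sys/module/edac_core/parameters/edac_report,
 * accepting "on", "off" and "force". A driver-side check might look like:
 *
 *	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
 *		return;
 */
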
unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
                                     unsigned int len)
{
        struct mem_ctl_info *mci = dimm->mci;
        int i, n, count = 0;
        char *p = buf;

        for (i = 0; i < mci->n_layers; i++) {
                n = snprintf(p, len, "%s %d ",
                             edac_layer_name[mci->layers[i].type],
                             dimm->location[i]);
                p += n;
                len -= n;
                count += n;
                if (!len)
                        break;
        }

        return count;
}

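/*
 * Example (illustrative): for a controller with a chip-select layer and
 * a channel layer, a DIMM at csrow 2/channel 1 yields the string
 * "csrow 2 channel 1 " (note the trailing space); the return value is
 * the number of bytes written into 'buf'.
 */
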
#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
        edac_dbg(4, "  channel->chan_idx = %d\n", chan->chan_idx);
        edac_dbg(4, "    channel = %p\n", chan);
        edac_dbg(4, "    channel->csrow = %p\n", chan->csrow);
        edac_dbg(4, "    channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
        char location[80];

        if (!dimm->nr_pages)
                return;

        edac_dimm_info_location(dimm, location, sizeof(location));

        edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
                 dimm->mci->csbased ? "rank" : "dimm",
                 dimm->idx, location, dimm->csrow, dimm->cschannel);
        edac_dbg(4, "  dimm = %p\n", dimm);
        edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
        edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
        edac_dbg(4, "  dimm->grain = %d\n", dimm->grain);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
        edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
        edac_dbg(4, "  csrow = %p\n", csrow);
        edac_dbg(4, "  csrow->first_page = 0x%lx\n", csrow->first_page);
        edac_dbg(4, "  csrow->last_page = 0x%lx\n", csrow->last_page);
        edac_dbg(4, "  csrow->page_mask = 0x%lx\n", csrow->page_mask);
        edac_dbg(4, "  csrow->nr_channels = %d\n", csrow->nr_channels);
        edac_dbg(4, "  csrow->channels = %p\n", csrow->channels);
        edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
        edac_dbg(3, "\tmci = %p\n", mci);
        edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
        edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
        edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
        edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
        edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
                 mci->nr_csrows, mci->csrows);
        edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
                 mci->tot_dimms, mci->dimms);
        edac_dbg(3, "\tdev = %p\n", mci->pdev);
        edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
                 mci->mod_name, mci->ctl_name);
        edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */

const char * const edac_mem_types[] = {
        [MEM_EMPTY]	= "Empty",
        [MEM_RESERVED]	= "Reserved",
        [MEM_UNKNOWN]	= "Unknown",
        [MEM_FPM]	= "FPM",
        [MEM_EDO]	= "EDO",
        [MEM_BEDO]	= "BEDO",
        [MEM_SDR]	= "Unbuffered-SDR",
        [MEM_RDR]	= "Registered-SDR",
        [MEM_DDR]	= "Unbuffered-DDR",
        [MEM_RDDR]	= "Registered-DDR",
        [MEM_RMBS]	= "RMBS",
        [MEM_DDR2]	= "Unbuffered-DDR2",
        [MEM_FB_DDR2]	= "FullyBuffered-DDR2",
        [MEM_RDDR2]	= "Registered-DDR2",
        [MEM_XDR]	= "XDR",
        [MEM_DDR3]	= "Unbuffered-DDR3",
        [MEM_RDDR3]	= "Registered-DDR3",
        [MEM_LRDDR3]	= "Load-Reduced-DDR3-RAM",
        [MEM_DDR4]	= "Unbuffered-DDR4",
        [MEM_RDDR4]	= "Registered-DDR4",
        [MEM_LRDDR4]	= "Load-Reduced-DDR4-RAM",
        [MEM_NVDIMM]	= "Non-volatile-RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p: pointer to a pointer with the memory offset to be used. At
 *	return, this will be incremented to point to the next offset
 * @size: Size of the data structure to be reserved
 * @n_elems: Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is absolutely needed to keep the proper advancing
 * further in memory to the proper offsets when allocating the struct along
 * with its embedded structs, as edac_device_alloc_ctl_info() does, for
 * example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
        unsigned int align, r;
        void *ptr = *p;

        *p += size * n_elems;

        /*
         * 'p' can possibly be an unaligned item X such that sizeof(X) is
         * 'size'.  Adjust 'p' so that its alignment is at least as
         * stringent as what the compiler would provide for X and return
         * the aligned result.
         * Here we assume that the alignment of a "long long" is the most
         * stringent alignment that the compiler will ever provide by default.
         * As far as I know, this is a reasonable assumption.
         */
        if (size > sizeof(long))
                align = sizeof(long long);
        else if (size > sizeof(int))
                align = sizeof(long);
        else if (size > sizeof(short))
                align = sizeof(int);
        else if (size > sizeof(char))
                align = sizeof(short);
        else
                return ptr;

        r = (unsigned long)ptr % align;

        if (r == 0)
                return ptr;

        *p += align - r;

        return (void *)(((unsigned long)ptr) + align - r);
}

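/*
 * Usage sketch (illustrative, mirroring what edac_mc_alloc() below does):
 * run a first pass over a NULL base to compute the aligned offsets and the
 * total size, allocate once, then rebase the offsets onto the real
 * allocation. "struct foo" and "struct bar" are hypothetical:
 *
 *	void *ptr = NULL, *base;
 *	struct foo *foo;
 *	struct bar *bar;
 *	size_t size;
 *
 *	foo = edac_align_ptr(&ptr, sizeof(*foo), 1);
 *	bar = edac_align_ptr(&ptr, sizeof(*bar), 16);
 *	size = (unsigned long)ptr;
 *
 *	base = kzalloc(size, GFP_KERNEL);
 *	foo = (void *)((char *)base + (unsigned long)foo);
 *	bar = (void *)((char *)base + (unsigned long)bar);
 */
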
static void _edac_mc_free(struct mem_ctl_info *mci)
{
        struct csrow_info *csr;
        int i, chn, row;

        if (mci->dimms) {
                for (i = 0; i < mci->tot_dimms; i++)
                        kfree(mci->dimms[i]);
                kfree(mci->dimms);
        }

        if (mci->csrows) {
                for (row = 0; row < mci->nr_csrows; row++) {
                        csr = mci->csrows[row];
                        if (!csr)
                                continue;

                        if (csr->channels) {
                                for (chn = 0; chn < mci->num_cschannel; chn++)
                                        kfree(csr->channels[chn]);
                                kfree(csr->channels);
                        }
                        kfree(csr);
                }
                kfree(mci->csrows);
        }
        kfree(mci);
}

struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
                                   unsigned int n_layers,
                                   struct edac_mc_layer *layers,
                                   unsigned int sz_pvt)
{
        struct mem_ctl_info *mci;
        struct edac_mc_layer *layer;
        struct csrow_info *csr;
        struct rank_info *chan;
        struct dimm_info *dimm;
        u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
        unsigned int pos[EDAC_MAX_LAYERS];
        unsigned int idx, size, tot_dimms = 1, count = 1;
        unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
        void *pvt, *p, *ptr = NULL;
        int i, j, row, chn, n, len;
        bool per_rank = false;

        if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
                return NULL;

        /*
         * Calculate the total amount of dimms and csrows/cschannels while
         * in the old API emulation mode
         */
        for (idx = 0; idx < n_layers; idx++) {
                tot_dimms *= layers[idx].size;

                if (layers[idx].is_virt_csrow)
                        tot_csrows *= layers[idx].size;
                else
                        tot_channels *= layers[idx].size;

                if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
                        per_rank = true;
        }

        /* Figure out the offsets of the various items from the start of an mc
         * structure.  We want the alignment of each item to be at least as
         * stringent as what the compiler would provide if we could simply
         * hardcode everything into a single struct.
         */
        mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
        layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
        for (i = 0; i < n_layers; i++) {
                count *= layers[i].size;
                edac_dbg(4, "errcount layer %d size %d\n", i, count);
                ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
                ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
                tot_errcount += 2 * count;
        }

        edac_dbg(4, "allocating %d error counters\n", tot_errcount);
        pvt = edac_align_ptr(&ptr, sz_pvt, 1);
        size = ((unsigned long)pvt) + sz_pvt;

        edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
                 size,
                 tot_dimms,
                 per_rank ? "ranks" : "dimms",
                 tot_csrows * tot_channels);

        mci = kzalloc(size, GFP_KERNEL);
        if (mci == NULL)
                return NULL;

        /* Adjust pointers so they point within the memory we just allocated
         * rather than an imaginary chunk of memory located at address 0.
         */
        layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
        for (i = 0; i < n_layers; i++) {
                mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
                mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
        }
        pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

        /* setup index and various internal pointers */
        mci->mc_idx = mc_num;
        mci->tot_dimms = tot_dimms;
        mci->pvt_info = pvt;
        mci->n_layers = n_layers;
        mci->layers = layer;
        memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
        mci->nr_csrows = tot_csrows;
        mci->num_cschannel = tot_channels;
        mci->csbased = per_rank;

        /*
         * Allocate and fill the csrow/channels structs
         */
        mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
        if (!mci->csrows)
                goto error;
        for (row = 0; row < tot_csrows; row++) {
                csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
                if (!csr)
                        goto error;
                mci->csrows[row] = csr;
                csr->csrow_idx = row;
                csr->mci = mci;
                csr->nr_channels = tot_channels;
                csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
                                        GFP_KERNEL);
                if (!csr->channels)
                        goto error;

                for (chn = 0; chn < tot_channels; chn++) {
                        chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
                        if (!chan)
                                goto error;
                        csr->channels[chn] = chan;
                        chan->chan_idx = chn;
                        chan->csrow = csr;
                }
        }

        /*
         * Allocate and fill the dimm structs
         */
        mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
        if (!mci->dimms)
                goto error;

        memset(&pos, 0, sizeof(pos));
        row = 0;
        chn = 0;
        for (idx = 0; idx < tot_dimms; idx++) {
                chan = mci->csrows[row]->channels[chn];

                dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
                if (!dimm)
                        goto error;
                mci->dimms[idx] = dimm;
                dimm->mci = mci;
                dimm->idx = idx;

                /*
                 * Copy DIMM location and initialize it.
                 */
                len = sizeof(dimm->label);
                p = dimm->label;
                n = snprintf(p, len, "mc#%u", mc_num);
                p += n;
                len -= n;
                for (j = 0; j < n_layers; j++) {
                        n = snprintf(p, len, "%s#%u",
                                     edac_layer_name[layers[j].type],
                                     pos[j]);
                        p += n;
                        len -= n;
                        dimm->location[j] = pos[j];

                        if (len <= 0)
                                break;
                }

                /* Link it to the csrows old API data */
                chan->dimm = dimm;
                dimm->csrow = row;
                dimm->cschannel = chn;

                /* Increment csrow location */
                if (layers[0].is_virt_csrow) {
                        chn++;
                        if (chn == tot_channels) {
                                chn = 0;
                                row++;
                        }
                } else {
                        row++;
                        if (row == tot_csrows) {
                                row = 0;
                                chn++;
                        }
                }

                /* Increment dimm location */
                for (j = n_layers - 1; j >= 0; j--) {
                        pos[j]++;
                        if (pos[j] < layers[j].size)
                                break;
                        pos[j] = 0;
                }
        }

        mci->op_state = OP_ALLOC;

        return mci;

error:
        _edac_mc_free(mci);

        return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);

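/*
 * Allocation sketch (illustrative): a driver for a controller with four
 * chip selects and two channels, keeping a hypothetical private struct
 * mydrv_pvt, would set up its layers roughly like this:
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = 4;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = 2;
 *	layers[1].is_virt_csrow = false;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct mydrv_pvt));
 *	if (!mci)
 *		return -ENOMEM;
 */
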
void edac_mc_free(struct mem_ctl_info *mci)
{
        edac_dbg(1, "\n");

        if (device_is_registered(&mci->dev))
                edac_unregister_sysfs(mci);

        _edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);

bool edac_has_mcs(void)
{
        bool ret;

        mutex_lock(&mem_ctls_mutex);

        ret = list_empty(&mc_devices);

        mutex_unlock(&mem_ctls_mutex);

        return !ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);

/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
        struct mem_ctl_info *mci;
        struct list_head *item;

        edac_dbg(3, "\n");

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);

                if (mci->pdev == dev)
                        return mci;
        }

        return NULL;
}

/**
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
        struct mem_ctl_info *ret;

        edac_dbg(3, "\n");

        mutex_lock(&mem_ctls_mutex);
        ret = __find_mci_by_dev(dev);
        mutex_unlock(&mem_ctls_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
        struct delayed_work *d_work = to_delayed_work(work_req);
        struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

        mutex_lock(&mem_ctls_mutex);

        if (mci->op_state != OP_RUNNING_POLL) {
                mutex_unlock(&mem_ctls_mutex);
                return;
        }

        if (edac_op_state == EDAC_OPSTATE_POLL)
                mci->edac_check(mci);

        mutex_unlock(&mem_ctls_mutex);

        /* Queue ourselves again. */
        edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value; we need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(unsigned long value)
{
        struct mem_ctl_info *mci;
        struct list_head *item;

        mutex_lock(&mem_ctls_mutex);

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);

                if (mci->op_state == OP_RUNNING_POLL)
                        edac_mod_work(&mci->work, value);
        }
        mutex_unlock(&mem_ctls_mutex);
}

/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
        struct list_head *item, *insert_before;
        struct mem_ctl_info *p;

        insert_before = &mc_devices;

        p = __find_mci_by_dev(mci->pdev);
        if (unlikely(p != NULL))
                goto fail0;

        list_for_each(item, &mc_devices) {
                p = list_entry(item, struct mem_ctl_info, link);

                if (p->mc_idx >= mci->mc_idx) {
                        if (unlikely(p->mc_idx == mci->mc_idx))
                                goto fail1;

                        insert_before = item;
                        break;
                }
        }

        list_add_tail_rcu(&mci->link, insert_before);
        return 0;

fail0:
        edac_printk(KERN_WARNING, EDAC_MC,
                    "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
                    edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
        return 1;

fail1:
        edac_printk(KERN_WARNING, EDAC_MC,
                    "bug in low-level driver: attempt to assign\n"
                    "    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
        return 1;
}

static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
        list_del_rcu(&mci->link);

        /* these are for safe removal of devices from global list while
         * NMI handlers may be traversing list
         */
        synchronize_rcu();
        INIT_LIST_HEAD(&mci->link);

        return list_empty(&mc_devices);
}

struct mem_ctl_info *edac_mc_find(int idx)
{
        struct mem_ctl_info *mci;
        struct list_head *item;

        mutex_lock(&mem_ctls_mutex);

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);
                if (mci->mc_idx == idx)
                        goto unlock;
        }

        mci = NULL;
unlock:
        mutex_unlock(&mem_ctls_mutex);
        return mci;
}
EXPORT_SYMBOL(edac_mc_find);

const char *edac_get_owner(void)
{
        return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
                               const struct attribute_group **groups)
{
        int ret = -EINVAL;

        edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
        if (edac_debug_level >= 3)
                edac_mc_dump_mci(mci);

        if (edac_debug_level >= 4) {
                struct dimm_info *dimm;
                int i;

                for (i = 0; i < mci->nr_csrows; i++) {
                        struct csrow_info *csrow = mci->csrows[i];
                        u32 nr_pages = 0;
                        int j;

                        for (j = 0; j < csrow->nr_channels; j++)
                                nr_pages += csrow->channels[j]->dimm->nr_pages;
                        if (!nr_pages)
                                continue;
                        edac_mc_dump_csrow(csrow);
                        for (j = 0; j < csrow->nr_channels; j++)
                                if (csrow->channels[j]->dimm->nr_pages)
                                        edac_mc_dump_channel(csrow->channels[j]);
                }

                mci_for_each_dimm(mci, dimm)
                        edac_mc_dump_dimm(dimm);
        }
#endif
        mutex_lock(&mem_ctls_mutex);

        if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
                ret = -EPERM;
                goto fail0;
        }

        if (add_mc_to_global_list(mci))
                goto fail0;

        /* set load time so that error rate can be tracked */
        mci->start_time = jiffies;

        mci->bus = edac_get_sysfs_subsys();

        if (edac_create_sysfs_mci_device(mci, groups)) {
                edac_mc_printk(mci, KERN_WARNING,
                               "failed to create sysfs device\n");
                goto fail1;
        }

        if (mci->edac_check) {
                mci->op_state = OP_RUNNING_POLL;

                INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
                edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));

        } else {
                mci->op_state = OP_RUNNING_INTERRUPT;
        }

        /* Report action taken */
        edac_mc_printk(mci, KERN_INFO,
                       "Giving out device to module %s controller %s: DEV %s (%s)\n",
                       mci->mod_name, mci->ctl_name, mci->dev_name,
                       edac_op_state_to_string(mci->op_state));

        edac_mc_owner = mci->mod_name;

        mutex_unlock(&mem_ctls_mutex);
        return 0;

fail1:
        del_mc_from_global_list(mci);

fail0:
        mutex_unlock(&mem_ctls_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);

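/*
 * Registration sketch (illustrative): after edac_mc_alloc(), the driver
 * fills in the mandatory fields and hands the controller over. Setting an
 * edac_check callback is optional and selects polling mode (see above);
 * passing NULL for the attribute groups is the common case, and
 * edac_mc_add_mc() in edac_mc.h is a thin wrapper doing exactly that.
 * The "mydrv" names are hypothetical:
 *
 *	mci->pdev = &pdev->dev;
 *	mci->mod_name = "mydrv";
 *	mci->ctl_name = "mydrv_mc0";
 *	mci->edac_check = mydrv_check;
 *
 *	if (edac_mc_add_mc_with_groups(mci, NULL))
 *		goto err_free;
 */
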
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
        struct mem_ctl_info *mci;

        edac_dbg(0, "\n");

        mutex_lock(&mem_ctls_mutex);

        /* find the requested mci struct in the global list */
        mci = __find_mci_by_dev(dev);
        if (mci == NULL) {
                mutex_unlock(&mem_ctls_mutex);
                return NULL;
        }

        /* mark MCI offline: */
        mci->op_state = OP_OFFLINE;

        if (del_mc_from_global_list(mci))
                edac_mc_owner = NULL;

        mutex_unlock(&mem_ctls_mutex);

        if (mci->edac_check)
                edac_stop_work(&mci->work);

        /* remove from sysfs */
        edac_remove_sysfs_mci_device(mci);

        edac_printk(KERN_INFO, EDAC_MC,
                    "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
                    mci->mod_name, mci->ctl_name, edac_dev_name(mci));

        return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

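/*
 * Teardown sketch (illustrative): the mirror of the registration above,
 * typically from a driver's .remove() callback. On success the driver
 * regains ownership of the mci and must free it itself:
 *
 *	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
 *
 *	if (mci)
 *		edac_mc_free(mci);
 */
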
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
                                u32 size)
{
        struct page *pg;
        void *virt_addr;
        unsigned long flags = 0;

        edac_dbg(3, "\n");

        /* ECC error page was not in our memory. Ignore it. */
        if (!pfn_valid(page))
                return;

        /* Find the actual page structure then map it and fix */
        pg = pfn_to_page(page);

        if (PageHighMem(pg))
                local_irq_save(flags);

        virt_addr = kmap_atomic(pg);

        /* Perform architecture specific atomic scrub operation */
        edac_atomic_scrub(virt_addr + offset, size);

        /* Unmap and complete */
        kunmap_atomic(virt_addr);

        if (PageHighMem(pg))
                local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
        struct csrow_info **csrows = mci->csrows;
        int row, i, j, n;

        edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
        row = -1;

        for (i = 0; i < mci->nr_csrows; i++) {
                struct csrow_info *csrow = csrows[i];
                n = 0;
                for (j = 0; j < csrow->nr_channels; j++) {
                        struct dimm_info *dimm = csrow->channels[j]->dimm;
                        n += dimm->nr_pages;
                }
                if (n == 0)
                        continue;

                edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
                         mci->mc_idx,
                         csrow->first_page, page, csrow->last_page,
                         csrow->page_mask);

                if ((page >= csrow->first_page) &&
                    (page <= csrow->last_page) &&
                    ((page & csrow->page_mask) ==
                     (csrow->first_page & csrow->page_mask))) {
                        row = i;
                        break;
                }
        }

        if (row == -1)
                edac_mc_printk(mci, KERN_ERR,
                               "could not look up page error address %lx\n",
                               (unsigned long)page);

        return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

const char *edac_layer_name[] = {
        [EDAC_MC_LAYER_BRANCH] = "branch",
        [EDAC_MC_LAYER_CHANNEL] = "channel",
        [EDAC_MC_LAYER_SLOT] = "slot",
        [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
        [EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

static void edac_inc_ce_error(struct mem_ctl_info *mci,
                              bool enable_per_layer_report,
                              const int pos[EDAC_MAX_LAYERS],
                              const u16 count)
{
        int i, index = 0;

        mci->ce_mc += count;

        if (!enable_per_layer_report) {
                mci->ce_noinfo_count += count;
                return;
        }

        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        break;
                index += pos[i];
                mci->ce_per_layer[i][index] += count;

                if (i < mci->n_layers - 1)
                        index *= mci->layers[i + 1].size;
        }
}

static void edac_inc_ue_error(struct mem_ctl_info *mci,
                              bool enable_per_layer_report,
                              const int pos[EDAC_MAX_LAYERS],
                              const u16 count)
{
        int i, index = 0;

        mci->ue_mc += count;

        if (!enable_per_layer_report) {
                mci->ue_noinfo_count += count;
                return;
        }

        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        break;
                index += pos[i];
                mci->ue_per_layer[i][index] += count;

                if (i < mci->n_layers - 1)
                        index *= mci->layers[i + 1].size;
        }
}

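/*
 * Worked example of the per-layer index math above (illustrative): with
 * two layers sized {4, 2} and pos = {2, 1}, layer 0 bumps counter 2 of
 * the first per-layer array, index then becomes 2 * 2 = 4, and layer 1
 * bumps counter 4 + 1 = 5 of the second array, i.e. the row-major flat
 * index of (csrow 2, channel 1).
 */
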
static void edac_ce_error(struct mem_ctl_info *mci,
                          const u16 error_count,
                          const int pos[EDAC_MAX_LAYERS],
                          const char *msg,
                          const char *location,
                          const char *label,
                          const char *detail,
                          const char *other_detail,
                          const bool enable_per_layer_report,
                          const unsigned long page_frame_number,
                          const unsigned long offset_in_page,
                          long grain)
{
        unsigned long remapped_page;
        char *msg_aux = "";

        if (*msg)
                msg_aux = " ";

        if (edac_mc_get_log_ce()) {
                if (other_detail && *other_detail)
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d CE %s%son %s (%s %s - %s)\n",
                                       error_count, msg, msg_aux, label,
                                       location, detail, other_detail);
                else
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d CE %s%son %s (%s %s)\n",
                                       error_count, msg, msg_aux, label,
                                       location, detail);
        }
        edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);

        if (mci->scrub_mode == SCRUB_SW_SRC) {
                /*
                 * Some memory controllers (called MCs below) can remap
                 * memory so that it is still available at a different
                 * address when PCI devices map into memory.
                 * MCs that can't do this lose the memory where PCI
                 * devices are mapped. This mapping is MC-dependent
                 * and so we call back into the MC driver for it to
                 * map the MC page to a physical (CPU) page which can
                 * then be mapped to a virtual page - which can then
                 * be scrubbed.
                 */
                remapped_page = mci->ctl_page_to_phys ?
                        mci->ctl_page_to_phys(mci, page_frame_number) :
                        page_frame_number;

                edac_mc_scrub_block(remapped_page,
                                    offset_in_page, grain);
        }
}

static void edac_ue_error(struct mem_ctl_info *mci,
                          const u16 error_count,
                          const int pos[EDAC_MAX_LAYERS],
                          const char *msg,
                          const char *location,
                          const char *label,
                          const char *detail,
                          const char *other_detail,
                          const bool enable_per_layer_report)
{
        char *msg_aux = "";

        if (*msg)
                msg_aux = " ";

        if (edac_mc_get_log_ue()) {
                if (other_detail && *other_detail)
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d UE %s%son %s (%s %s - %s)\n",
                                       error_count, msg, msg_aux, label,
                                       location, detail, other_detail);
                else
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d UE %s%son %s (%s %s)\n",
                                       error_count, msg, msg_aux, label,
                                       location, detail);
        }

        if (edac_mc_get_panic_on_ue()) {
                if (other_detail && *other_detail)
                        panic("UE %s%son %s (%s%s - %s)\n",
                              msg, msg_aux, label, location, detail, other_detail);
                else
                        panic("UE %s%son %s (%s%s)\n",
                              msg, msg_aux, label, location, detail);
        }

        edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}

void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
                              struct mem_ctl_info *mci,
                              struct edac_raw_error_desc *e)
{
        char detail[80];
        int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
        u8 grain_bits;

        /* Sanity-check driver-supplied grain value. */
        if (WARN_ON_ONCE(!e->grain))
                e->grain = 1;

        grain_bits = fls_long(e->grain - 1);

        /* Report the error via the trace interface */
        if (IS_ENABLED(CONFIG_RAS))
                trace_mc_event(type, e->msg, e->label, e->error_count,
                               mci->mc_idx, e->top_layer, e->mid_layer,
                               e->low_layer,
                               (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
                               grain_bits, e->syndrome, e->other_detail);

        /* Memory type dependent details about the error */
        if (type == HW_EVENT_ERR_CORRECTED) {
                snprintf(detail, sizeof(detail),
                         "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
                         e->page_frame_number, e->offset_in_page,
                         e->grain, e->syndrome);
                edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
                              detail, e->other_detail, e->enable_per_layer_report,
                              e->page_frame_number, e->offset_in_page, e->grain);
        } else {
                snprintf(detail, sizeof(detail),
                         "page:0x%lx offset:0x%lx grain:%ld",
                         e->page_frame_number, e->offset_in_page, e->grain);

                edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
                              detail, e->other_detail, e->enable_per_layer_report);
        }
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);

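/*
 * Note on the grain encoding above (illustrative): the grain is reported
 * through the trace interface as a power-of-two exponent, e.g. a grain of
 * 8 bytes gives grain_bits = fls_long(7) = 3, which consumers recover as
 * 1 << 3.
 */
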
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                          struct mem_ctl_info *mci,
                          const u16 error_count,
                          const unsigned long page_frame_number,
                          const unsigned long offset_in_page,
                          const unsigned long syndrome,
                          const int top_layer,
                          const int mid_layer,
                          const int low_layer,
                          const char *msg,
                          const char *other_detail)
{
        struct dimm_info *dimm;
        char *p;
        int row = -1, chan = -1;
        int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
        int i, n_labels = 0;
        struct edac_raw_error_desc *e = &mci->error_desc;

        edac_dbg(3, "MC%d\n", mci->mc_idx);

        /* Fills the error report buffer */
        memset(e, 0, sizeof(*e));
        e->error_count = error_count;
        e->top_layer = top_layer;
        e->mid_layer = mid_layer;
        e->low_layer = low_layer;
        e->page_frame_number = page_frame_number;
        e->offset_in_page = offset_in_page;
        e->syndrome = syndrome;
        e->msg = msg;
        e->other_detail = other_detail;

        /*
         * Check if the event report is consistent and if the memory
         * location is known. If it is known, enable_per_layer_report will be
         * true, the DIMM(s) label info will be filled and the per-layer
         * error counters will be incremented.
         */
        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] >= (int)mci->layers[i].size) {
                        edac_mc_printk(mci, KERN_ERR,
                                       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
                                       edac_layer_name[mci->layers[i].type],
                                       pos[i], mci->layers[i].size);
                        /*
                         * Instead of just returning it, let's use what's
                         * known about the error. The increment routines and
                         * the DIMM filter logic will do the right thing by
                         * pointing the likely damaged DIMMs.
                         */
                        pos[i] = -1;
                }
                if (pos[i] >= 0)
                        e->enable_per_layer_report = true;
        }

        /*
         * Get the dimm label/grain that applies to the match criteria.
         * As the error algorithm may not be able to point to just one memory
         * stick, the logic here will get all possible labels that could
         * potentially be affected by the error.
         * On FB-DIMM memory controllers, for uncorrected errors, it is common
         * to have only the MC channel and the MC dimm (also called "branch")
         * but the channel is not known, as the memory is arranged in pairs,
         * where each memory belongs to a separate channel within the same
         * branch.
         */
        p = e->label;
        *p = '\0';

        mci_for_each_dimm(mci, dimm) {
                if (top_layer >= 0 && top_layer != dimm->location[0])
                        continue;
                if (mid_layer >= 0 && mid_layer != dimm->location[1])
                        continue;
                if (low_layer >= 0 && low_layer != dimm->location[2])
                        continue;

                /* get the max grain, over the error match range */
                if (dimm->grain > e->grain)
                        e->grain = dimm->grain;

                /*
                 * If the error is memory-controller wide, there's no need to
                 * search for the affected DIMMs because the whole
                 * channel/memory controller/... may be affected.
                 * Also, don't show errors for empty DIMM slots.
                 */
                if (!e->enable_per_layer_report || !dimm->nr_pages)
                        continue;

                if (n_labels >= EDAC_MAX_LABELS) {
                        e->enable_per_layer_report = false;
                        break;
                }
                n_labels++;
                if (p != e->label) {
                        strcpy(p, OTHER_LABEL);
                        p += strlen(OTHER_LABEL);
                }
                strcpy(p, dimm->label);
                p += strlen(p);

                /*
                 * get csrow/channel of the DIMM, in order to allow
                 * incrementing the compat API counters
                 */
                edac_dbg(4, "%s csrows map: (%d,%d)\n",
                         mci->csbased ? "rank" : "dimm",
                         dimm->csrow, dimm->cschannel);
                if (row == -1)
                        row = dimm->csrow;
                else if (row >= 0 && row != dimm->csrow)
                        row = -2;

                if (chan == -1)
                        chan = dimm->cschannel;
                else if (chan >= 0 && chan != dimm->cschannel)
                        chan = -2;
        }

        if (!e->enable_per_layer_report) {
                strcpy(e->label, "any memory");
        } else {
                edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
                if (p == e->label)
                        strcpy(e->label, "unknown memory");
                if (type == HW_EVENT_ERR_CORRECTED) {
                        if (row >= 0) {
                                mci->csrows[row]->ce_count += error_count;
                                if (chan >= 0)
                                        mci->csrows[row]->channels[chan]->ce_count += error_count;
                        }
                } else
                        if (row >= 0)
                                mci->csrows[row]->ue_count += error_count;
        }

        /* Fill the RAM location data */
        p = e->location;

        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        continue;

                p += sprintf(p, "%s:%d ",
                             edac_layer_name[mci->layers[i].type],
                             pos[i]);
        }
        if (p > e->location)
                *(p - 1) = '\0';

        edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
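
/*
 * Reporting sketch (illustrative): a driver that decoded one corrected
 * error at csrow 2, channel 1 would call (pfn/offset/syndrome as decoded
 * by the driver):
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     pfn, offset, syndrome,
 *			     2, 1, -1,
 *			     "read error", "");
 *
 * Layers the hardware cannot resolve are passed as -1, which makes the
 * label logic above match every DIMM along that layer.
 */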