2 * Copyright 2018 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
28 #include "amdgpu_ras.h"
29 #include "amdgpu_atomfirmware.h"
/* NOTE(review): the lines below are fragments of several declarations
 * (interrupt ring data, fs naming, error counters, ras_manager fields,
 * name tables, flag macros). The enclosing struct headers/closers are
 * not fully visible in this view — confirm grouping against the full file.
 */
32 /* interrupt bottom half */
33 struct work_struct ih_work;
/* IH ring bookkeeping; aligned_element_size appears to be the rptr/wptr
 * stride (see the % ring_size arithmetic in the interrupt code below). */
39 unsigned int ring_size;
40 unsigned int element_size;
41 unsigned int aligned_element_size;
/* Backing storage for the per-object debugfs node name. */
48 char debugfs_name[32];
/* Accumulated uncorrectable / correctable error counts. */
52 unsigned long ue_count;
53 unsigned long ce_count;
/* Bookkeeping for bad (retired) VRAM pages handled during recovery. */
56 struct ras_err_handler_data {
57 /* point to bad pages array */
62 /* the count of entries */
64 /* the space can place new entries */
66 /* last reserved entry's index + 1 */
/* Per-IP-block RAS manager object fields (fragmentary view). */
71 struct ras_common_if head;
75 struct list_head node;
77 struct amdgpu_device *adev;
81 struct device_attribute sysfs_attr;
85 struct ras_fs_data fs_data;
88 struct ras_ih_data ih_data;
90 struct ras_err_data err_data;
/* Human-readable names indexed by error type / block id; used by the
 * ras_err_str()/ras_block_str() macros below. */
93 const char *ras_error_string[] = {
97 "multi_uncorrectable",
101 const char *ras_block_string[] = {
/* ras_err_str() indexes by ffs() of the error-type bit mask;
 * ras_block_str() indexes directly by block id. */
118 #define ras_err_str(i) (ras_error_string[ffs(i)])
119 #define ras_block_str(i) (ras_block_string[i])
/* Default behavior: let vbios enable RAS features at init. */
121 #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
122 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
/* Self-test hook; body not visible in this view. */
124 static void amdgpu_ras_self_test(struct amdgpu_device *adev)
/* debugfs read for a per-block error-count node: queries the block's
 * ue/ce counters and copies a small formatted text buffer to userspace.
 * The object pointer is stashed in the inode's i_private at create time.
 */
129 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
130 size_t size, loff_t *pos)
132 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
133 struct ras_query_if info = {
/* Query failure presumably returns an error to the reader — body truncated. */
139 if (amdgpu_ras_error_query(obj->adev, &info))
142 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
144 "ce", info.ce_count);
/* Clamp to the caller's buffer size before the user copy. */
149 s = min_t(u64, s, size);
152 if (copy_to_user(buf, &val[*pos], s))
/* debugfs write for a per-block node: parses "ue"/"ce" plus an
 * address/value pair (hex or decimal) and triggers an error injection
 * on the owning block.
 */
160 static ssize_t amdgpu_ras_debugfs_write(struct file *f, const char __user *buf,
161 size_t size, loff_t *pos)
163 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
164 struct ras_inject_if info = {
/* Cap the copy at 64 bytes; val/str are local buffers (declarations truncated). */
167 ssize_t s = min_t(u64, 64, size);
170 memset(val, 0, sizeof(val));
175 if (copy_from_user(str, buf, s))
178 /* only care ue/ce for now. */
179 if (memcmp(str, "ue", 2) == 0) {
180 info.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
182 } else if (memcmp(str, "ce", 2) == 0) {
183 info.head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
/* Try hex first, fall back to decimal for the address/value pair. */
187 if (sscanf(str, "0x%llx 0x%llx", &info.address, &info.value) != 2) {
188 if (sscanf(str, "%llu %llu", &info.address, &info.value) != 2)
194 if (amdgpu_ras_error_inject(obj->adev, &info))
/* File ops for the per-block debugfs error-count/inject node. */
200 static const struct file_operations amdgpu_ras_debugfs_ops = {
201 .owner = THIS_MODULE,
202 .read = amdgpu_ras_debugfs_read,
203 .write = amdgpu_ras_debugfs_write,
204 .llseek = default_llseek
/* Map a user-supplied block name (e.g. "umc") to its block id by
 * scanning ras_block_string[]; result returned through *block_id.
 */
207 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
211 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
213 if (strcmp(name, ras_block_str(i)) == 0)
/* Parse input to the ras_ctrl debugfs node into a ras_debug_if.
 * Two input forms are accepted: a human-readable command string
 * ("disable <block>", "enable <block> <err>", "inject <block> <err>
 * <addr> <value>"), or a raw struct ras_debug_if written directly
 * (the size < sizeof(*data) check / second copy_from_user path).
 */
219 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
220 const char __user *buf, size_t size,
221 loff_t *pos, struct ras_debug_if *data)
223 ssize_t s = min_t(u64, 64, size);
235 memset(str, 0, sizeof(str));
236 memset(data, 0, sizeof(*data));
238 if (copy_from_user(str, buf, s))
/* Command dispatch; op constants assigned in truncated branches. */
241 if (sscanf(str, "disable %32s", block_name) == 1)
243 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
245 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
247 else if (str[0] && str[1] && str[2] && str[3])
248 /* ascii string, but commands are not matched. */
252 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
255 data->head.block = block_id;
/* "ue" selects multi_uncorrectable; anything else means correctable. */
256 data->head.type = memcmp("ue", err, 2) == 0 ?
257 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE :
258 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
/* Inject op: decimal first, then hex, for address/value. */
262 if (sscanf(str, "%*s %*s %*s %llu %llu",
263 &address, &value) != 2)
264 if (sscanf(str, "%*s %*s %*s 0x%llx 0x%llx",
265 &address, &value) != 2)
267 data->inject.address = address;
268 data->inject.value = value;
/* Binary path: caller wrote a raw struct ras_debug_if. */
271 if (size < sizeof(*data))
274 if (copy_from_user(data, buf, sizeof(*data)))
281 * DOC: ras debugfs control interface
283 * It accepts struct ras_debug_if who has two members.
285 * First member: ras_debug_if::head or ras_debug_if::inject.
287 * head is used to indicate which IP block will be under control.
289 * head has four members, they are block, type, sub_block_index, name.
290 * block: which IP will be under control.
291 * type: what kind of error will be enabled/disabled/injected.
292 * sub_block_index: some IPs have subcomponents. say, GFX, sDMA.
293 * name: the name of IP.
295 * inject has two more members than head, they are address, value.
296 * As their names indicate, inject operation will write the
297 * value to the address.
299 * Second member: struct ras_debug_if::op.
300 * It has three kinds of operations.
301 * 0: disable RAS on the block. Take ::head as its data.
302 * 1: enable RAS on the block. Take ::head as its data.
303 * 2: inject errors on the block. Take ::inject as its data.
305 * How to use the interface?
307 * copy the struct ras_debug_if in your codes and initialize it.
308 * write the struct to the control node.
311 * echo op block [error [address value]] > .../ras/ras_ctrl
312 * op: disable, enable, inject
313 * disable: only block is needed
314 * enable: block and error are needed
315 * inject: error, address, value are needed
316 * block: umc, sdma, gfx, .........
317 * see ras_block_string[] for details
319 * ue: multi_uncorrectable
320 * ce: single_correctable
322 * here are some examples for bash commands,
323 * echo inject umc ue 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
324 * echo inject umc ce 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
325 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
327 * How to check the result?
329 * For disable/enable, please check ras features at
330 * /sys/class/drm/card[0/1/2...]/device/ras/features
332 * For inject, please check corresponding err count at
333 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
335 * NOTE: operation is only allowed on blocks which are supported.
336 * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
/* Write handler for the ras_ctrl debugfs node (see the DOC block above):
 * parses the command, validates the block is supported, then dispatches
 * to disable/enable/inject based on data.op (switch truncated in view).
 */
338 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
339 size_t size, loff_t *pos)
341 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
342 struct ras_debug_if data;
345 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
/* Reject operations on blocks not in the supported mask. */
349 if (!amdgpu_ras_is_supported(adev, data.head.block))
354 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
357 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
360 ret = amdgpu_ras_error_inject(adev, &data.inject);
/* Write-only control node: no .read handler on purpose. */
373 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
374 .owner = THIS_MODULE,
376 .write = amdgpu_ras_debugfs_ctrl_write,
377 .llseek = default_llseek
/* sysfs show() for a per-block error-count attribute: recovers the
 * owning ras_manager via container_of on the embedded attribute, then
 * formats the ue/ce counters.
 */
380 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
381 struct device_attribute *attr, char *buf)
383 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
384 struct ras_query_if info = {
388 if (amdgpu_ras_error_query(obj->adev, &info))
391 return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
393 "ce", info.ce_count);
/* Crude refcounting on ras_manager objects: get bumps ->use, alive
 * tests it; put (below) drops it and unlinks the obj at zero. */
398 #define get_obj(obj) do { (obj)->use++; } while (0)
399 #define alive_obj(obj) ((obj)->use)
401 static inline void put_obj(struct ras_manager *obj)
403 if (obj && --obj->use == 0)
404 list_del(&obj->node);
/* Negative use count means unbalanced get/put — report it loudly. */
405 if (obj && obj->use < 0) {
406 DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
410 /* make one obj and return it. */
/* Objects live in the preallocated con->objs[] array, indexed by block
 * id; creating one links it onto con->head. Returns NULL on a bad
 * block id (and presumably on already-alive objects — body truncated).
 */
411 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
412 struct ras_common_if *head)
414 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
415 struct ras_manager *obj;
420 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
423 obj = &con->objs[head->block];
424 /* already exist. return obj? */
430 list_add(&obj->node, &con->head);
436 /* return an obj equal to head, or the first when head is NULL */
/* Lookup: with a head, direct index into con->objs[]; with NULL head,
 * linear scan for the first alive object. Only alive (use > 0) objects
 * are returned; WARN_ON sanity-checks the block/index correspondence.
 */
437 static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
438 struct ras_common_if *head)
440 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
441 struct ras_manager *obj;
448 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
451 obj = &con->objs[head->block];
453 if (alive_obj(obj)) {
454 WARN_ON(head->block != obj->head.block);
458 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
460 if (alive_obj(obj)) {
461 WARN_ON(i != obj->head.block);
471 /* feature ctl begin */
/* A feature is "allowed" if the hardware advertises RAS for that block. */
472 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
473 struct ras_common_if *head)
475 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
477 return con->hw_supported & BIT(head->block);
/* A feature is "enabled" if its bit is set in the live feature mask. */
480 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
481 struct ras_common_if *head)
483 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
485 return con->features & BIT(head->block);
489 * if obj is not created, then create one.
490 * set feature enable flag.
/* Software-side half of feature enable: maintains the obj lifetime and
 * the con->features bitmask without touching the PSP firmware.
 */
492 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
493 struct ras_common_if *head, int enable)
495 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
496 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
498 /* If hardware does not support ras, then do not create obj.
499 * But if hardware support ras, we can create the obj.
500 * Ras framework checks con->hw_supported to see if it need do
501 * corresponding initialization.
502 * IP checks con->support to see if it need disable ras.
504 if (!amdgpu_ras_is_feature_allowed(adev, head))
/* No state change requested (already in target state) — nothing to do. */
506 if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
511 obj = amdgpu_ras_create_obj(adev, head);
515 /* In case we create obj somewhere else */
518 con->features |= BIT(head->block);
/* Disable path: clear the bit and (presumably) put_obj — truncated. */
520 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
521 con->features &= ~BIT(head->block);
529 /* wrapper of psp_ras_enable_features */
/* Full feature enable/disable: builds the TA command for the PSP RAS
 * firmware, issues it, then mirrors the new state in software via
 * __amdgpu_ras_feature_enable().
 */
530 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
531 struct ras_common_if *head, bool enable)
533 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
534 union ta_ras_cmd_input info;
/* Populate whichever union member matches the requested direction. */
541 info.disable_features = (struct ta_ras_disable_features_input) {
542 .block_id = head->block,
543 .error_type = head->type,
546 info.enable_features = (struct ta_ras_enable_features_input) {
547 .block_id = head->block,
548 .error_type = head->type,
552 /* Do not enable if it is not allowed. */
553 WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
554 /* Are we already in that state we are going to set? */
555 if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
558 ret = psp_ras_enable_features(&adev->psp, &info, enable);
560 DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
561 enable ? "enable":"disable",
562 ras_block_str(head->block),
/* Firmware accepted the change; update software bookkeeping to match. */
568 __amdgpu_ras_feature_enable(adev, head, enable);
/* Disable every currently-tracked RAS feature; iterates the live obj
 * list. Two paths visible: software-only release vs. full (PSP-backed)
 * disable — the selecting condition is truncated in this view.
 * Returns the residual feature mask (0 on full success).
 */
573 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
576 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
577 struct ras_manager *obj, *tmp;
579 list_for_each_entry_safe(obj, tmp, &con->head, node) {
581 * aka just release the obj and corresponding flags
584 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
587 if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
592 return con->features;
/* Enable RAS on every block, defaulting each to multi_uncorrectable.
 * Mirrors the disable path: software-only (vbios already enabled it)
 * vs. full PSP enable. Returns the resulting feature mask.
 */
595 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
598 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
599 int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
602 for (i = 0; i < ras_block_count; i++) {
603 struct ras_common_if head = {
605 .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
606 .sub_block_index = 0,
608 strcpy(head.name, ras_block_str(i));
611 * bypass psp. vbios enable ras for us.
612 * so just create the obj
614 if (__amdgpu_ras_feature_enable(adev, &head, 1))
617 if (amdgpu_ras_feature_enable(adev, &head, 1))
622 return con->features;
624 /* feature ctl end */
626 /* query/inject/cure begin */
/* Report the cached ue/ce counters for one block into *info. */
627 int amdgpu_ras_error_query(struct amdgpu_device *adev,
628 struct ras_query_if *info)
630 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
634 /* TODO might read the register to read the count */
636 info->ue_count = obj->err_data.ue_count;
637 info->ce_count = obj->err_data.ce_count;
642 /* wrapper of psp_ras_trigger_error */
/* Ask the PSP RAS firmware to inject an error described by *info. */
643 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
644 struct ras_inject_if *info)
646 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
647 struct ta_ras_trigger_error_input block_info = {
648 .block_id = info->head.block,
649 .inject_error_type = info->head.type,
650 .sub_block_index = info->head.sub_block_index,
651 .address = info->address,
652 .value = info->value,
659 ret = psp_ras_trigger_error(&adev->psp, &block_info);
661 DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
662 ras_block_str(info->head.block),
/* Placeholder: no firmware cure interface exists yet. */
668 int amdgpu_ras_error_cure(struct amdgpu_device *adev,
669 struct ras_cure_if *info)
671 /* psp fw has no cure interface for now. */
675 /* get the total error counts on all IPs */
/* Sum ce/ue counts over all live objects; is_ce selects which total
 * is returned (parameter declaration truncated in this view).
 */
676 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
679 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
680 struct ras_manager *obj;
681 struct ras_err_data data = {0, 0};
686 list_for_each_entry(obj, &con->head, node) {
687 struct ras_query_if info = {
691 if (amdgpu_ras_error_query(adev, &info))
694 data.ce_count += info.ce_count;
695 data.ue_count += info.ue_count;
698 return is_ce ? data.ce_count : data.ue_count;
700 /* query/inject/cure end */
/* sysfs show() for the "features" node: prints the feature bitmask,
 * then one line per enabled block (with its error type when an obj
 * exists). Uses scnprintf so the output stays within PAGE_SIZE.
 */
705 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
706 struct device_attribute *attr, char *buf)
708 struct amdgpu_ras *con =
709 container_of(attr, struct amdgpu_ras, features_attr);
710 struct drm_device *ddev = dev_get_drvdata(dev);
711 struct amdgpu_device *adev = ddev->dev_private;
712 struct ras_common_if head;
713 int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
716 struct ras_manager *obj;
718 s = scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
720 for (i = 0; i < ras_block_count; i++) {
723 if (amdgpu_ras_is_feature_enabled(adev, &head)) {
724 obj = amdgpu_ras_find_obj(adev, &head);
725 s += scnprintf(&buf[s], PAGE_SIZE - s,
728 ras_err_str(obj->head.type));
/* else-branch: enabled but no obj — format string truncated in view. */
730 s += scnprintf(&buf[s], PAGE_SIZE - s,
/* Create the device-level "features" sysfs attribute group. */
738 static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
740 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
741 struct attribute *attrs[] = {
742 &con->features_attr.attr,
745 struct attribute_group group = {
750 con->features_attr = (struct device_attribute) {
755 .show = amdgpu_ras_sysfs_features_read,
757 sysfs_attr_init(attrs[0]);
759 return sysfs_create_group(&adev->dev->kobj, &group);
/* Tear down the "features" attribute group created above. */
762 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
764 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
765 struct attribute *attrs[] = {
766 &con->features_attr.attr,
769 struct attribute_group group = {
774 sysfs_remove_group(&adev->dev->kobj, &group);
/* Add a per-block read-only sysfs attribute to the RAS group.
 * attr_inuse guards against double registration.
 */
779 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
780 struct ras_fs_if *head)
782 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
784 if (!obj || obj->attr_inuse)
/* Persist the name: the attribute references fs_data.sysfs_name. */
789 memcpy(obj->fs_data.sysfs_name,
791 sizeof(obj->fs_data.sysfs_name));
793 obj->sysfs_attr = (struct device_attribute){
795 .name = obj->fs_data.sysfs_name,
798 .show = amdgpu_ras_sysfs_read,
800 sysfs_attr_init(&obj->sysfs_attr.attr);
802 if (sysfs_add_file_to_group(&adev->dev->kobj,
803 &obj->sysfs_attr.attr,
/* Remove the per-block sysfs attribute registered above. */
814 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
815 struct ras_common_if *head)
817 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
819 if (!obj || !obj->attr_inuse)
822 sysfs_remove_file_from_group(&adev->dev->kobj,
823 &obj->sysfs_attr.attr,
/* Remove every per-block attribute, then the features node. */
831 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
833 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
834 struct ras_manager *obj, *tmp;
836 list_for_each_entry_safe(obj, tmp, &con->head, node) {
837 amdgpu_ras_sysfs_remove(adev, &obj->head);
840 amdgpu_ras_sysfs_remove_feature_node(adev);
/* Create debugfs "ras/" dir under the DRM minor and the "ras_ctrl"
 * control file inside it; on file-creation failure the dir is removed.
 */
847 static int amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
849 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
850 struct drm_minor *minor = adev->ddev->primary;
851 struct dentry *root = minor->debugfs_root, *dir;
854 dir = debugfs_create_dir("ras", root);
860 ent = debugfs_create_file("ras_ctrl",
861 S_IWUGO | S_IRUGO, con->dir,
862 adev, &amdgpu_ras_debugfs_ctrl_ops);
864 debugfs_remove(con->dir);
/* Create the per-block debugfs node (obj->ent guards re-creation). */
872 int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
873 struct ras_fs_if *head)
875 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
876 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
879 if (!obj || obj->ent)
884 memcpy(obj->fs_data.debugfs_name,
886 sizeof(obj->fs_data.debugfs_name));
888 ent = debugfs_create_file(obj->fs_data.debugfs_name,
889 S_IWUGO | S_IRUGO, con->dir,
890 obj, &amdgpu_ras_debugfs_ops);
/* Remove one per-block debugfs node. */
900 int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
901 struct ras_common_if *head)
903 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
905 if (!obj || !obj->ent)
908 debugfs_remove(obj->ent);
/* Remove every per-block node, then ras_ctrl and the ras dir itself. */
915 static int amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
917 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
918 struct ras_manager *obj, *tmp;
920 list_for_each_entry_safe(obj, tmp, &con->head, node) {
921 amdgpu_ras_debugfs_remove(adev, &obj->head);
924 debugfs_remove(con->ent);
925 debugfs_remove(con->dir);
/* fs init/fini: sysfs feature node + debugfs ctrl node lifecycles. */
935 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
937 amdgpu_ras_sysfs_create_feature_node(adev);
938 amdgpu_ras_debugfs_create_ctrl_node(adev);
943 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
945 amdgpu_ras_debugfs_remove_all(adev);
946 amdgpu_ras_sysfs_remove_all(adev);
/* Bottom-half consumer of the per-object IH ring: drains entries from
 * rptr to wptr, hands each to the IP callback, and bumps the ue counter
 * when the callback reports an uncorrectable error.
 */
952 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
954 struct ras_ih_data *data = &obj->ih_data;
955 struct amdgpu_iv_entry entry;
958 while (data->rptr != data->wptr) {
960 memcpy(&entry, &data->ring[data->rptr],
/* Advance rptr by the aligned stride, wrapping at ring_size. */
964 data->rptr = (data->aligned_element_size +
965 data->rptr) % data->ring_size;
967 /* Let IP handle its data, maybe we need get the output
968 * from the callback to update the error type/count, etc
971 ret = data->cb(obj->adev, &entry);
972 /* ue will trigger an interrupt, and in that case
973 * we need do a reset to recovery the whole system.
974 * But leave IP do that recovery, here we just dispatch
977 if (ret == AMDGPU_RAS_UE) {
978 obj->err_data.ue_count++;
980 /* Might need get ce count by register, but not all IP
981 * saves ce count, some IP just use one bit or two bits
982 * to indicate ce happened.
/* Workqueue entry point: resolve the owning obj and drain its ring. */
988 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
990 struct ras_ih_data *data =
991 container_of(work, struct ras_ih_data, ih_work);
992 struct ras_manager *obj =
993 container_of(data, struct ras_manager, ih_data);
995 amdgpu_ras_interrupt_handler(obj);
/* Top-half: copy the IV entry into the obj's ring and schedule the
 * bottom-half work item. Called from interrupt context by IPs.
 */
998 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
999 struct ras_dispatch_if *info)
1001 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1002 struct ras_ih_data *data = &obj->ih_data;
1007 if (data->inuse == 0)
1010 /* Might be overflow... */
1011 memcpy(&data->ring[data->wptr], info->entry,
1012 data->element_size);
1015 data->wptr = (data->aligned_element_size +
1016 data->wptr) % data->ring_size;
1018 schedule_work(&data->ih_work);
/* Unregister a block's IH: cancel pending work, then (presumably after
 * freeing the ring — lines truncated) zero the ih_data state.
 */
1023 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1024 struct ras_ih_if *info)
1026 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1027 struct ras_ih_data *data;
1032 data = &obj->ih_data;
1033 if (data->inuse == 0)
1036 cancel_work_sync(&data->ih_work);
1039 memset(data, 0, sizeof(*data));
/* Register a block's IH: set up ih_data, the work item, and a ring
 * sized for 64 aligned iv entries.
 */
1045 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1046 struct ras_ih_if *info)
1048 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1049 struct ras_ih_data *data;
1052 /* in case we register the IH before enable ras feature */
1053 obj = amdgpu_ras_create_obj(adev, &info->head)
1059 data = &obj->ih_data;
1060 /* add the callback.etc */
1061 *data = (struct ras_ih_data) {
1064 .element_size = sizeof(struct amdgpu_iv_entry),
1069 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1071 data->aligned_element_size = ALIGN(data->element_size, 8);
1072 /* the ring can store 64 iv entries. */
1073 data->ring_size = 64 * data->aligned_element_size;
1074 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
/* Teardown helper: remove the IH handler for every live object. */
1086 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1088 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1089 struct ras_manager *obj, *tmp;
1091 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1092 struct ras_ih_if info = {
1095 amdgpu_ras_interrupt_remove_handler(adev, &info);
1102 /* recovery begin */
/* Deferred GPU-reset work: runs a full gpu recover then clears the
 * in_recovery flag so another recovery can be scheduled.
 */
1103 static void amdgpu_ras_do_recovery(struct work_struct *work)
1105 struct amdgpu_ras *ras =
1106 container_of(work, struct amdgpu_ras, recovery_work);
1108 amdgpu_device_gpu_recover(ras->adev, 0);
1109 atomic_set(&ras->in_recovery, 0);
/* Drop a reserved-bad-page BO (the kernel-BO wrapper frees nothing
 * user-visible — the page simply becomes reservable again).
 */
1112 static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
1113 struct amdgpu_bo **bo_ptr)
1115 /* no need to free it actually. */
1116 amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
1120 /* reserve vram with size@offset */
/* Pin a kernel BO at an exact VRAM offset: create a contiguous
 * no-CPU-access BO, constrain every placement to [offset, offset+size),
 * force a new mem placement, then pin it there. Error unwinding paths
 * are partially truncated in this view.
 */
1121 static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
1122 uint64_t offset, uint64_t size,
1123 struct amdgpu_bo **bo_ptr)
1125 struct ttm_operation_ctx ctx = { false, false };
1126 struct amdgpu_bo_param bp;
1129 struct amdgpu_bo *bo;
1133 memset(&bp, 0, sizeof(bp));
1135 bp.byte_align = PAGE_SIZE;
1136 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
1137 bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
1138 AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1139 bp.type = ttm_bo_type_kernel;
1142 r = amdgpu_bo_create(adev, &bp, &bo);
1146 r = amdgpu_bo_reserve(bo, false);
1150 offset = ALIGN(offset, PAGE_SIZE);
/* Pin placement window to exactly the requested page range. */
1151 for (i = 0; i < bo->placement.num_placement; ++i) {
1152 bo->placements[i].fpfn = offset >> PAGE_SHIFT;
1153 bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
1156 ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
1157 r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
1161 r = amdgpu_bo_pin_restricted(bo,
1162 AMDGPU_GEM_DOMAIN_VRAM,
1171 amdgpu_bo_unreserve(bo);
/* error path: unreserve + unref the half-built BO. */
1175 amdgpu_bo_unreserve(bo);
1177 amdgpu_bo_unref(&bo);
1181 /* alloc/realloc bps array */
/* Grow the bad-page array in 1024-entry chunks, copying existing
 * entries into the new buffer. NOTE(review): the free of the old
 * buffer / pointer swap is in truncated lines — confirm no leak in
 * the full file.
 */
1182 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
1183 struct ras_err_handler_data *data, int pages)
1185 unsigned int old_space = data->count + data->space_left;
1186 unsigned int new_space = old_space + pages;
1187 unsigned int align_space = ALIGN(new_space, 1024);
1188 void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
1194 memcpy(tmp, data->bps,
1195 data->count * sizeof(*data->bps));
1200 data->space_left += align_space - old_space;
1204 /* it deal with vram only. */
/* Append bad page frame numbers to the eh_data array, growing it when
 * space runs low. Serialized by con->recovery_lock.
 */
1205 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
1206 unsigned long *bps, int pages)
1208 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1209 struct ras_err_handler_data *data;
1213 if (!con || !con->eh_data || !bps || pages <= 0)
1216 mutex_lock(&con->recovery_lock);
1217 data = con->eh_data;
1221 if (data->space_left <= pages)
1222 if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
1228 data->bps[data->count++].bp = bps[i];
1230 data->space_left -= pages;
1232 mutex_unlock(&con->recovery_lock);
1237 /* called in gpu recovery/init */
/* Reserve VRAM for each not-yet-reserved bad page so nothing else can
 * allocate it; last_reserved tracks progress so reruns are incremental.
 */
1238 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
1240 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1241 struct ras_err_handler_data *data;
1243 struct amdgpu_bo *bo;
1246 if (!con || !con->eh_data)
1249 mutex_lock(&con->recovery_lock);
1250 data = con->eh_data;
1253 /* reserve vram at driver post stage. */
1254 for (i = data->last_reserved; i < data->count; i++) {
1255 bp = data->bps[i].bp;
/* bp is a page frame number; shift to a byte offset for reservation. */
1257 if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
1259 DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
1261 data->bps[i].bo = bo;
1262 data->last_reserved = i + 1;
1265 mutex_unlock(&con->recovery_lock);
1269 /* called when driver unload */
/* Release reserved bad-page BOs in reverse order, rewinding
 * last_reserved as each one is freed.
 */
1270 static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
1272 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1273 struct ras_err_handler_data *data;
1274 struct amdgpu_bo *bo;
1277 if (!con || !con->eh_data)
1280 mutex_lock(&con->recovery_lock);
1281 data = con->eh_data;
1285 for (i = data->last_reserved - 1; i >= 0; i--) {
1286 bo = data->bps[i].bo;
1288 amdgpu_ras_release_vram(adev, &bo);
1290 data->bps[i].bo = bo;
1291 data->last_reserved = i;
1294 mutex_unlock(&con->recovery_lock);
/* Placeholder: persisting the bad-page array to eeprom is TODO. */
1298 static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
1301 * write the array to eeprom when SMU disabled.
/* Placeholder: loading the bad-page array from eeprom is TODO. */
1306 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
1309 * read the array to eeprom when SMU disabled.
/* Set up recovery state: zeroed eh_data, lock, recovery work item;
 * then load persisted bad pages and re-reserve them.
 */
1314 static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
1316 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1317 struct ras_err_handler_data **data = &con->eh_data;
1319 *data = kmalloc(sizeof(**data),
1320 GFP_KERNEL|__GFP_ZERO);
1324 mutex_init(&con->recovery_lock);
1325 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
1326 atomic_set(&con->in_recovery, 0);
1329 amdgpu_ras_load_bad_pages(adev);
1330 amdgpu_ras_reserve_bad_pages(adev);
/* Reverse of init: flush pending recovery, save + release bad pages,
 * then detach eh_data under the lock (frees happen in truncated lines).
 */
1335 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
1337 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1338 struct ras_err_handler_data *data = con->eh_data;
1340 cancel_work_sync(&con->recovery_work);
1341 amdgpu_ras_save_bad_pages(adev);
1342 amdgpu_ras_release_bad_pages(adev);
1344 mutex_lock(&con->recovery_lock);
1345 con->eh_data = NULL;
1348 mutex_unlock(&con->recovery_lock);
1355 * check hardware's ras ability which will be saved in hw_supported.
1356 * if hardware does not support ras, we can skip some ras initialization and
1357 * forbid some ras operations from IP.
1358 * if software itself, say boot parameter, limit the ras ability. We still
1359 * need allow IP do some limited operations, like disable. In such case,
1360 * we have to initialize ras as normal. but need check if operation is
1361 * allowed or not in each function.
/* RAS is hardware-supported only on bare-metal Vega20 with atomfirmware
 * reporting mem or sram ECC; *supported further masks hw_supported by
 * the amdgpu_ras_enable / amdgpu_ras_mask module parameters.
 */
1363 static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
1364 uint32_t *hw_supported, uint32_t *supported)
1369 if (amdgpu_sriov_vf(adev) ||
1370 adev->asic_type != CHIP_VEGA20)
1373 if (adev->is_atom_fw &&
1374 (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
1375 amdgpu_atomfirmware_sram_ecc_supported(adev)))
1376 *hw_supported = AMDGPU_RAS_BLOCK_MASK;
1378 *supported = amdgpu_ras_enable == 0 ?
1379 0 : *hw_supported & amdgpu_ras_mask;
/* Top-level RAS init: allocate the context (with the ras_manager array
 * in the same allocation), detect support, init recovery, optionally
 * let vbios-driven enable run, create fs nodes, self-test.
 */
1382 int amdgpu_ras_init(struct amdgpu_device *adev)
1384 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
/* One allocation: context followed by the per-block object array. */
1389 con = kmalloc(sizeof(struct amdgpu_ras) +
1390 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
1391 GFP_KERNEL|__GFP_ZERO);
1395 con->objs = (struct ras_manager *)(con + 1);
1397 amdgpu_ras_set_context(adev, con);
1399 amdgpu_ras_check_supported(adev, &con->hw_supported,
1402 INIT_LIST_HEAD(&con->head);
1403 /* Might need get this flag from vbios. */
1404 con->flags = RAS_DEFAULT_FLAGS;
1406 if (amdgpu_ras_recovery_init(adev))
1409 amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
1411 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
1412 amdgpu_ras_enable_all_features(adev, 1);
1414 if (amdgpu_ras_fs_init(adev))
1417 amdgpu_ras_self_test(adev);
1419 DRM_INFO("RAS INFO: ras initialized successfully, "
1420 "hardware ability[%x] ras_mask[%x]\n",
1421 con->hw_supported, con->supported);
/* error unwinding: undo recovery init and drop the context. */
1424 amdgpu_ras_recovery_fini(adev);
1426 amdgpu_ras_set_context(adev, NULL);
1432 /* do some init work after IP late init as dependence */
/* After IP late init: for vbios-enabled mode, disable any blocks the
 * supported mask (boot params / unimplemented IPs) does not cover.
 */
1433 void amdgpu_ras_post_init(struct amdgpu_device *adev)
1435 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1436 struct ras_manager *obj, *tmp;
1441 /* We enable ras on all hw_supported block, but as boot parameter might
1442 * disable some of them and one or more IP has not implemented yet.
1443 * So we disable them on behalf.
1445 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
1446 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1447 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
1448 amdgpu_ras_feature_enable(adev, &obj->head, 0);
1449 /* there should be no any reference. */
1450 WARN_ON(alive_obj(obj));
1456 /* do some fini work before IP fini as dependence */
/* Before IP [hw/sw]fini: disable all features and tear down recovery. */
1457 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
1459 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1464 /* Need disable ras on all IPs here before ip [hw/sw]fini */
1465 amdgpu_ras_disable_all_features(adev, 0);
1466 amdgpu_ras_recovery_fini(adev);
/* Final teardown: fs nodes, interrupt handlers, feature-mask sanity
 * check, forced disable, then drop the context.
 */
1470 int amdgpu_ras_fini(struct amdgpu_device *adev)
1472 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1477 amdgpu_ras_fs_fini(adev);
1478 amdgpu_ras_interrupt_remove_all(adev);
1480 WARN(con->features, "Feature mask is not cleared");
1483 amdgpu_ras_disable_all_features(adev, 1);
1485 amdgpu_ras_set_context(adev, NULL);