// SPDX-License-Identifier: GPL-2.0
#include <linux/efi_embedded_fw.h>
#include <linux/property.h>
#include <linux/security.h>
#include <linux/vmalloc.h>
#include "fallback.h"
#include "firmware.h"
int firmware_fallback_platform(struct fw_priv *fw_priv)
{
const u8 *data;
size_t size;
int rc;
if (!(fw_priv->opt_flags & FW_OPT_FALLBACK_PLATFORM))
return -ENOENT;
rc = security_kernel_load_data(LOADING_FIRMWARE, true);
if (rc)
return rc;
rc = efi_get_embedded_fw(fw_priv->fw_name, &data, &size);
if (rc)
return rc; /* rc == -ENOENT when the fw was not found */
if (fw_priv->data && size > fw_priv->allocated_size)
return -ENOMEM;
rc = security_kernel_post_load_data((u8 *)data, size, LOADING_FIRMWARE,
"platform");
if (rc)
return rc;
if (!fw_priv->data)
fw_priv->data = vmalloc(size);
if (!fw_priv->data)
return -ENOMEM;
memcpy(fw_priv->data, data, size);
fw_priv->size = size;
fw_state_done(fw_priv);
return 0;
}
| linux-master | drivers/base/firmware_loader/fallback_platform.c |
// SPDX-License-Identifier: GPL-2.0
/*
* main.c - Multi purpose firmware loading support
*
* Copyright (c) 2003 Manuel Estrada Sainz
*
* Please see Documentation/driver-api/firmware/ for more information.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kernel_read_file.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
#include <linux/zstd.h>
#include <linux/xz.h>
#include <generated/utsrelease.h>
#include "../base.h"
#include "firmware.h"
#include "fallback.h"
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
struct firmware_cache {
/* firmware_buf instance will be added into the below list */
spinlock_t lock;
struct list_head head;
int state;
#ifdef CONFIG_FW_CACHE
/*
* Names of firmware images which have been cached successfully
* will be added into the below list so that device uncache
* helper can trace which firmware images have been cached
* before.
*/
spinlock_t name_lock;
struct list_head fw_names;
struct delayed_work work;
struct notifier_block pm_notify;
#endif
};
struct fw_cache_entry {
struct list_head list;
const char *name;
};
struct fw_name_devm {
unsigned long magic;
const char *name;
};
static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
return container_of(ref, struct fw_priv, ref);
}
#define FW_LOADER_NO_CACHE 0
#define FW_LOADER_START_CACHE 1
/* fw_lock could be moved to 'struct fw_sysfs' but since it is only
 * guarding corner cases, a global lock should be OK */
DEFINE_MUTEX(fw_lock);
struct firmware_cache fw_cache;
void fw_state_init(struct fw_priv *fw_priv)
{
struct fw_state *fw_st = &fw_priv->fw_st;
init_completion(&fw_st->completion);
fw_st->status = FW_STATUS_UNKNOWN;
}
static inline int fw_state_wait(struct fw_priv *fw_priv)
{
return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
struct firmware_cache *fwc,
void *dbuf,
size_t size,
size_t offset,
u32 opt_flags)
{
struct fw_priv *fw_priv;
/* For a partial read, the buffer must be preallocated. */
if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
return NULL;
/* Only partial reads are allowed to use an offset. */
if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
return NULL;
fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
if (!fw_priv)
return NULL;
fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
if (!fw_priv->fw_name) {
kfree(fw_priv);
return NULL;
}
kref_init(&fw_priv->ref);
fw_priv->fwc = fwc;
fw_priv->data = dbuf;
fw_priv->allocated_size = size;
fw_priv->offset = offset;
fw_priv->opt_flags = opt_flags;
fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&fw_priv->pending_list);
#endif
pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);
return fw_priv;
}
static struct fw_priv *__lookup_fw_priv(const char *fw_name)
{
struct fw_priv *tmp;
struct firmware_cache *fwc = &fw_cache;
list_for_each_entry(tmp, &fwc->head, list)
if (!strcmp(tmp->fw_name, fw_name))
return tmp;
return NULL;
}
/* Returns 1 for batching firmware requests with the same name */
int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
struct fw_priv **fw_priv, void *dbuf, size_t size,
size_t offset, u32 opt_flags)
{
struct fw_priv *tmp;
spin_lock(&fwc->lock);
/*
* Do not merge requests that are marked to be non-cached or
* are performing partial reads.
*/
if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
tmp = __lookup_fw_priv(fw_name);
if (tmp) {
kref_get(&tmp->ref);
spin_unlock(&fwc->lock);
*fw_priv = tmp;
pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
return 1;
}
}
tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
if (tmp) {
INIT_LIST_HEAD(&tmp->list);
if (!(opt_flags & FW_OPT_NOCACHE))
list_add(&tmp->list, &fwc->head);
}
spin_unlock(&fwc->lock);
*fw_priv = tmp;
return tmp ? 0 : -ENOMEM;
}
static void __free_fw_priv(struct kref *ref)
__releases(&fwc->lock)
{
struct fw_priv *fw_priv = to_fw_priv(ref);
struct firmware_cache *fwc = fw_priv->fwc;
pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
__func__, fw_priv->fw_name, fw_priv, fw_priv->data,
(unsigned int)fw_priv->size);
list_del(&fw_priv->list);
spin_unlock(&fwc->lock);
if (fw_is_paged_buf(fw_priv))
fw_free_paged_buf(fw_priv);
else if (!fw_priv->allocated_size)
vfree(fw_priv->data);
kfree_const(fw_priv->fw_name);
kfree(fw_priv);
}
void free_fw_priv(struct fw_priv *fw_priv)
{
struct firmware_cache *fwc = fw_priv->fwc;
spin_lock(&fwc->lock);
if (!kref_put(&fw_priv->ref, __free_fw_priv))
spin_unlock(&fwc->lock);
}
#ifdef CONFIG_FW_LOADER_PAGED_BUF
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
return fw_priv->is_paged_buf;
}
void fw_free_paged_buf(struct fw_priv *fw_priv)
{
int i;
if (!fw_priv->pages)
return;
vunmap(fw_priv->data);
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kvfree(fw_priv->pages);
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
fw_priv->data = NULL;
fw_priv->size = 0;
}
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
{
/* If the array of pages is too small, grow it */
if (fw_priv->page_array_size < pages_needed) {
int new_array_size = max(pages_needed,
fw_priv->page_array_size * 2);
struct page **new_pages;
new_pages = kvmalloc_array(new_array_size, sizeof(void *),
GFP_KERNEL);
if (!new_pages)
return -ENOMEM;
memcpy(new_pages, fw_priv->pages,
fw_priv->page_array_size * sizeof(void *));
memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
(new_array_size - fw_priv->page_array_size));
kvfree(fw_priv->pages);
fw_priv->pages = new_pages;
fw_priv->page_array_size = new_array_size;
}
while (fw_priv->nr_pages < pages_needed) {
fw_priv->pages[fw_priv->nr_pages] =
alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
if (!fw_priv->pages[fw_priv->nr_pages])
return -ENOMEM;
fw_priv->nr_pages++;
}
return 0;
}
int fw_map_paged_buf(struct fw_priv *fw_priv)
{
/* a paged buffer should be mapped/unmapped only once */
if (!fw_priv->pages)
return 0;
vunmap(fw_priv->data);
fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
PAGE_KERNEL_RO);
if (!fw_priv->data)
return -ENOMEM;
return 0;
}
#endif
/*
* ZSTD-compressed firmware support
*/
#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
size_t in_size, const void *in_buffer)
{
size_t len, out_size, workspace_size;
void *workspace, *out_buf;
zstd_dctx *ctx;
int err;
if (fw_priv->allocated_size) {
out_size = fw_priv->allocated_size;
out_buf = fw_priv->data;
} else {
zstd_frame_header params;
if (zstd_get_frame_header(&params, in_buffer, in_size) ||
params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
dev_dbg(dev, "%s: invalid zstd header\n", __func__);
return -EINVAL;
}
out_size = params.frameContentSize;
out_buf = vzalloc(out_size);
if (!out_buf)
return -ENOMEM;
}
workspace_size = zstd_dctx_workspace_bound();
workspace = kvzalloc(workspace_size, GFP_KERNEL);
if (!workspace) {
err = -ENOMEM;
goto error;
}
ctx = zstd_init_dctx(workspace, workspace_size);
if (!ctx) {
dev_dbg(dev, "%s: failed to initialize context\n", __func__);
err = -EINVAL;
goto error;
}
len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
if (zstd_is_error(len)) {
dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
zstd_get_error_code(len));
err = -EINVAL;
goto error;
}
if (!fw_priv->allocated_size)
fw_priv->data = out_buf;
fw_priv->size = len;
err = 0;
error:
kvfree(workspace);
if (err && !fw_priv->allocated_size)
vfree(out_buf);
return err;
}
#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */
/*
* XZ-compressed firmware support
*/
#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
/* show an error and return the standard error code */
static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
{
if (xz_ret != XZ_STREAM_END) {
dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
}
return 0;
}
/* single-shot decompression onto the pre-allocated buffer */
static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
size_t in_size, const void *in_buffer)
{
struct xz_dec *xz_dec;
struct xz_buf xz_buf;
enum xz_ret xz_ret;
xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
if (!xz_dec)
return -ENOMEM;
xz_buf.in_size = in_size;
xz_buf.in = in_buffer;
xz_buf.in_pos = 0;
xz_buf.out_size = fw_priv->allocated_size;
xz_buf.out = fw_priv->data;
xz_buf.out_pos = 0;
xz_ret = xz_dec_run(xz_dec, &xz_buf);
xz_dec_end(xz_dec);
fw_priv->size = xz_buf.out_pos;
return fw_decompress_xz_error(dev, xz_ret);
}
/* decompress onto a paged buffer and map it */
static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
size_t in_size, const void *in_buffer)
{
struct xz_dec *xz_dec;
struct xz_buf xz_buf;
enum xz_ret xz_ret;
struct page *page;
int err = 0;
xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
if (!xz_dec)
return -ENOMEM;
xz_buf.in_size = in_size;
xz_buf.in = in_buffer;
xz_buf.in_pos = 0;
fw_priv->is_paged_buf = true;
fw_priv->size = 0;
do {
if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
err = -ENOMEM;
goto out;
}
/* decompress onto the new allocated page */
page = fw_priv->pages[fw_priv->nr_pages - 1];
xz_buf.out = kmap_local_page(page);
xz_buf.out_pos = 0;
xz_buf.out_size = PAGE_SIZE;
xz_ret = xz_dec_run(xz_dec, &xz_buf);
kunmap_local(xz_buf.out);
fw_priv->size += xz_buf.out_pos;
/* partial decompression means either end or error */
if (xz_buf.out_pos != PAGE_SIZE)
break;
} while (xz_ret == XZ_OK);
err = fw_decompress_xz_error(dev, xz_ret);
if (!err)
err = fw_map_paged_buf(fw_priv);
out:
xz_dec_end(xz_dec);
return err;
}
static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
size_t in_size, const void *in_buffer)
{
/* if the buffer is pre-allocated, we can decompress in single-shot mode */
if (fw_priv->data)
return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
else
return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
}
#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */
/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
fw_path_para,
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
"/lib/firmware"
};
/*
* Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
* kernel command line, because firmware_class is generally built into the
* kernel instead of being a module.
*/
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
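/*
 * Illustrative example (assumption, not part of the original file): booting
 * with
 *
 *	firmware_class.path=/opt/fw
 *
 * makes "/opt/fw" the contents of fw_path_para, i.e. the first entry of
 * fw_path[] above, searched ahead of the default /lib/firmware locations.
 * The directory name is hypothetical.
 */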
static int
fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
const char *suffix,
int (*decompress)(struct device *dev,
struct fw_priv *fw_priv,
size_t in_size,
const void *in_buffer))
{
size_t size;
int i, len, maxlen = 0;
int rc = -ENOENT;
char *path, *nt = NULL;
size_t msize = INT_MAX;
void *buffer = NULL;
/* Already populated data member means we're loading into a buffer */
if (!decompress && fw_priv->data) {
buffer = fw_priv->data;
msize = fw_priv->allocated_size;
}
path = __getname();
if (!path)
return -ENOMEM;
wait_for_initramfs();
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
size_t file_size = 0;
size_t *file_size_ptr = NULL;
/* skip the unset customized path */
if (!fw_path[i][0])
continue;
/* strip off \n from customized path */
maxlen = strlen(fw_path[i]);
if (i == 0) {
nt = strchr(fw_path[i], '\n');
if (nt)
maxlen = nt - fw_path[i];
}
len = snprintf(path, PATH_MAX, "%.*s/%s%s",
maxlen, fw_path[i],
fw_priv->fw_name, suffix);
if (len >= PATH_MAX) {
rc = -ENAMETOOLONG;
break;
}
fw_priv->size = 0;
/*
* The total file size is only examined when doing a partial
* read; the "full read" case needs to fail if the whole
* firmware was not completely loaded.
*/
if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer)
file_size_ptr = &file_size;
/* load firmware files from the mount namespace of init */
rc = kernel_read_file_from_path_initns(path, fw_priv->offset,
&buffer, msize,
file_size_ptr,
READING_FIRMWARE);
if (rc < 0) {
if (rc != -ENOENT)
dev_warn(device, "loading %s failed with error %d\n",
path, rc);
else
dev_dbg(device, "loading %s failed for no such file or directory.\n",
path);
continue;
}
size = rc;
rc = 0;
dev_dbg(device, "Loading firmware from %s\n", path);
if (decompress) {
dev_dbg(device, "f/w decompressing %s\n",
fw_priv->fw_name);
rc = decompress(device, fw_priv, size, buffer);
/* discard the superfluous original content */
vfree(buffer);
buffer = NULL;
if (rc) {
fw_free_paged_buf(fw_priv);
continue;
}
} else {
dev_dbg(device, "direct-loading %s\n",
fw_priv->fw_name);
if (!fw_priv->data)
fw_priv->data = buffer;
fw_priv->size = size;
}
fw_state_done(fw_priv);
break;
}
__putname(path);
return rc;
}
/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
/* Loaded directly? */
if (!fw->priv) {
vfree(fw->data);
return;
}
free_fw_priv(fw->priv);
}
/* store the paged buffer info into the firmware struct */
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
fw->priv = fw_priv;
fw->size = fw_priv->size;
fw->data = fw_priv->data;
pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
__func__, fw_priv->fw_name, fw_priv, fw_priv->data,
(unsigned int)fw_priv->size);
}
#ifdef CONFIG_FW_CACHE
static void fw_name_devm_release(struct device *dev, void *res)
{
struct fw_name_devm *fwn = res;
if (fwn->magic == (unsigned long)&fw_cache)
pr_debug("%s: fw_name-%s devm-%p released\n",
__func__, fwn->name, res);
kfree_const(fwn->name);
}
static int fw_devm_match(struct device *dev, void *res,
void *match_data)
{
struct fw_name_devm *fwn = res;
return (fwn->magic == (unsigned long)&fw_cache) &&
!strcmp(fwn->name, match_data);
}
static struct fw_name_devm *fw_find_devm_name(struct device *dev,
const char *name)
{
struct fw_name_devm *fwn;
fwn = devres_find(dev, fw_name_devm_release,
fw_devm_match, (void *)name);
return fwn;
}
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
struct fw_name_devm *fwn;
fwn = fw_find_devm_name(dev, name);
if (fwn)
return true;
return false;
}
/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
struct fw_name_devm *fwn;
if (fw_cache_is_setup(dev, name))
return 0;
fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
GFP_KERNEL);
if (!fwn)
return -ENOMEM;
fwn->name = kstrdup_const(name, GFP_KERNEL);
if (!fwn->name) {
devres_free(fwn);
return -ENOMEM;
}
fwn->magic = (unsigned long)&fw_cache;
devres_add(dev, fwn);
return 0;
}
#else
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
return false;
}
static int fw_add_devm_name(struct device *dev, const char *name)
{
return 0;
}
#endif
int assign_fw(struct firmware *fw, struct device *device)
{
struct fw_priv *fw_priv = fw->priv;
int ret;
mutex_lock(&fw_lock);
if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
mutex_unlock(&fw_lock);
return -ENOENT;
}
/*
* add firmware name into devres list so that we can auto cache
* and uncache firmware for device.
*
* device may have been deleted already, but the problem
* should be fixed in devres or driver core.
*/
/* don't cache firmware handled without uevent */
if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) &&
!(fw_priv->opt_flags & FW_OPT_NOCACHE)) {
ret = fw_add_devm_name(device, fw_priv->fw_name);
if (ret) {
mutex_unlock(&fw_lock);
return ret;
}
}
/*
* After caching of firmware images has started, let this request
* piggyback on it.
*/
if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
fw_priv->fwc->state == FW_LOADER_START_CACHE)
fw_cache_piggyback_on_request(fw_priv);
/* pass the pages buffer to driver at the last minute */
fw_set_page_data(fw_priv, fw);
mutex_unlock(&fw_lock);
return 0;
}
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if one needs to be loaded,
* or a negative error code
*/
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
struct device *device, void *dbuf, size_t size,
size_t offset, u32 opt_flags)
{
struct firmware *firmware;
struct fw_priv *fw_priv;
int ret;
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
dev_err(device, "%s: kmalloc(struct firmware) failed\n",
__func__);
return -ENOMEM;
}
if (firmware_request_builtin_buf(firmware, name, dbuf, size)) {
dev_dbg(device, "using built-in %s\n", name);
return 0; /* assigned */
}
ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
offset, opt_flags);
/*
* bind with 'priv' now to avoid warning in failure path
* of requesting firmware.
*/
firmware->priv = fw_priv;
if (ret > 0) {
ret = fw_state_wait(fw_priv);
if (!ret) {
fw_set_page_data(fw_priv, firmware);
return 0; /* assigned */
}
}
if (ret < 0)
return ret;
return 1; /* need to load */
}
/*
* Batched requests need only one wake, we need to do this step last due to the
* fallback mechanism. The buf is protected with kref_get(), and it won't be
* released until the last user calls release_firmware().
*
* Failed batched requests are possible as well, in such cases we just share
* the struct fw_priv and won't release it until all requests are woken
* and have gone through this same path.
*/
static void fw_abort_batch_reqs(struct firmware *fw)
{
struct fw_priv *fw_priv;
/* Loaded directly? */
if (!fw || !fw->priv)
return;
fw_priv = fw->priv;
mutex_lock(&fw_lock);
if (!fw_state_is_aborted(fw_priv))
fw_state_aborted(fw_priv);
mutex_unlock(&fw_lock);
}
#if defined(CONFIG_FW_LOADER_DEBUG)
#include <crypto/hash.h>
#include <crypto/sha2.h>
static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device)
{
struct shash_desc *shash;
struct crypto_shash *alg;
u8 *sha256buf;
char *outbuf;
alg = crypto_alloc_shash("sha256", 0, 0);
if (IS_ERR(alg))
return;
sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL);
shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL);
if (!sha256buf || !outbuf || !shash)
goto out_free;
shash->tfm = alg;
if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
goto out_shash;
for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
outbuf[SHA256_BLOCK_SIZE] = 0;
dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
out_shash:
crypto_free_shash(alg);
out_free:
kfree(shash);
kfree(outbuf);
kfree(sha256buf);
}
#else
static void fw_log_firmware_info(const struct firmware *fw, const char *name,
struct device *device)
{}
#endif
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device, void *buf, size_t size,
size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
struct cred *kern_cred = NULL;
const struct cred *old_cred;
bool nondirect = false;
int ret;
if (!firmware_p)
return -EINVAL;
if (!name || name[0] == '\0') {
ret = -EINVAL;
goto out;
}
ret = _request_firmware_prepare(&fw, name, device, buf, size,
offset, opt_flags);
if (ret <= 0) /* error or already assigned */
goto out;
/*
* We are about to try to access the firmware file. Because we may have been
* called by a driver when serving an unrelated request from userland, we use
* the kernel credentials to read the file.
*/
kern_cred = prepare_kernel_cred(&init_task);
if (!kern_cred) {
ret = -ENOMEM;
goto out;
}
old_cred = override_creds(kern_cred);
ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
/* Only full reads can support decompression, platform, and sysfs. */
if (!(opt_flags & FW_OPT_PARTIAL))
nondirect = true;
#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
if (ret == -ENOENT && nondirect)
ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
fw_decompress_zstd);
#endif
#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
if (ret == -ENOENT && nondirect)
ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
fw_decompress_xz);
#endif
if (ret == -ENOENT && nondirect)
ret = firmware_fallback_platform(fw->priv);
if (ret) {
if (!(opt_flags & FW_OPT_NO_WARN))
dev_warn(device,
"Direct firmware load for %s failed with error %d\n",
name, ret);
if (nondirect)
ret = firmware_fallback_sysfs(fw, name, device,
opt_flags, ret);
} else
ret = assign_fw(fw, device);
revert_creds(old_cred);
put_cred(kern_cred);
out:
if (ret < 0) {
fw_abort_batch_reqs(fw);
release_firmware(fw);
fw = NULL;
} else {
fw_log_firmware_info(fw, name, device);
}
*firmware_p = fw;
return ret;
}
/**
* request_firmware() - send firmware request and wait for it
* @firmware_p: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded
*
* @firmware_p will be used to return a firmware image by the name
* of @name for device @device.
*
* Should be called from user context where sleeping is allowed.
*
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
*
* Caller must hold the reference count of @device.
*
* The function can be called safely inside the device's suspend and
* resume callbacks.
**/
int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
int ret;
/* Need to pin this module until return */
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
FW_OPT_UEVENT);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL(request_firmware);
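/*
 * Illustrative sketch (not part of the original file): the typical
 * synchronous pattern a driver uses around request_firmware() and
 * release_firmware(). The firmware name "example/fw.bin" and the commented
 * programming helper are hypothetical.
 */
static int __maybe_unused example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "example/fw.bin", dev);
	if (err)
		return err;

	/* fw->data points at fw->size read-only bytes of image data */
	/* err = example_program_device(dev, fw->data, fw->size); */

	release_firmware(fw);
	return err;
}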
/**
* firmware_request_nowarn() - request for an optional fw module
* @firmware: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded
*
* This function is similar in behaviour to request_firmware(), except it
* doesn't produce warning messages when the file is not found. The sysfs
* fallback mechanism is enabled if direct filesystem lookup fails. However,
* failures to find the firmware file with it are still suppressed. It is
* therefore up to the driver to check for the return value of this call and to
* decide when to inform the users of errors.
**/
int firmware_request_nowarn(const struct firmware **firmware, const char *name,
struct device *device)
{
int ret;
/* Need to pin this module until return */
__module_get(THIS_MODULE);
ret = _request_firmware(firmware, name, device, NULL, 0, 0,
FW_OPT_UEVENT | FW_OPT_NO_WARN);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_nowarn);
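/*
 * Illustrative sketch (assumption, not from the original file):
 * firmware_request_nowarn() for an optional image, where a missing file is
 * not an error worth logging. The name "example/patch.bin" is hypothetical.
 */
static void __maybe_unused example_load_optional_patch(struct device *dev)
{
	const struct firmware *patch;

	if (firmware_request_nowarn(&patch, "example/patch.bin", dev))
		return;		/* no patch available; carry on without it */

	/* ... apply patch->data (patch->size bytes) to the device ... */
	release_firmware(patch);
}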
/**
* request_firmware_direct() - load firmware directly without usermode helper
* @firmware_p: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded
*
* This function works pretty much like request_firmware(), but it doesn't
* fall back to the usermode helper even if the firmware couldn't be loaded
* directly from the filesystem. Hence it's useful for loading optional
* firmware, which isn't always present, without the extra-long timeouts of udev.
**/
int request_firmware_direct(const struct firmware **firmware_p,
const char *name, struct device *device)
{
int ret;
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
FW_OPT_UEVENT | FW_OPT_NO_WARN |
FW_OPT_NOFALLBACK_SYSFS);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
/**
* firmware_request_platform() - request firmware with platform-fw fallback
* @firmware: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded
*
* This function is similar in behaviour to request_firmware, except that if
* direct filesystem lookup fails, it will fall back to looking for a copy of the
* requested firmware embedded in the platform's main (e.g. UEFI) firmware.
**/
int firmware_request_platform(const struct firmware **firmware,
const char *name, struct device *device)
{
int ret;
/* Need to pin this module until return */
__module_get(THIS_MODULE);
ret = _request_firmware(firmware, name, device, NULL, 0, 0,
FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_platform);
/**
* firmware_request_cache() - cache firmware for suspend so resume can use it
* @name: name of firmware file
* @device: device for which the firmware should be cached
*
* There are some devices with an optimization that enables the device to not
* require loading firmware on system reboot. This optimization may still
* require the firmware present on resume from suspend. This routine can be
* used to ensure the firmware is present on resume from suspend in these
* situations. This helper is not compatible with drivers which use
* request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
**/
int firmware_request_cache(struct device *device, const char *name)
{
int ret;
mutex_lock(&fw_lock);
ret = fw_add_devm_name(device, name);
mutex_unlock(&fw_lock);
return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_cache);
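/*
 * Illustrative sketch (assumption): a driver whose device keeps its firmware
 * across reboots, but which may need the image again on resume, can ask for
 * it to be cached around suspend without loading it now. The device pointer
 * and the name "example/fw.bin" are hypothetical.
 */
static int __maybe_unused example_setup_resume_cache(struct device *dev)
{
	return firmware_request_cache(dev, "example/fw.bin");
}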
/**
* request_firmware_into_buf() - load firmware into a previously allocated buffer
* @firmware_p: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded and DMA region allocated
* @buf: address of buffer to load firmware into
* @size: size of buffer
*
* This function works pretty much like request_firmware(), but it doesn't
* allocate a buffer to hold the firmware data. Instead, the firmware
* is loaded directly into the buffer pointed to by @buf and the @firmware_p
* data member is pointed at @buf.
*
* This function doesn't cache firmware either.
*/
int
request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
struct device *device, void *buf, size_t size)
{
int ret;
if (fw_cache_is_setup(device, name))
return -EOPNOTSUPP;
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, buf, size, 0,
FW_OPT_UEVENT | FW_OPT_NOCACHE);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL(request_firmware_into_buf);
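/*
 * Illustrative sketch (assumption): loading firmware directly into a
 * pre-allocated (e.g. DMA-capable) buffer. The buffer, its length, and the
 * firmware name are hypothetical; as noted above, this path is never cached.
 */
static int __maybe_unused example_load_into_buf(struct device *dev,
						void *dma_buf, size_t buf_len)
{
	const struct firmware *fw;
	int err;

	err = request_firmware_into_buf(&fw, "example/fw.bin", dev,
					dma_buf, buf_len);
	if (err)
		return err;

	/* here fw->data == dma_buf and fw->size <= buf_len */
	release_firmware(fw);
	return 0;
}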
/**
* request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer
* @firmware_p: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded and DMA region allocated
* @buf: address of buffer to load firmware into
* @size: size of buffer
* @offset: offset into file to read
*
* This function works pretty much like request_firmware_into_buf except
* it allows a partial read of the file.
*/
int
request_partial_firmware_into_buf(const struct firmware **firmware_p,
const char *name, struct device *device,
void *buf, size_t size, size_t offset)
{
int ret;
if (fw_cache_is_setup(device, name))
return -EOPNOTSUPP;
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, buf, size, offset,
FW_OPT_UEVENT | FW_OPT_NOCACHE |
FW_OPT_PARTIAL);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL(request_partial_firmware_into_buf);
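/*
 * Illustrative sketch (assumption): reading only a slice of a firmware file,
 * e.g. a fixed-size header at a given offset, into a small buffer. The
 * offset, length, and firmware name are hypothetical.
 */
static int __maybe_unused example_read_fw_slice(struct device *dev,
						void *buf, size_t len,
						size_t offset)
{
	const struct firmware *fw;
	int err;

	err = request_partial_firmware_into_buf(&fw, "example/fw.bin", dev,
						buf, len, offset);
	if (err)
		return err;

	/* fw->size is the number of bytes actually read into buf */
	release_firmware(fw);
	return 0;
}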
/**
* release_firmware() - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
void release_firmware(const struct firmware *fw)
{
if (fw) {
if (!firmware_is_builtin(fw))
firmware_free_data(fw);
kfree(fw);
}
}
EXPORT_SYMBOL(release_firmware);
/* Async support */
struct firmware_work {
struct work_struct work;
struct module *module;
const char *name;
struct device *device;
void *context;
void (*cont)(const struct firmware *fw, void *context);
u32 opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
{
struct firmware_work *fw_work;
const struct firmware *fw;
fw_work = container_of(work, struct firmware_work, work);
_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0,
fw_work->opt_flags);
fw_work->cont(fw, fw_work->context);
put_device(fw_work->device); /* taken in request_firmware_nowait() */
module_put(fw_work->module);
kfree_const(fw_work->name);
kfree(fw_work);
}
/**
* request_firmware_nowait() - asynchronous version of request_firmware
* @module: module requesting the firmware
* @uevent: if this flag is non-zero, a uevent is sent to trigger copying of
* the firmware image; otherwise the firmware copy must be done manually.
* @name: name of firmware file
* @device: device for which firmware is being loaded
* @gfp: allocation flags
* @context: will be passed over to @cont, and
* @fw may be %NULL if firmware request fails.
* @cont: function will be called asynchronously when the firmware
* request is over.
*
* Caller must hold the reference count of @device.
*
* Asynchronous variant of request_firmware() for user contexts:
* - sleeps for as short a period as possible, since it may
* increase kernel boot time of built-in device drivers
* requesting firmware in their ->probe() methods, if
* @gfp is GFP_KERNEL.
*
* - can't sleep at all if @gfp is GFP_ATOMIC.
**/
int
request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context))
{
struct firmware_work *fw_work;
fw_work = kzalloc(sizeof(struct firmware_work), gfp);
if (!fw_work)
return -ENOMEM;
fw_work->module = module;
fw_work->name = kstrdup_const(name, gfp);
if (!fw_work->name) {
kfree(fw_work);
return -ENOMEM;
}
fw_work->device = device;
fw_work->context = context;
fw_work->cont = cont;
fw_work->opt_flags = FW_OPT_NOWAIT |
(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
if (!uevent && fw_cache_is_setup(device, name)) {
kfree_const(fw_work->name);
kfree(fw_work);
return -EOPNOTSUPP;
}
if (!try_module_get(module)) {
kfree_const(fw_work->name);
kfree(fw_work);
return -EFAULT;
}
get_device(fw_work->device);
INIT_WORK(&fw_work->work, request_firmware_work_func);
schedule_work(&fw_work->work);
return 0;
}
EXPORT_SYMBOL(request_firmware_nowait);
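/*
 * Illustrative sketch (not part of the original file): asynchronous loading
 * from probe with a completion callback. The context is simply the device
 * here; the firmware name and helper names are hypothetical.
 */
static void __maybe_unused example_fw_cont(const struct firmware *fw,
					   void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_warn(dev, "example firmware not available\n");
		return;
	}
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
}

static int __maybe_unused example_async_probe(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, true, "example/fw.bin",
				       dev, GFP_KERNEL, dev, example_fw_cont);
}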
#ifdef CONFIG_FW_CACHE
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
* cache_firmware() - cache one firmware image in kernel memory space
* @fw_name: the firmware image name
*
* Cache firmware in kernel memory so that drivers can use it when the
* system isn't ready for them to request the firmware image from userspace.
* Once it returns successfully, a driver can use request_firmware or its
* nowait version to get the cached firmware without interacting
* with userspace.
*
* Return 0 if the firmware image has been cached successfully
* Return !0 otherwise
*
*/
static int cache_firmware(const char *fw_name)
{
int ret;
const struct firmware *fw;
pr_debug("%s: %s\n", __func__, fw_name);
ret = request_firmware(&fw, fw_name, NULL);
if (!ret)
kfree(fw);
pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
return ret;
}
static struct fw_priv *lookup_fw_priv(const char *fw_name)
{
struct fw_priv *tmp;
struct firmware_cache *fwc = &fw_cache;
spin_lock(&fwc->lock);
tmp = __lookup_fw_priv(fw_name);
spin_unlock(&fwc->lock);
return tmp;
}
/**
* uncache_firmware() - remove one cached firmware image
* @fw_name: the firmware image name
*
* Uncache one firmware image which has been cached successfully
* before.
*
* Return 0 if the firmware cache has been removed successfully
* Return !0 otherwise
*
*/
static int uncache_firmware(const char *fw_name)
{
struct fw_priv *fw_priv;
struct firmware fw;
pr_debug("%s: %s\n", __func__, fw_name);
if (firmware_request_builtin(&fw, fw_name))
return 0;
fw_priv = lookup_fw_priv(fw_name);
if (fw_priv) {
free_fw_priv(fw_priv);
return 0;
}
return -EINVAL;
}
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
struct fw_cache_entry *fce;
fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
if (!fce)
goto exit;
fce->name = kstrdup_const(name, GFP_ATOMIC);
if (!fce->name) {
kfree(fce);
fce = NULL;
goto exit;
}
exit:
return fce;
}
static int __fw_entry_found(const char *name)
{
struct firmware_cache *fwc = &fw_cache;
struct fw_cache_entry *fce;
list_for_each_entry(fce, &fwc->fw_names, list) {
if (!strcmp(fce->name, name))
return 1;
}
return 0;
}
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
const char *name = fw_priv->fw_name;
struct firmware_cache *fwc = fw_priv->fwc;
struct fw_cache_entry *fce;
spin_lock(&fwc->name_lock);
if (__fw_entry_found(name))
goto found;
fce = alloc_fw_cache_entry(name);
if (fce) {
list_add(&fce->list, &fwc->fw_names);
kref_get(&fw_priv->ref);
pr_debug("%s: fw: %s\n", __func__, name);
}
found:
spin_unlock(&fwc->name_lock);
}
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
kfree_const(fce->name);
kfree(fce);
}
static void __async_dev_cache_fw_image(void *fw_entry,
async_cookie_t cookie)
{
struct fw_cache_entry *fce = fw_entry;
struct firmware_cache *fwc = &fw_cache;
int ret;
ret = cache_firmware(fce->name);
if (ret) {
spin_lock(&fwc->name_lock);
list_del(&fce->list);
spin_unlock(&fwc->name_lock);
free_fw_cache_entry(fce);
}
}
/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
void *data)
{
struct fw_name_devm *fwn = res;
const char *fw_name = fwn->name;
struct list_head *head = data;
struct fw_cache_entry *fce;
fce = alloc_fw_cache_entry(fw_name);
if (fce)
list_add(&fce->list, head);
}
static int devm_name_match(struct device *dev, void *res,
void *match_data)
{
struct fw_name_devm *fwn = res;
return (fwn->magic == (unsigned long)match_data);
}
static void dev_cache_fw_image(struct device *dev, void *data)
{
LIST_HEAD(todo);
struct fw_cache_entry *fce;
struct fw_cache_entry *fce_next;
struct firmware_cache *fwc = &fw_cache;
devres_for_each_res(dev, fw_name_devm_release,
devm_name_match, &fw_cache,
dev_create_fw_entry, &todo);
list_for_each_entry_safe(fce, fce_next, &todo, list) {
list_del(&fce->list);
spin_lock(&fwc->name_lock);
/* only one cache entry for one firmware */
if (!__fw_entry_found(fce->name)) {
list_add(&fce->list, &fwc->fw_names);
} else {
free_fw_cache_entry(fce);
fce = NULL;
}
spin_unlock(&fwc->name_lock);
if (fce)
async_schedule_domain(__async_dev_cache_fw_image,
(void *)fce,
&fw_cache_domain);
}
}
static void __device_uncache_fw_images(void)
{
struct firmware_cache *fwc = &fw_cache;
struct fw_cache_entry *fce;
spin_lock(&fwc->name_lock);
while (!list_empty(&fwc->fw_names)) {
fce = list_entry(fwc->fw_names.next,
struct fw_cache_entry, list);
list_del(&fce->list);
spin_unlock(&fwc->name_lock);
uncache_firmware(fce->name);
free_fw_cache_entry(fce);
spin_lock(&fwc->name_lock);
}
spin_unlock(&fwc->name_lock);
}
/**
* device_cache_fw_images() - cache devices' firmware
*
* If a device called request_firmware or its nowait version
* successfully before, the firmware names are recorded in the
* device's devres list, so device_cache_fw_images can call
* cache_firmware() to cache these firmware images for the device;
* the device driver can then load its firmware easily at a
* time when the system is not ready to complete loading firmware.
*/
static void device_cache_fw_images(void)
{
struct firmware_cache *fwc = &fw_cache;
DEFINE_WAIT(wait);
pr_debug("%s\n", __func__);
/* cancel uncache work */
cancel_delayed_work_sync(&fwc->work);
fw_fallback_set_cache_timeout();
mutex_lock(&fw_lock);
fwc->state = FW_LOADER_START_CACHE;
dpm_for_each_dev(NULL, dev_cache_fw_image);
mutex_unlock(&fw_lock);
/* wait for completion of caching firmware for all devices */
async_synchronize_full_domain(&fw_cache_domain);
fw_fallback_set_default_timeout();
}
/**
* device_uncache_fw_images() - uncache devices' firmware
*
* uncache all firmware images which have been cached successfully
* by device_cache_fw_images earlier
*/
static void device_uncache_fw_images(void)
{
pr_debug("%s\n", __func__);
__device_uncache_fw_images();
}
static void device_uncache_fw_images_work(struct work_struct *work)
{
device_uncache_fw_images();
}
/**
* device_uncache_fw_images_delay() - uncache devices' firmware
* @delay: number of milliseconds to delay before uncaching device firmware
*
* uncache all devices' firmware images which have been cached successfully
* by device_cache_fw_images after @delay milliseconds.
*/
static void device_uncache_fw_images_delay(unsigned long delay)
{
queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
{
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
/*
* kill pending fallback requests with a custom fallback
* to avoid stalling suspend.
*/
kill_pending_fw_fallback_reqs(true);
device_cache_fw_images();
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
/*
* In case system sleep failed and syscore_suspend was
* not called.
*/
mutex_lock(&fw_lock);
fw_cache.state = FW_LOADER_NO_CACHE;
mutex_unlock(&fw_lock);
device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
break;
}
return 0;
}
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
fw_cache.state = FW_LOADER_NO_CACHE;
return 0;
}
static struct syscore_ops fw_syscore_ops = {
.suspend = fw_suspend,
};
static int __init register_fw_pm_ops(void)
{
int ret;
spin_lock_init(&fw_cache.name_lock);
INIT_LIST_HEAD(&fw_cache.fw_names);
INIT_DELAYED_WORK(&fw_cache.work,
device_uncache_fw_images_work);
fw_cache.pm_notify.notifier_call = fw_pm_notify;
ret = register_pm_notifier(&fw_cache.pm_notify);
if (ret)
return ret;
register_syscore_ops(&fw_syscore_ops);
return ret;
}
static inline void unregister_fw_pm_ops(void)
{
unregister_syscore_ops(&fw_syscore_ops);
unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
}
static inline int register_fw_pm_ops(void)
{
return 0;
}
static inline void unregister_fw_pm_ops(void)
{
}
#endif
static void __init fw_cache_init(void)
{
spin_lock_init(&fw_cache.lock);
INIT_LIST_HEAD(&fw_cache.head);
fw_cache.state = FW_LOADER_NO_CACHE;
}
static int fw_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
/*
* Kill all pending fallback requests to avoid both stalling shutdown,
* and avoid a deadlock with the usermode_lock.
*/
kill_pending_fw_fallback_reqs(false);
return NOTIFY_DONE;
}
static struct notifier_block fw_shutdown_nb = {
.notifier_call = fw_shutdown_notify,
};
static int __init firmware_class_init(void)
{
int ret;
/* No need to unfold these on exit */
fw_cache_init();
ret = register_fw_pm_ops();
if (ret)
return ret;
ret = register_reboot_notifier(&fw_shutdown_nb);
if (ret)
goto out;
return register_sysfs_loader();
out:
unregister_fw_pm_ops();
return ret;
}
static void __exit firmware_class_exit(void)
{
unregister_fw_pm_ops();
unregister_reboot_notifier(&fw_shutdown_nb);
unregister_sysfs_loader();
}
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);
| linux-master | drivers/base/firmware_loader/main.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include "fallback.h"
#include "firmware.h"
/*
* firmware fallback mechanism
*/
/*
* Use a small loading timeout for caching devices' firmware because all these
* firmware images have been loaded successfully at least once, and the system
* is ready to complete firmware loading now. The maximum size of firmware in
* current distributions is about 2M bytes, so 10 secs should be enough.
*/
void fw_fallback_set_cache_timeout(void)
{
fw_fallback_config.old_timeout = __firmware_loading_timeout();
__fw_fallback_set_timeout(10);
}
/* Restores the timeout to the value last configured during normal operation */
void fw_fallback_set_default_timeout(void)
{
__fw_fallback_set_timeout(fw_fallback_config.old_timeout);
}
static long firmware_loading_timeout(void)
{
return __firmware_loading_timeout() > 0 ?
__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}
static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
return __fw_state_wait_common(fw_priv, timeout);
}
static LIST_HEAD(pending_fw_head);
void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
struct fw_priv *fw_priv;
struct fw_priv *next;
mutex_lock(&fw_lock);
list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
pending_list) {
if (!fw_priv->need_uevent || !only_kill_custom)
__fw_load_abort(fw_priv);
}
mutex_unlock(&fw_lock);
}
/**
* fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
* @fw_sysfs: firmware sysfs information for the firmware to load
* @timeout: timeout to wait for the load
*
* In charge of constructing a sysfs fallback interface for firmware loading.
**/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
{
int retval = 0;
struct device *f_dev = &fw_sysfs->dev;
struct fw_priv *fw_priv = fw_sysfs->fw_priv;
/* fall back on userspace loading */
if (!fw_priv->data)
fw_priv->is_paged_buf = true;
dev_set_uevent_suppress(f_dev, true);
retval = device_add(f_dev);
if (retval) {
dev_err(f_dev, "%s: device_register failed\n", __func__);
goto err_put_dev;
}
mutex_lock(&fw_lock);
if (fw_state_is_aborted(fw_priv)) {
mutex_unlock(&fw_lock);
retval = -EINTR;
goto out;
}
list_add(&fw_priv->pending_list, &pending_fw_head);
mutex_unlock(&fw_lock);
if (fw_priv->opt_flags & FW_OPT_UEVENT) {
fw_priv->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
} else {
timeout = MAX_JIFFY_OFFSET;
}
retval = fw_sysfs_wait_timeout(fw_priv, timeout);
if (retval < 0 && retval != -ENOENT) {
mutex_lock(&fw_lock);
fw_load_abort(fw_sysfs);
mutex_unlock(&fw_lock);
}
if (fw_state_is_aborted(fw_priv)) {
if (retval == -ERESTARTSYS)
retval = -EINTR;
} else if (fw_priv->is_paged_buf && !fw_priv->data)
retval = -ENOMEM;
out:
device_del(f_dev);
err_put_dev:
put_device(f_dev);
return retval;
}
static int fw_load_from_user_helper(struct firmware *firmware,
const char *name, struct device *device,
u32 opt_flags)
{
struct fw_sysfs *fw_sysfs;
long timeout;
int ret;
timeout = firmware_loading_timeout();
if (opt_flags & FW_OPT_NOWAIT) {
timeout = usermodehelper_read_lock_wait(timeout);
if (!timeout) {
dev_dbg(device, "firmware: %s loading timed out\n",
name);
return -EBUSY;
}
} else {
ret = usermodehelper_read_trylock();
if (WARN_ON(ret)) {
dev_err(device, "firmware: %s will not be loaded\n",
name);
return ret;
}
}
fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
if (IS_ERR(fw_sysfs)) {
ret = PTR_ERR(fw_sysfs);
goto out_unlock;
}
fw_sysfs->fw_priv = firmware->priv;
ret = fw_load_sysfs_fallback(fw_sysfs, timeout);
if (!ret)
ret = assign_fw(firmware, device);
out_unlock:
usermodehelper_read_unlock();
return ret;
}
static bool fw_force_sysfs_fallback(u32 opt_flags)
{
if (fw_fallback_config.force_sysfs_fallback)
return true;
if (!(opt_flags & FW_OPT_USERHELPER))
return false;
return true;
}
static bool fw_run_sysfs_fallback(u32 opt_flags)
{
int ret;
if (fw_fallback_config.ignore_sysfs_fallback) {
pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
return false;
}
if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
return false;
/* Also permit LSMs and IMA to fail firmware sysfs fallback */
ret = security_kernel_load_data(LOADING_FIRMWARE, true);
if (ret < 0)
return false;
return fw_force_sysfs_fallback(opt_flags);
}
/**
* firmware_fallback_sysfs() - use the fallback mechanism to find firmware
* @fw: pointer to firmware image
* @name: name of firmware file to look for
* @device: device for which firmware is being loaded
* @opt_flags: options to control firmware loading behaviour, as defined by
* &enum fw_opt
* @ret: return value from direct lookup which triggered the fallback mechanism
*
* This function is called if direct lookup for the firmware failed; it enables
* a fallback mechanism through userspace by exposing a sysfs loading
* interface. Userspace is in charge of loading the firmware through the sysfs
* loading interface. This sysfs fallback mechanism may be disabled completely
* on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
* If this is false, we check whether the internal API caller set the
* @FW_OPT_NOFALLBACK_SYSFS flag; if so, that also disables the fallback
* mechanism. A system may want to enforce the sysfs fallback mechanism at all
* times; it can do this by setting ignore_sysfs_fallback to false and
* force_sysfs_fallback to true.
* Enabling force_sysfs_fallback is functionally equivalent to building a
* kernel with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
**/
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
u32 opt_flags,
int ret)
{
if (!fw_run_sysfs_fallback(opt_flags))
return ret;
if (!(opt_flags & FW_OPT_NO_WARN))
dev_warn(device, "Falling back to sysfs fallback for: %s\n",
name);
else
dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
name);
return fw_load_from_user_helper(fw, name, device, opt_flags);
}
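/*
 * Illustrative note (assumption, not from the original file): the two knobs
 * referred to above are typically exposed as sysctls, e.g.
 *
 *	echo 1 > /proc/sys/kernel/firmware_config/force_sysfs_fallback
 *	echo 1 > /proc/sys/kernel/firmware_config/ignore_sysfs_fallback
 *
 * and userspace then drives a pending request through the per-device
 * loading/data files exposed by the firmware class (echo 1 > loading,
 * write the image to data, echo 0 > loading). Exact paths may differ by
 * distribution and kernel configuration.
 */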
| linux-master | drivers/base/firmware_loader/fallback.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "sysfs_upload.h"
/*
* Support for user-space to initiate a firmware upload to a device.
*/
static const char * const fw_upload_prog_str[] = {
[FW_UPLOAD_PROG_IDLE] = "idle",
[FW_UPLOAD_PROG_RECEIVING] = "receiving",
[FW_UPLOAD_PROG_PREPARING] = "preparing",
[FW_UPLOAD_PROG_TRANSFERRING] = "transferring",
[FW_UPLOAD_PROG_PROGRAMMING] = "programming"
};
static const char * const fw_upload_err_str[] = {
[FW_UPLOAD_ERR_NONE] = "none",
[FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
[FW_UPLOAD_ERR_TIMEOUT] = "timeout",
[FW_UPLOAD_ERR_CANCELED] = "user-abort",
[FW_UPLOAD_ERR_BUSY] = "device-busy",
[FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
[FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
[FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
};
static const char *fw_upload_progress(struct device *dev,
enum fw_upload_prog prog)
{
const char *status = "unknown-status";
if (prog < FW_UPLOAD_PROG_MAX)
status = fw_upload_prog_str[prog];
else
dev_err(dev, "Invalid status during secure update: %d\n", prog);
return status;
}
static const char *fw_upload_error(struct device *dev,
enum fw_upload_err err_code)
{
const char *error = "unknown-error";
if (err_code < FW_UPLOAD_ERR_MAX)
error = fw_upload_err_str[err_code];
else
dev_err(dev, "Invalid error code during secure update: %d\n",
err_code);
return error;
}
static ssize_t
status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
return sysfs_emit(buf, "%s\n", fw_upload_progress(dev, fwlp->progress));
}
DEVICE_ATTR_RO(status);
static ssize_t
error_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
int ret;
mutex_lock(&fwlp->lock);
if (fwlp->progress != FW_UPLOAD_PROG_IDLE)
ret = -EBUSY;
else if (!fwlp->err_code)
ret = 0;
else
ret = sysfs_emit(buf, "%s:%s\n",
fw_upload_progress(dev, fwlp->err_progress),
fw_upload_error(dev, fwlp->err_code));
mutex_unlock(&fwlp->lock);
return ret;
}
DEVICE_ATTR_RO(error);
static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
int ret = count;
bool cancel;
if (kstrtobool(buf, &cancel) || !cancel)
return -EINVAL;
mutex_lock(&fwlp->lock);
if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
ret = -ENODEV;
fwlp->ops->cancel(fwlp->fw_upload);
mutex_unlock(&fwlp->lock);
return ret;
}
DEVICE_ATTR_WO(cancel);
static ssize_t remaining_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
return sysfs_emit(buf, "%u\n", fwlp->remaining_size);
}
DEVICE_ATTR_RO(remaining_size);
umode_t
fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
static struct fw_sysfs *fw_sysfs;
fw_sysfs = to_fw_sysfs(kobj_to_dev(kobj));
if (fw_sysfs->fw_upload_priv || attr == &dev_attr_loading.attr)
return attr->mode;
return 0;
}
static void fw_upload_update_progress(struct fw_upload_priv *fwlp,
enum fw_upload_prog new_progress)
{
mutex_lock(&fwlp->lock);
fwlp->progress = new_progress;
mutex_unlock(&fwlp->lock);
}
static void fw_upload_set_error(struct fw_upload_priv *fwlp,
enum fw_upload_err err_code)
{
mutex_lock(&fwlp->lock);
fwlp->err_progress = fwlp->progress;
fwlp->err_code = err_code;
mutex_unlock(&fwlp->lock);
}
static void fw_upload_prog_complete(struct fw_upload_priv *fwlp)
{
mutex_lock(&fwlp->lock);
fwlp->progress = FW_UPLOAD_PROG_IDLE;
mutex_unlock(&fwlp->lock);
}
static void fw_upload_main(struct work_struct *work)
{
struct fw_upload_priv *fwlp;
struct fw_sysfs *fw_sysfs;
u32 written = 0, offset = 0;
enum fw_upload_err ret;
struct device *fw_dev;
struct fw_upload *fwl;
fwlp = container_of(work, struct fw_upload_priv, work);
fwl = fwlp->fw_upload;
fw_sysfs = (struct fw_sysfs *)fwl->priv;
fw_dev = &fw_sysfs->dev;
fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PREPARING);
ret = fwlp->ops->prepare(fwl, fwlp->data, fwlp->remaining_size);
if (ret != FW_UPLOAD_ERR_NONE) {
fw_upload_set_error(fwlp, ret);
goto putdev_exit;
}
fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_TRANSFERRING);
while (fwlp->remaining_size) {
ret = fwlp->ops->write(fwl, fwlp->data, offset,
fwlp->remaining_size, &written);
if (ret != FW_UPLOAD_ERR_NONE || !written) {
if (ret == FW_UPLOAD_ERR_NONE) {
dev_warn(fw_dev, "write-op wrote zero data\n");
ret = FW_UPLOAD_ERR_RW_ERROR;
}
fw_upload_set_error(fwlp, ret);
goto done;
}
fwlp->remaining_size -= written;
offset += written;
}
fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PROGRAMMING);
ret = fwlp->ops->poll_complete(fwl);
if (ret != FW_UPLOAD_ERR_NONE)
fw_upload_set_error(fwlp, ret);
done:
if (fwlp->ops->cleanup)
fwlp->ops->cleanup(fwl);
putdev_exit:
put_device(fw_dev->parent);
/*
* Note: fwlp->remaining_size is left unmodified here to provide
* additional information on errors. It will be reinitialized when
 * the next firmware upload begins.
*/
mutex_lock(&fw_lock);
fw_free_paged_buf(fw_sysfs->fw_priv);
fw_state_init(fw_sysfs->fw_priv);
mutex_unlock(&fw_lock);
fwlp->data = NULL;
fw_upload_prog_complete(fwlp);
}
/*
* Start a worker thread to upload data to the parent driver.
* Must be called with fw_lock held.
*/
int fw_upload_start(struct fw_sysfs *fw_sysfs)
{
struct fw_priv *fw_priv = fw_sysfs->fw_priv;
struct device *fw_dev = &fw_sysfs->dev;
struct fw_upload_priv *fwlp;
if (!fw_sysfs->fw_upload_priv)
return 0;
if (!fw_priv->size) {
fw_free_paged_buf(fw_priv);
fw_state_init(fw_sysfs->fw_priv);
return 0;
}
fwlp = fw_sysfs->fw_upload_priv;
mutex_lock(&fwlp->lock);
/* Do not interfere with an on-going fw_upload */
if (fwlp->progress != FW_UPLOAD_PROG_IDLE) {
mutex_unlock(&fwlp->lock);
return -EBUSY;
}
get_device(fw_dev->parent); /* released in fw_upload_main */
fwlp->progress = FW_UPLOAD_PROG_RECEIVING;
fwlp->err_code = 0;
fwlp->remaining_size = fw_priv->size;
fwlp->data = fw_priv->data;
pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
__func__, fw_priv->fw_name,
fw_priv, fw_priv->data,
(unsigned int)fw_priv->size);
queue_work(system_long_wq, &fwlp->work);
mutex_unlock(&fwlp->lock);
return 0;
}
void fw_upload_free(struct fw_sysfs *fw_sysfs)
{
struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
free_fw_priv(fw_sysfs->fw_priv);
kfree(fw_upload_priv->fw_upload);
kfree(fw_upload_priv);
}
/**
* firmware_upload_register() - register for the firmware upload sysfs API
* @module: kernel module of this device
* @parent: parent device instantiating firmware upload
* @name: firmware name to be associated with this device
* @ops: pointer to structure of firmware upload ops
* @dd_handle: pointer to parent driver private data
*
* @name must be unique among all users of firmware upload. The firmware
* sysfs files for this device will be found at /sys/class/firmware/@name.
*
* Return: struct fw_upload pointer or ERR_PTR()
*
**/
struct fw_upload *
firmware_upload_register(struct module *module, struct device *parent,
const char *name, const struct fw_upload_ops *ops,
void *dd_handle)
{
u32 opt_flags = FW_OPT_NOCACHE;
struct fw_upload *fw_upload;
struct fw_upload_priv *fw_upload_priv;
struct fw_sysfs *fw_sysfs;
struct fw_priv *fw_priv;
struct device *fw_dev;
int ret;
if (!name || name[0] == '\0')
return ERR_PTR(-EINVAL);
if (!ops || !ops->cancel || !ops->prepare ||
!ops->write || !ops->poll_complete) {
dev_err(parent, "Attempt to register without all required ops\n");
return ERR_PTR(-EINVAL);
}
if (!try_module_get(module))
return ERR_PTR(-EFAULT);
fw_upload = kzalloc(sizeof(*fw_upload), GFP_KERNEL);
if (!fw_upload) {
ret = -ENOMEM;
goto exit_module_put;
}
fw_upload_priv = kzalloc(sizeof(*fw_upload_priv), GFP_KERNEL);
if (!fw_upload_priv) {
ret = -ENOMEM;
goto free_fw_upload;
}
fw_upload_priv->fw_upload = fw_upload;
fw_upload_priv->ops = ops;
mutex_init(&fw_upload_priv->lock);
fw_upload_priv->module = module;
fw_upload_priv->name = name;
fw_upload_priv->err_code = 0;
fw_upload_priv->progress = FW_UPLOAD_PROG_IDLE;
INIT_WORK(&fw_upload_priv->work, fw_upload_main);
fw_upload->dd_handle = dd_handle;
fw_sysfs = fw_create_instance(NULL, name, parent, opt_flags);
if (IS_ERR(fw_sysfs)) {
ret = PTR_ERR(fw_sysfs);
goto free_fw_upload_priv;
}
fw_upload->priv = fw_sysfs;
fw_sysfs->fw_upload_priv = fw_upload_priv;
fw_dev = &fw_sysfs->dev;
ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, NULL, 0, 0,
FW_OPT_NOCACHE);
if (ret != 0) {
if (ret > 0)
ret = -EINVAL;
goto free_fw_sysfs;
}
fw_priv->is_paged_buf = true;
fw_sysfs->fw_priv = fw_priv;
ret = device_add(fw_dev);
if (ret) {
dev_err(fw_dev, "%s: device_register failed\n", __func__);
put_device(fw_dev);
goto exit_module_put;
}
return fw_upload;
free_fw_sysfs:
kfree(fw_sysfs);
free_fw_upload_priv:
kfree(fw_upload_priv);
free_fw_upload:
kfree(fw_upload);
exit_module_put:
module_put(module);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(firmware_upload_register);
/**
* firmware_upload_unregister() - Unregister firmware upload interface
* @fw_upload: pointer to struct fw_upload
**/
void firmware_upload_unregister(struct fw_upload *fw_upload)
{
struct fw_sysfs *fw_sysfs = fw_upload->priv;
struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
struct module *module = fw_upload_priv->module;
mutex_lock(&fw_upload_priv->lock);
if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
mutex_unlock(&fw_upload_priv->lock);
goto unregister;
}
fw_upload_priv->ops->cancel(fw_upload);
mutex_unlock(&fw_upload_priv->lock);
/* Ensure lower-level device-driver is finished */
flush_work(&fw_upload_priv->work);
unregister:
device_unregister(&fw_sysfs->dev);
module_put(module);
}
EXPORT_SYMBOL_GPL(firmware_upload_unregister);
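/*
 * Illustrative sketch (assumption, not part of the original file): a parent
 * driver wiring up firmware upload. The ops prototypes below mirror the call
 * sites in fw_upload_main() above, but the exact signatures, the device, the
 * "example-fw" name, and the trivial bodies are placeholders rather than the
 * authoritative API definition.
 */
static enum fw_upload_err example_prepare(struct fw_upload *fwl,
					  const u8 *data, u32 size)
{
	return size ? FW_UPLOAD_ERR_NONE : FW_UPLOAD_ERR_INVALID_SIZE;
}

static enum fw_upload_err example_write(struct fw_upload *fwl, const u8 *data,
					u32 offset, u32 size, u32 *written)
{
	/* pretend the whole remaining chunk was pushed to the hardware */
	*written = size;
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err example_poll_complete(struct fw_upload *fwl)
{
	return FW_UPLOAD_ERR_NONE;
}

static void example_cancel(struct fw_upload *fwl)
{
}

static const struct fw_upload_ops example_upload_ops __maybe_unused = {
	.prepare	= example_prepare,
	.write		= example_write,
	.poll_complete	= example_poll_complete,
	.cancel		= example_cancel,
};

/*
 * In the parent driver's probe:
 *	fwl = firmware_upload_register(THIS_MODULE, dev, "example-fw",
 *				       &example_upload_ops, drvdata);
 * and in remove:
 *	firmware_upload_unregister(fwl);
 */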
| linux-master | drivers/base/firmware_loader/sysfs_upload.c |
// SPDX-License-Identifier: GPL-2.0
/* Builtin firmware support */
#include <linux/firmware.h>
#include "../firmware.h"
/* Only if FW_LOADER=y */
#ifdef CONFIG_FW_LOADER
struct builtin_fw {
char *name;
void *data;
unsigned long size;
};
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
static bool fw_copy_to_prealloc_buf(struct firmware *fw,
void *buf, size_t size)
{
if (!buf)
return true;
if (size < fw->size)
return false;
memcpy(buf, fw->data, fw->size);
return true;
}
/**
* firmware_request_builtin() - load builtin firmware
* @fw: pointer to firmware struct
* @name: name of firmware file
*
 * Some use cases in the kernel require that no memory allocator be involved,
 * as these calls take place early in the boot process. An example is the x86
 * CPU microcode loader. In these cases all the caller wants is to see if the
 * firmware was built in and, if so, use it right away. This can be used for
 * such cases.
 *
 * This looks for the firmware among those built into the kernel. It only
 * returns successfully if the firmware you are looking for was built in.
*
* Callers of this API do not need to use release_firmware() as the pointer to
* the firmware is expected to be provided locally on the stack of the caller.
**/
bool firmware_request_builtin(struct firmware *fw, const char *name)
{
struct builtin_fw *b_fw;
if (!fw)
return false;
for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
if (strcmp(name, b_fw->name) == 0) {
fw->size = b_fw->size;
fw->data = b_fw->data;
return true;
}
}
return false;
}
EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, TEST_FIRMWARE);
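/*
 * Illustrative sketch (not part of this file): an early-boot caller checking
 * for a built-in image. The firmware name is a made-up example; fw lives on
 * the caller's stack and needs no release_firmware().
 */
#if 0
static void my_early_load(void)
{
	struct firmware fw;
	if (!firmware_request_builtin(&fw, "my-vendor/my-early-image.bin"))
		return;		/* not built into this kernel */
	/* fw.data / fw.size are valid here; no allocation took place */
}
#endif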
/**
* firmware_request_builtin_buf() - load builtin firmware into optional buffer
* @fw: pointer to firmware struct
* @name: name of firmware file
 * @buf: If set, this lets you use a pre-allocated buffer into which the
 *	built-in firmware is copied. This field can be NULL. It is used by
* callers such as request_firmware_into_buf() and
* request_partial_firmware_into_buf()
* @size: if buf was provided, the max size of the allocated buffer available.
* If the built-in firmware does not fit into the pre-allocated @buf this
* call will fail.
*
 * This looks for the firmware among those built into the kernel. This call
 * can only succeed if the firmware you are looking for was built into the
 * kernel. If you passed a @buf the firmware will be copied into it *iff* the
* built-in firmware fits into the pre-allocated buffer size specified in
* @size.
*
 * This helper is to be used internally by the firmware_loader only.
**/
bool firmware_request_builtin_buf(struct firmware *fw, const char *name,
void *buf, size_t size)
{
if (!firmware_request_builtin(fw, name))
return false;
return fw_copy_to_prealloc_buf(fw, buf, size);
}
bool firmware_is_builtin(const struct firmware *fw)
{
struct builtin_fw *b_fw;
for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
if (fw->data == b_fw->data)
return true;
return false;
}
#endif
| linux-master | drivers/base/firmware_loader/builtin/main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/export.h>
#include "w1_internal.h"
DEFINE_SPINLOCK(w1_flock);
static LIST_HEAD(w1_families);
/**
* w1_register_family() - register a device family driver
* @newf: family to register
*/
int w1_register_family(struct w1_family *newf)
{
struct list_head *ent, *n;
struct w1_family *f;
int ret = 0;
spin_lock(&w1_flock);
list_for_each_safe(ent, n, &w1_families) {
f = list_entry(ent, struct w1_family, family_entry);
if (f->fid == newf->fid) {
ret = -EEXIST;
break;
}
}
if (!ret) {
atomic_set(&newf->refcnt, 0);
list_add_tail(&newf->family_entry, &w1_families);
}
spin_unlock(&w1_flock);
/* check default devices against the new set of drivers */
w1_reconnect_slaves(newf, 1);
return ret;
}
EXPORT_SYMBOL(w1_register_family);
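/*
 * Illustrative sketch (not part of this file): a slave (family) driver
 * registering itself. The 0x28 family code matches the DS18B20, but the
 * my_* ops and module wiring are hypothetical.
 */
#if 0
static int my_add_slave(struct w1_slave *sl)
{
	return 0;	/* per-slave init; sysfs files come from .groups */
}
static void my_remove_slave(struct w1_slave *sl)
{
}
static const struct w1_family_ops my_fops = {
	.add_slave = my_add_slave,
	.remove_slave = my_remove_slave,
};
static struct w1_family my_family = {
	.fid = 0x28,
	.fops = &my_fops,
};
static int __init my_family_init(void)
{
	return w1_register_family(&my_family);
}
static void __exit my_family_exit(void)
{
	w1_unregister_family(&my_family);
}
module_init(my_family_init);
module_exit(my_family_exit);
#endif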
/**
* w1_unregister_family() - unregister a device family driver
* @fent: family to unregister
*/
void w1_unregister_family(struct w1_family *fent)
{
struct list_head *ent, *n;
struct w1_family *f;
spin_lock(&w1_flock);
list_for_each_safe(ent, n, &w1_families) {
f = list_entry(ent, struct w1_family, family_entry);
if (f->fid == fent->fid) {
list_del(&fent->family_entry);
break;
}
}
spin_unlock(&w1_flock);
	/* detach devices using this family code */
w1_reconnect_slaves(fent, 0);
while (atomic_read(&fent->refcnt)) {
pr_info("Waiting for family %u to become free: refcnt=%d.\n",
fent->fid, atomic_read(&fent->refcnt));
if (msleep_interruptible(1000))
flush_signals(current);
}
}
EXPORT_SYMBOL(w1_unregister_family);
/*
* Should be called under w1_flock held.
*/
struct w1_family * w1_family_registered(u8 fid)
{
struct list_head *ent, *n;
struct w1_family *f = NULL;
int ret = 0;
list_for_each_safe(ent, n, &w1_families) {
f = list_entry(ent, struct w1_family, family_entry);
if (f->fid == fid) {
ret = 1;
break;
}
}
return (ret) ? f : NULL;
}
static void __w1_family_put(struct w1_family *f)
{
atomic_dec(&f->refcnt);
}
void w1_family_put(struct w1_family *f)
{
spin_lock(&w1_flock);
__w1_family_put(f);
spin_unlock(&w1_flock);
}
#if 0
void w1_family_get(struct w1_family *f)
{
spin_lock(&w1_flock);
__w1_family_get(f);
spin_unlock(&w1_flock);
}
#endif /* 0 */
void __w1_family_get(struct w1_family *f)
{
smp_mb__before_atomic();
atomic_inc(&f->refcnt);
smp_mb__after_atomic();
}
| linux-master | drivers/w1/w1_family.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include "w1_internal.h"
#include "w1_netlink.h"
static int w1_search_count = -1; /* Default is continual scan */
module_param_named(search_count, w1_search_count, int, 0);
static int w1_enable_pullup = 1;
module_param_named(enable_pullup, w1_enable_pullup, int, 0);
static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
struct device_driver *driver,
struct device *device)
{
struct w1_master *dev;
int err;
/*
	 * We are in process context (kernel thread), so we can sleep.
*/
dev = kzalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
if (!dev) {
pr_err("Failed to allocate %zd bytes for new w1 device.\n",
sizeof(struct w1_master));
return NULL;
}
dev->bus_master = (struct w1_bus_master *)(dev + 1);
dev->owner = THIS_MODULE;
dev->max_slave_count = slave_count;
dev->slave_count = 0;
dev->attempts = 0;
dev->initialized = 0;
dev->id = id;
dev->slave_ttl = slave_ttl;
dev->search_count = w1_search_count;
dev->enable_pullup = w1_enable_pullup;
/* For __w1_remove_master_device to decrement
*/
atomic_set(&dev->refcnt, 1);
INIT_LIST_HEAD(&dev->slist);
INIT_LIST_HEAD(&dev->async_list);
mutex_init(&dev->mutex);
mutex_init(&dev->bus_mutex);
mutex_init(&dev->list_mutex);
memcpy(&dev->dev, device, sizeof(struct device));
dev_set_name(&dev->dev, "w1_bus_master%u", dev->id);
snprintf(dev->name, sizeof(dev->name), "w1_bus_master%u", dev->id);
dev->dev.init_name = dev->name;
dev->driver = driver;
dev->seq = 1;
err = device_register(&dev->dev);
if (err) {
pr_err("Failed to register master device. err=%d\n", err);
put_device(&dev->dev);
dev = NULL;
}
return dev;
}
static void w1_free_dev(struct w1_master *dev)
{
device_unregister(&dev->dev);
}
/**
* w1_add_master_device() - registers a new master device
* @master: master bus device to register
*/
int w1_add_master_device(struct w1_bus_master *master)
{
struct w1_master *dev, *entry;
int retval = 0;
struct w1_netlink_msg msg;
int id, found;
/* validate minimum functionality */
if (!(master->touch_bit && master->reset_bus) &&
!(master->write_bit && master->read_bit) &&
!(master->write_byte && master->read_byte && master->reset_bus)) {
pr_err("w1_add_master_device: invalid function set\n");
return(-EINVAL);
}
/* Lock until the device is added (or not) to w1_masters. */
mutex_lock(&w1_mlock);
/* Search for the first available id (starting at 1). */
id = 0;
do {
++id;
found = 0;
list_for_each_entry(entry, &w1_masters, w1_master_entry) {
if (entry->id == id) {
found = 1;
break;
}
}
} while (found);
dev = w1_alloc_dev(id, w1_max_slave_count, w1_max_slave_ttl,
&w1_master_driver, &w1_master_device);
if (!dev) {
mutex_unlock(&w1_mlock);
return -ENOMEM;
}
retval = w1_create_master_attributes(dev);
if (retval) {
mutex_unlock(&w1_mlock);
goto err_out_free_dev;
}
memcpy(dev->bus_master, master, sizeof(struct w1_bus_master));
dev->initialized = 1;
dev->thread = kthread_run(&w1_process, dev, "%s", dev->name);
if (IS_ERR(dev->thread)) {
retval = PTR_ERR(dev->thread);
dev_err(&dev->dev,
"Failed to create new kernel thread. err=%d\n",
retval);
mutex_unlock(&w1_mlock);
goto err_out_rm_attr;
}
list_add(&dev->w1_master_entry, &w1_masters);
mutex_unlock(&w1_mlock);
memset(&msg, 0, sizeof(msg));
msg.id.mst.id = dev->id;
msg.type = W1_MASTER_ADD;
w1_netlink_send(dev, &msg);
return 0;
#if 0 /* Thread cleanup code, not required currently. */
err_out_kill_thread:
set_bit(W1_ABORT_SEARCH, &dev->flags);
kthread_stop(dev->thread);
#endif
err_out_rm_attr:
w1_destroy_master_attributes(dev);
err_out_free_dev:
w1_free_dev(dev);
return retval;
}
EXPORT_SYMBOL(w1_add_master_device);
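/*
 * Illustrative sketch (not part of this file): a bit-banging bus master
 * driver registering with the core. Only read_bit/write_bit are provided,
 * which is one of the minimal function sets accepted by the validation
 * above; all my_* names are hypothetical.
 */
#if 0
static u8 my_read_bit(void *data)
{
	return 1;	/* sample the bus line here */
}
static void my_write_bit(void *data, u8 bit)
{
	/* drive the bus line here */
}
static struct w1_bus_master my_bm = {
	.data      = NULL,	/* driver private context */
	.read_bit  = my_read_bit,
	.write_bit = my_write_bit,
};
static int my_probe(void)
{
	/* undo later with w1_remove_master_device(&my_bm) */
	return w1_add_master_device(&my_bm);
}
#endif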
void __w1_remove_master_device(struct w1_master *dev)
{
struct w1_netlink_msg msg;
struct w1_slave *sl, *sln;
mutex_lock(&w1_mlock);
list_del(&dev->w1_master_entry);
mutex_unlock(&w1_mlock);
set_bit(W1_ABORT_SEARCH, &dev->flags);
kthread_stop(dev->thread);
mutex_lock(&dev->mutex);
mutex_lock(&dev->list_mutex);
list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
mutex_unlock(&dev->list_mutex);
w1_slave_detach(sl);
mutex_lock(&dev->list_mutex);
}
w1_destroy_master_attributes(dev);
mutex_unlock(&dev->list_mutex);
mutex_unlock(&dev->mutex);
atomic_dec(&dev->refcnt);
while (atomic_read(&dev->refcnt)) {
dev_info(&dev->dev, "Waiting for %s to become free: refcnt=%d.\n",
dev->name, atomic_read(&dev->refcnt));
if (msleep_interruptible(1000))
flush_signals(current);
mutex_lock(&dev->list_mutex);
w1_process_callbacks(dev);
mutex_unlock(&dev->list_mutex);
}
mutex_lock(&dev->list_mutex);
w1_process_callbacks(dev);
mutex_unlock(&dev->list_mutex);
memset(&msg, 0, sizeof(msg));
msg.id.mst.id = dev->id;
msg.type = W1_MASTER_REMOVE;
w1_netlink_send(dev, &msg);
w1_free_dev(dev);
}
/**
* w1_remove_master_device() - unregister a master device
* @bm: master bus device to remove
*/
void w1_remove_master_device(struct w1_bus_master *bm)
{
struct w1_master *dev, *found = NULL;
list_for_each_entry(dev, &w1_masters, w1_master_entry) {
if (!dev->initialized)
continue;
if (dev->bus_master->data == bm->data) {
found = dev;
break;
}
}
if (!found) {
pr_err("Device doesn't exist.\n");
return;
}
__w1_remove_master_device(found);
}
EXPORT_SYMBOL(w1_remove_master_device);
| linux-master | drivers/w1/w1_int.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include "w1_internal.h"
static int w1_delay_parm = 1;
module_param_named(delay_coef, w1_delay_parm, int, 0);
static int w1_disable_irqs = 0;
module_param_named(disable_irqs, w1_disable_irqs, int, 0);
static u8 w1_crc8_table[] = {
0, 94, 188, 226, 97, 63, 221, 131, 194, 156, 126, 32, 163, 253, 31, 65,
157, 195, 33, 127, 252, 162, 64, 30, 95, 1, 227, 189, 62, 96, 130, 220,
35, 125, 159, 193, 66, 28, 254, 160, 225, 191, 93, 3, 128, 222, 60, 98,
190, 224, 2, 92, 223, 129, 99, 61, 124, 34, 192, 158, 29, 67, 161, 255,
70, 24, 250, 164, 39, 121, 155, 197, 132, 218, 56, 102, 229, 187, 89, 7,
219, 133, 103, 57, 186, 228, 6, 88, 25, 71, 165, 251, 120, 38, 196, 154,
101, 59, 217, 135, 4, 90, 184, 230, 167, 249, 27, 69, 198, 152, 122, 36,
248, 166, 68, 26, 153, 199, 37, 123, 58, 100, 134, 216, 91, 5, 231, 185,
140, 210, 48, 110, 237, 179, 81, 15, 78, 16, 242, 172, 47, 113, 147, 205,
17, 79, 173, 243, 112, 46, 204, 146, 211, 141, 111, 49, 178, 236, 14, 80,
175, 241, 19, 77, 206, 144, 114, 44, 109, 51, 209, 143, 12, 82, 176, 238,
50, 108, 142, 208, 83, 13, 239, 177, 240, 174, 76, 18, 145, 207, 45, 115,
202, 148, 118, 40, 171, 245, 23, 73, 8, 86, 180, 234, 105, 55, 213, 139,
87, 9, 235, 181, 54, 104, 138, 212, 149, 203, 41, 119, 244, 170, 72, 22,
233, 183, 85, 11, 136, 214, 52, 106, 43, 117, 151, 201, 74, 20, 246, 168,
116, 42, 200, 150, 21, 75, 169, 247, 182, 232, 10, 84, 215, 137, 107, 53
};
static void w1_delay(unsigned long tm)
{
udelay(tm * w1_delay_parm);
}
static void w1_write_bit(struct w1_master *dev, int bit);
static u8 w1_read_bit(struct w1_master *dev);
/**
* w1_touch_bit() - Generates a write-0 or write-1 cycle and samples the level.
* @dev: the master device
 * @bit: 0 - write a 0, 1 - write a 1 cycle and sample the level
*/
u8 w1_touch_bit(struct w1_master *dev, int bit)
{
if (dev->bus_master->touch_bit)
return dev->bus_master->touch_bit(dev->bus_master->data, bit);
else if (bit)
return w1_read_bit(dev);
else {
w1_write_bit(dev, 0);
return 0;
}
}
EXPORT_SYMBOL_GPL(w1_touch_bit);
/**
* w1_write_bit() - Generates a write-0 or write-1 cycle.
* @dev: the master device
* @bit: bit to write
*
* Only call if dev->bus_master->touch_bit is NULL
*/
static void w1_write_bit(struct w1_master *dev, int bit)
{
unsigned long flags = 0;
if(w1_disable_irqs) local_irq_save(flags);
if (bit) {
dev->bus_master->write_bit(dev->bus_master->data, 0);
w1_delay(6);
dev->bus_master->write_bit(dev->bus_master->data, 1);
w1_delay(64);
} else {
dev->bus_master->write_bit(dev->bus_master->data, 0);
w1_delay(60);
dev->bus_master->write_bit(dev->bus_master->data, 1);
w1_delay(10);
}
if(w1_disable_irqs) local_irq_restore(flags);
}
/**
* w1_pre_write() - pre-write operations
* @dev: the master device
*
* Pre-write operation, currently only supporting strong pullups.
* Program the hardware for a strong pullup, if one has been requested and
* the hardware supports it.
*/
static void w1_pre_write(struct w1_master *dev)
{
if (dev->pullup_duration &&
dev->enable_pullup && dev->bus_master->set_pullup) {
dev->bus_master->set_pullup(dev->bus_master->data,
dev->pullup_duration);
}
}
/**
* w1_post_write() - post-write options
* @dev: the master device
*
* Post-write operation, currently only supporting strong pullups.
* If a strong pullup was requested, clear it if the hardware supports
* them, or execute the delay otherwise, in either case clear the request.
*/
static void w1_post_write(struct w1_master *dev)
{
if (dev->pullup_duration) {
if (dev->enable_pullup && dev->bus_master->set_pullup)
dev->bus_master->set_pullup(dev->bus_master->data, 0);
else
msleep(dev->pullup_duration);
dev->pullup_duration = 0;
}
}
/**
* w1_write_8() - Writes 8 bits.
* @dev: the master device
* @byte: the byte to write
*/
void w1_write_8(struct w1_master *dev, u8 byte)
{
int i;
if (dev->bus_master->write_byte) {
w1_pre_write(dev);
dev->bus_master->write_byte(dev->bus_master->data, byte);
}
else
for (i = 0; i < 8; ++i) {
if (i == 7)
w1_pre_write(dev);
w1_touch_bit(dev, (byte >> i) & 0x1);
}
w1_post_write(dev);
}
EXPORT_SYMBOL_GPL(w1_write_8);
/**
* w1_read_bit() - Generates a write-1 cycle and samples the level.
* @dev: the master device
*
* Only call if dev->bus_master->touch_bit is NULL
*/
static u8 w1_read_bit(struct w1_master *dev)
{
int result;
unsigned long flags = 0;
/* sample timing is critical here */
local_irq_save(flags);
dev->bus_master->write_bit(dev->bus_master->data, 0);
w1_delay(6);
dev->bus_master->write_bit(dev->bus_master->data, 1);
w1_delay(9);
result = dev->bus_master->read_bit(dev->bus_master->data);
local_irq_restore(flags);
w1_delay(55);
return result & 0x1;
}
/**
 * w1_triplet() - Does a triplet - used for searching ROM addresses.
* @dev: the master device
* @bdir: the bit to write if both id_bit and comp_bit are 0
*
* Return bits:
* bit 0 = id_bit
* bit 1 = comp_bit
* bit 2 = dir_taken
*
* If both bits 0 & 1 are set, the search should be restarted.
*
* Return: bit fields - see above
*/
u8 w1_triplet(struct w1_master *dev, int bdir)
{
if (dev->bus_master->triplet)
return dev->bus_master->triplet(dev->bus_master->data, bdir);
else {
u8 id_bit = w1_touch_bit(dev, 1);
u8 comp_bit = w1_touch_bit(dev, 1);
u8 retval;
if (id_bit && comp_bit)
return 0x03; /* error */
if (!id_bit && !comp_bit) {
/* Both bits are valid, take the direction given */
retval = bdir ? 0x04 : 0;
} else {
/* Only one bit is valid, take that direction */
bdir = id_bit;
retval = id_bit ? 0x05 : 0x02;
}
if (dev->bus_master->touch_bit)
w1_touch_bit(dev, bdir);
else
w1_write_bit(dev, bdir);
return retval;
}
}
EXPORT_SYMBOL_GPL(w1_triplet);
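/*
 * Illustrative sketch (not part of this file): decoding the w1_triplet()
 * return value described above (bit 0 = id_bit, bit 1 = comp_bit,
 * bit 2 = direction taken). The my_* helper is hypothetical; the
 * w1_search() loop does the same thing inline.
 */
#if 0
static int my_search_step(struct w1_master *dev, int bdir, u64 *rn, int i)
{
	u8 ret = w1_triplet(dev, bdir);
	if ((ret & 0x03) == 0x03)
		return -ENODEV;			/* no device answered */
	*rn |= (u64)((ret >> 2) & 0x1) << i;	/* record the direction taken */
	return 0;
}
#endif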
/**
* w1_read_8() - Reads 8 bits.
* @dev: the master device
*
* Return: the byte read
*/
u8 w1_read_8(struct w1_master *dev)
{
int i;
u8 res = 0;
if (dev->bus_master->read_byte)
res = dev->bus_master->read_byte(dev->bus_master->data);
else
for (i = 0; i < 8; ++i)
res |= (w1_touch_bit(dev,1) << i);
return res;
}
EXPORT_SYMBOL_GPL(w1_read_8);
/**
* w1_write_block() - Writes a series of bytes.
* @dev: the master device
* @buf: pointer to the data to write
* @len: the number of bytes to write
*/
void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
{
int i;
if (dev->bus_master->write_block) {
w1_pre_write(dev);
dev->bus_master->write_block(dev->bus_master->data, buf, len);
}
else
for (i = 0; i < len; ++i)
w1_write_8(dev, buf[i]); /* calls w1_pre_write */
w1_post_write(dev);
}
EXPORT_SYMBOL_GPL(w1_write_block);
/**
* w1_touch_block() - Touches a series of bytes.
* @dev: the master device
* @buf: pointer to the data to write
* @len: the number of bytes to write
*/
void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
{
int i, j;
u8 tmp;
for (i = 0; i < len; ++i) {
tmp = 0;
for (j = 0; j < 8; ++j) {
if (j == 7)
w1_pre_write(dev);
tmp |= w1_touch_bit(dev, (buf[i] >> j) & 0x1) << j;
}
buf[i] = tmp;
}
}
EXPORT_SYMBOL_GPL(w1_touch_block);
/**
* w1_read_block() - Reads a series of bytes.
* @dev: the master device
* @buf: pointer to the buffer to fill
* @len: the number of bytes to read
* Return: the number of bytes read
*/
u8 w1_read_block(struct w1_master *dev, u8 *buf, int len)
{
int i;
u8 ret;
if (dev->bus_master->read_block)
ret = dev->bus_master->read_block(dev->bus_master->data, buf, len);
else {
for (i = 0; i < len; ++i)
buf[i] = w1_read_8(dev);
ret = len;
}
return ret;
}
EXPORT_SYMBOL_GPL(w1_read_block);
/**
* w1_reset_bus() - Issues a reset bus sequence.
* @dev: the master device
* Return: 0=Device present, 1=No device present or error
*/
int w1_reset_bus(struct w1_master *dev)
{
int result;
unsigned long flags = 0;
if(w1_disable_irqs) local_irq_save(flags);
if (dev->bus_master->reset_bus)
result = dev->bus_master->reset_bus(dev->bus_master->data) & 0x1;
else {
dev->bus_master->write_bit(dev->bus_master->data, 0);
/* minimum 480, max ? us
* be nice and sleep, except 18b20 spec lists 960us maximum,
* so until we can sleep with microsecond accuracy, spin.
* Feel free to come up with some other way to give up the
* cpu for such a short amount of time AND get it back in
* the maximum amount of time.
*/
w1_delay(500);
dev->bus_master->write_bit(dev->bus_master->data, 1);
w1_delay(70);
result = dev->bus_master->read_bit(dev->bus_master->data) & 0x1;
/* minimum 70 (above) + 430 = 500 us
* There aren't any timing requirements between a reset and
* the following transactions. Sleeping is safe here.
*/
/* w1_delay(430); min required time */
msleep(1);
}
if(w1_disable_irqs) local_irq_restore(flags);
return result;
}
EXPORT_SYMBOL_GPL(w1_reset_bus);
u8 w1_calc_crc8(u8 * data, int len)
{
u8 crc = 0;
while (len--)
crc = w1_crc8_table[crc ^ *data++];
return crc;
}
EXPORT_SYMBOL_GPL(w1_calc_crc8);
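/*
 * Illustrative sketch (not part of this file): validating a 64-bit ROM id
 * with w1_calc_crc8(). The last byte of the id is the CRC over the first
 * seven bytes (family code plus 48-bit serial).
 */
#if 0
static bool my_rom_id_ok(const u8 rom[8])
{
	return w1_calc_crc8((u8 *)rom, 7) == rom[7];
}
#endif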
void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb)
{
dev->attempts++;
if (dev->bus_master->search)
dev->bus_master->search(dev->bus_master->data, dev,
search_type, cb);
else
w1_search(dev, search_type, cb);
}
/**
* w1_reset_select_slave() - reset and select a slave
* @sl: the slave to select
*
* Resets the bus and then selects the slave by sending either a skip rom
* or a rom match. A skip rom is issued if there is only one device
* registered on the bus.
* The w1 master lock must be held.
*
* Return: 0=success, anything else=error
*/
int w1_reset_select_slave(struct w1_slave *sl)
{
if (w1_reset_bus(sl->master))
return -1;
if (sl->master->slave_count == 1)
w1_write_8(sl->master, W1_SKIP_ROM);
else {
u8 match[9] = {W1_MATCH_ROM, };
u64 rn = le64_to_cpu(*((u64*)&sl->reg_num));
memcpy(&match[1], &rn, 8);
w1_write_block(sl->master, match, 9);
}
return 0;
}
EXPORT_SYMBOL_GPL(w1_reset_select_slave);
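/*
 * Illustrative sketch (not part of this file): a typical slave transaction
 * built on w1_reset_select_slave(). The 0xBE command and 9-byte read mimic
 * a DS18B20 "read scratchpad"; the my_* helper and the bus_mutex locking
 * shown here are only an assumption of how a family driver might do it.
 */
#if 0
static int my_read_scratchpad(struct w1_slave *sl, u8 buf[9])
{
	int ret = -EIO;
	mutex_lock(&sl->master->bus_mutex);
	if (!w1_reset_select_slave(sl)) {
		w1_write_8(sl->master, 0xBE);
		if (w1_read_block(sl->master, buf, 9) == 9 &&
		    w1_calc_crc8(buf, 8) == buf[8])
			ret = 0;
	}
	mutex_unlock(&sl->master->bus_mutex);
	return ret;
}
#endif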
/**
* w1_reset_resume_command() - resume instead of another match ROM
* @dev: the master device
*
 * When the workflow with a slave amongst many requires several
 * successive commands with a reset between each, this function is
 * similar to doing a reset then a match ROM for the last matched ROM.
 * The advantage is that the match ROM step is skipped in favor of the
 * resume command. The slave must support the command, of course.
 *
 * If the bus has only one slave, traditionally the match ROM is skipped
 * and a "SKIP ROM" is done for efficiency. On multi-slave buses, this
* doesn't work of course, but the resume command is the next best thing.
*
* The w1 master lock must be held.
*/
int w1_reset_resume_command(struct w1_master *dev)
{
if (w1_reset_bus(dev))
return -1;
w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM);
return 0;
}
EXPORT_SYMBOL_GPL(w1_reset_resume_command);
/**
* w1_next_pullup() - register for a strong pullup
* @dev: the master device
* @delay: time in milliseconds
*
* Put out a strong pull-up of the specified duration after the next write
* operation. Not all hardware supports strong pullups. Hardware that
* doesn't support strong pullups will sleep for the given time after the
* write operation without a strong pullup. This is a one shot request for
* the next write, specifying zero will clear a previous request.
* The w1 master lock must be held.
*
void w1_next_pullup(struct w1_master *dev, int delay)
{
dev->pullup_duration = delay;
}
EXPORT_SYMBOL_GPL(w1_next_pullup);
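/*
 * Illustrative sketch (not part of this file): requesting a strong pullup
 * for the next write, as a parasite-powered temperature sensor needs during
 * a conversion. The 0x44 convert command and 750 ms duration follow the
 * DS18B20 datasheet but are only an example here.
 */
#if 0
static void my_start_conversion(struct w1_slave *sl)
{
	w1_next_pullup(sl->master, 750);	/* one shot, applies to the next write */
	w1_write_8(sl->master, 0x44);		/* pullup (or sleep) happens after this byte */
}
#endif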
| linux-master | drivers/w1/w1_io.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/hwmon.h>
#include <linux/of.h>
#include <linux/atomic.h>
#include "w1_internal.h"
#include "w1_netlink.h"
#define W1_FAMILY_DEFAULT 0
#define W1_FAMILY_DS28E04 0x1C /* for crc quirk */
static int w1_timeout = 10;
module_param_named(timeout, w1_timeout, int, 0);
MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
static int w1_timeout_us;
module_param_named(timeout_us, w1_timeout_us, int, 0);
MODULE_PARM_DESC(timeout_us,
"time in microseconds between automatic slave searches");
/* A search stops when w1_max_slave_count devices have been found in that
* search. The next search will start over and detect the same set of devices
* on a static 1-wire bus. Memory is not allocated based on this number, just
* on the number of devices known to the kernel. Having a high number does not
* consume additional resources. As a special case, if there is only one
* device on the network and w1_max_slave_count is set to 1, the device id can
* be read directly skipping the normal slower search process.
*/
int w1_max_slave_count = 64;
module_param_named(max_slave_count, w1_max_slave_count, int, 0);
MODULE_PARM_DESC(max_slave_count,
"maximum number of slaves detected in a search");
int w1_max_slave_ttl = 10;
module_param_named(slave_ttl, w1_max_slave_ttl, int, 0);
MODULE_PARM_DESC(slave_ttl,
"Number of searches not seeing a slave before it will be removed");
DEFINE_MUTEX(w1_mlock);
LIST_HEAD(w1_masters);
static int w1_master_probe(struct device *dev)
{
return -ENODEV;
}
static void w1_master_release(struct device *dev)
{
struct w1_master *md = dev_to_w1_master(dev);
dev_dbg(dev, "%s: Releasing %s.\n", __func__, md->name);
memset(md, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master));
kfree(md);
}
static void w1_slave_release(struct device *dev)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
dev_dbg(dev, "%s: Releasing %s [%p]\n", __func__, sl->name, sl);
w1_family_put(sl->family);
sl->master->slave_count--;
}
static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
return sprintf(buf, "%s\n", sl->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
ssize_t count = sizeof(sl->reg_num);
memcpy(buf, (u8 *)&sl->reg_num, count);
return count;
}
static DEVICE_ATTR_RO(id);
static struct attribute *w1_slave_attrs[] = {
&dev_attr_name.attr,
&dev_attr_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(w1_slave);
/* Default family */
static ssize_t rw_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
mutex_lock(&sl->master->mutex);
if (w1_reset_select_slave(sl)) {
count = 0;
goto out_up;
}
w1_write_block(sl->master, buf, count);
out_up:
mutex_unlock(&sl->master->mutex);
return count;
}
static ssize_t rw_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
mutex_lock(&sl->master->mutex);
w1_read_block(sl->master, buf, count);
mutex_unlock(&sl->master->mutex);
return count;
}
static BIN_ATTR_RW(rw, PAGE_SIZE);
static struct bin_attribute *w1_slave_bin_attrs[] = {
&bin_attr_rw,
NULL,
};
static const struct attribute_group w1_slave_default_group = {
.bin_attrs = w1_slave_bin_attrs,
};
static const struct attribute_group *w1_slave_default_groups[] = {
&w1_slave_default_group,
NULL,
};
static const struct w1_family_ops w1_default_fops = {
.groups = w1_slave_default_groups,
};
static struct w1_family w1_default_family = {
.fops = &w1_default_fops,
};
static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env);
static struct bus_type w1_bus_type = {
.name = "w1",
.uevent = w1_uevent,
};
struct device_driver w1_master_driver = {
.name = "w1_master_driver",
.bus = &w1_bus_type,
.probe = w1_master_probe,
};
struct device w1_master_device = {
.parent = NULL,
.bus = &w1_bus_type,
.init_name = "w1 bus master",
.driver = &w1_master_driver,
.release = &w1_master_release
};
static struct device_driver w1_slave_driver = {
.name = "w1_slave_driver",
.bus = &w1_bus_type,
};
#if 0
struct device w1_slave_device = {
.parent = NULL,
.bus = &w1_bus_type,
.init_name = "w1 bus slave",
.driver = &w1_slave_driver,
.release = &w1_slave_release
};
#endif /* 0 */
static ssize_t w1_master_attribute_show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
ssize_t count;
mutex_lock(&md->mutex);
count = sprintf(buf, "%s\n", md->name);
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_store_search(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
long tmp;
struct w1_master *md = dev_to_w1_master(dev);
int ret;
ret = kstrtol(buf, 0, &tmp);
if (ret)
return ret;
mutex_lock(&md->mutex);
md->search_count = tmp;
mutex_unlock(&md->mutex);
/* Only wake if it is going to be searching. */
if (tmp)
wake_up_process(md->thread);
return count;
}
static ssize_t w1_master_attribute_show_search(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
ssize_t count;
mutex_lock(&md->mutex);
count = sprintf(buf, "%d\n", md->search_count);
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_store_pullup(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
long tmp;
struct w1_master *md = dev_to_w1_master(dev);
int ret;
ret = kstrtol(buf, 0, &tmp);
if (ret)
return ret;
mutex_lock(&md->mutex);
md->enable_pullup = tmp;
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_show_pullup(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
ssize_t count;
mutex_lock(&md->mutex);
count = sprintf(buf, "%d\n", md->enable_pullup);
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_show_pointer(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
ssize_t count;
mutex_lock(&md->mutex);
count = sprintf(buf, "0x%p\n", md->bus_master);
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", w1_timeout);
}
static ssize_t w1_master_attribute_show_timeout_us(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", w1_timeout_us);
}
static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int tmp;
struct w1_master *md = dev_to_w1_master(dev);
if (kstrtoint(buf, 0, &tmp) || tmp < 1)
return -EINVAL;
mutex_lock(&md->mutex);
md->max_slave_count = tmp;
/* allow each time the max_slave_count is updated */
clear_bit(W1_WARN_MAX_COUNT, &md->flags);
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_show_max_slave_count(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
ssize_t count;
mutex_lock(&md->mutex);
count = sprintf(buf, "%d\n", md->max_slave_count);
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_show_attempts(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
ssize_t count;
mutex_lock(&md->mutex);
count = sprintf(buf, "%lu\n", md->attempts);
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_show_slave_count(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
ssize_t count;
mutex_lock(&md->mutex);
count = sprintf(buf, "%d\n", md->slave_count);
mutex_unlock(&md->mutex);
return count;
}
static ssize_t w1_master_attribute_show_slaves(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
int c = PAGE_SIZE;
struct list_head *ent, *n;
struct w1_slave *sl = NULL;
mutex_lock(&md->list_mutex);
list_for_each_safe(ent, n, &md->slist) {
sl = list_entry(ent, struct w1_slave, w1_slave_entry);
c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name);
}
if (!sl)
c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
mutex_unlock(&md->list_mutex);
return PAGE_SIZE - c;
}
static ssize_t w1_master_attribute_show_add(struct device *dev,
struct device_attribute *attr, char *buf)
{
int c = PAGE_SIZE;
c -= snprintf(buf+PAGE_SIZE - c, c,
"write device id xx-xxxxxxxxxxxx to add slave\n");
return PAGE_SIZE - c;
}
static int w1_atoreg_num(struct device *dev, const char *buf, size_t count,
struct w1_reg_num *rn)
{
unsigned int family;
unsigned long long id;
int i;
u64 rn64_le;
/* The CRC value isn't read from the user because the sysfs directory
* doesn't include it and most messages from the bus search don't
* print it either. It would be unreasonable for the user to then
* provide it.
*/
const char *error_msg = "bad slave string format, expecting "
"ff-dddddddddddd\n";
if (buf[2] != '-') {
dev_err(dev, "%s", error_msg);
return -EINVAL;
}
i = sscanf(buf, "%02x-%012llx", &family, &id);
if (i != 2) {
dev_err(dev, "%s", error_msg);
return -EINVAL;
}
rn->family = family;
rn->id = id;
rn64_le = cpu_to_le64(*(u64 *)rn);
rn->crc = w1_calc_crc8((u8 *)&rn64_le, 7);
#if 0
dev_info(dev, "With CRC device is %02x.%012llx.%02x.\n",
rn->family, (unsigned long long)rn->id, rn->crc);
#endif
return 0;
}
/* Searches the slaves in the w1_master and returns a pointer or NULL.
* Note: must not hold list_mutex
*/
struct w1_slave *w1_slave_search_device(struct w1_master *dev,
struct w1_reg_num *rn)
{
struct w1_slave *sl;
mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
if (sl->reg_num.family == rn->family &&
sl->reg_num.id == rn->id &&
sl->reg_num.crc == rn->crc) {
mutex_unlock(&dev->list_mutex);
return sl;
}
}
mutex_unlock(&dev->list_mutex);
return NULL;
}
static ssize_t w1_master_attribute_store_add(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct w1_master *md = dev_to_w1_master(dev);
struct w1_reg_num rn;
struct w1_slave *sl;
ssize_t result = count;
if (w1_atoreg_num(dev, buf, count, &rn))
return -EINVAL;
mutex_lock(&md->mutex);
sl = w1_slave_search_device(md, &rn);
	/* It would be nice to do a targeted search on the one-wire bus
* for the new device to see if it is out there or not. But the
* current search doesn't support that.
*/
if (sl) {
dev_info(dev, "Device %s already exists\n", sl->name);
result = -EINVAL;
} else {
w1_attach_slave_device(md, &rn);
}
mutex_unlock(&md->mutex);
return result;
}
static ssize_t w1_master_attribute_show_remove(struct device *dev,
struct device_attribute *attr, char *buf)
{
int c = PAGE_SIZE;
c -= snprintf(buf+PAGE_SIZE - c, c,
"write device id xx-xxxxxxxxxxxx to remove slave\n");
return PAGE_SIZE - c;
}
static ssize_t w1_master_attribute_store_remove(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct w1_master *md = dev_to_w1_master(dev);
struct w1_reg_num rn;
struct w1_slave *sl;
ssize_t result;
if (w1_atoreg_num(dev, buf, count, &rn))
return -EINVAL;
mutex_lock(&md->mutex);
sl = w1_slave_search_device(md, &rn);
if (sl) {
result = w1_slave_detach(sl);
/* refcnt 0 means it was detached in the call */
if (result == 0)
result = count;
} else {
		dev_info(dev, "Device %02x-%012llx doesn't exist\n", rn.family,
(unsigned long long)rn.id);
result = -EINVAL;
}
mutex_unlock(&md->mutex);
return result;
}
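/*
 * Illustrative usage (not part of this file): from user space a slave can be
 * added or removed by id through the attributes defined below, e.g. (the bus
 * number and device id are made up):
 *   echo 28-0000012345678 > /sys/bus/w1/devices/w1_bus_master1/w1_master_add
 *   echo 28-0000012345678 > /sys/bus/w1/devices/w1_bus_master1/w1_master_remove
 */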
#define W1_MASTER_ATTR_RO(_name, _mode) \
struct device_attribute w1_master_attribute_##_name = \
__ATTR(w1_master_##_name, _mode, \
w1_master_attribute_show_##_name, NULL)
#define W1_MASTER_ATTR_RW(_name, _mode) \
struct device_attribute w1_master_attribute_##_name = \
__ATTR(w1_master_##_name, _mode, \
w1_master_attribute_show_##_name, \
w1_master_attribute_store_##_name)
static W1_MASTER_ATTR_RO(name, S_IRUGO);
static W1_MASTER_ATTR_RO(slaves, S_IRUGO);
static W1_MASTER_ATTR_RO(slave_count, S_IRUGO);
static W1_MASTER_ATTR_RW(max_slave_count, S_IRUGO | S_IWUSR | S_IWGRP);
static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
static W1_MASTER_ATTR_RO(timeout_us, S_IRUGO);
static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP);
static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP);
static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUSR | S_IWGRP);
static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUSR | S_IWGRP);
static struct attribute *w1_master_default_attrs[] = {
&w1_master_attribute_name.attr,
&w1_master_attribute_slaves.attr,
&w1_master_attribute_slave_count.attr,
&w1_master_attribute_max_slave_count.attr,
&w1_master_attribute_attempts.attr,
&w1_master_attribute_timeout.attr,
&w1_master_attribute_timeout_us.attr,
&w1_master_attribute_pointer.attr,
&w1_master_attribute_search.attr,
&w1_master_attribute_pullup.attr,
&w1_master_attribute_add.attr,
&w1_master_attribute_remove.attr,
NULL
};
static const struct attribute_group w1_master_defattr_group = {
.attrs = w1_master_default_attrs,
};
int w1_create_master_attributes(struct w1_master *master)
{
return sysfs_create_group(&master->dev.kobj, &w1_master_defattr_group);
}
void w1_destroy_master_attributes(struct w1_master *master)
{
sysfs_remove_group(&master->dev.kobj, &w1_master_defattr_group);
}
static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct w1_master *md = NULL;
const struct w1_slave *sl = NULL;
const char *event_owner, *name;
int err = 0;
if (dev->driver == &w1_master_driver) {
md = container_of(dev, struct w1_master, dev);
event_owner = "master";
name = md->name;
} else if (dev->driver == &w1_slave_driver) {
sl = container_of(dev, struct w1_slave, dev);
event_owner = "slave";
name = sl->name;
} else {
dev_dbg(dev, "Unknown event.\n");
return -EINVAL;
}
dev_dbg(dev, "Hotplug event for %s %s, bus_id=%s.\n",
event_owner, name, dev_name(dev));
if (dev->driver != &w1_slave_driver || !sl)
goto end;
err = add_uevent_var(env, "W1_FID=%02X", sl->reg_num.family);
if (err)
goto end;
err = add_uevent_var(env, "W1_SLAVE_ID=%024LX",
(unsigned long long)sl->reg_num.id);
end:
return err;
}
static int w1_family_notify(unsigned long action, struct w1_slave *sl)
{
const struct w1_family_ops *fops;
int err;
fops = sl->family->fops;
if (!fops)
return 0;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
/* if the family driver needs to initialize something... */
if (fops->add_slave) {
err = fops->add_slave(sl);
if (err < 0) {
dev_err(&sl->dev,
"add_slave() call failed. err=%d\n",
err);
return err;
}
}
if (fops->groups) {
err = sysfs_create_groups(&sl->dev.kobj, fops->groups);
if (err) {
dev_err(&sl->dev,
"sysfs group creation failed. err=%d\n",
err);
return err;
}
}
if (IS_REACHABLE(CONFIG_HWMON) && fops->chip_info) {
struct device *hwmon
= hwmon_device_register_with_info(&sl->dev,
"w1_slave_temp", sl,
fops->chip_info,
NULL);
if (IS_ERR(hwmon)) {
dev_warn(&sl->dev,
"could not create hwmon device\n");
} else {
sl->hwmon = hwmon;
}
}
break;
case BUS_NOTIFY_DEL_DEVICE:
if (IS_REACHABLE(CONFIG_HWMON) && fops->chip_info &&
sl->hwmon)
hwmon_device_unregister(sl->hwmon);
if (fops->remove_slave)
sl->family->fops->remove_slave(sl);
if (fops->groups)
sysfs_remove_groups(&sl->dev.kobj, fops->groups);
break;
}
return 0;
}
static int __w1_attach_slave_device(struct w1_slave *sl)
{
int err;
sl->dev.parent = &sl->master->dev;
sl->dev.driver = &w1_slave_driver;
sl->dev.bus = &w1_bus_type;
sl->dev.release = &w1_slave_release;
sl->dev.groups = w1_slave_groups;
sl->dev.of_node = of_find_matching_node(sl->master->dev.of_node,
sl->family->of_match_table);
dev_set_name(&sl->dev, "%02x-%012llx",
(unsigned int) sl->reg_num.family,
(unsigned long long) sl->reg_num.id);
snprintf(&sl->name[0], sizeof(sl->name),
"%02x-%012llx",
(unsigned int) sl->reg_num.family,
(unsigned long long) sl->reg_num.id);
dev_dbg(&sl->dev, "%s: registering %s as %p.\n", __func__,
dev_name(&sl->dev), sl);
/* suppress for w1_family_notify before sending KOBJ_ADD */
dev_set_uevent_suppress(&sl->dev, true);
err = device_register(&sl->dev);
if (err < 0) {
dev_err(&sl->dev,
"Device registration [%s] failed. err=%d\n",
dev_name(&sl->dev), err);
of_node_put(sl->dev.of_node);
put_device(&sl->dev);
return err;
}
w1_family_notify(BUS_NOTIFY_ADD_DEVICE, sl);
dev_set_uevent_suppress(&sl->dev, false);
kobject_uevent(&sl->dev.kobj, KOBJ_ADD);
mutex_lock(&sl->master->list_mutex);
list_add_tail(&sl->w1_slave_entry, &sl->master->slist);
mutex_unlock(&sl->master->list_mutex);
return 0;
}
int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
{
struct w1_slave *sl;
struct w1_family *f;
int err;
struct w1_netlink_msg msg;
sl = kzalloc(sizeof(struct w1_slave), GFP_KERNEL);
if (!sl) {
dev_err(&dev->dev,
"%s: failed to allocate new slave device.\n",
__func__);
return -ENOMEM;
}
sl->owner = THIS_MODULE;
sl->master = dev;
set_bit(W1_SLAVE_ACTIVE, &sl->flags);
memset(&msg, 0, sizeof(msg));
memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
atomic_set(&sl->refcnt, 1);
atomic_inc(&sl->master->refcnt);
dev->slave_count++;
dev_info(&dev->dev, "Attaching one wire slave %02x.%012llx crc %02x\n",
rn->family, (unsigned long long)rn->id, rn->crc);
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
request_module("w1-family-0x%02X", rn->family);
mutex_lock(&dev->mutex);
spin_lock(&w1_flock);
f = w1_family_registered(rn->family);
if (!f) {
f= &w1_default_family;
dev_info(&dev->dev, "Family %x for %02x.%012llx.%02x is not registered.\n",
rn->family, rn->family,
(unsigned long long)rn->id, rn->crc);
}
__w1_family_get(f);
spin_unlock(&w1_flock);
sl->family = f;
err = __w1_attach_slave_device(sl);
if (err < 0) {
dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
sl->name);
dev->slave_count--;
w1_family_put(sl->family);
atomic_dec(&sl->master->refcnt);
kfree(sl);
return err;
}
sl->ttl = dev->slave_ttl;
memcpy(msg.id.id, rn, sizeof(msg.id));
msg.type = W1_SLAVE_ADD;
w1_netlink_send(dev, &msg);
return 0;
}
int w1_unref_slave(struct w1_slave *sl)
{
struct w1_master *dev = sl->master;
int refcnt;
mutex_lock(&dev->list_mutex);
refcnt = atomic_sub_return(1, &sl->refcnt);
if (refcnt == 0) {
struct w1_netlink_msg msg;
dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__,
sl->name, sl);
list_del(&sl->w1_slave_entry);
memset(&msg, 0, sizeof(msg));
memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id));
msg.type = W1_SLAVE_REMOVE;
w1_netlink_send(sl->master, &msg);
w1_family_notify(BUS_NOTIFY_DEL_DEVICE, sl);
device_unregister(&sl->dev);
#ifdef DEBUG
memset(sl, 0, sizeof(*sl));
#endif
kfree(sl);
}
atomic_dec(&dev->refcnt);
mutex_unlock(&dev->list_mutex);
return refcnt;
}
int w1_slave_detach(struct w1_slave *sl)
{
/* Only detach a slave once as it decreases the refcnt each time. */
int destroy_now;
mutex_lock(&sl->master->list_mutex);
destroy_now = !test_bit(W1_SLAVE_DETACH, &sl->flags);
set_bit(W1_SLAVE_DETACH, &sl->flags);
mutex_unlock(&sl->master->list_mutex);
if (destroy_now)
destroy_now = !w1_unref_slave(sl);
return destroy_now ? 0 : -EBUSY;
}
struct w1_master *w1_search_master_id(u32 id)
{
struct w1_master *dev = NULL, *iter;
mutex_lock(&w1_mlock);
list_for_each_entry(iter, &w1_masters, w1_master_entry) {
if (iter->id == id) {
dev = iter;
atomic_inc(&iter->refcnt);
break;
}
}
mutex_unlock(&w1_mlock);
return dev;
}
struct w1_slave *w1_search_slave(struct w1_reg_num *id)
{
struct w1_master *dev;
struct w1_slave *sl = NULL, *iter;
mutex_lock(&w1_mlock);
list_for_each_entry(dev, &w1_masters, w1_master_entry) {
mutex_lock(&dev->list_mutex);
list_for_each_entry(iter, &dev->slist, w1_slave_entry) {
if (iter->reg_num.family == id->family &&
iter->reg_num.id == id->id &&
iter->reg_num.crc == id->crc) {
sl = iter;
atomic_inc(&dev->refcnt);
atomic_inc(&iter->refcnt);
break;
}
}
mutex_unlock(&dev->list_mutex);
if (sl)
break;
}
mutex_unlock(&w1_mlock);
return sl;
}
void w1_reconnect_slaves(struct w1_family *f, int attach)
{
struct w1_slave *sl, *sln;
struct w1_master *dev;
mutex_lock(&w1_mlock);
list_for_each_entry(dev, &w1_masters, w1_master_entry) {
dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
"for family %02x.\n", dev->name, f->fid);
mutex_lock(&dev->mutex);
mutex_lock(&dev->list_mutex);
list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
			/* If it is a new family, slaves that currently use
			 * the default family driver and are of that family
			 * will be reconnected. If the family is going away,
			 * devices matching that family are reconnected.
*/
if ((attach && sl->family->fid == W1_FAMILY_DEFAULT
&& sl->reg_num.family == f->fid) ||
(!attach && sl->family->fid == f->fid)) {
struct w1_reg_num rn;
mutex_unlock(&dev->list_mutex);
memcpy(&rn, &sl->reg_num, sizeof(rn));
/* If it was already in use let the automatic
* scan pick it up again later.
*/
if (!w1_slave_detach(sl))
w1_attach_slave_device(dev, &rn);
mutex_lock(&dev->list_mutex);
}
}
dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
"has been finished.\n", dev->name);
mutex_unlock(&dev->list_mutex);
mutex_unlock(&dev->mutex);
}
mutex_unlock(&w1_mlock);
}
static int w1_addr_crc_is_valid(struct w1_master *dev, u64 rn)
{
u64 rn_le = cpu_to_le64(rn);
struct w1_reg_num *tmp = (struct w1_reg_num *)&rn;
u8 crc;
crc = w1_calc_crc8((u8 *)&rn_le, 7);
/* quirk:
* DS28E04 (1w eeprom) has strapping pins to change
* address, but will not update the crc. So normal rules
* for consistent w1 addresses are violated. We test
* with the 7 LSBs of the address forced high.
*
* (char*)&rn_le = { family, addr_lsb, ..., addr_msb, crc }.
*/
if (crc != tmp->crc && tmp->family == W1_FAMILY_DS28E04) {
u64 corr_le = rn_le;
((u8 *)&corr_le)[1] |= 0x7f;
crc = w1_calc_crc8((u8 *)&corr_le, 7);
dev_info(&dev->dev, "DS28E04 crc workaround on %02x.%012llx.%02x\n",
tmp->family, (unsigned long long)tmp->id, tmp->crc);
}
if (crc != tmp->crc) {
dev_dbg(&dev->dev, "w1 addr crc mismatch: %02x.%012llx.%02x != 0x%02x.\n",
tmp->family, (unsigned long long)tmp->id, tmp->crc, crc);
return 0;
}
return 1;
}
void w1_slave_found(struct w1_master *dev, u64 rn)
{
struct w1_slave *sl;
struct w1_reg_num *tmp;
atomic_inc(&dev->refcnt);
tmp = (struct w1_reg_num *) &rn;
sl = w1_slave_search_device(dev, tmp);
if (sl) {
set_bit(W1_SLAVE_ACTIVE, &sl->flags);
} else {
if (rn && w1_addr_crc_is_valid(dev, rn))
w1_attach_slave_device(dev, tmp);
}
atomic_dec(&dev->refcnt);
}
/**
* w1_search() - Performs a ROM Search & registers any devices found.
* @dev: The master device to search
* @search_type: W1_SEARCH to search all devices, or W1_ALARM_SEARCH
* to return only devices in the alarmed state
* @cb: Function to call when a device is found
*
* The 1-wire search is a simple binary tree search.
* For each bit of the address, we read two bits and write one bit.
 * The bit written will put to sleep all devices that don't match that bit.
* When the two reads differ, the direction choice is obvious.
* When both bits are 0, we must choose a path to take.
* When we can scan all 64 bits without having to choose a path, we are done.
*
* See "Application note 187 1-wire search algorithm" at www.maxim-ic.com
*
*/
void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb)
{
u64 last_rn, rn, tmp64;
int i, slave_count = 0;
int last_zero, last_device;
int search_bit, desc_bit;
u8 triplet_ret = 0;
search_bit = 0;
rn = dev->search_id;
last_rn = 0;
last_device = 0;
last_zero = -1;
desc_bit = 64;
while ( !last_device && (slave_count++ < dev->max_slave_count) ) {
last_rn = rn;
rn = 0;
/*
* Reset bus and all 1-wire device state machines
* so they can respond to our requests.
*
* Return 0 - device(s) present, 1 - no devices present.
*/
mutex_lock(&dev->bus_mutex);
if (w1_reset_bus(dev)) {
mutex_unlock(&dev->bus_mutex);
dev_dbg(&dev->dev, "No devices present on the wire.\n");
break;
}
/* Do fast search on single slave bus */
if (dev->max_slave_count == 1) {
int rv;
w1_write_8(dev, W1_READ_ROM);
rv = w1_read_block(dev, (u8 *)&rn, 8);
mutex_unlock(&dev->bus_mutex);
if (rv == 8 && rn)
cb(dev, rn);
break;
}
/* Start the search */
w1_write_8(dev, search_type);
for (i = 0; i < 64; ++i) {
/* Determine the direction/search bit */
if (i == desc_bit)
search_bit = 1; /* took the 0 path last time, so take the 1 path */
else if (i > desc_bit)
search_bit = 0; /* take the 0 path on the next branch */
else
search_bit = ((last_rn >> i) & 0x1);
/* Read two bits and write one bit */
triplet_ret = w1_triplet(dev, search_bit);
/* quit if no device responded */
if ( (triplet_ret & 0x03) == 0x03 )
break;
/* If both directions were valid, and we took the 0 path... */
if (triplet_ret == 0)
last_zero = i;
/* extract the direction taken & update the device number */
tmp64 = (triplet_ret >> 2);
rn |= (tmp64 << i);
if (test_bit(W1_ABORT_SEARCH, &dev->flags)) {
mutex_unlock(&dev->bus_mutex);
dev_dbg(&dev->dev, "Abort w1_search\n");
return;
}
}
mutex_unlock(&dev->bus_mutex);
if ( (triplet_ret & 0x03) != 0x03 ) {
if ((desc_bit == last_zero) || (last_zero < 0)) {
last_device = 1;
dev->search_id = 0;
} else {
dev->search_id = rn;
}
desc_bit = last_zero;
cb(dev, rn);
}
if (!last_device && slave_count == dev->max_slave_count &&
!test_bit(W1_WARN_MAX_COUNT, &dev->flags)) {
/* Only max_slave_count will be scanned in a search,
* but it will start where it left off next search
* until all ids are identified and then it will start
* over. A continued search will report the previous
* last id as the first id (provided it is still on the
* bus).
*/
dev_info(&dev->dev, "%s: max_slave_count %d reached, "
"will continue next search.\n", __func__,
dev->max_slave_count);
set_bit(W1_WARN_MAX_COUNT, &dev->flags);
}
}
}
void w1_search_process_cb(struct w1_master *dev, u8 search_type,
w1_slave_found_callback cb)
{
struct w1_slave *sl, *sln;
mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry)
clear_bit(W1_SLAVE_ACTIVE, &sl->flags);
mutex_unlock(&dev->list_mutex);
w1_search_devices(dev, search_type, cb);
mutex_lock(&dev->list_mutex);
list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) {
mutex_unlock(&dev->list_mutex);
w1_slave_detach(sl);
mutex_lock(&dev->list_mutex);
}
else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags))
sl->ttl = dev->slave_ttl;
}
mutex_unlock(&dev->list_mutex);
if (dev->search_count > 0)
dev->search_count--;
}
static void w1_search_process(struct w1_master *dev, u8 search_type)
{
w1_search_process_cb(dev, search_type, w1_slave_found);
}
/**
* w1_process_callbacks() - execute each dev->async_list callback entry
* @dev: w1_master device
*
* The w1 master list_mutex must be held.
*
 * Return: 1 if there were commands to execute, 0 otherwise
*/
int w1_process_callbacks(struct w1_master *dev)
{
int ret = 0;
struct w1_async_cmd *async_cmd, *async_n;
/* The list can be added to in another thread, loop until it is empty */
while (!list_empty(&dev->async_list)) {
list_for_each_entry_safe(async_cmd, async_n, &dev->async_list,
async_entry) {
/* drop the lock, if it is a search it can take a long
* time */
mutex_unlock(&dev->list_mutex);
async_cmd->cb(dev, async_cmd);
ret = 1;
mutex_lock(&dev->list_mutex);
}
}
return ret;
}
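/*
 * Illustrative sketch (not part of this file): how an asynchronous command
 * can be queued for w1_process()/w1_process_callbacks() to run, mirroring
 * what the netlink code does. The my_* wrapper type and fields are
 * hypothetical.
 */
#if 0
struct my_async_req {
	struct w1_async_cmd async;
	int arg;
};
static void my_async_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
	struct my_async_req *req = container_of(async_cmd, struct my_async_req, async);
	/* do the work; list_mutex is not held while callbacks run */
	mutex_lock(&dev->list_mutex);
	list_del(&async_cmd->async_entry);
	mutex_unlock(&dev->list_mutex);
	kfree(req);
}
static void my_queue_async(struct w1_master *dev, struct my_async_req *req)
{
	req->async.cb = my_async_cb;
	mutex_lock(&dev->list_mutex);
	list_add_tail(&req->async.async_entry, &dev->async_list);
	wake_up_process(dev->thread);
	mutex_unlock(&dev->list_mutex);
}
#endif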
int w1_process(void *data)
{
struct w1_master *dev = (struct w1_master *) data;
/* As long as w1_timeout is only set by a module parameter the sleep
* time can be calculated in jiffies once.
*/
const unsigned long jtime =
usecs_to_jiffies(w1_timeout * 1000000 + w1_timeout_us);
/* remainder if it woke up early */
unsigned long jremain = 0;
atomic_inc(&dev->refcnt);
for (;;) {
if (!jremain && dev->search_count) {
mutex_lock(&dev->mutex);
w1_search_process(dev, W1_SEARCH);
mutex_unlock(&dev->mutex);
}
mutex_lock(&dev->list_mutex);
		/* Note, w1_process_callbacks drops the lock while processing,
* but locks it again before returning.
*/
if (!w1_process_callbacks(dev) && jremain) {
/* a wake up is either to stop the thread, process
* callbacks, or search, it isn't process callbacks, so
* schedule a search.
*/
jremain = 1;
}
__set_current_state(TASK_INTERRUPTIBLE);
		/* hold list_mutex until after interruptible to prevent losing
* the wakeup signal when async_cmd is added.
*/
mutex_unlock(&dev->list_mutex);
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
break;
}
/* Only sleep when the search is active. */
if (dev->search_count) {
if (!jremain)
jremain = jtime;
jremain = schedule_timeout(jremain);
}
else
schedule();
}
atomic_dec(&dev->refcnt);
return 0;
}
static int __init w1_init(void)
{
int retval;
pr_info("Driver for 1-wire Dallas network protocol.\n");
w1_init_netlink();
retval = bus_register(&w1_bus_type);
if (retval) {
pr_err("Failed to register bus. err=%d.\n", retval);
goto err_out_exit_init;
}
retval = driver_register(&w1_master_driver);
if (retval) {
pr_err("Failed to register master driver. err=%d.\n",
retval);
goto err_out_bus_unregister;
}
retval = driver_register(&w1_slave_driver);
if (retval) {
pr_err("Failed to register slave driver. err=%d.\n",
retval);
goto err_out_master_unregister;
}
return 0;
#if 0
/* For undoing the slave register if there was a step after it. */
err_out_slave_unregister:
driver_unregister(&w1_slave_driver);
#endif
err_out_master_unregister:
driver_unregister(&w1_master_driver);
err_out_bus_unregister:
bus_unregister(&w1_bus_type);
err_out_exit_init:
return retval;
}
static void __exit w1_fini(void)
{
struct w1_master *dev, *n;
/* Set netlink removal messages and some cleanup */
list_for_each_entry_safe(dev, n, &w1_masters, w1_master_entry)
__w1_remove_master_device(dev);
w1_fini_netlink();
driver_unregister(&w1_slave_driver);
driver_unregister(&w1_master_driver);
bus_unregister(&w1_bus_type);
}
module_init(w1_init);
module_exit(w1_fini);
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
MODULE_LICENSE("GPL");
| linux-master | drivers/w1/w1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2003 Evgeniy Polyakov <[email protected]>
*/
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include "w1_internal.h"
#include "w1_netlink.h"
#if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))
/* Bundle together everything required to process a request in one memory
* allocation.
*/
struct w1_cb_block {
atomic_t refcnt;
u32 portid; /* Sending process port ID */
/* maximum value for first_cn->len */
u16 maxlen;
/* pointers to building up the reply message */
struct cn_msg *first_cn; /* fixed once the structure is populated */
	struct cn_msg *cn;		/* advances as cn_msg is appended */
	struct w1_netlink_msg *msg;	/* advances as w1_netlink_msg is appended */
	struct w1_netlink_cmd *cmd;	/* advances as cmds are appended */
	struct w1_netlink_msg *cur_msg;	/* the message currently being processed */
/* copy of the original request follows */
struct cn_msg request_cn;
/* followed by variable length:
* cn_msg, data (w1_netlink_msg and w1_netlink_cmd)
* one or more struct w1_cb_node
* reply first_cn, data (w1_netlink_msg and w1_netlink_cmd)
*/
};
struct w1_cb_node {
struct w1_async_cmd async;
/* pointers within w1_cb_block and cn data */
struct w1_cb_block *block;
struct w1_netlink_msg *msg;
struct w1_slave *sl;
struct w1_master *dev;
};
/**
* w1_reply_len() - calculate current reply length, compare to maxlen
* @block: block to calculate
*
* Calculates the current message length including possible multiple
 * cn_msg and data, excludes the first sizeof(struct cn_msg). Directly
 * comparable to maxlen and usable to send the message.
*/
static u16 w1_reply_len(struct w1_cb_block *block)
{
if (!block->cn)
return 0;
return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
}
static void w1_unref_block(struct w1_cb_block *block)
{
if (atomic_sub_return(1, &block->refcnt) == 0) {
u16 len = w1_reply_len(block);
if (len) {
cn_netlink_send_mult(block->first_cn, len,
block->portid, 0,
GFP_KERNEL, NULL, NULL);
}
kfree(block);
}
}
/**
* w1_reply_make_space() - send message if needed to make space
* @block: block to make space on
* @space: how many bytes requested
*
* Verify there is enough room left for the caller to add "space" bytes to the
 * message; if there isn't, send the message and reset.
*/
static void w1_reply_make_space(struct w1_cb_block *block, u16 space)
{
u16 len = w1_reply_len(block);
if (len + space >= block->maxlen) {
cn_netlink_send_mult(block->first_cn, len, block->portid,
0, GFP_KERNEL, NULL, NULL);
block->first_cn->len = 0;
block->cn = NULL;
block->msg = NULL;
block->cmd = NULL;
}
}
/* Early send when replies aren't bundled. */
static void w1_netlink_check_send(struct w1_cb_block *block)
{
if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn)
w1_reply_make_space(block, block->maxlen);
}
/**
* w1_netlink_setup_msg() - prepare to write block->msg
* @block: block to operate on
* @ack: determines if cn can be reused
*
* block->cn will be setup with the correct ack, advancing if needed
* block->cn->len does not include space for block->msg
* block->msg advances but remains uninitialized
*/
static void w1_netlink_setup_msg(struct w1_cb_block *block, u32 ack)
{
if (block->cn && block->cn->ack == ack) {
block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len);
} else {
/* advance or set to data */
if (block->cn)
block->cn = (struct cn_msg *)(block->cn->data +
block->cn->len);
else
block->cn = block->first_cn;
memcpy(block->cn, &block->request_cn, sizeof(*block->cn));
block->cn->len = 0;
block->cn->ack = ack;
block->msg = (struct w1_netlink_msg *)block->cn->data;
}
}
/* Append cmd to msg, include cmd->data as well. This is because
 * any following data goes with the command and, in the case of a read,
 * holds the results.
*/
static void w1_netlink_queue_cmd(struct w1_cb_block *block,
struct w1_netlink_cmd *cmd)
{
u32 space;
w1_reply_make_space(block, sizeof(struct cn_msg) +
sizeof(struct w1_netlink_msg) + sizeof(*cmd) + cmd->len);
/* There's a status message sent after each command, so no point
* in trying to bundle this cmd after an existing one, because
* there won't be one. Allocate and copy over a new cn_msg.
*/
w1_netlink_setup_msg(block, block->request_cn.seq + 1);
memcpy(block->msg, block->cur_msg, sizeof(*block->msg));
block->cn->len += sizeof(*block->msg);
block->msg->len = 0;
block->cmd = (struct w1_netlink_cmd *)(block->msg->data);
space = sizeof(*cmd) + cmd->len;
if (block->cmd != cmd)
memcpy(block->cmd, cmd, space);
block->cn->len += space;
block->msg->len += space;
}
/* Append req_msg and req_cmd, no other commands and no data from req_cmd are
* copied.
*/
static void w1_netlink_queue_status(struct w1_cb_block *block,
struct w1_netlink_msg *req_msg, struct w1_netlink_cmd *req_cmd,
int error)
{
u16 space = sizeof(struct cn_msg) + sizeof(*req_msg) + sizeof(*req_cmd);
w1_reply_make_space(block, space);
w1_netlink_setup_msg(block, block->request_cn.ack);
memcpy(block->msg, req_msg, sizeof(*req_msg));
block->cn->len += sizeof(*req_msg);
block->msg->len = 0;
block->msg->status = (u8)-error;
if (req_cmd) {
struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data;
memcpy(cmd, req_cmd, sizeof(*cmd));
block->cn->len += sizeof(*cmd);
block->msg->len += sizeof(*cmd);
cmd->len = 0;
}
w1_netlink_check_send(block);
}
/**
* w1_netlink_send_error() - sends the error message now
* @cn: original cn_msg
* @msg: original w1_netlink_msg
* @portid: where to send it
* @error: error status
*
* Use this when no block is available to queue the message to; cn and msg
* might not be contiguous.
*/
static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
int portid, int error)
{
struct {
struct cn_msg cn;
struct w1_netlink_msg msg;
} packet;
memcpy(&packet.cn, cn, sizeof(packet.cn));
memcpy(&packet.msg, msg, sizeof(packet.msg));
packet.cn.len = sizeof(packet.msg);
packet.msg.len = 0;
packet.msg.status = (u8)-error;
cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL);
}
/**
* w1_netlink_send() - sends w1 netlink notifications
* @dev: w1_master the event is associated with or for
* @msg: w1_netlink_msg message to be sent
*
* These are notifications generated by the kernel.
*/
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
{
struct {
struct cn_msg cn;
struct w1_netlink_msg msg;
} packet;
memset(&packet, 0, sizeof(packet));
packet.cn.id.idx = CN_W1_IDX;
packet.cn.id.val = CN_W1_VAL;
packet.cn.seq = dev->seq++;
packet.cn.len = sizeof(*msg);
memcpy(&packet.msg, msg, sizeof(*msg));
packet.msg.len = 0;
cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL);
}
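/* Editor's note: a minimal usage sketch (assumed, not taken verbatim from
* the w1 core). Kernel code announcing a slave event might fill in a
* w1_netlink_msg and hand it to w1_netlink_send(), e.g.:
*
* struct w1_netlink_msg msg;
*
* memset(&msg, 0, sizeof(msg));
* msg.type = W1_SLAVE_ADD;
* memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id.id));
* w1_netlink_send(sl->master, &msg);
*
* The cn_msg header, connector id and sequence number are filled in by
* w1_netlink_send() itself, as shown above.
*/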
static void w1_send_slave(struct w1_master *dev, u64 rn)
{
struct w1_cb_block *block = dev->priv;
struct w1_netlink_cmd *cache_cmd = block->cmd;
u64 *data;
w1_reply_make_space(block, sizeof(*data));
/* Add cmd back if the packet was sent */
if (!block->cmd) {
cache_cmd->len = 0;
w1_netlink_queue_cmd(block, cache_cmd);
}
data = (u64 *)(block->cmd->data + block->cmd->len);
*data = rn;
block->cn->len += sizeof(*data);
block->msg->len += sizeof(*data);
block->cmd->len += sizeof(*data);
}
static void w1_found_send_slave(struct w1_master *dev, u64 rn)
{
/* update kernel slave list */
w1_slave_found(dev, rn);
w1_send_slave(dev, rn);
}
/* Get the current slave list, or search (with or without alarm) */
static int w1_get_slaves(struct w1_master *dev, struct w1_netlink_cmd *req_cmd)
{
struct w1_slave *sl;
req_cmd->len = 0;
w1_netlink_queue_cmd(dev->priv, req_cmd);
if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
u64 rn;
mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
memcpy(&rn, &sl->reg_num, sizeof(rn));
w1_send_slave(dev, rn);
}
mutex_unlock(&dev->list_mutex);
} else {
w1_search_process_cb(dev, req_cmd->cmd == W1_CMD_ALARM_SEARCH ?
W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
}
return 0;
}
static int w1_process_command_io(struct w1_master *dev,
struct w1_netlink_cmd *cmd)
{
int err = 0;
switch (cmd->cmd) {
case W1_CMD_TOUCH:
w1_touch_block(dev, cmd->data, cmd->len);
w1_netlink_queue_cmd(dev->priv, cmd);
break;
case W1_CMD_READ:
w1_read_block(dev, cmd->data, cmd->len);
w1_netlink_queue_cmd(dev->priv, cmd);
break;
case W1_CMD_WRITE:
w1_write_block(dev, cmd->data, cmd->len);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int w1_process_command_addremove(struct w1_master *dev,
struct w1_netlink_cmd *cmd)
{
struct w1_slave *sl;
int err = 0;
struct w1_reg_num *id;
if (cmd->len != sizeof(*id))
return -EINVAL;
id = (struct w1_reg_num *)cmd->data;
sl = w1_slave_search_device(dev, id);
switch (cmd->cmd) {
case W1_CMD_SLAVE_ADD:
if (sl)
err = -EINVAL;
else
err = w1_attach_slave_device(dev, id);
break;
case W1_CMD_SLAVE_REMOVE:
if (sl)
w1_slave_detach(sl);
else
err = -EINVAL;
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int w1_process_command_master(struct w1_master *dev,
struct w1_netlink_cmd *req_cmd)
{
int err = -EINVAL;
/* drop bus_mutex for search (it does its own locking), and for add/remove,
* which doesn't use the bus
*/
switch (req_cmd->cmd) {
case W1_CMD_SEARCH:
case W1_CMD_ALARM_SEARCH:
case W1_CMD_LIST_SLAVES:
mutex_unlock(&dev->bus_mutex);
err = w1_get_slaves(dev, req_cmd);
mutex_lock(&dev->bus_mutex);
break;
case W1_CMD_READ:
case W1_CMD_WRITE:
case W1_CMD_TOUCH:
err = w1_process_command_io(dev, req_cmd);
break;
case W1_CMD_RESET:
err = w1_reset_bus(dev);
break;
case W1_CMD_SLAVE_ADD:
case W1_CMD_SLAVE_REMOVE:
mutex_unlock(&dev->bus_mutex);
mutex_lock(&dev->mutex);
err = w1_process_command_addremove(dev, req_cmd);
mutex_unlock(&dev->mutex);
mutex_lock(&dev->bus_mutex);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int w1_process_command_slave(struct w1_slave *sl,
struct w1_netlink_cmd *cmd)
{
dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
__func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id,
sl->reg_num.crc, cmd->cmd, cmd->len);
return w1_process_command_io(sl->master, cmd);
}
static int w1_process_command_root(struct cn_msg *req_cn, u32 portid)
{
struct w1_master *dev;
struct cn_msg *cn;
struct w1_netlink_msg *msg;
u32 *id;
cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!cn)
return -ENOMEM;
cn->id.idx = CN_W1_IDX;
cn->id.val = CN_W1_VAL;
cn->seq = req_cn->seq;
cn->ack = req_cn->seq + 1;
cn->len = sizeof(struct w1_netlink_msg);
msg = (struct w1_netlink_msg *)cn->data;
msg->type = W1_LIST_MASTERS;
msg->status = 0;
msg->len = 0;
id = (u32 *)msg->data;
mutex_lock(&w1_mlock);
list_for_each_entry(dev, &w1_masters, w1_master_entry) {
if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
cn_netlink_send(cn, portid, 0, GFP_KERNEL);
cn->len = sizeof(struct w1_netlink_msg);
msg->len = 0;
id = (u32 *)msg->data;
}
*id = dev->id;
msg->len += sizeof(*id);
cn->len += sizeof(*id);
id++;
}
cn_netlink_send(cn, portid, 0, GFP_KERNEL);
mutex_unlock(&w1_mlock);
kfree(cn);
return 0;
}
static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
async);
u16 mlen = node->msg->len;
u16 len;
int err = 0;
struct w1_slave *sl = node->sl;
struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data;
mutex_lock(&dev->bus_mutex);
dev->priv = node->block;
if (sl && w1_reset_select_slave(sl))
err = -ENODEV;
node->block->cur_msg = node->msg;
while (mlen && !err) {
if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
err = -E2BIG;
break;
}
if (sl)
err = w1_process_command_slave(sl, cmd);
else
err = w1_process_command_master(dev, cmd);
w1_netlink_check_send(node->block);
w1_netlink_queue_status(node->block, node->msg, cmd, err);
err = 0;
len = sizeof(*cmd) + cmd->len;
cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
mlen -= len;
}
if (!cmd || err)
w1_netlink_queue_status(node->block, node->msg, cmd, err);
/* ref taken in w1_search_slave or w1_search_master_id when building
* the block
*/
if (sl)
w1_unref_slave(sl);
else
atomic_dec(&dev->refcnt);
dev->priv = NULL;
mutex_unlock(&dev->bus_mutex);
mutex_lock(&dev->list_mutex);
list_del(&async_cmd->async_entry);
mutex_unlock(&dev->list_mutex);
w1_unref_block(node->block);
}
static void w1_list_count_cmds(struct w1_netlink_msg *msg, int *cmd_count,
u16 *slave_len)
{
struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)msg->data;
u16 mlen = msg->len;
u16 len;
int slave_list = 0;
while (mlen) {
if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen)
break;
switch (cmd->cmd) {
case W1_CMD_SEARCH:
case W1_CMD_ALARM_SEARCH:
case W1_CMD_LIST_SLAVES:
++slave_list;
}
++*cmd_count;
len = sizeof(*cmd) + cmd->len;
cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
mlen -= len;
}
if (slave_list) {
struct w1_master *dev = w1_search_master_id(msg->id.mst.id);
if (dev) {
/* Bytes, and likely an overestimate; even if it isn't,
* the results can still be split between packets.
*/
*slave_len += sizeof(struct w1_reg_num) * slave_list *
(dev->slave_count + dev->max_slave_count);
/* search incremented it */
atomic_dec(&dev->refcnt);
}
}
}
static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
{
struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
struct w1_slave *sl;
struct w1_master *dev;
u16 msg_len;
u16 slave_len = 0;
int err = 0;
struct w1_cb_block *block = NULL;
struct w1_cb_node *node = NULL;
int node_count = 0;
int cmd_count = 0;
/* If any unknown flag is set, let the application know; that way
* applications can detect the absence of features in kernels that
* don't know about them. http://lwn.net/Articles/587527/
*/
if (cn->flags & ~(W1_CN_BUNDLE)) {
w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL);
return;
}
/* Count the number of master or slave commands there are to allocate
* space for one cb_node each.
*/
msg_len = cn->len;
while (msg_len && !err) {
if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
err = -E2BIG;
break;
}
/* count messages for nodes and allocate any additional space
* required for slave lists
*/
if (msg->type == W1_MASTER_CMD || msg->type == W1_SLAVE_CMD) {
++node_count;
w1_list_count_cmds(msg, &cmd_count, &slave_len);
}
msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
msg = (struct w1_netlink_msg *)(((u8 *)msg) +
sizeof(struct w1_netlink_msg) + msg->len);
}
msg = (struct w1_netlink_msg *)(cn + 1);
if (node_count) {
int size;
int reply_size = sizeof(*cn) + cn->len + slave_len;
if (cn->flags & W1_CN_BUNDLE) {
/* bundling duplicates some of the messages */
reply_size += 2 * cmd_count * (sizeof(struct cn_msg) +
sizeof(struct w1_netlink_msg) +
sizeof(struct w1_netlink_cmd));
}
reply_size = min(CONNECTOR_MAX_MSG_SIZE, reply_size);
/* Allocate space for the block, a copy of the original message,
* one node per cmd to point into the original message, and
* space for replies, which is the original message size plus
* space for any slave list data and status messages.
* cn->len doesn't include itself, which is part of the block.
*/
size = /* block + original message */
sizeof(struct w1_cb_block) + sizeof(*cn) + cn->len +
/* space for nodes */
node_count * sizeof(struct w1_cb_node) +
/* replies */
sizeof(struct cn_msg) + reply_size;
block = kzalloc(size, GFP_KERNEL);
if (!block) {
/* if the system is already out of memory,
* (A) will this work, and (B) would it be better
* to not try?
*/
w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM);
return;
}
atomic_set(&block->refcnt, 1);
block->portid = nsp->portid;
block->request_cn = *cn;
memcpy(block->request_cn.data, cn->data, cn->len);
node = (struct w1_cb_node *)(block->request_cn.data + cn->len);
/* Sneaky: when not bundling, reply_size is the allocated space
* required for the reply. cn_msg isn't part of maxlen, so
* it should be reply_size - sizeof(struct cn_msg); however,
* when checking if there is enough space, w1_reply_make_space
* is called with the full message size including cn_msg,
* because it isn't known at that time whether an additional cn_msg
* will need to be allocated. So an extra cn_msg is added
* above in "size".
*/
block->maxlen = reply_size;
block->first_cn = (struct cn_msg *)(node + node_count);
memset(block->first_cn, 0, sizeof(*block->first_cn));
}
msg_len = cn->len;
while (msg_len && !err) {
dev = NULL;
sl = NULL;
if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
err = -E2BIG;
break;
}
/* execute on this thread, no need to process later */
if (msg->type == W1_LIST_MASTERS) {
err = w1_process_command_root(cn, nsp->portid);
goto out_cont;
}
/* All following message types require additional data,
* check here before references are taken.
*/
if (!msg->len) {
err = -EPROTO;
goto out_cont;
}
/* both search calls take references */
if (msg->type == W1_MASTER_CMD) {
dev = w1_search_master_id(msg->id.mst.id);
} else if (msg->type == W1_SLAVE_CMD) {
sl = w1_search_slave((struct w1_reg_num *)msg->id.id);
if (sl)
dev = sl->master;
} else {
pr_notice("%s: cn: %x.%x, wrong type: %u, len: %u.\n",
__func__, cn->id.idx, cn->id.val,
msg->type, msg->len);
err = -EPROTO;
goto out_cont;
}
if (!dev) {
err = -ENODEV;
goto out_cont;
}
err = 0;
atomic_inc(&block->refcnt);
node->async.cb = w1_process_cb;
node->block = block;
node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn +
(size_t)((u8 *)msg - (u8 *)cn));
node->sl = sl;
node->dev = dev;
mutex_lock(&dev->list_mutex);
list_add_tail(&node->async.async_entry, &dev->async_list);
wake_up_process(dev->thread);
mutex_unlock(&dev->list_mutex);
++node;
out_cont:
/* Can't queue this, because queuing modifies block and another
* thread could already be processing the messages; there is
* no lock, so send directly.
*/
if (err)
w1_netlink_send_error(cn, msg, nsp->portid, err);
msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
msg = (struct w1_netlink_msg *)(((u8 *)msg) +
sizeof(struct w1_netlink_msg) + msg->len);
/*
* Let's allow requests for nonexistent devices.
*/
if (err == -ENODEV)
err = 0;
}
if (block)
w1_unref_block(block);
}
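/* Editor's note: an illustrative request framing (an assumption based on
* the parsing above, not an authoritative protocol spec). A userspace
* search request arrives as a connector message whose payload nests:
*
* cn_msg { id = {CN_W1_IDX, CN_W1_VAL}, seq, ack, len }
* w1_netlink_msg { type = W1_MASTER_CMD, id.mst.id = <bus id>, len }
* w1_netlink_cmd { cmd = W1_CMD_SEARCH, len = 0 }
*
* Command replies are sent with ack == request seq + 1, while the
* per-command status messages reuse the request ack, matching
* w1_netlink_setup_msg() and w1_netlink_queue_status() above.
*/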
int w1_init_netlink(void)
{
struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};
return cn_add_callback(&w1_id, "w1", &w1_cn_callback);
}
void w1_fini_netlink(void)
{
struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};
cn_del_callback(&w1_id);
}
#else
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *cn)
{
}
int w1_init_netlink(void)
{
return 0;
}
void w1_fini_netlink(void)
{
}
#endif
| linux-master | drivers/w1/w1_netlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2413.c - w1 family 3a (DS2413) driver
* based on w1_ds2408.c by Jean-Francois Dagenais <[email protected]>
*
* Copyright (c) 2013 Mariusz Bialonczyk <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/w1.h>
#define W1_FAMILY_DS2413 0x3A
#define W1_F3A_RETRIES 3
#define W1_F3A_FUNC_PIO_ACCESS_READ 0xF5
#define W1_F3A_FUNC_PIO_ACCESS_WRITE 0x5A
#define W1_F3A_SUCCESS_CONFIRM_BYTE 0xAA
#define W1_F3A_INVALID_PIO_STATE 0xFF
static ssize_t state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
unsigned int retries = W1_F3A_RETRIES;
ssize_t bytes_read = -EIO;
u8 state;
dev_dbg(&sl->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
next:
if (w1_reset_select_slave(sl))
goto out;
while (retries--) {
w1_write_8(sl->master, W1_F3A_FUNC_PIO_ACCESS_READ);
state = w1_read_8(sl->master);
if ((state & 0x0F) == ((~state >> 4) & 0x0F)) {
/* complement is correct */
*buf = state;
bytes_read = 1;
goto out;
} else if (state == W1_F3A_INVALID_PIO_STATE) {
/* slave didn't respond, try to select it again */
dev_warn(&sl->dev, "slave device did not respond to PIO_ACCESS_READ, " \
"reselecting, retries left: %d\n", retries);
goto next;
}
if (w1_reset_resume_command(sl->master))
goto out; /* unrecoverable error */
dev_warn(&sl->dev, "PIO_ACCESS_READ error, retries left: %d\n", retries);
}
out:
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "%s, mutex unlocked, retries: %d\n",
(bytes_read > 0) ? "succeeded" : "error", retries);
return bytes_read;
}
static BIN_ATTR_RO(state, 1);
static ssize_t output_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[3];
unsigned int retries = W1_F3A_RETRIES;
ssize_t bytes_written = -EIO;
if (count != 1 || off != 0)
return -EFAULT;
dev_dbg(&sl->dev, "locking mutex for write_output");
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
if (w1_reset_select_slave(sl))
goto out;
/*
* according to the DS2413 datasheet, the most significant 6 bits
* should be set to "1"s, so do it now
*/
*buf = *buf | 0xFC;
while (retries--) {
w1_buf[0] = W1_F3A_FUNC_PIO_ACCESS_WRITE;
w1_buf[1] = *buf;
w1_buf[2] = ~(*buf);
w1_write_block(sl->master, w1_buf, 3);
if (w1_read_8(sl->master) == W1_F3A_SUCCESS_CONFIRM_BYTE) {
bytes_written = 1;
goto out;
}
if (w1_reset_resume_command(sl->master))
goto out; /* unrecoverable error */
dev_warn(&sl->dev, "PIO_ACCESS_WRITE error, retries left: %d\n", retries);
}
out:
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "%s, mutex unlocked, retries: %d\n",
(bytes_written > 0) ? "succeeded" : "error", retries);
return bytes_written;
}
static BIN_ATTR(output, 0664, NULL, output_write, 1);
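/* Editor's note: illustrative sysfs usage (the device id below is made up).
* The DS2413 shows up under /sys/bus/w1/devices/ with the 0x3a family
* prefix, and the two attributes above can be used from the shell, e.g.:
*
* # read the 1-byte PIO state (low nibble, with its complement on top)
* xxd /sys/bus/w1/devices/3a-0000001a2b3c/state
*
* # drive the PIO outputs; only the two low bits matter, the rest are
* # forced to 1 by output_write() above
* printf '\xfe' > /sys/bus/w1/devices/3a-0000001a2b3c/output
*/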
static struct bin_attribute *w1_f3a_bin_attrs[] = {
&bin_attr_state,
&bin_attr_output,
NULL,
};
static const struct attribute_group w1_f3a_group = {
.bin_attrs = w1_f3a_bin_attrs,
};
static const struct attribute_group *w1_f3a_groups[] = {
&w1_f3a_group,
NULL,
};
static const struct w1_family_ops w1_f3a_fops = {
.groups = w1_f3a_groups,
};
static struct w1_family w1_family_3a = {
.fid = W1_FAMILY_DS2413,
.fops = &w1_f3a_fops,
};
module_w1_family(w1_family_3a);
MODULE_AUTHOR("Mariusz Bialonczyk <[email protected]>");
MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2413));
| linux-master | drivers/w1/slaves/w1_ds2413.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 1-Wire implementation for the ds2438 chip
*
* Copyright (c) 2017 Mariusz Bialonczyk <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/w1.h>
#define W1_FAMILY_DS2438 0x26
#define W1_DS2438_RETRIES 3
/* Memory commands */
#define W1_DS2438_READ_SCRATCH 0xBE
#define W1_DS2438_WRITE_SCRATCH 0x4E
#define W1_DS2438_COPY_SCRATCH 0x48
#define W1_DS2438_RECALL_MEMORY 0xB8
/* Register commands */
#define W1_DS2438_CONVERT_TEMP 0x44
#define W1_DS2438_CONVERT_VOLTAGE 0xB4
#define DS2438_PAGE_SIZE 8
#define DS2438_ADC_INPUT_VAD 0
#define DS2438_ADC_INPUT_VDD 1
#define DS2438_MAX_CONVERSION_TIME 10 /* ms */
/* Page #0 definitions */
#define DS2438_STATUS_REG 0x00 /* Status/Configuration Register */
#define DS2438_STATUS_IAD (1 << 0) /* Current A/D Control Bit */
#define DS2438_STATUS_CA (1 << 1) /* Current Accumulator Configuration */
#define DS2438_STATUS_EE (1 << 2) /* Current Accumulator Shadow Selector bit */
#define DS2438_STATUS_AD (1 << 3) /* Voltage A/D Input Select Bit */
#define DS2438_STATUS_TB (1 << 4) /* Temperature Busy Flag */
#define DS2438_STATUS_NVB (1 << 5) /* Nonvolatile Memory Busy Flag */
#define DS2438_STATUS_ADB (1 << 6) /* A/D Converter Busy Flag */
#define DS2438_TEMP_LSB 0x01
#define DS2438_TEMP_MSB 0x02
#define DS2438_VOLTAGE_LSB 0x03
#define DS2438_VOLTAGE_MSB 0x04
#define DS2438_CURRENT_LSB 0x05
#define DS2438_CURRENT_MSB 0x06
#define DS2438_THRESHOLD 0x07
/* Page #1 definitions */
#define DS2438_ETM_0 0x00
#define DS2438_ETM_1 0x01
#define DS2438_ETM_2 0x02
#define DS2438_ETM_3 0x03
#define DS2438_ICA 0x04
#define DS2438_OFFSET_LSB 0x05
#define DS2438_OFFSET_MSB 0x06
static int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[2];
u8 crc;
size_t count;
while (retries--) {
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_RECALL_MEMORY;
w1_buf[1] = (u8)pageno;
w1_write_block(sl->master, w1_buf, 2);
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_READ_SCRATCH;
w1_buf[1] = (u8)pageno;
w1_write_block(sl->master, w1_buf, 2);
count = w1_read_block(sl->master, buf, DS2438_PAGE_SIZE + 1);
if (count == DS2438_PAGE_SIZE + 1) {
crc = w1_calc_crc8(buf, DS2438_PAGE_SIZE);
/* check for correct CRC */
if ((u8)buf[DS2438_PAGE_SIZE] == crc)
return 0;
}
}
return -1;
}
static int w1_ds2438_get_temperature(struct w1_slave *sl, int16_t *temperature)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
unsigned int tm = DS2438_MAX_CONVERSION_TIME;
unsigned long sleep_rem;
int ret;
mutex_lock(&sl->master->bus_mutex);
while (retries--) {
if (w1_reset_select_slave(sl))
continue;
w1_write_8(sl->master, W1_DS2438_CONVERT_TEMP);
mutex_unlock(&sl->master->bus_mutex);
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -1;
goto post_unlock;
}
if (mutex_lock_interruptible(&sl->master->bus_mutex) != 0) {
ret = -1;
goto post_unlock;
}
break;
}
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
*temperature = (((int16_t) w1_buf[DS2438_TEMP_MSB]) << 8) | ((uint16_t) w1_buf[DS2438_TEMP_LSB]);
ret = 0;
} else
ret = -1;
mutex_unlock(&sl->master->bus_mutex);
post_unlock:
return ret;
}
static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[3];
u8 status;
int perform_write = 0;
while (retries--) {
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_RECALL_MEMORY;
w1_buf[1] = 0x00;
w1_write_block(sl->master, w1_buf, 2);
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_READ_SCRATCH;
w1_buf[1] = 0x00;
w1_write_block(sl->master, w1_buf, 2);
/* reading one byte of result */
status = w1_read_8(sl->master);
/* if bit0=1, set a value to a mask for easy compare */
if (value)
value = mask;
if ((status & mask) == value)
return 0; /* already set as requested */
/* changing bit */
status ^= mask;
perform_write = 1;
break;
}
if (perform_write) {
retries = W1_DS2438_RETRIES;
while (retries--) {
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_WRITE_SCRATCH;
w1_buf[1] = 0x00;
w1_buf[2] = status;
w1_write_block(sl->master, w1_buf, 3);
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_COPY_SCRATCH;
w1_buf[1] = 0x00;
w1_write_block(sl->master, w1_buf, 2);
return 0;
}
}
return -1;
}
static int w1_ds2438_change_offset_register(struct w1_slave *sl, u8 *value)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[9];
u8 w1_page1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
if (w1_ds2438_get_page(sl, 1, w1_page1_buf) == 0) {
memcpy(&w1_buf[2], w1_page1_buf, DS2438_PAGE_SIZE - 1); /* last register reserved */
w1_buf[7] = value[0]; /* change only offset register */
w1_buf[8] = value[1];
while (retries--) {
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_WRITE_SCRATCH;
w1_buf[1] = 0x01; /* write to page 1 */
w1_write_block(sl->master, w1_buf, 9);
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_COPY_SCRATCH;
w1_buf[1] = 0x01;
w1_write_block(sl->master, w1_buf, 2);
return 0;
}
}
return -1;
}
static int w1_ds2438_get_voltage(struct w1_slave *sl,
int adc_input, uint16_t *voltage)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
unsigned int tm = DS2438_MAX_CONVERSION_TIME;
unsigned long sleep_rem;
int ret;
mutex_lock(&sl->master->bus_mutex);
if (w1_ds2438_change_config_bit(sl, DS2438_STATUS_AD, adc_input)) {
ret = -1;
goto pre_unlock;
}
while (retries--) {
if (w1_reset_select_slave(sl))
continue;
w1_write_8(sl->master, W1_DS2438_CONVERT_VOLTAGE);
mutex_unlock(&sl->master->bus_mutex);
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -1;
goto post_unlock;
}
if (mutex_lock_interruptible(&sl->master->bus_mutex) != 0) {
ret = -1;
goto post_unlock;
}
break;
}
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
*voltage = (((uint16_t) w1_buf[DS2438_VOLTAGE_MSB]) << 8) | ((uint16_t) w1_buf[DS2438_VOLTAGE_LSB]);
ret = 0;
} else
ret = -1;
pre_unlock:
mutex_unlock(&sl->master->bus_mutex);
post_unlock:
return ret;
}
static int w1_ds2438_get_current(struct w1_slave *sl, int16_t *voltage)
{
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
int ret;
mutex_lock(&sl->master->bus_mutex);
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
/* The voltage measured across current sense resistor RSENS. */
*voltage = (((int16_t) w1_buf[DS2438_CURRENT_MSB]) << 8) | ((int16_t) w1_buf[DS2438_CURRENT_LSB]);
ret = 0;
} else
ret = -1;
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static ssize_t iad_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
if (count != 1 || off != 0)
return -EFAULT;
mutex_lock(&sl->master->bus_mutex);
if (w1_ds2438_change_config_bit(sl, DS2438_STATUS_IAD, *buf & 0x01) == 0)
ret = 1;
else
ret = -EIO;
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static ssize_t iad_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
int16_t voltage;
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
if (w1_ds2438_get_current(sl, &voltage) == 0)
ret = snprintf(buf, count, "%i\n", voltage);
else
ret = -EIO;
return ret;
}
static ssize_t page0_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
/* Read no more than page0 size */
if (count > DS2438_PAGE_SIZE)
count = DS2438_PAGE_SIZE;
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
memcpy(buf, &w1_buf, count);
ret = count;
} else
ret = -EIO;
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static ssize_t page1_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
/* Read no more than page1 size */
if (count > DS2438_PAGE_SIZE)
count = DS2438_PAGE_SIZE;
if (w1_ds2438_get_page(sl, 1, w1_buf) == 0) {
memcpy(buf, &w1_buf, count);
ret = count;
} else
ret = -EIO;
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static ssize_t offset_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
mutex_lock(&sl->master->bus_mutex);
if (w1_ds2438_change_offset_register(sl, buf) == 0)
ret = count;
else
ret = -EIO;
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
int16_t temp;
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
if (w1_ds2438_get_temperature(sl, &temp) == 0)
ret = snprintf(buf, count, "%i\n", temp);
else
ret = -EIO;
return ret;
}
static ssize_t vad_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
uint16_t voltage;
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0)
ret = snprintf(buf, count, "%u\n", voltage);
else
ret = -EIO;
return ret;
}
static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
uint16_t voltage;
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0)
ret = snprintf(buf, count, "%u\n", voltage);
else
ret = -EIO;
return ret;
}
static BIN_ATTR_RW(iad, 0);
static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
static BIN_ATTR_RO(page1, DS2438_PAGE_SIZE);
static BIN_ATTR_WO(offset, 2);
static BIN_ATTR_RO(temperature, 0/* real length varies */);
static BIN_ATTR_RO(vad, 0/* real length varies */);
static BIN_ATTR_RO(vdd, 0/* real length varies */);
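/* Editor's note: illustrative sysfs usage (the device id is made up). The
* DS2438 appears under /sys/bus/w1/devices/ with the 0x26 family prefix:
*
* cat /sys/bus/w1/devices/26-000000123456/temperature
* cat /sys/bus/w1/devices/26-000000123456/vdd
*
* The temperature attribute returns the raw 16-bit register value; per
* the DS2438 datasheet this is commonly divided by 256 in userspace to
* obtain degrees Celsius (an interpretation, not enforced by this driver).
*/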
static struct bin_attribute *w1_ds2438_bin_attrs[] = {
&bin_attr_iad,
&bin_attr_page0,
&bin_attr_page1,
&bin_attr_offset,
&bin_attr_temperature,
&bin_attr_vad,
&bin_attr_vdd,
NULL,
};
static const struct attribute_group w1_ds2438_group = {
.bin_attrs = w1_ds2438_bin_attrs,
};
static const struct attribute_group *w1_ds2438_groups[] = {
&w1_ds2438_group,
NULL,
};
static const struct w1_family_ops w1_ds2438_fops = {
.groups = w1_ds2438_groups,
};
static struct w1_family w1_ds2438_family = {
.fid = W1_FAMILY_DS2438,
.fops = &w1_ds2438_fops,
};
module_w1_family(w1_ds2438_family);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mariusz Bialonczyk <[email protected]>");
MODULE_DESCRIPTION("1-wire driver for Maxim/Dallas DS2438 Smart Battery Monitor");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2438));
| linux-master | drivers/w1/slaves/w1_ds2438.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 1-Wire implementation for the ds2781 chip
*
* Author: Renata Sayakhova <[email protected]>
*
* Based on w1-ds2780 driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/w1.h>
#include "w1_ds2781.h"
#define W1_FAMILY_DS2781 0x3D
static int w1_ds2781_do_io(struct device *dev, char *buf, int addr,
size_t count, int io)
{
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
if (addr > DS2781_DATA_SIZE || addr < 0)
return 0;
count = min_t(int, count, DS2781_DATA_SIZE - addr);
if (w1_reset_select_slave(sl) == 0) {
if (io) {
w1_write_8(sl->master, W1_DS2781_WRITE_DATA);
w1_write_8(sl->master, addr);
w1_write_block(sl->master, buf, count);
} else {
w1_write_8(sl->master, W1_DS2781_READ_DATA);
w1_write_8(sl->master, addr);
count = w1_read_block(sl->master, buf, count);
}
}
return count;
}
int w1_ds2781_io(struct device *dev, char *buf, int addr, size_t count,
int io)
{
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
int ret;
if (!dev)
return -ENODEV;
mutex_lock(&sl->master->bus_mutex);
ret = w1_ds2781_do_io(dev, buf, addr, count, io);
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
EXPORT_SYMBOL(w1_ds2781_io);
int w1_ds2781_eeprom_cmd(struct device *dev, int addr, int cmd)
{
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
if (!dev)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl) == 0) {
w1_write_8(sl->master, cmd);
w1_write_8(sl->master, addr);
}
mutex_unlock(&sl->master->bus_mutex);
return 0;
}
EXPORT_SYMBOL(w1_ds2781_eeprom_cmd);
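/* Editor's note: a minimal caller-side sketch (assumed, not taken from the
* companion battery driver). A platform driver bound to the "ds2781-battery"
* device registered below can reach the w1 slave through its parent device
* and use the exported helpers, e.g.:
*
* struct device *w1_dev = pdev->dev.parent; /* set in w1_ds2781_add_slave() */
* u8 raw[2];
* int reg = 0x0c; /* hypothetical register offset within DS2781_DATA_SIZE */
*
* if (w1_ds2781_io(w1_dev, raw, reg, sizeof(raw), 0) != sizeof(raw))
* return -EIO;
*/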
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
return w1_ds2781_io(dev, buf, off, count, 0);
}
static BIN_ATTR_RO(w1_slave, DS2781_DATA_SIZE);
static struct bin_attribute *w1_ds2781_bin_attrs[] = {
&bin_attr_w1_slave,
NULL,
};
static const struct attribute_group w1_ds2781_group = {
.bin_attrs = w1_ds2781_bin_attrs,
};
static const struct attribute_group *w1_ds2781_groups[] = {
&w1_ds2781_group,
NULL,
};
static int w1_ds2781_add_slave(struct w1_slave *sl)
{
int ret;
struct platform_device *pdev;
pdev = platform_device_alloc("ds2781-battery", PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
pdev->dev.parent = &sl->dev;
ret = platform_device_add(pdev);
if (ret)
goto pdev_add_failed;
dev_set_drvdata(&sl->dev, pdev);
return 0;
pdev_add_failed:
platform_device_put(pdev);
return ret;
}
static void w1_ds2781_remove_slave(struct w1_slave *sl)
{
struct platform_device *pdev = dev_get_drvdata(&sl->dev);
platform_device_unregister(pdev);
}
static const struct w1_family_ops w1_ds2781_fops = {
.add_slave = w1_ds2781_add_slave,
.remove_slave = w1_ds2781_remove_slave,
.groups = w1_ds2781_groups,
};
static struct w1_family w1_ds2781_family = {
.fid = W1_FAMILY_DS2781,
.fops = &w1_ds2781_fops,
};
module_w1_family(w1_ds2781_family);
MODULE_AUTHOR("Renata Sayakhova <[email protected]>");
MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2781 Stand-Alone Fuel Gauge IC");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2781));
| linux-master | drivers/w1/slaves/w1_ds2781.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* w1_smem.c
*
* Copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <asm/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/w1.h>
#define W1_FAMILY_SMEM_01 0x01
#define W1_FAMILY_SMEM_81 0x81
static struct w1_family w1_smem_family_01 = {
.fid = W1_FAMILY_SMEM_01,
};
static struct w1_family w1_smem_family_81 = {
.fid = W1_FAMILY_SMEM_81,
};
static int __init w1_smem_init(void)
{
int err;
err = w1_register_family(&w1_smem_family_01);
if (err)
return err;
err = w1_register_family(&w1_smem_family_81);
if (err) {
w1_unregister_family(&w1_smem_family_01);
return err;
}
return 0;
}
static void __exit w1_smem_fini(void)
{
w1_unregister_family(&w1_smem_family_01);
w1_unregister_family(&w1_smem_family_81);
}
module_init(w1_smem_init);
module_exit(w1_smem_fini);
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_01));
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_81));
| linux-master | drivers/w1/slaves/w1_smem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2805 - w1 family 0d (DS28E05) driver
*
* Copyright (c) 2016 Andrew Worsley [email protected]
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/w1.h>
#define W1_EEPROM_DS2805 0x0D
#define W1_F0D_EEPROM_SIZE 128
#define W1_F0D_PAGE_BITS 3
#define W1_F0D_PAGE_SIZE (1<<W1_F0D_PAGE_BITS)
#define W1_F0D_PAGE_MASK 0x0F
#define W1_F0D_SCRATCH_BITS 1
#define W1_F0D_SCRATCH_SIZE (1<<W1_F0D_SCRATCH_BITS)
#define W1_F0D_SCRATCH_MASK (W1_F0D_SCRATCH_SIZE-1)
#define W1_F0D_READ_EEPROM 0xF0
#define W1_F0D_WRITE_EEPROM 0x55
#define W1_F0D_RELEASE 0xFF
#define W1_F0D_CS_OK 0xAA /* Chip Status Ok */
#define W1_F0D_TPROG_MS 16
#define W1_F0D_READ_RETRIES 10
#define W1_F0D_READ_MAXLEN W1_F0D_EEPROM_SIZE
/*
* Check the file size bounds and adjust count as needed.
* This would not be needed if the file size didn't reset to 0 after a write.
*/
static inline size_t w1_f0d_fix_count(loff_t off, size_t count, size_t size)
{
if (off > size)
return 0;
if ((off + count) > size)
return size - off;
return count;
}
/*
* Read a block from W1 ROM two times and compare the results.
* If they are equal they are returned, otherwise the read
* is repeated W1_F0D_READ_RETRIES times.
*
* count must not exceed W1_F0D_READ_MAXLEN.
*/
static int w1_f0d_readblock(struct w1_slave *sl, int off, int count, char *buf)
{
u8 wrbuf[3];
u8 cmp[W1_F0D_READ_MAXLEN];
int tries = W1_F0D_READ_RETRIES;
do {
wrbuf[0] = W1_F0D_READ_EEPROM;
wrbuf[1] = off & 0x7f;
wrbuf[2] = 0;
if (w1_reset_select_slave(sl))
return -1;
w1_write_block(sl->master, wrbuf, sizeof(wrbuf));
w1_read_block(sl->master, buf, count);
if (w1_reset_select_slave(sl))
return -1;
w1_write_block(sl->master, wrbuf, sizeof(wrbuf));
w1_read_block(sl->master, cmp, count);
if (!memcmp(cmp, buf, count))
return 0;
} while (--tries);
dev_err(&sl->dev, "proof reading failed %d times\n",
W1_F0D_READ_RETRIES);
return -1;
}
static ssize_t w1_f0d_read_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int todo = count;
count = w1_f0d_fix_count(off, count, W1_F0D_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->mutex);
/* read directly from the EEPROM in chunks of W1_F0D_READ_MAXLEN */
while (todo > 0) {
int block_read;
if (todo >= W1_F0D_READ_MAXLEN)
block_read = W1_F0D_READ_MAXLEN;
else
block_read = todo;
if (w1_f0d_readblock(sl, off, block_read, buf) < 0) {
count = -EIO;
break;
}
todo -= W1_F0D_READ_MAXLEN;
buf += W1_F0D_READ_MAXLEN;
off += W1_F0D_READ_MAXLEN;
}
mutex_unlock(&sl->master->mutex);
return count;
}
/*
* Writes to the scratchpad and reads it back for verification.
* Then copies the scratchpad to EEPROM.
* The data must be aligned at W1_F0D_SCRATCH_SIZE bytes and
* must be W1_F0D_SCRATCH_SIZE bytes long.
* The master must be locked.
*
* @param sl The slave structure
* @param addr Address for the write
* @param len length must be <= (W1_F0D_PAGE_SIZE - (addr & W1_F0D_PAGE_MASK))
* @param data The data to write
* @return 0=Success -1=failure
*/
static int w1_f0d_write(struct w1_slave *sl, int addr, int len, const u8 *data)
{
int tries = W1_F0D_READ_RETRIES;
u8 wrbuf[3];
u8 rdbuf[W1_F0D_SCRATCH_SIZE];
u8 cs;
if ((addr & 1) || (len != 2)) {
dev_err(&sl->dev, "%s: bad addr/len - addr=%#x len=%d\n",
__func__, addr, len);
return -1;
}
retry:
/* Write the data to the scratchpad */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F0D_WRITE_EEPROM;
wrbuf[1] = addr & 0xff;
wrbuf[2] = 0xff; /* ?? from Example */
w1_write_block(sl->master, wrbuf, sizeof(wrbuf));
w1_write_block(sl->master, data, len);
w1_read_block(sl->master, rdbuf, sizeof(rdbuf));
/* Compare what was read against the data written */
if ((rdbuf[0] != data[0]) || (rdbuf[1] != data[1])) {
if (--tries)
goto retry;
dev_err(&sl->dev,
"could not write to eeprom, scratchpad compare failed %d times\n",
W1_F0D_READ_RETRIES);
pr_info("%s: rdbuf = %#x %#x data = %#x %#x\n",
__func__, rdbuf[0], rdbuf[1], data[0], data[1]);
return -1;
}
/* Trigger write out to EEPROM */
w1_write_8(sl->master, W1_F0D_RELEASE);
/* Sleep for tprog ms to wait for the write to complete */
msleep(W1_F0D_TPROG_MS);
/* Check CS (Command Status) == 0xAA ? */
cs = w1_read_8(sl->master);
if (cs != W1_F0D_CS_OK) {
dev_err(&sl->dev, "save to eeprom failed = CS=%#x\n", cs);
return -1;
}
return 0;
}
static ssize_t w1_f0d_write_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len;
int copy;
count = w1_f0d_fix_count(off, count, W1_F0D_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->mutex);
/* Can only write data in blocks of the size of the scratchpad */
addr = off;
len = count;
while (len > 0) {
/* if len too short or addr not aligned */
if (len < W1_F0D_SCRATCH_SIZE || addr & W1_F0D_SCRATCH_MASK) {
char tmp[W1_F0D_SCRATCH_SIZE];
/* read the block and update the parts to be written */
if (w1_f0d_readblock(sl, addr & ~W1_F0D_SCRATCH_MASK,
W1_F0D_SCRATCH_SIZE, tmp)) {
count = -EIO;
goto out_up;
}
/* copy at most to the boundary of the PAGE or len */
copy = W1_F0D_SCRATCH_SIZE -
(addr & W1_F0D_SCRATCH_MASK);
if (copy > len)
copy = len;
memcpy(&tmp[addr & W1_F0D_SCRATCH_MASK], buf, copy);
if (w1_f0d_write(sl, addr & ~W1_F0D_SCRATCH_MASK,
W1_F0D_SCRATCH_SIZE, tmp) < 0) {
count = -EIO;
goto out_up;
}
} else {
copy = W1_F0D_SCRATCH_SIZE;
if (w1_f0d_write(sl, addr, copy, buf) < 0) {
count = -EIO;
goto out_up;
}
}
buf += copy;
addr += copy;
len -= copy;
}
out_up:
mutex_unlock(&sl->master->mutex);
return count;
}
static struct bin_attribute w1_f0d_bin_attr = {
.attr = {
.name = "eeprom",
.mode = 0644,
},
.size = W1_F0D_EEPROM_SIZE,
.read = w1_f0d_read_bin,
.write = w1_f0d_write_bin,
};
static int w1_f0d_add_slave(struct w1_slave *sl)
{
return sysfs_create_bin_file(&sl->dev.kobj, &w1_f0d_bin_attr);
}
static void w1_f0d_remove_slave(struct w1_slave *sl)
{
sysfs_remove_bin_file(&sl->dev.kobj, &w1_f0d_bin_attr);
}
static const struct w1_family_ops w1_f0d_fops = {
.add_slave = w1_f0d_add_slave,
.remove_slave = w1_f0d_remove_slave,
};
static struct w1_family w1_family_0d = {
.fid = W1_EEPROM_DS2805,
.fops = &w1_f0d_fops,
};
module_w1_family(w1_family_0d);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrew Worsley [email protected]");
MODULE_DESCRIPTION("w1 family 0d driver for DS2805, 1kb EEPROM");
| linux-master | drivers/w1/slaves/w1_ds2805.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2431.c - w1 family 2d (DS2431) driver
*
* Copyright (c) 2008 Bernhard Weirich <[email protected]>
*
* Heavily inspired by w1_DS2433 driver from Ben Gardner <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/w1.h>
#define W1_EEPROM_DS2431 0x2D
#define W1_F2D_EEPROM_SIZE 128
#define W1_F2D_PAGE_COUNT 4
#define W1_F2D_PAGE_BITS 5
#define W1_F2D_PAGE_SIZE (1<<W1_F2D_PAGE_BITS)
#define W1_F2D_PAGE_MASK 0x1F
#define W1_F2D_SCRATCH_BITS 3
#define W1_F2D_SCRATCH_SIZE (1<<W1_F2D_SCRATCH_BITS)
#define W1_F2D_SCRATCH_MASK (W1_F2D_SCRATCH_SIZE-1)
#define W1_F2D_READ_EEPROM 0xF0
#define W1_F2D_WRITE_SCRATCH 0x0F
#define W1_F2D_READ_SCRATCH 0xAA
#define W1_F2D_COPY_SCRATCH 0x55
#define W1_F2D_TPROG_MS 11
#define W1_F2D_READ_RETRIES 10
#define W1_F2D_READ_MAXLEN 8
/*
* Check the file size bounds and adjust count as needed.
* This would not be needed if the file size didn't reset to 0 after a write.
*/
static inline size_t w1_f2d_fix_count(loff_t off, size_t count, size_t size)
{
if (off > size)
return 0;
if ((off + count) > size)
return size - off;
return count;
}
/*
* Read a block from W1 ROM two times and compare the results.
* If they are equal they are returned, otherwise the read
* is repeated W1_F2D_READ_RETRIES times.
*
* count must not exceed W1_F2D_READ_MAXLEN.
*/
static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf)
{
u8 wrbuf[3];
u8 cmp[W1_F2D_READ_MAXLEN];
int tries = W1_F2D_READ_RETRIES;
do {
wrbuf[0] = W1_F2D_READ_EEPROM;
wrbuf[1] = off & 0xff;
wrbuf[2] = off >> 8;
if (w1_reset_select_slave(sl))
return -1;
w1_write_block(sl->master, wrbuf, 3);
w1_read_block(sl->master, buf, count);
if (w1_reset_select_slave(sl))
return -1;
w1_write_block(sl->master, wrbuf, 3);
w1_read_block(sl->master, cmp, count);
if (!memcmp(cmp, buf, count))
return 0;
} while (--tries);
dev_err(&sl->dev, "proof reading failed %d times\n",
W1_F2D_READ_RETRIES);
return -1;
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int todo = count;
count = w1_f2d_fix_count(off, count, W1_F2D_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->bus_mutex);
/* read directly from the EEPROM in chunks of W1_F2D_READ_MAXLEN */
while (todo > 0) {
int block_read;
if (todo >= W1_F2D_READ_MAXLEN)
block_read = W1_F2D_READ_MAXLEN;
else
block_read = todo;
if (w1_f2d_readblock(sl, off, block_read, buf) < 0)
count = -EIO;
todo -= W1_F2D_READ_MAXLEN;
buf += W1_F2D_READ_MAXLEN;
off += W1_F2D_READ_MAXLEN;
}
mutex_unlock(&sl->master->bus_mutex);
return count;
}
/*
* Writes to the scratchpad and reads it back for verification.
* Then copies the scratchpad to EEPROM.
* The data must be aligned at W1_F2D_SCRATCH_SIZE bytes and
* must be W1_F2D_SCRATCH_SIZE bytes long.
* The master must be locked.
*
* @param sl The slave structure
* @param addr Address for the write
* @param len length must be <= (W1_F2D_PAGE_SIZE - (addr & W1_F2D_PAGE_MASK))
* @param data The data to write
* @return 0=Success -1=failure
*/
static int w1_f2d_write(struct w1_slave *sl, int addr, int len, const u8 *data)
{
int tries = W1_F2D_READ_RETRIES;
u8 wrbuf[4];
u8 rdbuf[W1_F2D_SCRATCH_SIZE + 3];
u8 es = (addr + len - 1) % W1_F2D_SCRATCH_SIZE;
retry:
/* Write the data to the scratchpad */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F2D_WRITE_SCRATCH;
wrbuf[1] = addr & 0xff;
wrbuf[2] = addr >> 8;
w1_write_block(sl->master, wrbuf, 3);
w1_write_block(sl->master, data, len);
/* Read the scratchpad and verify */
if (w1_reset_select_slave(sl))
return -1;
w1_write_8(sl->master, W1_F2D_READ_SCRATCH);
w1_read_block(sl->master, rdbuf, len + 3);
/* Compare what was read against the data written */
if ((rdbuf[0] != wrbuf[1]) || (rdbuf[1] != wrbuf[2]) ||
(rdbuf[2] != es) || (memcmp(data, &rdbuf[3], len) != 0)) {
if (--tries)
goto retry;
dev_err(&sl->dev,
"could not write to eeprom, scratchpad compare failed %d times\n",
W1_F2D_READ_RETRIES);
return -1;
}
/* Copy the scratchpad to EEPROM */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F2D_COPY_SCRATCH;
wrbuf[3] = es;
w1_write_block(sl->master, wrbuf, 4);
/* Sleep for tprog ms to wait for the write to complete */
msleep(W1_F2D_TPROG_MS);
/* Reset the bus to wake up the EEPROM */
w1_reset_bus(sl->master);
return 0;
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len;
int copy;
count = w1_f2d_fix_count(off, count, W1_F2D_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->bus_mutex);
/* Can only write data in blocks of the size of the scratchpad */
addr = off;
len = count;
while (len > 0) {
/* if len too short or addr not aligned */
if (len < W1_F2D_SCRATCH_SIZE || addr & W1_F2D_SCRATCH_MASK) {
char tmp[W1_F2D_SCRATCH_SIZE];
/* read the block and update the parts to be written */
if (w1_f2d_readblock(sl, addr & ~W1_F2D_SCRATCH_MASK,
W1_F2D_SCRATCH_SIZE, tmp)) {
count = -EIO;
goto out_up;
}
/* copy at most to the boundary of the PAGE or len */
copy = W1_F2D_SCRATCH_SIZE -
(addr & W1_F2D_SCRATCH_MASK);
if (copy > len)
copy = len;
memcpy(&tmp[addr & W1_F2D_SCRATCH_MASK], buf, copy);
if (w1_f2d_write(sl, addr & ~W1_F2D_SCRATCH_MASK,
W1_F2D_SCRATCH_SIZE, tmp) < 0) {
count = -EIO;
goto out_up;
}
} else {
copy = W1_F2D_SCRATCH_SIZE;
if (w1_f2d_write(sl, addr, copy, buf) < 0) {
count = -EIO;
goto out_up;
}
}
buf += copy;
addr += copy;
len -= copy;
}
out_up:
mutex_unlock(&sl->master->bus_mutex);
return count;
}
static BIN_ATTR_RW(eeprom, W1_F2D_EEPROM_SIZE);
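/* Editor's note: illustrative sysfs usage (the device id is made up). The
* 128-byte EEPROM is exposed as the read/write "eeprom" file, e.g.:
*
* dd if=/sys/bus/w1/devices/2d-000000123456/eeprom bs=1 count=128 | xxd
* printf 'hello' | dd of=/sys/bus/w1/devices/2d-000000123456/eeprom bs=1 seek=16
*
* Writes are split into scratchpad-sized chunks by eeprom_write() above.
*/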
static struct bin_attribute *w1_f2d_bin_attrs[] = {
&bin_attr_eeprom,
NULL,
};
static const struct attribute_group w1_f2d_group = {
.bin_attrs = w1_f2d_bin_attrs,
};
static const struct attribute_group *w1_f2d_groups[] = {
&w1_f2d_group,
NULL,
};
static const struct w1_family_ops w1_f2d_fops = {
.groups = w1_f2d_groups,
};
static struct w1_family w1_family_2d = {
.fid = W1_EEPROM_DS2431,
.fops = &w1_f2d_fops,
};
module_w1_family(w1_family_2d);
MODULE_AUTHOR("Bernhard Weirich <[email protected]>");
MODULE_DESCRIPTION("w1 family 2d driver for DS2431, 1kb EEPROM");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2431));
| linux-master | drivers/w1/slaves/w1_ds2431.c |
// SPDX-License-Identifier: GPL-2.0
/*
* w1_ds250x.c - w1 family 09/0b/89/91 (DS250x) driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/crc16.h>
#include <linux/w1.h>
#include <linux/nvmem-provider.h>
#define W1_DS2501_UNW_FAMILY 0x91
#define W1_DS2501_SIZE 64
#define W1_DS2502_FAMILY 0x09
#define W1_DS2502_UNW_FAMILY 0x89
#define W1_DS2502_SIZE 128
#define W1_DS2505_FAMILY 0x0b
#define W1_DS2505_SIZE 2048
#define W1_PAGE_SIZE 32
#define W1_EXT_READ_MEMORY 0xA5
#define W1_READ_DATA_CRC 0xC3
#define OFF2PG(off) ((off) / W1_PAGE_SIZE)
#define CRC16_INIT 0
#define CRC16_VALID 0xb001
struct w1_eprom_data {
size_t size;
int (*read)(struct w1_slave *sl, int pageno);
u8 eprom[W1_DS2505_SIZE];
DECLARE_BITMAP(page_present, W1_DS2505_SIZE / W1_PAGE_SIZE);
char nvmem_name[64];
};
static int w1_ds2502_read_page(struct w1_slave *sl, int pageno)
{
struct w1_eprom_data *data = sl->family_data;
int pgoff = pageno * W1_PAGE_SIZE;
int ret = -EIO;
u8 buf[3];
u8 crc8;
if (test_bit(pageno, data->page_present))
return 0; /* page already present */
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl))
goto err;
buf[0] = W1_READ_DATA_CRC;
buf[1] = pgoff & 0xff;
buf[2] = pgoff >> 8;
w1_write_block(sl->master, buf, 3);
crc8 = w1_read_8(sl->master);
if (w1_calc_crc8(buf, 3) != crc8)
goto err;
w1_read_block(sl->master, &data->eprom[pgoff], W1_PAGE_SIZE);
crc8 = w1_read_8(sl->master);
if (w1_calc_crc8(&data->eprom[pgoff], W1_PAGE_SIZE) != crc8)
goto err;
set_bit(pageno, data->page_present); /* mark page present */
ret = 0;
err:
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static int w1_ds2505_read_page(struct w1_slave *sl, int pageno)
{
struct w1_eprom_data *data = sl->family_data;
int redir_retries = 16;
int pgoff, epoff;
int ret = -EIO;
u8 buf[6];
u8 redir;
u16 crc;
if (test_bit(pageno, data->page_present))
return 0; /* page already present */
epoff = pgoff = pageno * W1_PAGE_SIZE;
mutex_lock(&sl->master->bus_mutex);
retry:
if (w1_reset_select_slave(sl))
goto err;
buf[0] = W1_EXT_READ_MEMORY;
buf[1] = pgoff & 0xff;
buf[2] = pgoff >> 8;
w1_write_block(sl->master, buf, 3);
w1_read_block(sl->master, buf + 3, 3); /* redir, crc16 */
redir = buf[3];
crc = crc16(CRC16_INIT, buf, 6);
if (crc != CRC16_VALID)
goto err;
if (redir != 0xff) {
redir_retries--;
if (redir_retries < 0)
goto err;
pgoff = (redir ^ 0xff) * W1_PAGE_SIZE;
goto retry;
}
w1_read_block(sl->master, &data->eprom[epoff], W1_PAGE_SIZE);
w1_read_block(sl->master, buf, 2); /* crc16 */
crc = crc16(CRC16_INIT, &data->eprom[epoff], W1_PAGE_SIZE);
crc = crc16(crc, buf, 2);
if (crc != CRC16_VALID)
goto err;
set_bit(pageno, data->page_present);
ret = 0;
err:
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static int w1_nvmem_read(void *priv, unsigned int off, void *buf, size_t count)
{
struct w1_slave *sl = priv;
struct w1_eprom_data *data = sl->family_data;
size_t eprom_size = data->size;
int ret;
int i;
if (off > eprom_size)
return -EINVAL;
if ((off + count) > eprom_size)
count = eprom_size - off;
i = OFF2PG(off);
do {
ret = data->read(sl, i++);
if (ret < 0)
return ret;
} while (i < OFF2PG(off + count));
memcpy(buf, &data->eprom[off], count);
return 0;
}
static int w1_eprom_add_slave(struct w1_slave *sl)
{
struct w1_eprom_data *data;
struct nvmem_device *nvmem;
struct nvmem_config nvmem_cfg = {
.dev = &sl->dev,
.reg_read = w1_nvmem_read,
.type = NVMEM_TYPE_OTP,
.read_only = true,
.word_size = 1,
.priv = sl,
.id = -1
};
data = devm_kzalloc(&sl->dev, sizeof(struct w1_eprom_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
sl->family_data = data;
switch (sl->family->fid) {
case W1_DS2501_UNW_FAMILY:
data->size = W1_DS2501_SIZE;
data->read = w1_ds2502_read_page;
break;
case W1_DS2502_FAMILY:
case W1_DS2502_UNW_FAMILY:
data->size = W1_DS2502_SIZE;
data->read = w1_ds2502_read_page;
break;
case W1_DS2505_FAMILY:
data->size = W1_DS2505_SIZE;
data->read = w1_ds2505_read_page;
break;
}
if (sl->master->bus_master->dev_id)
snprintf(data->nvmem_name, sizeof(data->nvmem_name),
"%s-%02x-%012llx",
sl->master->bus_master->dev_id, sl->reg_num.family,
(unsigned long long)sl->reg_num.id);
else
snprintf(data->nvmem_name, sizeof(data->nvmem_name),
"%02x-%012llx",
sl->reg_num.family,
(unsigned long long)sl->reg_num.id);
nvmem_cfg.name = data->nvmem_name;
nvmem_cfg.size = data->size;
nvmem = devm_nvmem_register(&sl->dev, &nvmem_cfg);
return PTR_ERR_OR_ZERO(nvmem);
}
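/* Editor's note: illustrative usage (the device name and path are
* assumptions based on the nvmem_config above; the exact naming is decided
* by the nvmem core). The read-only EPROM contents typically become
* readable through the NVMEM sysfs interface, e.g.:
*
* cat /sys/bus/nvmem/devices/09-000000123456/nvmem | xxd
*/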
static const struct w1_family_ops w1_eprom_fops = {
.add_slave = w1_eprom_add_slave,
};
static struct w1_family w1_family_09 = {
.fid = W1_DS2502_FAMILY,
.fops = &w1_eprom_fops,
};
static struct w1_family w1_family_0b = {
.fid = W1_DS2505_FAMILY,
.fops = &w1_eprom_fops,
};
static struct w1_family w1_family_89 = {
.fid = W1_DS2502_UNW_FAMILY,
.fops = &w1_eprom_fops,
};
static struct w1_family w1_family_91 = {
.fid = W1_DS2501_UNW_FAMILY,
.fops = &w1_eprom_fops,
};
static int __init w1_ds250x_init(void)
{
int err;
err = w1_register_family(&w1_family_09);
if (err)
return err;
err = w1_register_family(&w1_family_0b);
if (err)
goto err_0b;
err = w1_register_family(&w1_family_89);
if (err)
goto err_89;
err = w1_register_family(&w1_family_91);
if (err)
goto err_91;
return 0;
err_91:
w1_unregister_family(&w1_family_89);
err_89:
w1_unregister_family(&w1_family_0b);
err_0b:
w1_unregister_family(&w1_family_09);
return err;
}
static void __exit w1_ds250x_exit(void)
{
w1_unregister_family(&w1_family_09);
w1_unregister_family(&w1_family_0b);
w1_unregister_family(&w1_family_89);
w1_unregister_family(&w1_family_91);
}
module_init(w1_ds250x_init);
module_exit(w1_ds250x_exit);
MODULE_AUTHOR("Thomas Bogendoerfer <[email protected]>");
MODULE_DESCRIPTION("w1 family driver for DS250x Add Only Memory");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_DS2502_FAMILY));
MODULE_ALIAS("w1-family-" __stringify(W1_DS2505_FAMILY));
MODULE_ALIAS("w1-family-" __stringify(W1_DS2501_UNW_FAMILY));
MODULE_ALIAS("w1-family-" __stringify(W1_DS2502_UNW_FAMILY));
| linux-master | drivers/w1/slaves/w1_ds250x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2430.c - w1 family 14 (DS2430) driver
**
* Copyright (c) 2019 Angelo Dureghello <[email protected]>
*
* Cloned and modified from ds2431
* Copyright (c) 2008 Bernhard Weirich <[email protected]>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/w1.h>
#define W1_EEPROM_DS2430 0x14
#define W1_F14_EEPROM_SIZE 32
#define W1_F14_PAGE_COUNT 1
#define W1_F14_PAGE_BITS 5
#define W1_F14_PAGE_SIZE (1 << W1_F14_PAGE_BITS)
#define W1_F14_PAGE_MASK 0x1F
#define W1_F14_SCRATCH_BITS 5
#define W1_F14_SCRATCH_SIZE (1 << W1_F14_SCRATCH_BITS)
#define W1_F14_SCRATCH_MASK (W1_F14_SCRATCH_SIZE-1)
#define W1_F14_READ_EEPROM 0xF0
#define W1_F14_WRITE_SCRATCH 0x0F
#define W1_F14_READ_SCRATCH 0xAA
#define W1_F14_COPY_SCRATCH 0x55
#define W1_F14_VALIDATION_KEY 0xa5
#define W1_F14_TPROG_MS 11
#define W1_F14_READ_RETRIES 10
#define W1_F14_READ_MAXLEN W1_F14_SCRATCH_SIZE
/*
* Check the file size bounds and adjust count as needed.
* This would not be needed if the file size didn't reset to 0 after a write.
*/
static inline size_t w1_f14_fix_count(loff_t off, size_t count, size_t size)
{
if (off > size)
return 0;
if ((off + count) > size)
return size - off;
return count;
}
/*
* Read a block from W1 ROM two times and compare the results.
* If they are equal they are returned, otherwise the read
* is repeated W1_F14_READ_RETRIES times.
*
* count must not exceed W1_F14_READ_MAXLEN.
*/
static int w1_f14_readblock(struct w1_slave *sl, int off, int count, char *buf)
{
u8 wrbuf[2];
u8 cmp[W1_F14_READ_MAXLEN];
int tries = W1_F14_READ_RETRIES;
do {
wrbuf[0] = W1_F14_READ_EEPROM;
wrbuf[1] = off & 0xff;
if (w1_reset_select_slave(sl))
return -1;
w1_write_block(sl->master, wrbuf, 2);
w1_read_block(sl->master, buf, count);
if (w1_reset_select_slave(sl))
return -1;
w1_write_block(sl->master, wrbuf, 2);
w1_read_block(sl->master, cmp, count);
if (!memcmp(cmp, buf, count))
return 0;
} while (--tries);
dev_err(&sl->dev, "proof reading failed %d times\n",
W1_F14_READ_RETRIES);
return -1;
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int todo = count;
count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->bus_mutex);
/* read directly from the EEPROM in chunks of W1_F14_READ_MAXLEN */
while (todo > 0) {
int block_read;
if (todo >= W1_F14_READ_MAXLEN)
block_read = W1_F14_READ_MAXLEN;
else
block_read = todo;
if (w1_f14_readblock(sl, off, block_read, buf) < 0)
count = -EIO;
todo -= W1_F14_READ_MAXLEN;
buf += W1_F14_READ_MAXLEN;
off += W1_F14_READ_MAXLEN;
}
mutex_unlock(&sl->master->bus_mutex);
return count;
}
/*
* Writes to the scratchpad and reads it back for verification.
* Then copies the scratchpad to EEPROM.
* The data must be aligned at W1_F14_SCRATCH_SIZE bytes and
* must be W1_F14_SCRATCH_SIZE bytes long.
* The master must be locked.
*
* @param sl The slave structure
* @param addr Address for the write
* @param len length must be <= (W1_F14_PAGE_SIZE - (addr & W1_F14_PAGE_MASK))
* @param data The data to write
* @return 0=Success -1=failure
*/
static int w1_f14_write(struct w1_slave *sl, int addr, int len, const u8 *data)
{
int tries = W1_F14_READ_RETRIES;
u8 wrbuf[2];
u8 rdbuf[W1_F14_SCRATCH_SIZE + 3];
retry:
/* Write the data to the scratchpad */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F14_WRITE_SCRATCH;
wrbuf[1] = addr & 0xff;
w1_write_block(sl->master, wrbuf, 2);
w1_write_block(sl->master, data, len);
/* Read the scratchpad and verify */
if (w1_reset_select_slave(sl))
return -1;
w1_write_8(sl->master, W1_F14_READ_SCRATCH);
w1_read_block(sl->master, rdbuf, len + 2);
/*
* Compare what was read against the data written
* Note: on read scratchpad, device returns 2 bulk 0xff bytes,
* to be discarded.
*/
if ((memcmp(data, &rdbuf[2], len) != 0)) {
if (--tries)
goto retry;
dev_err(&sl->dev,
"could not write to eeprom, scratchpad compare failed %d times\n",
W1_F14_READ_RETRIES);
return -1;
}
/* Copy the scratchpad to EEPROM */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F14_COPY_SCRATCH;
wrbuf[1] = W1_F14_VALIDATION_KEY;
w1_write_block(sl->master, wrbuf, 2);
/* Sleep for tprog ms to wait for the write to complete */
msleep(W1_F14_TPROG_MS);
/* Reset the bus to wake up the EEPROM */
w1_reset_bus(sl->master);
return 0;
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len;
int copy;
count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->bus_mutex);
/* Can only write data in blocks of the size of the scratchpad */
addr = off;
len = count;
while (len > 0) {
/* if len too short or addr not aligned */
if (len < W1_F14_SCRATCH_SIZE || addr & W1_F14_SCRATCH_MASK) {
char tmp[W1_F14_SCRATCH_SIZE];
/* read the block and update the parts to be written */
if (w1_f14_readblock(sl, addr & ~W1_F14_SCRATCH_MASK,
W1_F14_SCRATCH_SIZE, tmp)) {
count = -EIO;
goto out_up;
}
/* copy at most to the boundary of the PAGE or len */
copy = W1_F14_SCRATCH_SIZE -
(addr & W1_F14_SCRATCH_MASK);
if (copy > len)
copy = len;
memcpy(&tmp[addr & W1_F14_SCRATCH_MASK], buf, copy);
if (w1_f14_write(sl, addr & ~W1_F14_SCRATCH_MASK,
W1_F14_SCRATCH_SIZE, tmp) < 0) {
count = -EIO;
goto out_up;
}
} else {
copy = W1_F14_SCRATCH_SIZE;
if (w1_f14_write(sl, addr, copy, buf) < 0) {
count = -EIO;
goto out_up;
}
}
buf += copy;
addr += copy;
len -= copy;
}
out_up:
mutex_unlock(&sl->master->bus_mutex);
return count;
}
static BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
static struct bin_attribute *w1_f14_bin_attrs[] = {
&bin_attr_eeprom,
NULL,
};
static const struct attribute_group w1_f14_group = {
.bin_attrs = w1_f14_bin_attrs,
};
static const struct attribute_group *w1_f14_groups[] = {
&w1_f14_group,
NULL,
};
static const struct w1_family_ops w1_f14_fops = {
.groups = w1_f14_groups,
};
static struct w1_family w1_family_14 = {
.fid = W1_EEPROM_DS2430,
.fops = &w1_f14_fops,
};
module_w1_family(w1_family_14);
MODULE_AUTHOR("Angelo Dureghello <[email protected]>");
MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256b EEPROM");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2430));
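/*
 * Usage sketch (not part of the driver; the slave id in the path is made
 * up): once this family driver is bound, the 32 byte EEPROM is exposed as a
 * sysfs binary file that plain user space I/O can read, for instance:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int read_ds2430(unsigned char buf[32])
 *	{
 *		int fd, n;
 *
 *		fd = open("/sys/bus/w1/devices/14-0000001a2b3c/eeprom",
 *			  O_RDONLY);
 *		if (fd < 0)
 *			return -1;
 *		n = read(fd, buf, 32);
 *		close(fd);
 *		return n;
 *	}
 */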
| linux-master | drivers/w1/slaves/w1_ds2430.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* w1_ds2405.c
*
* Copyright (c) 2017 Maciej S. Szmigiero <[email protected]>
* Based on w1_therm.c copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/w1.h>
#define W1_FAMILY_DS2405 0x05
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maciej S. Szmigiero <[email protected]>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas DS2405 PIO.");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2405));
static int w1_ds2405_select(struct w1_slave *sl, bool only_active)
{
struct w1_master *dev = sl->master;
u64 dev_addr = le64_to_cpu(*(u64 *)&sl->reg_num);
unsigned int bit_ctr;
if (w1_reset_bus(dev) != 0)
return 0;
/*
* We cannot use a normal Match ROM command
* since doing so would toggle PIO state
*/
w1_write_8(dev, only_active ? W1_ALARM_SEARCH : W1_SEARCH);
for (bit_ctr = 0; bit_ctr < 64; bit_ctr++) {
int bit2send = !!(dev_addr & BIT(bit_ctr));
u8 ret;
ret = w1_triplet(dev, bit2send);
if ((ret & (BIT(0) | BIT(1))) ==
(BIT(0) | BIT(1))) /* no devices found */
return 0;
if (!!(ret & BIT(2)) != bit2send)
/* wrong direction taken - no such device */
return 0;
}
return 1;
}
static int w1_ds2405_read_pio(struct w1_slave *sl)
{
if (w1_ds2405_select(sl, true))
return 0; /* "active" means PIO is low */
if (w1_ds2405_select(sl, false))
return 1;
return -ENODEV;
}
static ssize_t state_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
int ret;
ssize_t f_retval;
u8 state;
ret = mutex_lock_interruptible(&dev->bus_mutex);
if (ret)
return ret;
if (!w1_ds2405_select(sl, false)) {
f_retval = -ENODEV;
goto out_unlock;
}
state = w1_read_8(dev);
if (state != 0 &&
state != 0xff) {
dev_err(device, "non-consistent state %x\n", state);
f_retval = -EIO;
goto out_unlock;
}
*buf = state ? '1' : '0';
f_retval = 1;
out_unlock:
w1_reset_bus(dev);
mutex_unlock(&dev->bus_mutex);
return f_retval;
}
static ssize_t output_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
int ret;
ssize_t f_retval;
ret = mutex_lock_interruptible(&dev->bus_mutex);
if (ret)
return ret;
ret = w1_ds2405_read_pio(sl);
if (ret < 0) {
f_retval = ret;
goto out_unlock;
}
*buf = ret ? '1' : '0';
f_retval = 1;
out_unlock:
w1_reset_bus(dev);
mutex_unlock(&dev->bus_mutex);
return f_retval;
}
static ssize_t output_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
int ret, current_pio;
unsigned int val;
ssize_t f_retval;
if (count < 1)
return -EINVAL;
if (sscanf(buf, " %u%n", &val, &ret) < 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
f_retval = ret;
ret = mutex_lock_interruptible(&dev->bus_mutex);
if (ret)
return ret;
current_pio = w1_ds2405_read_pio(sl);
if (current_pio < 0) {
f_retval = current_pio;
goto out_unlock;
}
if (current_pio == val)
goto out_unlock;
if (w1_reset_bus(dev) != 0) {
f_retval = -ENODEV;
goto out_unlock;
}
/*
* can't use w1_reset_select_slave() here since it uses Skip ROM if
* there is only one device on bus
*/
do {
u64 dev_addr = le64_to_cpu(*(u64 *)&sl->reg_num);
u8 cmd[9];
cmd[0] = W1_MATCH_ROM;
memcpy(&cmd[1], &dev_addr, sizeof(dev_addr));
w1_write_block(dev, cmd, sizeof(cmd));
} while (0);
out_unlock:
w1_reset_bus(dev);
mutex_unlock(&dev->bus_mutex);
return f_retval;
}
static DEVICE_ATTR_RO(state);
static DEVICE_ATTR_RW(output);
static struct attribute *w1_ds2405_attrs[] = {
&dev_attr_state.attr,
&dev_attr_output.attr,
NULL
};
ATTRIBUTE_GROUPS(w1_ds2405);
static const struct w1_family_ops w1_ds2405_fops = {
.groups = w1_ds2405_groups
};
static struct w1_family w1_family_ds2405 = {
.fid = W1_FAMILY_DS2405,
.fops = &w1_ds2405_fops
};
module_w1_family(w1_family_ds2405);
| linux-master | drivers/w1/slaves/w1_ds2405.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds28e17.c - w1 family 19 (DS28E17) driver
*
* Copyright (c) 2016 Jan Kandziora <[email protected]>
*/
#include <linux/crc16.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#define CRC16_INIT 0
#include <linux/w1.h>
#define W1_FAMILY_DS28E17 0x19
/* Module setup. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jan Kandziora <[email protected]>");
MODULE_DESCRIPTION("w1 family 19 driver for DS28E17, 1-wire to I2C master bridge");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E17));
/* Default I2C speed to be set when a DS28E17 is detected. */
static int i2c_speed = 100;
module_param_named(speed, i2c_speed, int, 0600);
MODULE_PARM_DESC(speed, "Default I2C speed to be set when a DS28E17 is detected");
/* Default I2C stretch value to be set when a DS28E17 is detected. */
static char i2c_stretch = 1;
module_param_named(stretch, i2c_stretch, byte, 0600);
MODULE_PARM_DESC(stretch, "Default I2C stretch value to be set when a DS28E17 is detected");
/* DS28E17 device command codes. */
#define W1_F19_WRITE_DATA_WITH_STOP 0x4B
#define W1_F19_WRITE_DATA_NO_STOP 0x5A
#define W1_F19_WRITE_DATA_ONLY 0x69
#define W1_F19_WRITE_DATA_ONLY_WITH_STOP 0x78
#define W1_F19_READ_DATA_WITH_STOP 0x87
#define W1_F19_WRITE_READ_DATA_WITH_STOP 0x2D
#define W1_F19_WRITE_CONFIGURATION 0xD2
#define W1_F19_READ_CONFIGURATION 0xE1
#define W1_F19_ENABLE_SLEEP_MODE 0x1E
#define W1_F19_READ_DEVICE_REVISION 0xC4
/* DS28E17 status bits */
#define W1_F19_STATUS_CRC 0x01
#define W1_F19_STATUS_ADDRESS 0x02
#define W1_F19_STATUS_START 0x08
/*
* Maximum number of I2C bytes to transfer within one CRC16 protected onewire
* command.
*/
#define W1_F19_WRITE_DATA_LIMIT 255
/* Maximum number of I2C bytes to read with one onewire command. */
#define W1_F19_READ_DATA_LIMIT 255
/* Constants for calculating the busy sleep. */
#define W1_F19_BUSY_TIMEBASES { 90, 23, 10 }
#define W1_F19_BUSY_GRATUITY 1000
/* Number of checks for the busy flag before timeout. */
#define W1_F19_BUSY_CHECKS 1000
/* Slave specific data. */
struct w1_f19_data {
u8 speed;
u8 stretch;
struct i2c_adapter adapter;
};
/* Wait a while until the busy flag clears. */
static int w1_f19_i2c_busy_wait(struct w1_slave *sl, size_t count)
{
const unsigned long timebases[3] = W1_F19_BUSY_TIMEBASES;
struct w1_f19_data *data = sl->family_data;
unsigned int checks;
/* Check the busy flag first in any case.*/
if (w1_touch_bit(sl->master, 1) == 0)
return 0;
/*
* Do a generously long sleep in the beginning,
* as we have to wait at least this time for all
* the I2C bytes at the given speed to be transferred.
*/
usleep_range(timebases[data->speed] * (data->stretch) * count,
timebases[data->speed] * (data->stretch) * count
+ W1_F19_BUSY_GRATUITY);
	/* Now continuously check the busy flag sent by the DS28E17. */
checks = W1_F19_BUSY_CHECKS;
while ((checks--) > 0) {
/* Return success if the busy flag is cleared. */
if (w1_touch_bit(sl->master, 1) == 0)
return 0;
		/* Wait one non-stretched byte timeslot. */
udelay(timebases[data->speed]);
}
/* Timeout. */
dev_warn(&sl->dev, "busy timeout\n");
return -ETIMEDOUT;
}
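/*
 * Worked example for w1_f19_i2c_busy_wait() above: at the 100 kHz setting
 * (timebases[0] = 90 us per byte) with stretch 1 and count 10, the initial
 * usleep_range() window is 900 us to 1900 us, after which the busy bit is
 * polled up to W1_F19_BUSY_CHECKS times.
 */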
/* Utility function: check the DS28E17 status bytes and translate them into an error code. */
static size_t w1_f19_error(struct w1_slave *sl, u8 w1_buf[])
{
/* Warnings. */
if (w1_buf[0] & W1_F19_STATUS_CRC)
dev_warn(&sl->dev, "crc16 mismatch\n");
if (w1_buf[0] & W1_F19_STATUS_ADDRESS)
dev_warn(&sl->dev, "i2c device not responding\n");
if ((w1_buf[0] & (W1_F19_STATUS_CRC | W1_F19_STATUS_ADDRESS)) == 0
&& w1_buf[1] != 0) {
dev_warn(&sl->dev, "i2c short write, %d bytes not acknowledged\n",
w1_buf[1]);
}
/* Check error conditions. */
if (w1_buf[0] & W1_F19_STATUS_ADDRESS)
return -ENXIO;
if (w1_buf[0] & W1_F19_STATUS_START)
return -EAGAIN;
if (w1_buf[0] != 0 || w1_buf[1] != 0)
return -EIO;
/* All ok. */
return 0;
}
/* Utility function: write data to I2C slave, single chunk. */
static int __w1_f19_i2c_write(struct w1_slave *sl,
const u8 *command, size_t command_count,
const u8 *buffer, size_t count)
{
u16 crc;
int error;
u8 w1_buf[2];
/* Send command and I2C data to DS28E17. */
crc = crc16(CRC16_INIT, command, command_count);
w1_write_block(sl->master, command, command_count);
w1_buf[0] = count;
crc = crc16(crc, w1_buf, 1);
w1_write_8(sl->master, w1_buf[0]);
crc = crc16(crc, buffer, count);
w1_write_block(sl->master, buffer, count);
w1_buf[0] = ~(crc & 0xFF);
w1_buf[1] = ~((crc >> 8) & 0xFF);
w1_write_block(sl->master, w1_buf, 2);
/* Wait until busy flag clears (or timeout). */
if (w1_f19_i2c_busy_wait(sl, count + 1) < 0)
return -ETIMEDOUT;
/* Read status from DS28E17. */
w1_read_block(sl->master, w1_buf, 2);
/* Check error conditions. */
error = w1_f19_error(sl, w1_buf);
if (error < 0)
return error;
/* Return number of bytes written. */
return count;
}
/* Write data to I2C slave. */
static int w1_f19_i2c_write(struct w1_slave *sl, u16 i2c_address,
const u8 *buffer, size_t count, bool stop)
{
int result;
int remaining = count;
const u8 *p;
u8 command[2];
/* Check input. */
if (count == 0)
return -EOPNOTSUPP;
/* Check whether we need multiple commands. */
if (count <= W1_F19_WRITE_DATA_LIMIT) {
/*
* Small data amount. Data can be sent with
* a single onewire command.
*/
/* Send all data to DS28E17. */
command[0] = (stop ? W1_F19_WRITE_DATA_WITH_STOP
: W1_F19_WRITE_DATA_NO_STOP);
command[1] = i2c_address << 1;
result = __w1_f19_i2c_write(sl, command, 2, buffer, count);
} else {
/* Large data amount. Data has to be sent in multiple chunks. */
/* Send first chunk to DS28E17. */
p = buffer;
command[0] = W1_F19_WRITE_DATA_NO_STOP;
command[1] = i2c_address << 1;
result = __w1_f19_i2c_write(sl, command, 2, p,
W1_F19_WRITE_DATA_LIMIT);
if (result < 0)
return result;
/* Resume to same DS28E17. */
if (w1_reset_resume_command(sl->master))
return -EIO;
/* Next data chunk. */
p += W1_F19_WRITE_DATA_LIMIT;
remaining -= W1_F19_WRITE_DATA_LIMIT;
while (remaining > W1_F19_WRITE_DATA_LIMIT) {
/* Send intermediate chunk to DS28E17. */
command[0] = W1_F19_WRITE_DATA_ONLY;
result = __w1_f19_i2c_write(sl, command, 1, p,
W1_F19_WRITE_DATA_LIMIT);
if (result < 0)
return result;
/* Resume to same DS28E17. */
if (w1_reset_resume_command(sl->master))
return -EIO;
/* Next data chunk. */
p += W1_F19_WRITE_DATA_LIMIT;
remaining -= W1_F19_WRITE_DATA_LIMIT;
}
/* Send final chunk to DS28E17. */
command[0] = (stop ? W1_F19_WRITE_DATA_ONLY_WITH_STOP
: W1_F19_WRITE_DATA_ONLY);
result = __w1_f19_i2c_write(sl, command, 1, p, remaining);
}
return result;
}
/* Read data from I2C slave. */
static int w1_f19_i2c_read(struct w1_slave *sl, u16 i2c_address,
u8 *buffer, size_t count)
{
u16 crc;
int error;
u8 w1_buf[5];
/* Check input. */
if (count == 0)
return -EOPNOTSUPP;
/* Send command to DS28E17. */
w1_buf[0] = W1_F19_READ_DATA_WITH_STOP;
w1_buf[1] = i2c_address << 1 | 0x01;
w1_buf[2] = count;
crc = crc16(CRC16_INIT, w1_buf, 3);
w1_buf[3] = ~(crc & 0xFF);
w1_buf[4] = ~((crc >> 8) & 0xFF);
w1_write_block(sl->master, w1_buf, 5);
/* Wait until busy flag clears (or timeout). */
if (w1_f19_i2c_busy_wait(sl, count + 1) < 0)
return -ETIMEDOUT;
/* Read status from DS28E17. */
w1_buf[0] = w1_read_8(sl->master);
w1_buf[1] = 0;
/* Check error conditions. */
error = w1_f19_error(sl, w1_buf);
if (error < 0)
return error;
/* Read received I2C data from DS28E17. */
return w1_read_block(sl->master, buffer, count);
}
/* Write to, then read data from I2C slave. */
static int w1_f19_i2c_write_read(struct w1_slave *sl, u16 i2c_address,
const u8 *wbuffer, size_t wcount, u8 *rbuffer, size_t rcount)
{
u16 crc;
int error;
u8 w1_buf[3];
/* Check input. */
if (wcount == 0 || rcount == 0)
return -EOPNOTSUPP;
/* Send command and I2C data to DS28E17. */
w1_buf[0] = W1_F19_WRITE_READ_DATA_WITH_STOP;
w1_buf[1] = i2c_address << 1;
w1_buf[2] = wcount;
crc = crc16(CRC16_INIT, w1_buf, 3);
w1_write_block(sl->master, w1_buf, 3);
crc = crc16(crc, wbuffer, wcount);
w1_write_block(sl->master, wbuffer, wcount);
w1_buf[0] = rcount;
crc = crc16(crc, w1_buf, 1);
w1_buf[1] = ~(crc & 0xFF);
w1_buf[2] = ~((crc >> 8) & 0xFF);
w1_write_block(sl->master, w1_buf, 3);
/* Wait until busy flag clears (or timeout). */
if (w1_f19_i2c_busy_wait(sl, wcount + rcount + 2) < 0)
return -ETIMEDOUT;
/* Read status from DS28E17. */
w1_read_block(sl->master, w1_buf, 2);
/* Check error conditions. */
error = w1_f19_error(sl, w1_buf);
if (error < 0)
return error;
/* Read received I2C data from DS28E17. */
return w1_read_block(sl->master, rbuffer, rcount);
}
/* Do an I2C master transfer. */
static int w1_f19_i2c_master_transfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num)
{
struct w1_slave *sl = (struct w1_slave *) adapter->algo_data;
int i = 0;
int result = 0;
/* Start onewire transaction. */
mutex_lock(&sl->master->bus_mutex);
/* Select DS28E17. */
if (w1_reset_select_slave(sl)) {
i = -EIO;
goto error;
}
/* Loop while there are still messages to transfer. */
while (i < num) {
/*
* Check for special case: Small write followed
* by read to same I2C device.
*/
if (i < (num-1)
&& msgs[i].addr == msgs[i+1].addr
&& !(msgs[i].flags & I2C_M_RD)
&& (msgs[i+1].flags & I2C_M_RD)
&& (msgs[i].len <= W1_F19_WRITE_DATA_LIMIT)) {
/*
* The DS28E17 has a combined transfer
* for small write+read.
*/
result = w1_f19_i2c_write_read(sl, msgs[i].addr,
msgs[i].buf, msgs[i].len,
msgs[i+1].buf, msgs[i+1].len);
if (result < 0) {
i = result;
goto error;
}
/*
* Check if we should interpret the read data
* as a length byte. The DS28E17 unfortunately
* has no read without stop, so we can just do
* another simple read in that case.
*/
if (msgs[i+1].flags & I2C_M_RECV_LEN) {
result = w1_f19_i2c_read(sl, msgs[i+1].addr,
&(msgs[i+1].buf[1]), msgs[i+1].buf[0]);
if (result < 0) {
i = result;
goto error;
}
}
/* Eat up read message, too. */
i++;
} else if (msgs[i].flags & I2C_M_RD) {
/* Read transfer. */
result = w1_f19_i2c_read(sl, msgs[i].addr,
msgs[i].buf, msgs[i].len);
if (result < 0) {
i = result;
goto error;
}
/*
* Check if we should interpret the read data
* as a length byte. The DS28E17 unfortunately
* has no read without stop, so we can just do
* another simple read in that case.
*/
if (msgs[i].flags & I2C_M_RECV_LEN) {
result = w1_f19_i2c_read(sl,
msgs[i].addr,
&(msgs[i].buf[1]),
msgs[i].buf[0]);
if (result < 0) {
i = result;
goto error;
}
}
} else {
/*
* Write transfer.
* Stop condition only for last
* transfer.
*/
result = w1_f19_i2c_write(sl,
msgs[i].addr,
msgs[i].buf,
msgs[i].len,
i == (num-1));
if (result < 0) {
i = result;
goto error;
}
}
/* Next message. */
i++;
/* Are there still messages to send/receive? */
if (i < num) {
/* Yes. Resume to same DS28E17. */
if (w1_reset_resume_command(sl->master)) {
i = -EIO;
goto error;
}
}
}
error:
/* End onewire transaction. */
mutex_unlock(&sl->master->bus_mutex);
/* Return number of messages processed or error. */
return i;
}
/* Get I2C adapter functionality. */
static u32 w1_f19_i2c_functionality(struct i2c_adapter *adapter)
{
/*
* Plain I2C functions only.
* SMBus is emulated by the kernel's I2C layer.
* No "I2C_FUNC_SMBUS_QUICK"
* No "I2C_FUNC_SMBUS_READ_BLOCK_DATA"
* No "I2C_FUNC_SMBUS_BLOCK_PROC_CALL"
*/
return I2C_FUNC_I2C |
I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_PROC_CALL |
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK |
I2C_FUNC_SMBUS_PEC;
}
/* I2C adapter quirks. */
static const struct i2c_adapter_quirks w1_f19_i2c_adapter_quirks = {
.max_read_len = W1_F19_READ_DATA_LIMIT,
};
/* I2C algorithm. */
static const struct i2c_algorithm w1_f19_i2c_algorithm = {
.master_xfer = w1_f19_i2c_master_transfer,
.functionality = w1_f19_i2c_functionality,
};
/* Read I2C speed from DS28E17. */
static int w1_f19_get_i2c_speed(struct w1_slave *sl)
{
struct w1_f19_data *data = sl->family_data;
int result = -EIO;
/* Start onewire transaction. */
mutex_lock(&sl->master->bus_mutex);
/* Select slave. */
if (w1_reset_select_slave(sl))
goto error;
/* Read slave configuration byte. */
w1_write_8(sl->master, W1_F19_READ_CONFIGURATION);
result = w1_read_8(sl->master);
if (result < 0 || result > 2) {
result = -EIO;
goto error;
}
/* Update speed in slave specific data. */
data->speed = result;
error:
/* End onewire transaction. */
mutex_unlock(&sl->master->bus_mutex);
return result;
}
/* Set I2C speed on DS28E17. */
static int __w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed)
{
struct w1_f19_data *data = sl->family_data;
const int i2c_speeds[3] = { 100, 400, 900 };
u8 w1_buf[2];
/* Select slave. */
if (w1_reset_select_slave(sl))
return -EIO;
w1_buf[0] = W1_F19_WRITE_CONFIGURATION;
w1_buf[1] = speed;
w1_write_block(sl->master, w1_buf, 2);
/* Update speed in slave specific data. */
data->speed = speed;
dev_info(&sl->dev, "i2c speed set to %d kBaud\n", i2c_speeds[speed]);
return 0;
}
static int w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed)
{
int result;
/* Start onewire transaction. */
mutex_lock(&sl->master->bus_mutex);
/* Set I2C speed on DS28E17. */
result = __w1_f19_set_i2c_speed(sl, speed);
/* End onewire transaction. */
mutex_unlock(&sl->master->bus_mutex);
return result;
}
/* Sysfs attributes. */
/* I2C speed attribute for a single chip. */
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
int result;
/* Read current speed from slave. Updates data->speed. */
result = w1_f19_get_i2c_speed(sl);
if (result < 0)
return result;
/* Return current speed value. */
return sprintf(buf, "%d\n", result);
}
static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
int error;
/* Valid values are: "100", "400", "900" */
if (count < 3 || count > 4 || !buf)
return -EINVAL;
if (count == 4 && buf[3] != '\n')
return -EINVAL;
if (buf[1] != '0' || buf[2] != '0')
return -EINVAL;
/* Set speed on slave. */
switch (buf[0]) {
case '1':
error = w1_f19_set_i2c_speed(sl, 0);
break;
case '4':
error = w1_f19_set_i2c_speed(sl, 1);
break;
case '9':
error = w1_f19_set_i2c_speed(sl, 2);
break;
default:
return -EINVAL;
}
if (error < 0)
return error;
/* Return bytes written. */
return count;
}
static DEVICE_ATTR_RW(speed);
/* Busy stretch attribute for a single chip. */
static ssize_t stretch_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
struct w1_f19_data *data = sl->family_data;
/* Return current stretch value. */
return sprintf(buf, "%d\n", data->stretch);
}
static ssize_t stretch_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
struct w1_f19_data *data = sl->family_data;
/* Valid values are '1' to '9' */
if (count < 1 || count > 2 || !buf)
return -EINVAL;
if (count == 2 && buf[1] != '\n')
return -EINVAL;
if (buf[0] < '1' || buf[0] > '9')
return -EINVAL;
/* Set busy stretch value. */
data->stretch = buf[0] & 0x0F;
/* Return bytes written. */
return count;
}
static DEVICE_ATTR_RW(stretch);
/* All attributes. */
static struct attribute *w1_f19_attrs[] = {
&dev_attr_speed.attr,
&dev_attr_stretch.attr,
NULL,
};
static const struct attribute_group w1_f19_group = {
.attrs = w1_f19_attrs,
};
static const struct attribute_group *w1_f19_groups[] = {
&w1_f19_group,
NULL,
};
/* Slave add and remove functions. */
static int w1_f19_add_slave(struct w1_slave *sl)
{
struct w1_f19_data *data = NULL;
/* Allocate memory for slave specific data. */
data = devm_kzalloc(&sl->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
sl->family_data = data;
/* Setup default I2C speed on slave. */
switch (i2c_speed) {
case 100:
__w1_f19_set_i2c_speed(sl, 0);
break;
case 400:
__w1_f19_set_i2c_speed(sl, 1);
break;
case 900:
__w1_f19_set_i2c_speed(sl, 2);
break;
default:
/*
		 * An i2c_speed module parameter of anything other
		 * than 100, 400 or 900 means not to touch the
* speed of the DS28E17.
* We assume 400kBaud, the power-on value.
*/
data->speed = 1;
}
/*
* Setup default busy stretch
* configuration for the DS28E17.
*/
data->stretch = i2c_stretch;
/* Setup I2C adapter. */
data->adapter.owner = THIS_MODULE;
data->adapter.algo = &w1_f19_i2c_algorithm;
data->adapter.algo_data = sl;
strcpy(data->adapter.name, "w1-");
strcat(data->adapter.name, sl->name);
data->adapter.dev.parent = &sl->dev;
data->adapter.quirks = &w1_f19_i2c_adapter_quirks;
return i2c_add_adapter(&data->adapter);
}
static void w1_f19_remove_slave(struct w1_slave *sl)
{
struct w1_f19_data *family_data = sl->family_data;
/* Delete I2C adapter. */
i2c_del_adapter(&family_data->adapter);
/* Free slave specific data. */
devm_kfree(&sl->dev, family_data);
sl->family_data = NULL;
}
/* Declarations within the w1 subsystem. */
static const struct w1_family_ops w1_f19_fops = {
.add_slave = w1_f19_add_slave,
.remove_slave = w1_f19_remove_slave,
.groups = w1_f19_groups,
};
static struct w1_family w1_family_19 = {
.fid = W1_FAMILY_DS28E17,
.fops = &w1_f19_fops,
};
module_w1_family(w1_family_19);
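/*
 * Usage sketch (not part of the driver): the DS28E17 registers a regular I2C
 * adapter named "w1-19-<id>", so existing i2c-dev based user space code can
 * talk through it unchanged. The adapter number and the 0x48 slave address
 * below are just assumptions for illustration.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/i2c-dev.h>
 *
 *	static int read_reg_via_ds28e17(unsigned char reg, unsigned char *val)
 *	{
 *		int fd = open("/dev/i2c-3", O_RDWR);
 *		int ret = -1;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, I2C_SLAVE, 0x48) == 0 &&
 *		    write(fd, &reg, 1) == 1 &&
 *		    read(fd, val, 1) == 1)
 *			ret = 0;
 *		close(fd);
 *		return ret;
 *	}
 */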
| linux-master | drivers/w1/slaves/w1_ds28e17.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds28e04.c - w1 family 1C (DS28E04) driver
*
* Copyright (c) 2012 Markus Franke <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/crc16.h>
#include <linux/uaccess.h>
#define CRC16_INIT 0
#define CRC16_VALID 0xb001
#include <linux/w1.h>
#define W1_FAMILY_DS28E04 0x1C
/* Allow the strong pullup to be disabled, but default to enabled.
* If it was disabled a parasite powered device might not get the required
* current to copy the data from the scratchpad to EEPROM. If it is enabled
* parasite powered devices have a better chance of getting the current
* required.
*/
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
/* enable/disable CRC checking on DS28E04-100 memory accesses */
static bool w1_enable_crccheck = true;
#define W1_EEPROM_SIZE 512
#define W1_PAGE_COUNT 16
#define W1_PAGE_SIZE 32
#define W1_PAGE_BITS 5
#define W1_PAGE_MASK 0x1F
#define W1_F1C_READ_EEPROM 0xF0
#define W1_F1C_WRITE_SCRATCH 0x0F
#define W1_F1C_READ_SCRATCH 0xAA
#define W1_F1C_COPY_SCRATCH 0x55
#define W1_F1C_ACCESS_WRITE 0x5A
#define W1_1C_REG_LOGIC_STATE 0x220
struct w1_f1C_data {
u8 memory[W1_EEPROM_SIZE];
u32 validcrc;
};
/*
 * Checks the file size bounds and adjusts count as needed.
* This would not be needed if the file size didn't reset to 0 after a write.
*/
static inline size_t w1_f1C_fix_count(loff_t off, size_t count, size_t size)
{
if (off > size)
return 0;
if ((off + count) > size)
return size - off;
return count;
}
static int w1_f1C_refresh_block(struct w1_slave *sl, struct w1_f1C_data *data,
int block)
{
u8 wrbuf[3];
int off = block * W1_PAGE_SIZE;
if (data->validcrc & (1 << block))
return 0;
if (w1_reset_select_slave(sl)) {
data->validcrc = 0;
return -EIO;
}
wrbuf[0] = W1_F1C_READ_EEPROM;
wrbuf[1] = off & 0xff;
wrbuf[2] = off >> 8;
w1_write_block(sl->master, wrbuf, 3);
w1_read_block(sl->master, &data->memory[off], W1_PAGE_SIZE);
/* cache the block if the CRC is valid */
if (crc16(CRC16_INIT, &data->memory[off], W1_PAGE_SIZE) == CRC16_VALID)
data->validcrc |= (1 << block);
return 0;
}
static int w1_f1C_read(struct w1_slave *sl, int addr, int len, char *data)
{
u8 wrbuf[3];
/* read directly from the EEPROM */
if (w1_reset_select_slave(sl))
return -EIO;
wrbuf[0] = W1_F1C_READ_EEPROM;
wrbuf[1] = addr & 0xff;
wrbuf[2] = addr >> 8;
w1_write_block(sl->master, wrbuf, sizeof(wrbuf));
return w1_read_block(sl->master, data, len);
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
struct w1_f1C_data *data = sl->family_data;
int i, min_page, max_page;
count = w1_f1C_fix_count(off, count, W1_EEPROM_SIZE);
if (count == 0)
return 0;
mutex_lock(&sl->master->mutex);
if (w1_enable_crccheck) {
min_page = (off >> W1_PAGE_BITS);
max_page = (off + count - 1) >> W1_PAGE_BITS;
for (i = min_page; i <= max_page; i++) {
if (w1_f1C_refresh_block(sl, data, i)) {
count = -EIO;
goto out_up;
}
}
memcpy(buf, &data->memory[off], count);
} else {
count = w1_f1C_read(sl, off, count, buf);
}
out_up:
mutex_unlock(&sl->master->mutex);
return count;
}
/**
* w1_f1C_write() - Writes to the scratchpad and reads it back for verification.
* @sl: The slave structure
* @addr: Address for the write
* @len: length must be <= (W1_PAGE_SIZE - (addr & W1_PAGE_MASK))
* @data: The data to write
*
* Then copies the scratchpad to EEPROM.
* The data must be on one page.
* The master must be locked.
*
* Return: 0=Success, -1=failure
*/
static int w1_f1C_write(struct w1_slave *sl, int addr, int len, const u8 *data)
{
u8 wrbuf[4];
u8 rdbuf[W1_PAGE_SIZE + 3];
u8 es = (addr + len - 1) & 0x1f;
unsigned int tm = 10;
int i;
struct w1_f1C_data *f1C = sl->family_data;
/* Write the data to the scratchpad */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F1C_WRITE_SCRATCH;
wrbuf[1] = addr & 0xff;
wrbuf[2] = addr >> 8;
w1_write_block(sl->master, wrbuf, 3);
w1_write_block(sl->master, data, len);
/* Read the scratchpad and verify */
if (w1_reset_select_slave(sl))
return -1;
w1_write_8(sl->master, W1_F1C_READ_SCRATCH);
w1_read_block(sl->master, rdbuf, len + 3);
/* Compare what was read against the data written */
if ((rdbuf[0] != wrbuf[1]) || (rdbuf[1] != wrbuf[2]) ||
(rdbuf[2] != es) || (memcmp(data, &rdbuf[3], len) != 0))
return -1;
/* Copy the scratchpad to EEPROM */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F1C_COPY_SCRATCH;
wrbuf[3] = es;
for (i = 0; i < sizeof(wrbuf); ++i) {
/*
* issue 10ms strong pullup (or delay) on the last byte
* for writing the data from the scratchpad to EEPROM
*/
if (w1_strong_pullup && i == sizeof(wrbuf)-1)
w1_next_pullup(sl->master, tm);
w1_write_8(sl->master, wrbuf[i]);
}
if (!w1_strong_pullup)
msleep(tm);
if (w1_enable_crccheck) {
/* invalidate cached data */
f1C->validcrc &= ~(1 << (addr >> W1_PAGE_BITS));
}
/* Reset the bus to wake up the EEPROM (this may not be needed) */
w1_reset_bus(sl->master);
return 0;
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len, idx;
count = w1_f1C_fix_count(off, count, W1_EEPROM_SIZE);
if (count == 0)
return 0;
if (w1_enable_crccheck) {
/* can only write full blocks in cached mode */
if ((off & W1_PAGE_MASK) || (count & W1_PAGE_MASK)) {
dev_err(&sl->dev, "invalid offset/count off=%d cnt=%zd\n",
(int)off, count);
return -EINVAL;
}
/* make sure the block CRCs are valid */
for (idx = 0; idx < count; idx += W1_PAGE_SIZE) {
if (crc16(CRC16_INIT, &buf[idx], W1_PAGE_SIZE)
!= CRC16_VALID) {
dev_err(&sl->dev, "bad CRC at offset %d\n",
(int)off);
return -EINVAL;
}
}
}
mutex_lock(&sl->master->mutex);
/* Can only write data to one page at a time */
idx = 0;
while (idx < count) {
addr = off + idx;
len = W1_PAGE_SIZE - (addr & W1_PAGE_MASK);
if (len > (count - idx))
len = count - idx;
if (w1_f1C_write(sl, addr, len, &buf[idx]) < 0) {
count = -EIO;
goto out_up;
}
idx += len;
}
out_up:
mutex_unlock(&sl->master->mutex);
return count;
}
static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
static ssize_t pio_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
/* check arguments */
if (off != 0 || count != 1 || buf == NULL)
return -EINVAL;
mutex_lock(&sl->master->mutex);
ret = w1_f1C_read(sl, W1_1C_REG_LOGIC_STATE, count, buf);
mutex_unlock(&sl->master->mutex);
return ret;
}
static ssize_t pio_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 wrbuf[3];
u8 ack;
/* check arguments */
if (off != 0 || count != 1 || buf == NULL)
return -EINVAL;
mutex_lock(&sl->master->mutex);
/* Write the PIO data */
if (w1_reset_select_slave(sl)) {
mutex_unlock(&sl->master->mutex);
return -1;
}
/* set bit 7..2 to value '1' */
*buf = *buf | 0xFC;
wrbuf[0] = W1_F1C_ACCESS_WRITE;
wrbuf[1] = *buf;
wrbuf[2] = ~(*buf);
w1_write_block(sl->master, wrbuf, 3);
w1_read_block(sl->master, &ack, sizeof(ack));
mutex_unlock(&sl->master->mutex);
/* check for acknowledgement */
if (ack != 0xAA)
return -EIO;
return count;
}
static BIN_ATTR_RW(pio, 1);
static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", w1_enable_crccheck);
}
static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int err = kstrtobool(buf, &w1_enable_crccheck);
if (err)
return err;
return count;
}
static DEVICE_ATTR_RW(crccheck);
static struct attribute *w1_f1C_attrs[] = {
&dev_attr_crccheck.attr,
NULL,
};
static struct bin_attribute *w1_f1C_bin_attrs[] = {
&bin_attr_eeprom,
&bin_attr_pio,
NULL,
};
static const struct attribute_group w1_f1C_group = {
.attrs = w1_f1C_attrs,
.bin_attrs = w1_f1C_bin_attrs,
};
static const struct attribute_group *w1_f1C_groups[] = {
&w1_f1C_group,
NULL,
};
static int w1_f1C_add_slave(struct w1_slave *sl)
{
struct w1_f1C_data *data = NULL;
if (w1_enable_crccheck) {
data = kzalloc(sizeof(struct w1_f1C_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
sl->family_data = data;
}
return 0;
}
static void w1_f1C_remove_slave(struct w1_slave *sl)
{
kfree(sl->family_data);
sl->family_data = NULL;
}
static const struct w1_family_ops w1_f1C_fops = {
.add_slave = w1_f1C_add_slave,
.remove_slave = w1_f1C_remove_slave,
.groups = w1_f1C_groups,
};
static struct w1_family w1_family_1C = {
.fid = W1_FAMILY_DS28E04,
.fops = &w1_f1C_fops,
};
module_w1_family(w1_family_1C);
MODULE_AUTHOR("Markus Franke <[email protected]>, <[email protected]>");
MODULE_DESCRIPTION("w1 family 1C driver for DS28E04, 4kb EEPROM and PIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E04));
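/*
 * Usage sketch (not part of the driver; the slave id is made up): the PIOs
 * are driven through the one byte "pio" sysfs file. Only bits 0 and 1 are
 * meaningful, the driver forces bits 7..2 to 1 before sending the value.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int ds28e04_set_pio(unsigned char val)
 *	{
 *		int fd, n;
 *
 *		fd = open("/sys/bus/w1/devices/1c-0000001a2b3c/pio", O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, &val, 1);
 *		close(fd);
 *		return n == 1 ? 0 : -1;
 *	}
 */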
| linux-master | drivers/w1/slaves/w1_ds28e04.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2433.c - w1 family 23 (DS2433) driver
*
* Copyright (c) 2005 Ben Gardner <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
#include <linux/crc16.h>
#define CRC16_INIT 0
#define CRC16_VALID 0xb001
#endif
#include <linux/w1.h>
#define W1_EEPROM_DS2433 0x23
#define W1_EEPROM_SIZE 512
#define W1_PAGE_COUNT 16
#define W1_PAGE_SIZE 32
#define W1_PAGE_BITS 5
#define W1_PAGE_MASK 0x1F
#define W1_F23_TIME 300
#define W1_F23_READ_EEPROM 0xF0
#define W1_F23_WRITE_SCRATCH 0x0F
#define W1_F23_READ_SCRATCH 0xAA
#define W1_F23_COPY_SCRATCH 0x55
struct w1_f23_data {
u8 memory[W1_EEPROM_SIZE];
u32 validcrc;
};
/*
 * Checks the file size bounds and adjusts count as needed.
* This would not be needed if the file size didn't reset to 0 after a write.
*/
static inline size_t w1_f23_fix_count(loff_t off, size_t count, size_t size)
{
if (off > size)
return 0;
if ((off + count) > size)
return (size - off);
return count;
}
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
int block)
{
u8 wrbuf[3];
int off = block * W1_PAGE_SIZE;
if (data->validcrc & (1 << block))
return 0;
if (w1_reset_select_slave(sl)) {
data->validcrc = 0;
return -EIO;
}
wrbuf[0] = W1_F23_READ_EEPROM;
wrbuf[1] = off & 0xff;
wrbuf[2] = off >> 8;
w1_write_block(sl->master, wrbuf, 3);
w1_read_block(sl->master, &data->memory[off], W1_PAGE_SIZE);
/* cache the block if the CRC is valid */
if (crc16(CRC16_INIT, &data->memory[off], W1_PAGE_SIZE) == CRC16_VALID)
data->validcrc |= (1 << block);
return 0;
}
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
struct w1_f23_data *data = sl->family_data;
int i, min_page, max_page;
#else
u8 wrbuf[3];
#endif
count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE);
if (!count)
return 0;
mutex_lock(&sl->master->bus_mutex);
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
min_page = (off >> W1_PAGE_BITS);
max_page = (off + count - 1) >> W1_PAGE_BITS;
for (i = min_page; i <= max_page; i++) {
if (w1_f23_refresh_block(sl, data, i)) {
count = -EIO;
goto out_up;
}
}
memcpy(buf, &data->memory[off], count);
#else /* CONFIG_W1_SLAVE_DS2433_CRC */
/* read directly from the EEPROM */
if (w1_reset_select_slave(sl)) {
count = -EIO;
goto out_up;
}
wrbuf[0] = W1_F23_READ_EEPROM;
wrbuf[1] = off & 0xff;
wrbuf[2] = off >> 8;
w1_write_block(sl->master, wrbuf, 3);
w1_read_block(sl->master, buf, count);
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
out_up:
mutex_unlock(&sl->master->bus_mutex);
return count;
}
/**
* w1_f23_write() - Writes to the scratchpad and reads it back for verification.
* @sl: The slave structure
* @addr: Address for the write
* @len: length must be <= (W1_PAGE_SIZE - (addr & W1_PAGE_MASK))
* @data: The data to write
*
* Then copies the scratchpad to EEPROM.
* The data must be on one page.
* The master must be locked.
*
* Return: 0=Success, -1=failure
*/
static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
{
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
struct w1_f23_data *f23 = sl->family_data;
#endif
u8 wrbuf[4];
u8 rdbuf[W1_PAGE_SIZE + 3];
u8 es = (addr + len - 1) & 0x1f;
/* Write the data to the scratchpad */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F23_WRITE_SCRATCH;
wrbuf[1] = addr & 0xff;
wrbuf[2] = addr >> 8;
w1_write_block(sl->master, wrbuf, 3);
w1_write_block(sl->master, data, len);
/* Read the scratchpad and verify */
if (w1_reset_select_slave(sl))
return -1;
w1_write_8(sl->master, W1_F23_READ_SCRATCH);
w1_read_block(sl->master, rdbuf, len + 3);
/* Compare what was read against the data written */
if ((rdbuf[0] != wrbuf[1]) || (rdbuf[1] != wrbuf[2]) ||
(rdbuf[2] != es) || (memcmp(data, &rdbuf[3], len) != 0))
return -1;
/* Copy the scratchpad to EEPROM */
if (w1_reset_select_slave(sl))
return -1;
wrbuf[0] = W1_F23_COPY_SCRATCH;
wrbuf[3] = es;
w1_write_block(sl->master, wrbuf, 4);
/* Sleep for 5 ms to wait for the write to complete */
msleep(5);
/* Reset the bus to wake up the EEPROM (this may not be needed) */
w1_reset_bus(sl->master);
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
f23->validcrc &= ~(1 << (addr >> W1_PAGE_BITS));
#endif
return 0;
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len, idx;
count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE);
if (!count)
return 0;
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
/* can only write full blocks in cached mode */
if ((off & W1_PAGE_MASK) || (count & W1_PAGE_MASK)) {
dev_err(&sl->dev, "invalid offset/count off=%d cnt=%zd\n",
(int)off, count);
return -EINVAL;
}
/* make sure the block CRCs are valid */
for (idx = 0; idx < count; idx += W1_PAGE_SIZE) {
if (crc16(CRC16_INIT, &buf[idx], W1_PAGE_SIZE) != CRC16_VALID) {
dev_err(&sl->dev, "bad CRC at offset %d\n", (int)off);
return -EINVAL;
}
}
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
mutex_lock(&sl->master->bus_mutex);
/* Can only write data to one page at a time */
idx = 0;
while (idx < count) {
addr = off + idx;
len = W1_PAGE_SIZE - (addr & W1_PAGE_MASK);
if (len > (count - idx))
len = count - idx;
if (w1_f23_write(sl, addr, len, &buf[idx]) < 0) {
count = -EIO;
goto out_up;
}
idx += len;
}
out_up:
mutex_unlock(&sl->master->bus_mutex);
return count;
}
static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
static struct bin_attribute *w1_f23_bin_attributes[] = {
&bin_attr_eeprom,
NULL,
};
static const struct attribute_group w1_f23_group = {
.bin_attrs = w1_f23_bin_attributes,
};
static const struct attribute_group *w1_f23_groups[] = {
&w1_f23_group,
NULL,
};
static int w1_f23_add_slave(struct w1_slave *sl)
{
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
struct w1_f23_data *data;
data = kzalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
sl->family_data = data;
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
return 0;
}
static void w1_f23_remove_slave(struct w1_slave *sl)
{
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
kfree(sl->family_data);
sl->family_data = NULL;
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
}
static const struct w1_family_ops w1_f23_fops = {
.add_slave = w1_f23_add_slave,
.remove_slave = w1_f23_remove_slave,
.groups = w1_f23_groups,
};
static struct w1_family w1_family_23 = {
.fid = W1_EEPROM_DS2433,
.fops = &w1_f23_fops,
};
module_w1_family(w1_family_23);
MODULE_AUTHOR("Ben Gardner <[email protected]>");
MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2433));
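/*
 * Note on the CONFIG_W1_SLAVE_DS2433_CRC build: eeprom writes are only
 * accepted as whole, page aligned 32 byte blocks, and each block must pass
 * the crc16() validity check before anything is put on the bus (i.e. the
 * page data is expected to carry its own CRC). For example off=0/count=32
 * can succeed, while off=16/count=16 is rejected with -EINVAL.
 */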
| linux-master | drivers/w1/slaves/w1_ds2433.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* w1_ds2423.c
*
* Copyright (c) 2010 Mika Laitio <[email protected]>
*
 * This driver reads the values of the 4 counters and exposes them through
 * the w1_slave file in sysfs.
* Inspired by the w1_therm and w1_ds2431 drivers.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/crc16.h>
#include <linux/w1.h>
#define W1_COUNTER_DS2423 0x1D
#define CRC16_VALID 0xb001
#define CRC16_INIT 0
#define COUNTER_COUNT 4
#define READ_BYTE_COUNT 42
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *out_buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
u8 rbuf[COUNTER_COUNT * READ_BYTE_COUNT];
u8 wrbuf[3];
int rom_addr;
int read_byte_count;
int result;
ssize_t c;
int ii;
int p;
int crc;
c = PAGE_SIZE;
rom_addr = (12 << 5) + 31;
wrbuf[0] = 0xA5;
wrbuf[1] = rom_addr & 0xFF;
wrbuf[2] = rom_addr >> 8;
mutex_lock(&dev->bus_mutex);
if (!w1_reset_select_slave(sl)) {
w1_write_block(dev, wrbuf, 3);
read_byte_count = 0;
for (p = 0; p < 4; p++) {
/*
* 1 byte for first bytes in ram page read
* 4 bytes for counter
* 4 bytes for zero bits
* 2 bytes for crc
* 31 remaining bytes from the ram page
*/
read_byte_count += w1_read_block(dev,
rbuf + (p * READ_BYTE_COUNT), READ_BYTE_COUNT);
for (ii = 0; ii < READ_BYTE_COUNT; ++ii)
c -= snprintf(out_buf + PAGE_SIZE - c,
c, "%02x ",
rbuf[(p * READ_BYTE_COUNT) + ii]);
if (read_byte_count != (p + 1) * READ_BYTE_COUNT) {
dev_warn(device,
"w1_counter_read() returned %u bytes "
"instead of %d bytes wanted.\n",
read_byte_count,
READ_BYTE_COUNT);
c -= snprintf(out_buf + PAGE_SIZE - c,
c, "crc=NO\n");
} else {
if (p == 0) {
crc = crc16(CRC16_INIT, wrbuf, 3);
crc = crc16(crc, rbuf, 11);
} else {
/*
* DS2423 calculates crc from all bytes
* read after the previous crc bytes.
*/
crc = crc16(CRC16_INIT,
(rbuf + 11) +
((p - 1) * READ_BYTE_COUNT),
READ_BYTE_COUNT);
}
if (crc == CRC16_VALID) {
result = 0;
for (ii = 4; ii > 0; ii--) {
result <<= 8;
result |= rbuf[(p *
READ_BYTE_COUNT) + ii];
}
c -= snprintf(out_buf + PAGE_SIZE - c,
c, "crc=YES c=%d\n", result);
} else {
c -= snprintf(out_buf + PAGE_SIZE - c,
c, "crc=NO\n");
}
}
}
} else {
c -= snprintf(out_buf + PAGE_SIZE - c, c, "Connection error");
}
mutex_unlock(&dev->bus_mutex);
return PAGE_SIZE - c;
}
static DEVICE_ATTR_RO(w1_slave);
static struct attribute *w1_f1d_attrs[] = {
&dev_attr_w1_slave.attr,
NULL,
};
ATTRIBUTE_GROUPS(w1_f1d);
static const struct w1_family_ops w1_f1d_fops = {
.groups = w1_f1d_groups,
};
static struct w1_family w1_family_1d = {
.fid = W1_COUNTER_DS2423,
.fops = &w1_f1d_fops,
};
module_w1_family(w1_family_1d);
MODULE_AUTHOR("Mika Laitio <[email protected]>");
MODULE_DESCRIPTION("w1 family 1d driver for DS2423, 4 counters and 4kb ram");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_COUNTER_DS2423));
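/*
 * Usage sketch (not part of the driver): each counter page is reported on
 * the w1_slave file as a hex dump followed by "crc=YES c=<value>" (or
 * "crc=NO" when the CRC check failed), so one way for user space to pull a
 * counter value out of a single line is:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static int parse_counter_line(const char *line, int *value)
 *	{
 *		const char *p = strstr(line, "crc=YES c=");
 *
 *		if (!p)
 *			return -1;
 *		return sscanf(p, "crc=YES c=%d", value) == 1 ? 0 : -1;
 *	}
 */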
| linux-master | drivers/w1/slaves/w1_ds2423.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* w1_therm.c
*
* Copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <asm/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/w1.h>
#define W1_THERM_DS18S20 0x10
#define W1_THERM_DS1822 0x22
#define W1_THERM_DS18B20 0x28
#define W1_THERM_DS1825 0x3B
#define W1_THERM_DS28EA00 0x42
/*
* Allow the strong pullup to be disabled, but default to enabled.
 * If it was disabled a parasite powered device might not get the required
* current to do a temperature conversion. If it is enabled parasite powered
* devices have a better chance of getting the current required.
* In case the parasite power-detection is not working (seems to be the case
* for some DS18S20) the strong pullup can also be forced, regardless of the
* power state of the devices.
*
* Summary of options:
* - strong_pullup = 0 Disable strong pullup completely
* - strong_pullup = 1 Enable automatic strong pullup detection
* - strong_pullup = 2 Force strong pullup
*/
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
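/*
 * A minimal way to pick one of the modes summarised above at load time is
 * the module parameter defined here, e.g. "modprobe w1_therm strong_pullup=2"
 * to force the strong pullup, or "w1_therm.strong_pullup=0" on the kernel
 * command line when the driver is built in.
 */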
/* Counter for devices supporting bulk reading */
static u16 bulk_read_device_counter; /* =0 as per C standard */
/* This command should be in public header w1.h but is not */
#define W1_RECALL_EEPROM 0xB8
/* Number of tries for an operation */
#define W1_THERM_MAX_TRY 5
/* ms delay to retry bus mutex */
#define W1_THERM_RETRY_DELAY 20
/* delay in ms to write in EEPROM */
#define W1_THERM_EEPROM_WRITE_DELAY 10
#define EEPROM_CMD_WRITE "save" /* cmd for write eeprom sysfs */
#define EEPROM_CMD_READ "restore" /* cmd for read eeprom sysfs */
#define BULK_TRIGGER_CMD "trigger" /* cmd to trigger a bulk read */
#define MIN_TEMP -55 /* min temperature that can be measured */
#define MAX_TEMP 125 /* max temperature that can be measured */
/* Allowed values for sysfs conv_time attribute */
#define CONV_TIME_DEFAULT 0
#define CONV_TIME_MEASURE 1
/* Bits in sysfs "features" value */
#define W1_THERM_CHECK_RESULT 1 /* Enable conversion success check */
#define W1_THERM_POLL_COMPLETION 2 /* Poll for conversion completion */
#define W1_THERM_FEATURES_MASK 3 /* All values mask */
/* Poll period in milliseconds. Should be less than the shortest operation on the device */
#define W1_POLL_PERIOD 32
#define W1_POLL_CONVERT_TEMP 2000 /* Timeout for W1_CONVERT_TEMP, ms */
#define W1_POLL_RECALL_EEPROM 500 /* Timeout for W1_RECALL_EEPROM, ms*/
/* Masks for resolution functions, work with all devices */
/* Bit mask for config register for all devices, bits 7,6,5 */
#define W1_THERM_RESOLUTION_MASK 0xE0
/* Bit offset of resolution in config register for all devices */
#define W1_THERM_RESOLUTION_SHIFT 5
/* Add this to bit value to get resolution */
#define W1_THERM_RESOLUTION_MIN 9
/* Maximum allowed value */
#define W1_THERM_RESOLUTION_MAX 14
/* Helpers Macros */
/*
* return a pointer on the slave w1_therm_family_converter struct:
* always test family data existence before using this macro
*/
#define SLAVE_SPECIFIC_FUNC(sl) \
(((struct w1_therm_family_data *)(sl->family_data))->specific_functions)
/*
* return the power mode of the sl slave : 1-ext, 0-parasite, <0 unknown
* always test family data existence before using this macro
*/
#define SLAVE_POWERMODE(sl) \
(((struct w1_therm_family_data *)(sl->family_data))->external_powered)
/*
* return the resolution in bit of the sl slave : <0 unknown
* always test family data existence before using this macro
*/
#define SLAVE_RESOLUTION(sl) \
(((struct w1_therm_family_data *)(sl->family_data))->resolution)
/*
* return the conv_time_override of the sl slave
* always test family data existence before using this macro
*/
#define SLAVE_CONV_TIME_OVERRIDE(sl) \
(((struct w1_therm_family_data *)(sl->family_data))->conv_time_override)
/*
* return the features of the sl slave
* always test family data existence before using this macro
*/
#define SLAVE_FEATURES(sl) \
(((struct w1_therm_family_data *)(sl->family_data))->features)
/*
* return whether or not a converT command has been issued to the slave
* * 0: no bulk read is pending
* * -1: conversion is in progress
* * 1: conversion done, result to be read
*/
#define SLAVE_CONVERT_TRIGGERED(sl) \
(((struct w1_therm_family_data *)(sl->family_data))->convert_triggered)
/* return the address of the refcnt in the family data */
#define THERM_REFCNT(family_data) \
(&((struct w1_therm_family_data *)family_data)->refcnt)
/* Structs definition */
/**
* struct w1_therm_family_converter - bind device specific functions
 * @broken: flag for non-registered families
* @reserved: not used here
* @f: pointer to the device binding structure
* @convert: pointer to the device conversion function
* @get_conversion_time: pointer to the device conversion time function
* @set_resolution: pointer to the device set_resolution function
* @get_resolution: pointer to the device get_resolution function
* @write_data: pointer to the device writing function (2 or 3 bytes)
* @bulk_read: true if device family support bulk read, false otherwise
*/
struct w1_therm_family_converter {
u8 broken;
u16 reserved;
struct w1_family *f;
int (*convert)(u8 rom[9]);
int (*get_conversion_time)(struct w1_slave *sl);
int (*set_resolution)(struct w1_slave *sl, int val);
int (*get_resolution)(struct w1_slave *sl);
int (*write_data)(struct w1_slave *sl, const u8 *data);
bool bulk_read;
};
/**
* struct w1_therm_family_data - device data
* @rom: ROM device id (64bit Lasered ROM code + 1 CRC byte)
* @refcnt: ref count
* @external_powered: 1 device powered externally,
* 0 device parasite powered,
* -x error or undefined
* @resolution: current device resolution
* @convert_triggered: conversion state of the device
* @conv_time_override: user selected conversion time or CONV_TIME_DEFAULT
* @features: bit mask - enable temperature validity check, poll for completion
* @specific_functions: pointer to struct of device specific function
*/
struct w1_therm_family_data {
uint8_t rom[9];
atomic_t refcnt;
int external_powered;
int resolution;
int convert_triggered;
int conv_time_override;
unsigned int features;
struct w1_therm_family_converter *specific_functions;
};
/**
* struct therm_info - store temperature reading
* @rom: read device data (8 data bytes + 1 CRC byte)
* @crc: computed crc from rom
* @verdict: 1 crc checked, 0 crc not matching
*/
struct therm_info {
u8 rom[9];
u8 crc;
u8 verdict;
};
/* Hardware Functions declaration */
/**
* reset_select_slave() - reset and select a slave
* @sl: the slave to select
*
 * Resets the bus and selects the slave by sending a ROM MATCH command.
 * w1_reset_select_slave() from w1_io.c could not be used here because
 * it sends a SKIP ROM command if only one device is on the line.
 * At the beginning of such a process, sl->master->slave_count is 1 even if
 * more devices are on the line, causing collisions on the line.
*
* Context: The w1 master lock must be held.
*
* Return: 0 if success, negative kernel error code otherwise.
*/
static int reset_select_slave(struct w1_slave *sl);
/**
* convert_t() - Query the device for temperature conversion and read
* @sl: pointer to the slave to read
* @info: pointer to a structure to store the read results
*
* Return: 0 if success, -kernel error code otherwise
*/
static int convert_t(struct w1_slave *sl, struct therm_info *info);
/**
* read_scratchpad() - read the data in device RAM
* @sl: pointer to the slave to read
* @info: pointer to a structure to store the read results
*
* Return: 0 if success, -kernel error code otherwise
*/
static int read_scratchpad(struct w1_slave *sl, struct therm_info *info);
/**
* write_scratchpad() - write nb_bytes in the device RAM
* @sl: pointer to the slave to write in
* @data: pointer to an array of 3 bytes, as 3 bytes MUST be written
* @nb_bytes: number of bytes to be written (2 for DS18S20, 3 otherwise)
*
* Return: 0 if success, -kernel error code otherwise
*/
static int write_scratchpad(struct w1_slave *sl, const u8 *data, u8 nb_bytes);
/**
* copy_scratchpad() - Copy the content of scratchpad in device EEPROM
* @sl: slave involved
*
* Return: 0 if success, -kernel error code otherwise
*/
static int copy_scratchpad(struct w1_slave *sl);
/**
* recall_eeprom() - Restore EEPROM data to device RAM
* @sl: slave involved
*
* Return: 0 if success, -kernel error code otherwise
*/
static int recall_eeprom(struct w1_slave *sl);
/**
* read_powermode() - Query the power mode of the slave
* @sl: slave to retrieve the power mode
*
* Ask the device to get its power mode (external or parasite)
* and store the power status in the &struct w1_therm_family_data.
*
* Return:
* * 0 parasite powered device
* * 1 externally powered device
* * <0 kernel error code
*/
static int read_powermode(struct w1_slave *sl);
/**
* trigger_bulk_read() - function to trigger a bulk read on the bus
* @dev_master: the device master of the bus
*
 * Sends a SKIP ROM followed by a CONVERT T command on the bus.
 * It also sets the status flag in each slave's &struct w1_therm_family_data
* to signal that a conversion is in progress.
*
* Return: 0 if success, -kernel error code otherwise
*/
static int trigger_bulk_read(struct w1_master *dev_master);
/* Sysfs interface declaration */
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t w1_slave_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size);
static ssize_t w1_seq_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t temperature_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t ext_power_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t resolution_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t resolution_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size);
static ssize_t eeprom_cmd_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size);
static ssize_t alarms_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size);
static ssize_t alarms_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t therm_bulk_read_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size);
static ssize_t therm_bulk_read_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t conv_time_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t conv_time_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t size);
static ssize_t features_show(struct device *device,
struct device_attribute *attr, char *buf);
static ssize_t features_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t size);
/* Attributes declarations */
static DEVICE_ATTR_RW(w1_slave);
static DEVICE_ATTR_RO(w1_seq);
static DEVICE_ATTR_RO(temperature);
static DEVICE_ATTR_RO(ext_power);
static DEVICE_ATTR_RW(resolution);
static DEVICE_ATTR_WO(eeprom_cmd);
static DEVICE_ATTR_RW(alarms);
static DEVICE_ATTR_RW(conv_time);
static DEVICE_ATTR_RW(features);
static DEVICE_ATTR_RW(therm_bulk_read); /* attribute at master level */
/* Interface Functions declaration */
/**
* w1_therm_add_slave() - Called when a new slave is discovered
* @sl: slave just discovered by the master.
*
* Called by the master when the slave is discovered on the bus. Used to
* initialize slave state before the beginning of any communication.
*
* Return: 0 - If success, negative kernel code otherwise
*/
static int w1_therm_add_slave(struct w1_slave *sl);
/**
* w1_therm_remove_slave() - Called when a slave is removed
* @sl: slave to be removed.
*
* Called by the master when the slave is considered not to be on the bus
* anymore. Used to free memory.
*/
static void w1_therm_remove_slave(struct w1_slave *sl);
/* Family attributes */
static struct attribute *w1_therm_attrs[] = {
&dev_attr_w1_slave.attr,
&dev_attr_temperature.attr,
&dev_attr_ext_power.attr,
&dev_attr_resolution.attr,
&dev_attr_eeprom_cmd.attr,
&dev_attr_alarms.attr,
&dev_attr_conv_time.attr,
&dev_attr_features.attr,
NULL,
};
static struct attribute *w1_ds18s20_attrs[] = {
&dev_attr_w1_slave.attr,
&dev_attr_temperature.attr,
&dev_attr_ext_power.attr,
&dev_attr_eeprom_cmd.attr,
&dev_attr_alarms.attr,
&dev_attr_conv_time.attr,
&dev_attr_features.attr,
NULL,
};
static struct attribute *w1_ds28ea00_attrs[] = {
&dev_attr_w1_slave.attr,
&dev_attr_w1_seq.attr,
&dev_attr_temperature.attr,
&dev_attr_ext_power.attr,
&dev_attr_resolution.attr,
&dev_attr_eeprom_cmd.attr,
&dev_attr_alarms.attr,
&dev_attr_conv_time.attr,
&dev_attr_features.attr,
NULL,
};
/* Attribute groups */
ATTRIBUTE_GROUPS(w1_therm);
ATTRIBUTE_GROUPS(w1_ds18s20);
ATTRIBUTE_GROUPS(w1_ds28ea00);
#if IS_REACHABLE(CONFIG_HWMON)
static int w1_read_temp(struct device *dev, u32 attr, int channel,
long *val);
static umode_t w1_is_visible(const void *_data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
return attr == hwmon_temp_input ? 0444 : 0;
}
static int w1_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
switch (type) {
case hwmon_temp:
return w1_read_temp(dev, attr, channel, val);
default:
return -EOPNOTSUPP;
}
}
static const u32 w1_temp_config[] = {
HWMON_T_INPUT,
0
};
static const struct hwmon_channel_info w1_temp = {
.type = hwmon_temp,
.config = w1_temp_config,
};
static const struct hwmon_channel_info * const w1_info[] = {
&w1_temp,
NULL
};
static const struct hwmon_ops w1_hwmon_ops = {
.is_visible = w1_is_visible,
.read = w1_read,
};
static const struct hwmon_chip_info w1_chip_info = {
.ops = &w1_hwmon_ops,
.info = w1_info,
};
#define W1_CHIPINFO (&w1_chip_info)
#else
#define W1_CHIPINFO NULL
#endif
/* Family operations */
static const struct w1_family_ops w1_therm_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
.groups = w1_therm_groups,
.chip_info = W1_CHIPINFO,
};
static const struct w1_family_ops w1_ds18s20_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
.groups = w1_ds18s20_groups,
.chip_info = W1_CHIPINFO,
};
static const struct w1_family_ops w1_ds28ea00_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
.groups = w1_ds28ea00_groups,
.chip_info = W1_CHIPINFO,
};
/* Family binding operations struct */
static struct w1_family w1_therm_family_DS18S20 = {
.fid = W1_THERM_DS18S20,
.fops = &w1_ds18s20_fops,
};
static struct w1_family w1_therm_family_DS18B20 = {
.fid = W1_THERM_DS18B20,
.fops = &w1_therm_fops,
};
static struct w1_family w1_therm_family_DS1822 = {
.fid = W1_THERM_DS1822,
.fops = &w1_therm_fops,
};
static struct w1_family w1_therm_family_DS28EA00 = {
.fid = W1_THERM_DS28EA00,
.fops = &w1_ds28ea00_fops,
};
static struct w1_family w1_therm_family_DS1825 = {
.fid = W1_THERM_DS1825,
.fops = &w1_therm_fops,
};
/* Device dependent func */
static inline int w1_DS18B20_convert_time(struct w1_slave *sl)
{
int ret;
if (!sl->family_data)
return -ENODEV; /* device unknown */
if (SLAVE_CONV_TIME_OVERRIDE(sl) != CONV_TIME_DEFAULT)
return SLAVE_CONV_TIME_OVERRIDE(sl);
	/* Return the conversion time depending on resolution;
	 * select the maximum conversion time among all compatible devices
	 */
switch (SLAVE_RESOLUTION(sl)) {
case 9:
ret = 95;
break;
case 10:
ret = 190;
break;
case 11:
ret = 375;
break;
case 12:
ret = 750;
break;
case 13:
ret = 850; /* GX20MH01 only. Datasheet says 500ms, but that's not enough. */
break;
case 14:
ret = 1600; /* GX20MH01 only. Datasheet says 1000ms - not enough */
break;
default:
ret = 750;
}
return ret;
}
static inline int w1_DS18S20_convert_time(struct w1_slave *sl)
{
if (!sl->family_data)
return -ENODEV; /* device unknown */
if (SLAVE_CONV_TIME_OVERRIDE(sl) == CONV_TIME_DEFAULT)
return 750; /* default for DS18S20 */
else
return SLAVE_CONV_TIME_OVERRIDE(sl);
}
static inline int w1_DS1825_convert_time(struct w1_slave *sl)
{
int ret;
if (!sl->family_data)
return -ENODEV; /* device unknown */
if (SLAVE_CONV_TIME_OVERRIDE(sl) != CONV_TIME_DEFAULT)
return SLAVE_CONV_TIME_OVERRIDE(sl);
	/* Return the conversion time depending on resolution;
	 * select the maximum conversion time among all compatible devices
	 */
switch (SLAVE_RESOLUTION(sl)) {
case 9:
ret = 95;
break;
case 10:
ret = 190;
break;
case 11:
ret = 375;
break;
case 12:
ret = 750;
break;
case 14:
ret = 100; /* MAX31850 only. Datasheet says 100ms */
break;
default:
ret = 750;
}
return ret;
}
static inline int w1_DS18B20_write_data(struct w1_slave *sl,
const u8 *data)
{
return write_scratchpad(sl, data, 3);
}
static inline int w1_DS18S20_write_data(struct w1_slave *sl,
const u8 *data)
{
/* No config register */
return write_scratchpad(sl, data, 2);
}
static inline int w1_DS18B20_set_resolution(struct w1_slave *sl, int val)
{
int ret;
struct therm_info info, info2;
/* DS18B20 resolution is 9 to 12 bits */
/* GX20MH01 resolution is 9 to 14 bits */
/* MAX31850 resolution is fixed 14 bits */
if (val < W1_THERM_RESOLUTION_MIN || val > W1_THERM_RESOLUTION_MAX)
return -EINVAL;
/* Calc bit value from resolution */
val = (val - W1_THERM_RESOLUTION_MIN) << W1_THERM_RESOLUTION_SHIFT;
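	/* Illustrative example (assuming W1_THERM_RESOLUTION_MIN = 9 and
	 * W1_THERM_RESOLUTION_SHIFT = 5 as for the DS18B20 config register):
	 * a requested 12-bit resolution gives (12 - 9) << 5 = 0x60, i.e.
	 * R1:R0 = 11 in bits 6:5.
	 */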
/*
* Read the scratchpad to change only the required bits
* (bit5 & bit 6 from byte 4)
*/
ret = read_scratchpad(sl, &info);
if (ret)
return ret;
info.rom[4] &= ~W1_THERM_RESOLUTION_MASK;
info.rom[4] |= val;
/* Write data in the device RAM */
ret = w1_DS18B20_write_data(sl, info.rom + 2);
if (ret)
return ret;
/* Have to read back the resolution to verify an actual value
* GX20MH01 and DS18B20 are indistinguishable by family number, but resolutions differ
* Some DS18B20 clones don't support resolution change
*/
ret = read_scratchpad(sl, &info2);
if (ret)
/* Scratchpad read fail */
return ret;
if ((info2.rom[4] & W1_THERM_RESOLUTION_MASK) == (info.rom[4] & W1_THERM_RESOLUTION_MASK))
return 0;
/* Resolution verify error */
return -EIO;
}
static inline int w1_DS18B20_get_resolution(struct w1_slave *sl)
{
int ret;
int resolution;
struct therm_info info;
ret = read_scratchpad(sl, &info);
if (ret)
return ret;
resolution = ((info.rom[4] & W1_THERM_RESOLUTION_MASK) >> W1_THERM_RESOLUTION_SHIFT)
+ W1_THERM_RESOLUTION_MIN;
/* GX20MH01 has one special case:
* >=14 means 14 bits when getting resolution from bit value.
* MAX31850 delivers fixed 15 and has 14 bits.
	 * Other devices have no more than 12 bits.
*/
if (resolution > W1_THERM_RESOLUTION_MAX)
resolution = W1_THERM_RESOLUTION_MAX;
return resolution;
}
/**
* w1_DS18B20_convert_temp() - temperature computation for DS18B20
* @rom: data read from device RAM (8 data bytes + 1 CRC byte)
*
* Can be called for any DS18B20 compliant device.
*
* Return: value in millidegrees Celsius.
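 *
 * Worked example: the DS18B20 power-on scratchpad value 0x0550 gives
 * bv = 1360 and 1360 * 1000 / 16 = 85000, i.e. the well known 85 degC
 * power-up default.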
*/
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
u16 bv;
s16 t;
/* Signed 16-bit value to unsigned, cpu order */
bv = le16_to_cpup((__le16 *)rom);
/* Config register bit R2 = 1 - GX20MH01 in 13 or 14 bit resolution mode */
if (rom[4] & 0x80) {
/* Insert two temperature bits from config register */
/* Avoid arithmetic shift of signed value */
bv = (bv << 2) | (rom[4] & 3);
t = (s16) bv; /* Degrees, lowest bit is 2^-6 */
return (int)t * 1000 / 64; /* Sign-extend to int; millidegrees */
}
t = (s16)bv; /* Degrees, lowest bit is 2^-4 */
return (int)t * 1000 / 16; /* Sign-extend to int; millidegrees */
}
/**
* w1_DS18S20_convert_temp() - temperature computation for DS18S20
* @rom: data read from device RAM (8 data bytes + 1 CRC byte)
*
* Can be called for any DS18S20 compliant device.
*
* Return: value in millidegrees Celsius.
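 *
 * The code below implements the DS18S20 extended-resolution formula
 * T = T_read - 0.25 + (COUNT_PER_C - COUNT_REMAIN) / COUNT_PER_C,
 * where rom[0]/rom[1] hold T_read in 0.5 degC steps, rom[6] is
 * COUNT_REMAIN and rom[7] is COUNT_PER_C, all scaled to millidegrees.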
*/
static inline int w1_DS18S20_convert_temp(u8 rom[9])
{
int t, h;
if (!rom[7]) {
pr_debug("%s: Invalid argument for conversion\n", __func__);
return 0;
}
if (rom[1] == 0)
t = ((s32)rom[0] >> 1)*1000;
else
t = 1000*(-1*(s32)(0x100-rom[0]) >> 1);
t -= 250;
h = 1000*((s32)rom[7] - (s32)rom[6]);
h /= (s32)rom[7];
t += h;
return t;
}
/**
* w1_DS1825_convert_temp() - temperature computation for DS1825
* @rom: data read from device RAM (8 data bytes + 1 CRC byte)
*
* Can be called for any DS1825 compliant device.
* Is used by MAX31850, too
*
* Return: value in millidegrees Celsius.
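 *
 * Worked example for the MAX31850 path: a raw word of 0x0191 has the
 * fault bit set; masking bits 0-1 leaves 0x0190 = 400, and
 * 400 * 1000 / 16 = 25000 millidegrees (25 degC).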
*/
static inline int w1_DS1825_convert_temp(u8 rom[9])
{
u16 bv;
s16 t;
/* Signed 16-bit value to unsigned, cpu order */
bv = le16_to_cpup((__le16 *)rom);
	/* Config register bit 7 = 1 - MAX31850 found, 14 bit resolution */
if (rom[4] & 0x80) {
/* Mask out bits 0 (Fault) and 1 (Reserved) */
/* Avoid arithmetic shift of signed value */
bv = (bv & 0xFFFC); /* Degrees, lowest 4 bits are 2^-1, 2^-2 and 2 zero bits */
}
t = (s16)bv; /* Degrees, lowest bit is 2^-4 */
return (int)t * 1000 / 16; /* Sign-extend to int; millidegrees */
}
/* Device capability description */
/* GX20MH01 device shares family number and structure with DS18B20 */
static struct w1_therm_family_converter w1_therm_families[] = {
{
.f = &w1_therm_family_DS18S20,
.convert = w1_DS18S20_convert_temp,
.get_conversion_time = w1_DS18S20_convert_time,
.set_resolution = NULL, /* no config register */
.get_resolution = NULL, /* no config register */
.write_data = w1_DS18S20_write_data,
.bulk_read = true
},
{
.f = &w1_therm_family_DS1822,
.convert = w1_DS18B20_convert_temp,
.get_conversion_time = w1_DS18B20_convert_time,
.set_resolution = w1_DS18B20_set_resolution,
.get_resolution = w1_DS18B20_get_resolution,
.write_data = w1_DS18B20_write_data,
.bulk_read = true
},
{
/* Also used for GX20MH01 */
.f = &w1_therm_family_DS18B20,
.convert = w1_DS18B20_convert_temp,
.get_conversion_time = w1_DS18B20_convert_time,
.set_resolution = w1_DS18B20_set_resolution,
.get_resolution = w1_DS18B20_get_resolution,
.write_data = w1_DS18B20_write_data,
.bulk_read = true
},
{
.f = &w1_therm_family_DS28EA00,
.convert = w1_DS18B20_convert_temp,
.get_conversion_time = w1_DS18B20_convert_time,
.set_resolution = w1_DS18B20_set_resolution,
.get_resolution = w1_DS18B20_get_resolution,
.write_data = w1_DS18B20_write_data,
.bulk_read = false
},
{
/* Also used for MAX31850 */
.f = &w1_therm_family_DS1825,
.convert = w1_DS1825_convert_temp,
.get_conversion_time = w1_DS1825_convert_time,
.set_resolution = w1_DS18B20_set_resolution,
.get_resolution = w1_DS18B20_get_resolution,
.write_data = w1_DS18B20_write_data,
.bulk_read = true
}
};
/* Helpers Functions */
/**
 * device_family() - Retrieve a pointer to &struct w1_therm_family_converter
 * @sl: slave to retrieve the device specific structure for
 *
 * Return: pointer to the slave's family converter, NULL if not known
*/
static struct w1_therm_family_converter *device_family(struct w1_slave *sl)
{
struct w1_therm_family_converter *ret = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
if (w1_therm_families[i].f->fid == sl->family->fid) {
ret = &w1_therm_families[i];
break;
}
}
return ret;
}
/**
* bus_mutex_lock() - Acquire the mutex
* @lock: w1 bus mutex to acquire
*
 * It tries to acquire the mutex W1_THERM_MAX_TRY times, waiting
 * W1_THERM_RETRY_DELAY between two attempts.
 *
 * Return: true if the mutex was acquired and is held, false otherwise
*/
static inline bool bus_mutex_lock(struct mutex *lock)
{
int max_trying = W1_THERM_MAX_TRY;
	/* try to acquire the mutex; if not, sleep retry_delay before retrying */
while (mutex_lock_interruptible(lock) != 0 && max_trying > 0) {
unsigned long sleep_rem;
sleep_rem = msleep_interruptible(W1_THERM_RETRY_DELAY);
if (!sleep_rem)
max_trying--;
}
if (!max_trying)
return false; /* Didn't acquire the bus mutex */
return true;
}
/**
* check_family_data() - Check if family data and specific functions are present
* @sl: W1 device data
*
* Return: 0 - OK, negative value - error
*/
static int check_family_data(struct w1_slave *sl)
{
if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
dev_info(&sl->dev,
"%s: Device is not supported by the driver\n", __func__);
return -EINVAL; /* No device family */
}
return 0;
}
/**
 * bulk_read_support() - check if slave supports bulk read
* @sl: device to check the ability
*
* Return: true if bulk read is supported, false if not or error
*/
static inline bool bulk_read_support(struct w1_slave *sl)
{
if (SLAVE_SPECIFIC_FUNC(sl))
return SLAVE_SPECIFIC_FUNC(sl)->bulk_read;
dev_info(&sl->dev,
"%s: Device not supported by the driver\n", __func__);
return false; /* No device family */
}
/**
* conversion_time() - get the Tconv for the slave
* @sl: device to get the conversion time
*
 * On devices supporting resolution settings, the conversion time depends
 * on the resolution setting. This helper function gets the slave timing,
 * depending on its current setting.
*
* Return: conversion time in ms, negative values are kernel error code
*/
static inline int conversion_time(struct w1_slave *sl)
{
if (SLAVE_SPECIFIC_FUNC(sl))
return SLAVE_SPECIFIC_FUNC(sl)->get_conversion_time(sl);
dev_info(&sl->dev,
"%s: Device not supported by the driver\n", __func__);
return -ENODEV; /* No device family */
}
/**
* temperature_from_RAM() - Convert the read info to temperature
* @sl: device that sent the RAM data
* @rom: read value on the slave device RAM
*
 * Device dependent, the function binds the correct computation method.
*
* Return: temperature in 1/1000degC, 0 on error.
*/
static inline int temperature_from_RAM(struct w1_slave *sl, u8 rom[9])
{
if (SLAVE_SPECIFIC_FUNC(sl))
return SLAVE_SPECIFIC_FUNC(sl)->convert(rom);
dev_info(&sl->dev,
"%s: Device not supported by the driver\n", __func__);
return 0; /* No device family */
}
/**
* int_to_short() - Safe casting of int to short
*
* @i: integer to be converted to short
*
 * The device registers use 1 byte to store a signed integer.
 * This helper function converts the int to a signed 8-bit value,
 * using the min/max values that the device can measure as limits.
 * The min/max values are defined by macro.
 *
 * Return: the value clamped to the min/max range, as a signed byte
*/
static inline s8 int_to_short(int i)
{
/* Prepare to cast to short by eliminating out of range values */
i = clamp(i, MIN_TEMP, MAX_TEMP);
return (s8) i;
}
/* Interface Functions */
static int w1_therm_add_slave(struct w1_slave *sl)
{
struct w1_therm_family_converter *sl_family_conv;
/* Allocate memory */
sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
GFP_KERNEL);
if (!sl->family_data)
return -ENOMEM;
atomic_set(THERM_REFCNT(sl->family_data), 1);
/* Get a pointer to the device specific function struct */
sl_family_conv = device_family(sl);
if (!sl_family_conv) {
kfree(sl->family_data);
return -ENODEV;
}
/* save this pointer to the device structure */
SLAVE_SPECIFIC_FUNC(sl) = sl_family_conv;
if (bulk_read_support(sl)) {
/*
* add the sys entry to trigger bulk_read
* at master level only the 1st time
*/
if (!bulk_read_device_counter) {
int err = device_create_file(&sl->master->dev,
&dev_attr_therm_bulk_read);
if (err)
dev_warn(&sl->dev,
"%s: Device has been added, but bulk read is unavailable. err=%d\n",
__func__, err);
}
/* Increment the counter */
bulk_read_device_counter++;
}
/* Getting the power mode of the device {external, parasite} */
SLAVE_POWERMODE(sl) = read_powermode(sl);
if (SLAVE_POWERMODE(sl) < 0) {
/* no error returned as device has been added */
dev_warn(&sl->dev,
"%s: Device has been added, but power_mode may be corrupted. err=%d\n",
__func__, SLAVE_POWERMODE(sl));
}
/* Getting the resolution of the device */
if (SLAVE_SPECIFIC_FUNC(sl)->get_resolution) {
SLAVE_RESOLUTION(sl) =
SLAVE_SPECIFIC_FUNC(sl)->get_resolution(sl);
if (SLAVE_RESOLUTION(sl) < 0) {
/* no error returned as device has been added */
dev_warn(&sl->dev,
"%s:Device has been added, but resolution may be corrupted. err=%d\n",
__func__, SLAVE_RESOLUTION(sl));
}
}
/* Finally initialize convert_triggered flag */
SLAVE_CONVERT_TRIGGERED(sl) = 0;
return 0;
}
static void w1_therm_remove_slave(struct w1_slave *sl)
{
int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
if (bulk_read_support(sl)) {
bulk_read_device_counter--;
/* Delete the entry if no more device support the feature */
if (!bulk_read_device_counter)
device_remove_file(&sl->master->dev,
&dev_attr_therm_bulk_read);
}
while (refcnt) {
msleep(1000);
refcnt = atomic_read(THERM_REFCNT(sl->family_data));
}
kfree(sl->family_data);
sl->family_data = NULL;
}
/* Hardware Functions */
/* Safe version of reset_select_slave - avoid using the one in w1_io.c */
static int reset_select_slave(struct w1_slave *sl)
{
u8 match[9] = { W1_MATCH_ROM, };
u64 rn = le64_to_cpu(*((u64 *)&sl->reg_num));
if (w1_reset_bus(sl->master))
return -ENODEV;
memcpy(&match[1], &rn, 8);
w1_write_block(sl->master, match, 9);
return 0;
}
/**
* w1_poll_completion - Poll for operation completion, with timeout
* @dev_master: the device master of the bus
* @tout_ms: timeout in milliseconds
*
 * The device answers 0's while an operation is in progress and 1's after it completes.
 * A timeout may happen if the previous command was not recognised due to line noise.
*
* Return: 0 - OK, negative error - timeout
*/
static int w1_poll_completion(struct w1_master *dev_master, int tout_ms)
{
int i;
for (i = 0; i < tout_ms/W1_POLL_PERIOD; i++) {
/* Delay is before poll, for device to recognize a command */
msleep(W1_POLL_PERIOD);
/* Compare all 8 bits to mitigate a noise on the bus */
if (w1_read_8(dev_master) == 0xFF)
break;
}
if (i == tout_ms/W1_POLL_PERIOD)
return -EIO;
return 0;
}
static int convert_t(struct w1_slave *sl, struct therm_info *info)
{
struct w1_master *dev_master = sl->master;
int max_trying = W1_THERM_MAX_TRY;
int t_conv;
int ret = -ENODEV;
bool strong_pullup;
if (!sl->family_data)
goto error;
strong_pullup = (w1_strong_pullup == 2 ||
(!SLAVE_POWERMODE(sl) &&
w1_strong_pullup));
if (strong_pullup && SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
dev_warn(&sl->dev,
"%s: Disabling W1_THERM_POLL_COMPLETION in parasite power mode.\n",
__func__);
SLAVE_FEATURES(sl) &= ~W1_THERM_POLL_COMPLETION;
}
	/* get the conversion duration, device and resolution dependent */
t_conv = conversion_time(sl);
memset(info->rom, 0, sizeof(info->rom));
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(sl->family_data));
if (!bus_mutex_lock(&dev_master->bus_mutex)) {
ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
}
while (max_trying-- && ret) { /* ret should be 0 */
info->verdict = 0;
info->crc = 0;
/* safe version to select slave */
if (!reset_select_slave(sl)) {
unsigned long sleep_rem;
/* 750ms strong pullup (or delay) after the convert */
if (strong_pullup)
w1_next_pullup(dev_master, t_conv);
w1_write_8(dev_master, W1_CONVERT_TEMP);
if (SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
ret = w1_poll_completion(dev_master, W1_POLL_CONVERT_TEMP);
if (ret) {
dev_dbg(&sl->dev, "%s: Timeout\n", __func__);
goto mt_unlock;
}
mutex_unlock(&dev_master->bus_mutex);
			} else if (!strong_pullup) { /* no device needs pullup */
sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
goto mt_unlock;
}
mutex_unlock(&dev_master->bus_mutex);
			} else { /* some devices need pullup */
mutex_unlock(&dev_master->bus_mutex);
sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
goto dec_refcnt;
}
}
ret = read_scratchpad(sl, info);
/* If enabled, check for conversion success */
if ((SLAVE_FEATURES(sl) & W1_THERM_CHECK_RESULT) &&
(info->rom[6] == 0xC) &&
((info->rom[1] == 0x5 && info->rom[0] == 0x50) ||
(info->rom[1] == 0x7 && info->rom[0] == 0xFF))
) {
/* Invalid reading (scratchpad byte 6 = 0xC)
* due to insufficient conversion time
* or power failure.
*/
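				/* 0x0550 is the 85 degC power-on default of
				 * the temperature register; 0x07FF is assumed
				 * here to be a similar power-up pattern seen
				 * on some clones.
				 */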
ret = -EIO;
}
goto dec_refcnt;
}
}
mt_unlock:
mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
static int conv_time_measure(struct w1_slave *sl, int *conv_time)
{
struct therm_info inf,
*info = &inf;
struct w1_master *dev_master = sl->master;
int max_trying = W1_THERM_MAX_TRY;
int ret = -ENODEV;
bool strong_pullup;
if (!sl->family_data)
goto error;
strong_pullup = (w1_strong_pullup == 2 ||
(!SLAVE_POWERMODE(sl) &&
w1_strong_pullup));
if (strong_pullup) {
pr_info("%s: Measure with strong_pullup is not supported.\n", __func__);
return -EINVAL;
}
memset(info->rom, 0, sizeof(info->rom));
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(sl->family_data));
if (!bus_mutex_lock(&dev_master->bus_mutex)) {
ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
}
while (max_trying-- && ret) { /* ret should be 0 */
info->verdict = 0;
info->crc = 0;
/* safe version to select slave */
if (!reset_select_slave(sl)) {
int j_start, j_end;
			/* no device needs pullup */
w1_write_8(dev_master, W1_CONVERT_TEMP);
j_start = jiffies;
ret = w1_poll_completion(dev_master, W1_POLL_CONVERT_TEMP);
if (ret) {
dev_dbg(&sl->dev, "%s: Timeout\n", __func__);
goto mt_unlock;
}
j_end = jiffies;
/* 1.2x increase for variation and changes over temperature range */
*conv_time = jiffies_to_msecs(j_end-j_start)*12/10;
pr_debug("W1 Measure complete, conv_time = %d, HZ=%d.\n",
*conv_time, HZ);
if (*conv_time <= CONV_TIME_MEASURE) {
ret = -EIO;
goto mt_unlock;
}
mutex_unlock(&dev_master->bus_mutex);
ret = read_scratchpad(sl, info);
goto dec_refcnt;
}
}
mt_unlock:
mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
static int read_scratchpad(struct w1_slave *sl, struct therm_info *info)
{
struct w1_master *dev_master = sl->master;
int max_trying = W1_THERM_MAX_TRY;
int ret = -ENODEV;
info->verdict = 0;
if (!sl->family_data)
goto error;
memset(info->rom, 0, sizeof(info->rom));
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(sl->family_data));
if (!bus_mutex_lock(&dev_master->bus_mutex)) {
ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
}
while (max_trying-- && ret) { /* ret should be 0 */
/* safe version to select slave */
if (!reset_select_slave(sl)) {
u8 nb_bytes_read;
w1_write_8(dev_master, W1_READ_SCRATCHPAD);
nb_bytes_read = w1_read_block(dev_master, info->rom, 9);
if (nb_bytes_read != 9) {
dev_warn(&sl->dev,
"w1_read_block(): returned %u instead of 9.\n",
nb_bytes_read);
ret = -EIO;
}
info->crc = w1_calc_crc8(info->rom, 8);
if (info->rom[8] == info->crc) {
info->verdict = 1;
ret = 0;
} else
				ret = -EIO; /* CRC mismatch */
}
}
mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
static int write_scratchpad(struct w1_slave *sl, const u8 *data, u8 nb_bytes)
{
struct w1_master *dev_master = sl->master;
int max_trying = W1_THERM_MAX_TRY;
int ret = -ENODEV;
if (!sl->family_data)
goto error;
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(sl->family_data));
if (!bus_mutex_lock(&dev_master->bus_mutex)) {
ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
}
while (max_trying-- && ret) { /* ret should be 0 */
/* safe version to select slave */
if (!reset_select_slave(sl)) {
w1_write_8(dev_master, W1_WRITE_SCRATCHPAD);
w1_write_block(dev_master, data, nb_bytes);
ret = 0;
}
}
mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
static int copy_scratchpad(struct w1_slave *sl)
{
struct w1_master *dev_master = sl->master;
int max_trying = W1_THERM_MAX_TRY;
int t_write, ret = -ENODEV;
bool strong_pullup;
if (!sl->family_data)
goto error;
t_write = W1_THERM_EEPROM_WRITE_DELAY;
strong_pullup = (w1_strong_pullup == 2 ||
(!SLAVE_POWERMODE(sl) &&
w1_strong_pullup));
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(sl->family_data));
if (!bus_mutex_lock(&dev_master->bus_mutex)) {
ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
}
while (max_trying-- && ret) { /* ret should be 0 */
/* safe version to select slave */
if (!reset_select_slave(sl)) {
unsigned long sleep_rem;
			/* 10ms strong pullup (or delay) after the copy command */
if (strong_pullup)
w1_next_pullup(dev_master, t_write);
w1_write_8(dev_master, W1_COPY_SCRATCHPAD);
if (strong_pullup) {
sleep_rem = msleep_interruptible(t_write);
if (sleep_rem != 0) {
ret = -EINTR;
goto mt_unlock;
}
}
ret = 0;
}
}
mt_unlock:
mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
static int recall_eeprom(struct w1_slave *sl)
{
struct w1_master *dev_master = sl->master;
int max_trying = W1_THERM_MAX_TRY;
int ret = -ENODEV;
if (!sl->family_data)
goto error;
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(sl->family_data));
if (!bus_mutex_lock(&dev_master->bus_mutex)) {
ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
}
while (max_trying-- && ret) { /* ret should be 0 */
/* safe version to select slave */
if (!reset_select_slave(sl)) {
w1_write_8(dev_master, W1_RECALL_EEPROM);
ret = w1_poll_completion(dev_master, W1_POLL_RECALL_EEPROM);
}
}
mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
static int read_powermode(struct w1_slave *sl)
{
struct w1_master *dev_master = sl->master;
int max_trying = W1_THERM_MAX_TRY;
int ret = -ENODEV;
if (!sl->family_data)
goto error;
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(sl->family_data));
if (!bus_mutex_lock(&dev_master->bus_mutex)) {
ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
}
while ((max_trying--) && (ret < 0)) {
/* safe version to select slave */
if (!reset_select_slave(sl)) {
w1_write_8(dev_master, W1_READ_PSUPPLY);
/*
* Emit a read time slot and read only one bit,
* 1 is externally powered,
* 0 is parasite powered
*/
ret = w1_touch_bit(dev_master, 1);
			/* ret should be either 1 or 0 */
}
}
mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
static int trigger_bulk_read(struct w1_master *dev_master)
{
struct w1_slave *sl = NULL; /* used to iterate through slaves */
int max_trying = W1_THERM_MAX_TRY;
int t_conv = 0;
int ret = -ENODEV;
bool strong_pullup = false;
/*
	 * Check whether there are parasite powered devices on the bus,
* and compute duration of conversion for these devices
* so we can apply a strong pullup if required
*/
list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
if (!sl->family_data)
goto error;
if (bulk_read_support(sl)) {
int t_cur = conversion_time(sl);
t_conv = max(t_cur, t_conv);
strong_pullup = strong_pullup ||
(w1_strong_pullup == 2 ||
(!SLAVE_POWERMODE(sl) &&
w1_strong_pullup));
}
}
/*
	 * t_conv is the max conversion time required on the bus.
	 * If it's 0, no device supports the bulk read feature.
*/
if (!t_conv)
goto error;
if (!bus_mutex_lock(&dev_master->bus_mutex)) {
ret = -EAGAIN; /* Didn't acquire the mutex */
goto error;
}
	while ((max_trying--) && (ret < 0)) { /* ret should be 0 */
if (!w1_reset_bus(dev_master)) { /* Just reset the bus */
unsigned long sleep_rem;
w1_write_8(dev_master, W1_SKIP_ROM);
if (strong_pullup) /* Apply pullup if required */
w1_next_pullup(dev_master, t_conv);
w1_write_8(dev_master, W1_CONVERT_TEMP);
			/* set a flag to indicate that a conversion is pending */
list_for_each_entry(sl,
&dev_master->slist, w1_slave_entry) {
if (bulk_read_support(sl))
SLAVE_CONVERT_TRIGGERED(sl) = -1;
}
if (strong_pullup) { /* some device need pullup */
sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
goto mt_unlock;
}
mutex_unlock(&dev_master->bus_mutex);
} else {
mutex_unlock(&dev_master->bus_mutex);
sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
goto set_flag;
}
}
ret = 0;
goto set_flag;
}
}
mt_unlock:
mutex_unlock(&dev_master->bus_mutex);
set_flag:
	/* set a flag to register that the conversion is done */
list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
if (bulk_read_support(sl))
SLAVE_CONVERT_TRIGGERED(sl) = 1;
}
error:
return ret;
}
/* Sysfs Interface definition */
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct therm_info info;
u8 *family_data = sl->family_data;
int ret, i;
ssize_t c = PAGE_SIZE;
if (bulk_read_support(sl)) {
if (SLAVE_CONVERT_TRIGGERED(sl) < 0) {
dev_dbg(device,
"%s: Conversion in progress, retry later\n",
__func__);
return 0;
} else if (SLAVE_CONVERT_TRIGGERED(sl) > 0) {
/* A bulk read has been issued, read the device RAM */
ret = read_scratchpad(sl, &info);
SLAVE_CONVERT_TRIGGERED(sl) = 0;
} else
ret = convert_t(sl, &info);
} else
ret = convert_t(sl, &info);
if (ret < 0) {
dev_dbg(device,
"%s: Temperature data may be corrupted. err=%d\n",
__func__, ret);
return 0;
}
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", info.rom[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
info.crc, (info.verdict) ? "YES" : "NO");
if (info.verdict)
memcpy(family_data, info.rom, sizeof(info.rom));
else
dev_warn(device, "%s:Read failed CRC check\n", __func__);
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
((u8 *)family_data)[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
temperature_from_RAM(sl, info.rom));
ret = PAGE_SIZE - c;
return ret;
}
static ssize_t w1_slave_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t size)
{
int val, ret = 0;
struct w1_slave *sl = dev_to_w1_slave(device);
ret = kstrtoint(buf, 10, &val); /* converting user entry to int */
if (ret) { /* conversion error */
dev_info(device,
"%s: conversion error. err= %d\n", __func__, ret);
		return size; /* return size to avoid being called again */
}
if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
dev_info(device,
"%s: Device not supported by the driver\n", __func__);
return size; /* No device family */
}
	if (val == 0) /* val=0 : trigger an EEPROM save */
ret = copy_scratchpad(sl);
else {
if (SLAVE_SPECIFIC_FUNC(sl)->set_resolution)
ret = SLAVE_SPECIFIC_FUNC(sl)->set_resolution(sl, val);
}
if (ret) {
dev_warn(device, "%s: Set resolution - error %d\n", __func__, ret);
/* Propagate error to userspace */
return ret;
}
SLAVE_RESOLUTION(sl) = val;
/* Reset the conversion time to default - it depends on resolution */
SLAVE_CONV_TIME_OVERRIDE(sl) = CONV_TIME_DEFAULT;
return size; /* always return size to avoid infinite calling */
}
static ssize_t temperature_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct therm_info info;
int ret = 0;
if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
dev_info(device,
"%s: Device not supported by the driver\n", __func__);
return 0; /* No device family */
}
if (bulk_read_support(sl)) {
if (SLAVE_CONVERT_TRIGGERED(sl) < 0) {
dev_dbg(device,
"%s: Conversion in progress, retry later\n",
__func__);
return 0;
} else if (SLAVE_CONVERT_TRIGGERED(sl) > 0) {
/* A bulk read has been issued, read the device RAM */
ret = read_scratchpad(sl, &info);
SLAVE_CONVERT_TRIGGERED(sl) = 0;
} else
ret = convert_t(sl, &info);
} else
ret = convert_t(sl, &info);
if (ret < 0) {
dev_dbg(device,
"%s: Temperature data may be corrupted. err=%d\n",
__func__, ret);
return 0;
}
return sprintf(buf, "%d\n", temperature_from_RAM(sl, info.rom));
}
static ssize_t ext_power_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
if (!sl->family_data) {
dev_info(device,
"%s: Device not supported by the driver\n", __func__);
return 0; /* No device family */
}
/* Getting the power mode of the device {external, parasite} */
SLAVE_POWERMODE(sl) = read_powermode(sl);
if (SLAVE_POWERMODE(sl) < 0) {
dev_dbg(device,
"%s: Power_mode may be corrupted. err=%d\n",
__func__, SLAVE_POWERMODE(sl));
}
return sprintf(buf, "%d\n", SLAVE_POWERMODE(sl));
}
static ssize_t resolution_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
dev_info(device,
"%s: Device not supported by the driver\n", __func__);
return 0; /* No device family */
}
/* get the correct function depending on the device */
SLAVE_RESOLUTION(sl) = SLAVE_SPECIFIC_FUNC(sl)->get_resolution(sl);
if (SLAVE_RESOLUTION(sl) < 0) {
dev_dbg(device,
"%s: Resolution may be corrupted. err=%d\n",
__func__, SLAVE_RESOLUTION(sl));
}
return sprintf(buf, "%d\n", SLAVE_RESOLUTION(sl));
}
static ssize_t resolution_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size)
{
struct w1_slave *sl = dev_to_w1_slave(device);
int val;
int ret = 0;
ret = kstrtoint(buf, 10, &val); /* converting user entry to int */
if (ret) { /* conversion error */
dev_info(device,
"%s: conversion error. err= %d\n", __func__, ret);
		return size; /* return size to avoid being called again */
}
if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
dev_info(device,
"%s: Device not supported by the driver\n", __func__);
return size; /* No device family */
}
/*
	 * Don't validate the val entered by the user;
	 * only the device knows what is correct or not
*/
/* get the correct function depending on the device */
ret = SLAVE_SPECIFIC_FUNC(sl)->set_resolution(sl, val);
if (ret)
return ret;
SLAVE_RESOLUTION(sl) = val;
/* Reset the conversion time to default because it depends on resolution */
SLAVE_CONV_TIME_OVERRIDE(sl) = CONV_TIME_DEFAULT;
return size;
}
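/*
 * eeprom_cmd usage sketch (assuming EEPROM_CMD_WRITE/EEPROM_CMD_READ are the
 * "save"/"restore" strings defined earlier in this file):
 *   echo save > eeprom_cmd     # copy the scratchpad to EEPROM
 *   echo restore > eeprom_cmd  # recall EEPROM content to the scratchpad
 */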
static ssize_t eeprom_cmd_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size)
{
struct w1_slave *sl = dev_to_w1_slave(device);
int ret = -EINVAL; /* Invalid argument */
if (size == sizeof(EEPROM_CMD_WRITE)) {
if (!strncmp(buf, EEPROM_CMD_WRITE, sizeof(EEPROM_CMD_WRITE)-1))
ret = copy_scratchpad(sl);
} else if (size == sizeof(EEPROM_CMD_READ)) {
if (!strncmp(buf, EEPROM_CMD_READ, sizeof(EEPROM_CMD_READ)-1))
ret = recall_eeprom(sl);
}
if (ret)
dev_info(device, "%s: error in process %d\n", __func__, ret);
return size;
}
static ssize_t alarms_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
int ret;
s8 th = 0, tl = 0;
struct therm_info scratchpad;
ret = read_scratchpad(sl, &scratchpad);
if (!ret) {
th = scratchpad.rom[2]; /* TH is byte 2 */
tl = scratchpad.rom[3]; /* TL is byte 3 */
} else {
dev_info(device,
"%s: error reading alarms register %d\n",
__func__, ret);
}
return sprintf(buf, "%hd %hd\n", tl, th);
}
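/*
 * alarms usage sketch: two whitespace-separated temperatures in degrees
 * Celsius, e.g. "echo '20 30' > alarms" sets TL and TH; as done below, the
 * values are clamped to the device range and swapped if given in the
 * wrong order.
 */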
static ssize_t alarms_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size)
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct therm_info info;
u8 new_config_register[3]; /* array of data to be written */
int temp, ret;
char *token = NULL;
s8 tl, th; /* 1 byte per value + temp ring order */
char *p_args, *orig;
p_args = orig = kmalloc(size, GFP_KERNEL);
	/* Safe string copy as buf is const */
if (!p_args) {
dev_warn(device,
"%s: error unable to allocate memory %d\n",
__func__, -ENOMEM);
return size;
}
strcpy(p_args, buf);
/* Split string using space char */
token = strsep(&p_args, " ");
if (!token) {
dev_info(device,
"%s: error parsing args %d\n", __func__, -EINVAL);
goto free_m;
}
/* Convert 1st entry to int */
	ret = kstrtoint(token, 10, &temp);
if (ret) {
dev_info(device,
"%s: error parsing args %d\n", __func__, ret);
goto free_m;
}
tl = int_to_short(temp);
/* Split string using space char */
token = strsep(&p_args, " ");
if (!token) {
dev_info(device,
"%s: error parsing args %d\n", __func__, -EINVAL);
goto free_m;
}
/* Convert 2nd entry to int */
	ret = kstrtoint(token, 10, &temp);
if (ret) {
dev_info(device,
"%s: error parsing args %d\n", __func__, ret);
goto free_m;
}
/* Prepare to cast to short by eliminating out of range values */
th = int_to_short(temp);
	/* Reorder th and tl if required */
if (tl > th)
swap(tl, th);
/*
* Read the scratchpad to change only the required bits
* (th : byte 2 - tl: byte 3)
*/
ret = read_scratchpad(sl, &info);
if (!ret) {
new_config_register[0] = th; /* Byte 2 */
new_config_register[1] = tl; /* Byte 3 */
new_config_register[2] = info.rom[4];/* Byte 4 */
} else {
dev_info(device,
"%s: error reading from the slave device %d\n",
__func__, ret);
goto free_m;
}
/* Write data in the device RAM */
if (!SLAVE_SPECIFIC_FUNC(sl)) {
dev_info(device,
"%s: Device not supported by the driver %d\n",
__func__, -ENODEV);
goto free_m;
}
ret = SLAVE_SPECIFIC_FUNC(sl)->write_data(sl, new_config_register);
if (ret)
dev_info(device,
"%s: error writing to the slave device %d\n",
__func__, ret);
free_m:
/* free allocated memory */
kfree(orig);
return size;
}
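/*
 * therm_bulk_read usage sketch (master level): writing the trigger keyword
 * (assumed to be the BULK_TRIGGER_CMD string defined earlier, e.g. "trigger")
 * starts one conversion on all bulk-read capable slaves at once; reading the
 * entry returns -1 while a conversion is pending, 1 when results are ready
 * and 0 otherwise, as implemented in therm_bulk_read_show() below.
 */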
static ssize_t therm_bulk_read_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size)
{
struct w1_master *dev_master = dev_to_w1_master(device);
int ret = -EINVAL; /* Invalid argument */
if (size == sizeof(BULK_TRIGGER_CMD))
if (!strncmp(buf, BULK_TRIGGER_CMD,
sizeof(BULK_TRIGGER_CMD)-1))
ret = trigger_bulk_read(dev_master);
if (ret)
dev_info(device,
"%s: unable to trigger a bulk read on the bus. err=%d\n",
__func__, ret);
return size;
}
static ssize_t therm_bulk_read_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_master *dev_master = dev_to_w1_master(device);
struct w1_slave *sl = NULL;
int ret = 0;
list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
if (sl->family_data) {
if (bulk_read_support(sl)) {
if (SLAVE_CONVERT_TRIGGERED(sl) == -1) {
ret = -1;
goto show_result;
}
if (SLAVE_CONVERT_TRIGGERED(sl) == 1)
/* continue to check other slaves */
ret = 1;
}
}
}
show_result:
return sprintf(buf, "%d\n", ret);
}
static ssize_t conv_time_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
dev_info(device,
"%s: Device is not supported by the driver\n", __func__);
return 0; /* No device family */
}
return sprintf(buf, "%d\n", conversion_time(sl));
}
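/*
 * conv_time usage sketch: writing a value >= CONV_TIME_DEFAULT overrides the
 * conversion time in ms, while writing the special CONV_TIME_MEASURE value
 * (defined earlier in this file) makes the driver measure the real conversion
 * time of the device and keep 1.2x that measurement as the new override.
 */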
static ssize_t conv_time_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size)
{
int val, ret = 0;
struct w1_slave *sl = dev_to_w1_slave(device);
if (kstrtoint(buf, 10, &val)) /* converting user entry to int */
return -EINVAL;
if (check_family_data(sl))
return -ENODEV;
if (val != CONV_TIME_MEASURE) {
if (val >= CONV_TIME_DEFAULT)
SLAVE_CONV_TIME_OVERRIDE(sl) = val;
else
return -EINVAL;
} else {
int conv_time;
ret = conv_time_measure(sl, &conv_time);
if (ret)
return -EIO;
SLAVE_CONV_TIME_OVERRIDE(sl) = conv_time;
}
return size;
}
static ssize_t features_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
dev_info(device,
"%s: Device not supported by the driver\n", __func__);
return 0; /* No device family */
}
return sprintf(buf, "%u\n", SLAVE_FEATURES(sl));
}
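/*
 * features usage sketch: a bitmask of optional behaviours; assuming the usual
 * definitions, W1_THERM_CHECK_RESULT enables the extra sanity check of the
 * conversion result and W1_THERM_POLL_COMPLETION makes the driver poll the
 * bus for completion instead of sleeping for the whole conversion time.
 */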
static ssize_t features_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t size)
{
	unsigned int val;
	int ret = 0;
	bool strong_pullup;
	struct w1_slave *sl = dev_to_w1_slave(device);
	ret = kstrtouint(buf, 10, &val); /* converting user entry to unsigned int */
if (ret)
return -EINVAL; /* invalid number */
if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
dev_info(device, "%s: Device not supported by the driver\n", __func__);
return -ENODEV;
}
if ((val & W1_THERM_FEATURES_MASK) != val)
return -EINVAL;
SLAVE_FEATURES(sl) = val;
strong_pullup = (w1_strong_pullup == 2 ||
(!SLAVE_POWERMODE(sl) &&
w1_strong_pullup));
if (strong_pullup && SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
dev_warn(&sl->dev,
"%s: W1_THERM_POLL_COMPLETION disabled in parasite power mode.\n",
__func__);
SLAVE_FEATURES(sl) &= ~W1_THERM_POLL_COMPLETION;
}
return size;
}
#if IS_REACHABLE(CONFIG_HWMON)
static int w1_read_temp(struct device *device, u32 attr, int channel,
long *val)
{
struct w1_slave *sl = dev_get_drvdata(device);
struct therm_info info;
int ret;
switch (attr) {
case hwmon_temp_input:
ret = convert_t(sl, &info);
if (ret)
return ret;
if (!info.verdict) {
ret = -EIO;
return ret;
}
*val = temperature_from_RAM(sl, info.rom);
ret = 0;
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
#endif
#define W1_42_CHAIN 0x99
#define W1_42_CHAIN_OFF 0x3C
#define W1_42_CHAIN_OFF_INV 0xC3
#define W1_42_CHAIN_ON 0x5A
#define W1_42_CHAIN_ON_INV 0xA5
#define W1_42_CHAIN_DONE 0x96
#define W1_42_CHAIN_DONE_INV 0x69
#define W1_42_COND_READ 0x0F
#define W1_42_SUCCESS_CONFIRM_BYTE 0xAA
#define W1_42_FINISHED_BYTE 0xFF
static ssize_t w1_seq_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
ssize_t c = PAGE_SIZE;
int i;
u8 ack;
u64 rn;
struct w1_reg_num *reg_num;
int seq = 0;
mutex_lock(&sl->master->bus_mutex);
/* Place all devices in CHAIN state */
if (w1_reset_bus(sl->master))
goto error;
w1_write_8(sl->master, W1_SKIP_ROM);
w1_write_8(sl->master, W1_42_CHAIN);
w1_write_8(sl->master, W1_42_CHAIN_ON);
w1_write_8(sl->master, W1_42_CHAIN_ON_INV);
msleep(sl->master->pullup_duration);
/* check for acknowledgment */
ack = w1_read_8(sl->master);
if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
goto error;
	/* In case the bus fails to send 0xFF, limit the number of iterations */
for (i = 0; i <= 64; i++) {
if (w1_reset_bus(sl->master))
goto error;
w1_write_8(sl->master, W1_42_COND_READ);
w1_read_block(sl->master, (u8 *)&rn, 8);
reg_num = (struct w1_reg_num *) &rn;
if (reg_num->family == W1_42_FINISHED_BYTE)
break;
if (sl->reg_num.id == reg_num->id)
seq = i;
if (w1_reset_bus(sl->master))
goto error;
/* Put the device into chain DONE state */
w1_write_8(sl->master, W1_MATCH_ROM);
w1_write_block(sl->master, (u8 *)&rn, 8);
w1_write_8(sl->master, W1_42_CHAIN);
w1_write_8(sl->master, W1_42_CHAIN_DONE);
w1_write_8(sl->master, W1_42_CHAIN_DONE_INV);
/* check for acknowledgment */
ack = w1_read_8(sl->master);
if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
goto error;
}
/* Exit from CHAIN state */
if (w1_reset_bus(sl->master))
goto error;
w1_write_8(sl->master, W1_SKIP_ROM);
w1_write_8(sl->master, W1_42_CHAIN);
w1_write_8(sl->master, W1_42_CHAIN_OFF);
w1_write_8(sl->master, W1_42_CHAIN_OFF_INV);
/* check for acknowledgment */
ack = w1_read_8(sl->master);
if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
goto error;
mutex_unlock(&sl->master->bus_mutex);
c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", seq);
return PAGE_SIZE - c;
error:
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
static int __init w1_therm_init(void)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
err = w1_register_family(w1_therm_families[i].f);
if (err)
w1_therm_families[i].broken = 1;
}
return 0;
}
static void __exit w1_therm_fini(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i)
if (!w1_therm_families[i].broken)
w1_unregister_family(w1_therm_families[i].f);
}
module_init(w1_therm_init);
module_exit(w1_therm_fini);
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18S20));
MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1822));
MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18B20));
MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1825));
MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
| linux-master | drivers/w1/slaves/w1_therm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2406.c - w1 family 12 (DS2406) driver
* based on w1_ds2413.c by Mariusz Bialonczyk <[email protected]>
*
* Copyright (c) 2014 Scott Alfter <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/crc16.h>
#include <linux/w1.h>
#define W1_FAMILY_DS2406 0x12
#define W1_F12_FUNC_READ_STATUS 0xAA
#define W1_F12_FUNC_WRITE_STATUS 0x55
static ssize_t w1_f12_read_state(
struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
u8 w1_buf[6] = {W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0};
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u16 crc = 0;
int i;
ssize_t rtnval = 1;
if (off != 0)
return 0;
if (!buf)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl)) {
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
w1_write_block(sl->master, w1_buf, 3);
w1_read_block(sl->master, w1_buf+3, 3);
for (i = 0; i < 6; i++)
crc = crc16_byte(crc, w1_buf[i]);
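	/*
	 * The DS2406 returns the inverted CRC16 of the command and data;
	 * folding those two bytes into the running CRC leaves the fixed
	 * residue 0xb001 when the transfer was error free.
	 */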
if (crc == 0xb001) /* good read? */
*buf = ((w1_buf[3]>>5)&3)|0x30;
else
rtnval = -EIO;
mutex_unlock(&sl->master->bus_mutex);
return rtnval;
}
static ssize_t w1_f12_write_output(
struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[6] = {W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0};
u16 crc = 0;
int i;
ssize_t rtnval = 1;
if (count != 1 || off != 0)
return -EFAULT;
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl)) {
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
w1_buf[3] = (((*buf)&3)<<5)|0x1F;
w1_write_block(sl->master, w1_buf, 4);
w1_read_block(sl->master, w1_buf+4, 2);
for (i = 0; i < 6; i++)
crc = crc16_byte(crc, w1_buf[i]);
if (crc == 0xb001) /* good read? */
w1_write_8(sl->master, 0xFF);
else
rtnval = -EIO;
mutex_unlock(&sl->master->bus_mutex);
return rtnval;
}
#define NB_SYSFS_BIN_FILES 2
static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
{
.attr = {
.name = "state",
.mode = 0444,
},
.size = 1,
.read = w1_f12_read_state,
},
{
.attr = {
.name = "output",
.mode = 0664,
},
.size = 1,
.write = w1_f12_write_output,
}
};
static int w1_f12_add_slave(struct w1_slave *sl)
{
int err = 0;
int i;
for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
err = sysfs_create_bin_file(
&sl->dev.kobj,
&(w1_f12_sysfs_bin_files[i]));
if (err)
while (--i >= 0)
sysfs_remove_bin_file(&sl->dev.kobj,
&(w1_f12_sysfs_bin_files[i]));
return err;
}
static void w1_f12_remove_slave(struct w1_slave *sl)
{
int i;
for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
sysfs_remove_bin_file(&sl->dev.kobj,
&(w1_f12_sysfs_bin_files[i]));
}
static const struct w1_family_ops w1_f12_fops = {
.add_slave = w1_f12_add_slave,
.remove_slave = w1_f12_remove_slave,
};
static struct w1_family w1_family_12 = {
.fid = W1_FAMILY_DS2406,
.fops = &w1_f12_fops,
};
module_w1_family(w1_family_12);
MODULE_AUTHOR("Scott Alfter <[email protected]>");
MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");
MODULE_LICENSE("GPL");
| linux-master | drivers/w1/slaves/w1_ds2406.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1_ds2408.c - w1 family 29 (DS2408) driver
*
* Copyright (c) 2010 Jean-Francois Dagenais <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/w1.h>
#define W1_FAMILY_DS2408 0x29
#define W1_F29_RETRIES 3
#define W1_F29_REG_LOGIG_STATE 0x88 /* R */
#define W1_F29_REG_OUTPUT_LATCH_STATE 0x89 /* R */
#define W1_F29_REG_ACTIVITY_LATCH_STATE 0x8A /* R */
#define W1_F29_REG_COND_SEARCH_SELECT_MASK 0x8B /* RW */
#define W1_F29_REG_COND_SEARCH_POL_SELECT 0x8C /* RW */
#define W1_F29_REG_CONTROL_AND_STATUS 0x8D /* RW */
#define W1_F29_FUNC_READ_PIO_REGS 0xF0
#define W1_F29_FUNC_CHANN_ACCESS_READ 0xF5
#define W1_F29_FUNC_CHANN_ACCESS_WRITE 0x5A
/* also used to write the control/status reg (0x8D): */
#define W1_F29_FUNC_WRITE_COND_SEARCH_REG 0xCC
#define W1_F29_FUNC_RESET_ACTIVITY_LATCHES 0xC3
#define W1_F29_SUCCESS_CONFIRM_BYTE 0xAA
static int _read_reg(struct w1_slave *sl, u8 address, unsigned char *buf)
{
u8 wrbuf[3];
dev_dbg(&sl->dev, "Reading with slave: %p, reg addr: %0#4x, buff addr: %p",
sl, (unsigned int)address, buf);
if (!buf)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
if (w1_reset_select_slave(sl)) {
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
wrbuf[0] = W1_F29_FUNC_READ_PIO_REGS;
wrbuf[1] = address;
wrbuf[2] = 0;
w1_write_block(sl->master, wrbuf, 3);
*buf = w1_read_8(sl->master);
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex unlocked");
return 1;
}
static ssize_t state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj), W1_F29_REG_LOGIG_STATE, buf);
}
static ssize_t output_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_OUTPUT_LATCH_STATE, buf);
}
static ssize_t activity_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_ACTIVITY_LATCH_STATE, buf);
}
static ssize_t cond_search_mask_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_COND_SEARCH_SELECT_MASK, buf);
}
static ssize_t cond_search_polarity_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_COND_SEARCH_POL_SELECT, buf);
}
static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_CONTROL_AND_STATUS, buf);
}
#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
{
u8 w1_buf[3];
if (w1_reset_resume_command(sl->master))
return false;
w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
w1_buf[2] = 0;
w1_write_block(sl->master, w1_buf, 3);
return (w1_read_8(sl->master) == expected);
}
#else
static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
{
return true;
}
#endif
static ssize_t output_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[3];
unsigned int retries = W1_F29_RETRIES;
ssize_t bytes_written = -EIO;
if (count != 1 || off != 0)
return -EFAULT;
dev_dbg(&sl->dev, "locking mutex for write_output");
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
if (w1_reset_select_slave(sl))
goto out;
do {
w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE;
w1_buf[1] = *buf;
w1_buf[2] = ~(*buf);
w1_write_block(sl->master, w1_buf, 3);
if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE &&
optional_read_back_valid(sl, *buf)) {
bytes_written = 1;
goto out;
}
if (w1_reset_resume_command(sl->master))
goto out; /* unrecoverable error */
/* try again, the slave is ready for a command */
} while (--retries);
out:
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "%s, mutex unlocked retries:%d\n",
(bytes_written > 0) ? "succeeded" : "error", retries);
return bytes_written;
}
/*
* Writing to the activity file resets the activity latches.
*/
static ssize_t activity_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
unsigned int retries = W1_F29_RETRIES;
if (count != 1 || off != 0)
return -EFAULT;
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl))
goto error;
while (retries--) {
w1_write_8(sl->master, W1_F29_FUNC_RESET_ACTIVITY_LATCHES);
if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE) {
mutex_unlock(&sl->master->bus_mutex);
return 1;
}
if (w1_reset_resume_command(sl->master))
goto error;
}
error:
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
static ssize_t status_control_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[4];
unsigned int retries = W1_F29_RETRIES;
if (count != 1 || off != 0)
return -EFAULT;
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl))
goto error;
while (retries--) {
w1_buf[0] = W1_F29_FUNC_WRITE_COND_SEARCH_REG;
w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
w1_buf[2] = 0;
w1_buf[3] = *buf;
w1_write_block(sl->master, w1_buf, 4);
if (w1_reset_resume_command(sl->master))
goto error;
w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
w1_buf[2] = 0;
w1_write_block(sl->master, w1_buf, 3);
if (w1_read_8(sl->master) == *buf) {
/* success! */
mutex_unlock(&sl->master->bus_mutex);
return 1;
}
}
error:
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
/*
* This is a special sequence we must do to ensure the P0 output is not stuck
* in test mode. This is described in rev 2 of the ds2408's datasheet
* (http://datasheets.maximintegrated.com/en/ds/DS2408.pdf) under
* "APPLICATION INFORMATION/Power-up timing".
*/
static int w1_f29_disable_test_mode(struct w1_slave *sl)
{
int res;
u8 magic[10] = {0x96, };
u64 rn = le64_to_cpu(*((u64 *)&sl->reg_num));
memcpy(&magic[1], &rn, 8);
magic[9] = 0x3C;
mutex_lock(&sl->master->bus_mutex);
res = w1_reset_bus(sl->master);
if (res)
goto out;
w1_write_block(sl->master, magic, ARRAY_SIZE(magic));
res = w1_reset_bus(sl->master);
out:
mutex_unlock(&sl->master->bus_mutex);
return res;
}
static BIN_ATTR_RO(state, 1);
static BIN_ATTR_RW(output, 1);
static BIN_ATTR_RW(activity, 1);
static BIN_ATTR_RO(cond_search_mask, 1);
static BIN_ATTR_RO(cond_search_polarity, 1);
static BIN_ATTR_RW(status_control, 1);
static struct bin_attribute *w1_f29_bin_attrs[] = {
&bin_attr_state,
&bin_attr_output,
&bin_attr_activity,
&bin_attr_cond_search_mask,
&bin_attr_cond_search_polarity,
&bin_attr_status_control,
NULL,
};
static const struct attribute_group w1_f29_group = {
.bin_attrs = w1_f29_bin_attrs,
};
static const struct attribute_group *w1_f29_groups[] = {
&w1_f29_group,
NULL,
};
static const struct w1_family_ops w1_f29_fops = {
.add_slave = w1_f29_disable_test_mode,
.groups = w1_f29_groups,
};
static struct w1_family w1_family_29 = {
.fid = W1_FAMILY_DS2408,
.fops = &w1_f29_fops,
};
module_w1_family(w1_family_29);
MODULE_AUTHOR("Jean-Francois Dagenais <[email protected]>");
MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2408));
| linux-master | drivers/w1/slaves/w1_ds2408.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 1-Wire implementation for the ds2780 chip
*
* Copyright (C) 2010 Indesign, LLC
*
* Author: Clifton Barnes <[email protected]>
*
* Based on w1-ds2760 driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/w1.h>
#include "w1_ds2780.h"
#define W1_FAMILY_DS2780 0x32
static int w1_ds2780_do_io(struct device *dev, char *buf, int addr,
size_t count, int io)
{
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
if (addr > DS2780_DATA_SIZE || addr < 0)
return 0;
count = min_t(int, count, DS2780_DATA_SIZE - addr);
if (w1_reset_select_slave(sl) == 0) {
if (io) {
w1_write_8(sl->master, W1_DS2780_WRITE_DATA);
w1_write_8(sl->master, addr);
w1_write_block(sl->master, buf, count);
} else {
w1_write_8(sl->master, W1_DS2780_READ_DATA);
w1_write_8(sl->master, addr);
count = w1_read_block(sl->master, buf, count);
}
}
return count;
}
int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
int io)
{
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
int ret;
if (!dev)
return -ENODEV;
mutex_lock(&sl->master->bus_mutex);
ret = w1_ds2780_do_io(dev, buf, addr, count, io);
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
EXPORT_SYMBOL(w1_ds2780_io);
int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
{
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
if (!dev)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl) == 0) {
w1_write_8(sl->master, cmd);
w1_write_8(sl->master, addr);
}
mutex_unlock(&sl->master->bus_mutex);
return 0;
}
EXPORT_SYMBOL(w1_ds2780_eeprom_cmd);
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
return w1_ds2780_io(dev, buf, off, count, 0);
}
static BIN_ATTR_RO(w1_slave, DS2780_DATA_SIZE);
static struct bin_attribute *w1_ds2780_bin_attrs[] = {
&bin_attr_w1_slave,
NULL,
};
static const struct attribute_group w1_ds2780_group = {
.bin_attrs = w1_ds2780_bin_attrs,
};
static const struct attribute_group *w1_ds2780_groups[] = {
&w1_ds2780_group,
NULL,
};
static int w1_ds2780_add_slave(struct w1_slave *sl)
{
int ret;
struct platform_device *pdev;
pdev = platform_device_alloc("ds2780-battery", PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
pdev->dev.parent = &sl->dev;
ret = platform_device_add(pdev);
if (ret)
goto pdev_add_failed;
dev_set_drvdata(&sl->dev, pdev);
return 0;
pdev_add_failed:
platform_device_put(pdev);
return ret;
}
static void w1_ds2780_remove_slave(struct w1_slave *sl)
{
struct platform_device *pdev = dev_get_drvdata(&sl->dev);
platform_device_unregister(pdev);
}
static const struct w1_family_ops w1_ds2780_fops = {
.add_slave = w1_ds2780_add_slave,
.remove_slave = w1_ds2780_remove_slave,
.groups = w1_ds2780_groups,
};
static struct w1_family w1_ds2780_family = {
.fid = W1_FAMILY_DS2780,
.fops = &w1_ds2780_fops,
};
module_w1_family(w1_ds2780_family);
MODULE_AUTHOR("Clifton Barnes <[email protected]>");
MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2780));
| linux-master | drivers/w1/slaves/w1_ds2780.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w1-gpio - GPIO w1 bus master driver
*
* Copyright (C) 2007 Ville Syrjala <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/w1-gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/w1.h>
static u8 w1_gpio_set_pullup(void *data, int delay)
{
struct w1_gpio_platform_data *pdata = data;
if (delay) {
pdata->pullup_duration = delay;
} else {
if (pdata->pullup_duration) {
/*
* This will OVERRIDE open drain emulation and force-pull
* the line high for some time.
*/
gpiod_set_raw_value(pdata->gpiod, 1);
msleep(pdata->pullup_duration);
/*
* This will simply set the line as input since we are doing
* open drain emulation in the GPIO library.
*/
gpiod_set_value(pdata->gpiod, 1);
}
pdata->pullup_duration = 0;
}
return 0;
}
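/*
 * A minimal sketch of the expected call sequence (assuming the w1 core
 * drives it this way): set_pullup() is first called with a non-zero
 * delay to arm the pullup duration, then called again with delay == 0
 * after the write, at which point the line is force-pulled high for the
 * armed duration and the stored duration is cleared.
 */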
static void w1_gpio_write_bit(void *data, u8 bit)
{
struct w1_gpio_platform_data *pdata = data;
gpiod_set_value(pdata->gpiod, bit);
}
static u8 w1_gpio_read_bit(void *data)
{
struct w1_gpio_platform_data *pdata = data;
return gpiod_get_value(pdata->gpiod) ? 1 : 0;
}
#if defined(CONFIG_OF)
static const struct of_device_id w1_gpio_dt_ids[] = {
{ .compatible = "w1-gpio" },
{}
};
MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
#endif
static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
struct w1_gpio_platform_data *pdata;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
/* Enforce open drain mode by default */
enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
int err;
if (of_have_populated_dt()) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
/*
 * This property means that something other than gpiolib has already
 * set the line into open drain mode, so we should just drive it
 * high/low as if we were in full control of the line, and open drain
 * will happen transparently.
 */
if (of_property_present(np, "linux,open-drain"))
gflags = GPIOD_OUT_LOW;
pdev->dev.platform_data = pdata;
}
pdata = dev_get_platdata(dev);
if (!pdata) {
dev_err(dev, "No configuration data\n");
return -ENXIO;
}
master = devm_kzalloc(dev, sizeof(struct w1_bus_master),
GFP_KERNEL);
if (!master)
return -ENOMEM;
pdata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
if (IS_ERR(pdata->gpiod)) {
dev_err(dev, "gpio_request (pin) failed\n");
return PTR_ERR(pdata->gpiod);
}
pdata->pullup_gpiod =
devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
if (IS_ERR(pdata->pullup_gpiod)) {
dev_err(dev, "gpio_request_one "
"(ext_pullup_enable_pin) failed\n");
return PTR_ERR(pdata->pullup_gpiod);
}
master->data = pdata;
master->read_bit = w1_gpio_read_bit;
gpiod_direction_output(pdata->gpiod, 1);
master->write_bit = w1_gpio_write_bit;
/*
* If we are using open drain emulation from the GPIO library,
* we need to use this pullup function that hammers the line
* high using a raw accessor to provide pull-up for the w1
* line.
*/
if (gflags == GPIOD_OUT_LOW_OPEN_DRAIN)
master->set_pullup = w1_gpio_set_pullup;
err = w1_add_master_device(master);
if (err) {
dev_err(dev, "w1_add_master device failed\n");
return err;
}
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(1);
if (pdata->pullup_gpiod)
gpiod_set_value(pdata->pullup_gpiod, 1);
platform_set_drvdata(pdev, master);
return 0;
}
static int w1_gpio_remove(struct platform_device *pdev)
{
struct w1_bus_master *master = platform_get_drvdata(pdev);
struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(0);
if (pdata->pullup_gpiod)
gpiod_set_value(pdata->pullup_gpiod, 0);
w1_remove_master_device(master);
return 0;
}
static int __maybe_unused w1_gpio_suspend(struct device *dev)
{
struct w1_gpio_platform_data *pdata = dev_get_platdata(dev);
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(0);
return 0;
}
static int __maybe_unused w1_gpio_resume(struct device *dev)
{
struct w1_gpio_platform_data *pdata = dev_get_platdata(dev);
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(1);
return 0;
}
static SIMPLE_DEV_PM_OPS(w1_gpio_pm_ops, w1_gpio_suspend, w1_gpio_resume);
static struct platform_driver w1_gpio_driver = {
.driver = {
.name = "w1-gpio",
.pm = &w1_gpio_pm_ops,
.of_match_table = of_match_ptr(w1_gpio_dt_ids),
},
.probe = w1_gpio_probe,
.remove = w1_gpio_remove,
};
module_platform_driver(w1_gpio_driver);
MODULE_DESCRIPTION("GPIO w1 bus master driver");
MODULE_AUTHOR("Ville Syrjala <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/w1/masters/w1-gpio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ds2482.c - provides i2c to w1-master bridge(s)
* Copyright (C) 2005 Ben Gardner <[email protected]>
*
* The DS2482 is a sensor chip made by Dallas Semiconductor (Maxim).
* It is a I2C to 1-wire bridge.
* There are two variations: -100 and -800, which have 1 or 8 1-wire ports.
* The complete datasheet can be obtained from MAXIM's website at:
* http://www.maxim-ic.com/quick_view2.cfm/qv_pk/4382
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/w1.h>
/*
* Allow the active pullup to be disabled, default is enabled.
*
* Note from the DS2482 datasheet:
* The APU bit controls whether an active pullup (controlled slew-rate
* transistor) or a passive pullup (Rwpu resistor) will be used to drive
* a 1-Wire line from low to high. When APU = 0, active pullup is disabled
* (resistor mode). Active Pullup should always be selected unless there is
* only a single slave on the 1-Wire line.
*/
static int ds2482_active_pullup = 1;
module_param_named(active_pullup, ds2482_active_pullup, int, 0644);
MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
"0-disable, 1-enable (default)");
/* extra configurations - e.g. 1WS */
static int extra_config;
module_param(extra_config, int, 0644);
MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8=1WS");
/*
* The DS2482 registers - there are 3 registers that are addressed by a read
* pointer. The read pointer is set by the last command executed.
*
* To read the data, issue a register read for any address
*/
#define DS2482_CMD_RESET 0xF0 /* No param */
#define DS2482_CMD_SET_READ_PTR 0xE1 /* Param: DS2482_PTR_CODE_xxx */
#define DS2482_CMD_CHANNEL_SELECT 0xC3 /* Param: Channel byte - DS2482-800 only */
#define DS2482_CMD_WRITE_CONFIG 0xD2 /* Param: Config byte */
#define DS2482_CMD_1WIRE_RESET 0xB4 /* Param: None */
#define DS2482_CMD_1WIRE_SINGLE_BIT 0x87 /* Param: Bit byte (bit7) */
#define DS2482_CMD_1WIRE_WRITE_BYTE 0xA5 /* Param: Data byte */
#define DS2482_CMD_1WIRE_READ_BYTE 0x96 /* Param: None */
/* Note to read the byte, Set the ReadPtr to Data then read (any addr) */
#define DS2482_CMD_1WIRE_TRIPLET 0x78 /* Param: Dir byte (bit7) */
/* Values for DS2482_CMD_SET_READ_PTR */
#define DS2482_PTR_CODE_STATUS 0xF0
#define DS2482_PTR_CODE_DATA 0xE1
#define DS2482_PTR_CODE_CHANNEL 0xD2 /* DS2482-800 only */
#define DS2482_PTR_CODE_CONFIG 0xC3
/*
* Configure Register bit definitions
* The top 4 bits always read 0.
* To write, the top nibble must be the 1's compl. of the low nibble.
*/
#define DS2482_REG_CFG_1WS 0x08 /* 1-wire speed */
#define DS2482_REG_CFG_SPU 0x04 /* strong pull-up */
#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */
#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
/*
* Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
* To set the channel, write the value at the index of the channel.
* Read and compare against the corresponding value to verify the change.
*/
static const u8 ds2482_chan_wr[8] = { 0xF0, 0xE1, 0xD2, 0xC3, 0xB4, 0xA5, 0x96, 0x87 };
static const u8 ds2482_chan_rd[8] = { 0xB8, 0xB1, 0xAA, 0xA3, 0x9C, 0x95, 0x8E, 0x87 };
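/*
 * For example, selecting channel 2 writes ds2482_chan_wr[2] (0xD2) with
 * DS2482_CMD_CHANNEL_SELECT and then expects to read back
 * ds2482_chan_rd[2] (0xAA); see ds2482_set_channel() below.
 */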
/*
* Status Register bit definitions (read only)
*/
#define DS2482_REG_STS_DIR 0x80
#define DS2482_REG_STS_TSB 0x40
#define DS2482_REG_STS_SBR 0x20
#define DS2482_REG_STS_RST 0x10
#define DS2482_REG_STS_LL 0x08
#define DS2482_REG_STS_SD 0x04
#define DS2482_REG_STS_PPD 0x02
#define DS2482_REG_STS_1WB 0x01
/*
* Client data (each client gets its own)
*/
struct ds2482_data;
struct ds2482_w1_chan {
struct ds2482_data *pdev;
u8 channel;
struct w1_bus_master w1_bm;
};
struct ds2482_data {
struct i2c_client *client;
struct mutex access_lock;
/* 1-wire interface(s) */
int w1_count; /* 1 or 8 */
struct ds2482_w1_chan w1_ch[8];
/* per-device values */
u8 channel;
u8 read_prt; /* see DS2482_PTR_CODE_xxx */
u8 reg_config;
};
/**
* ds2482_calculate_config - Helper to calculate values for configuration register
* @conf: the raw config value
* Return: the value w/ complements that can be written to register
*/
static inline u8 ds2482_calculate_config(u8 conf)
{
conf |= extra_config;
if (ds2482_active_pullup)
conf |= DS2482_REG_CFG_APU;
return conf | ((~conf & 0x0f) << 4);
}
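/*
 * Worked example: with the default module parameters (extra_config == 0,
 * active pullup enabled) ds2482_calculate_config(0x00) yields 0x01 for
 * the low nibble (APU set) and 0xE0 for the complemented high nibble,
 * i.e. 0xE1 is written to the configuration register.
 */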
/**
* ds2482_select_register - Sets the read pointer.
* @pdev: The ds2482 client pointer
* @read_ptr: see DS2482_PTR_CODE_xxx above
* Return: -1 on failure, 0 on success
*/
static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr)
{
if (pdev->read_prt != read_ptr) {
if (i2c_smbus_write_byte_data(pdev->client,
DS2482_CMD_SET_READ_PTR,
read_ptr) < 0)
return -1;
pdev->read_prt = read_ptr;
}
return 0;
}
/**
* ds2482_send_cmd - Sends a command without a parameter
* @pdev: The ds2482 client pointer
* @cmd: DS2482_CMD_RESET,
* DS2482_CMD_1WIRE_RESET,
* DS2482_CMD_1WIRE_READ_BYTE
* Return: -1 on failure, 0 on success
*/
static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd)
{
if (i2c_smbus_write_byte(pdev->client, cmd) < 0)
return -1;
pdev->read_prt = DS2482_PTR_CODE_STATUS;
return 0;
}
/**
* ds2482_send_cmd_data - Sends a command with a parameter
* @pdev: The ds2482 client pointer
* @cmd: DS2482_CMD_WRITE_CONFIG,
* DS2482_CMD_1WIRE_SINGLE_BIT,
* DS2482_CMD_1WIRE_WRITE_BYTE,
* DS2482_CMD_1WIRE_TRIPLET
* @byte: The data to send
* Return: -1 on failure, 0 on success
*/
static inline int ds2482_send_cmd_data(struct ds2482_data *pdev,
u8 cmd, u8 byte)
{
if (i2c_smbus_write_byte_data(pdev->client, cmd, byte) < 0)
return -1;
/* all cmds leave in STATUS, except CONFIG */
pdev->read_prt = (cmd != DS2482_CMD_WRITE_CONFIG) ?
DS2482_PTR_CODE_STATUS : DS2482_PTR_CODE_CONFIG;
return 0;
}
/*
* 1-Wire interface code
*/
#define DS2482_WAIT_IDLE_TIMEOUT 100
/**
* ds2482_wait_1wire_idle - Waits until the 1-wire interface is idle (not busy)
*
* @pdev: Pointer to the device structure
* Return: the last value read from status or -1 (failure)
*/
static int ds2482_wait_1wire_idle(struct ds2482_data *pdev)
{
int temp = -1;
int retries = 0;
if (!ds2482_select_register(pdev, DS2482_PTR_CODE_STATUS)) {
do {
temp = i2c_smbus_read_byte(pdev->client);
} while ((temp >= 0) && (temp & DS2482_REG_STS_1WB) &&
(++retries < DS2482_WAIT_IDLE_TIMEOUT));
}
if (retries >= DS2482_WAIT_IDLE_TIMEOUT)
pr_err("%s: timeout on channel %d\n",
__func__, pdev->channel);
return temp;
}
/**
* ds2482_set_channel - Selects a w1 channel.
* The 1-wire interface must be idle before calling this function.
*
* @pdev: The ds2482 client pointer
* @channel: 0-7
* Return: -1 (failure) or 0 (success)
*/
static int ds2482_set_channel(struct ds2482_data *pdev, u8 channel)
{
if (i2c_smbus_write_byte_data(pdev->client, DS2482_CMD_CHANNEL_SELECT,
ds2482_chan_wr[channel]) < 0)
return -1;
pdev->read_prt = DS2482_PTR_CODE_CHANNEL;
pdev->channel = -1;
if (i2c_smbus_read_byte(pdev->client) == ds2482_chan_rd[channel]) {
pdev->channel = channel;
return 0;
}
return -1;
}
/**
* ds2482_w1_touch_bit - Performs the touch-bit function, which writes a 0 or 1 and reads the level.
*
* @data: The ds2482 channel pointer
* @bit: The level to write: 0 or non-zero
* Return: The level read: 0 or 1
*/
static u8 ds2482_w1_touch_bit(void *data, u8 bit)
{
struct ds2482_w1_chan *pchan = data;
struct ds2482_data *pdev = pchan->pdev;
int status = -1;
mutex_lock(&pdev->access_lock);
/* Select the channel */
ds2482_wait_1wire_idle(pdev);
if (pdev->w1_count > 1)
ds2482_set_channel(pdev, pchan->channel);
/* Send the touch command, wait until 1WB == 0, return the status */
if (!ds2482_send_cmd_data(pdev, DS2482_CMD_1WIRE_SINGLE_BIT,
bit ? 0xFF : 0))
status = ds2482_wait_1wire_idle(pdev);
mutex_unlock(&pdev->access_lock);
return (status & DS2482_REG_STS_SBR) ? 1 : 0;
}
/**
* ds2482_w1_triplet - Performs the triplet function, which reads two bits and writes a bit.
* The bit written is determined by the two reads:
* 00 => dbit, 01 => 0, 10 => 1
*
* @data: The ds2482 channel pointer
* @dbit: The direction to choose if both branches are valid
 * Return: b0=read1 b1=read2 b2=bit written
*/
static u8 ds2482_w1_triplet(void *data, u8 dbit)
{
struct ds2482_w1_chan *pchan = data;
struct ds2482_data *pdev = pchan->pdev;
int status = (3 << 5);
mutex_lock(&pdev->access_lock);
/* Select the channel */
ds2482_wait_1wire_idle(pdev);
if (pdev->w1_count > 1)
ds2482_set_channel(pdev, pchan->channel);
/* Send the triplet command, wait until 1WB == 0, return the status */
if (!ds2482_send_cmd_data(pdev, DS2482_CMD_1WIRE_TRIPLET,
dbit ? 0xFF : 0))
status = ds2482_wait_1wire_idle(pdev);
mutex_unlock(&pdev->access_lock);
/* Decode the status */
return (status >> 5);
}
/**
* ds2482_w1_write_byte - Performs the write byte function.
*
* @data: The ds2482 channel pointer
* @byte: The value to write
*/
static void ds2482_w1_write_byte(void *data, u8 byte)
{
struct ds2482_w1_chan *pchan = data;
struct ds2482_data *pdev = pchan->pdev;
mutex_lock(&pdev->access_lock);
/* Select the channel */
ds2482_wait_1wire_idle(pdev);
if (pdev->w1_count > 1)
ds2482_set_channel(pdev, pchan->channel);
/* Send the write byte command */
ds2482_send_cmd_data(pdev, DS2482_CMD_1WIRE_WRITE_BYTE, byte);
mutex_unlock(&pdev->access_lock);
}
/**
* ds2482_w1_read_byte - Performs the read byte function.
*
* @data: The ds2482 channel pointer
* Return: The value read
*/
static u8 ds2482_w1_read_byte(void *data)
{
struct ds2482_w1_chan *pchan = data;
struct ds2482_data *pdev = pchan->pdev;
int result;
mutex_lock(&pdev->access_lock);
/* Select the channel */
ds2482_wait_1wire_idle(pdev);
if (pdev->w1_count > 1)
ds2482_set_channel(pdev, pchan->channel);
/* Send the read byte command */
ds2482_send_cmd(pdev, DS2482_CMD_1WIRE_READ_BYTE);
/* Wait until 1WB == 0 */
ds2482_wait_1wire_idle(pdev);
/* Select the data register */
ds2482_select_register(pdev, DS2482_PTR_CODE_DATA);
/* Read the data byte */
result = i2c_smbus_read_byte(pdev->client);
mutex_unlock(&pdev->access_lock);
return result;
}
/**
* ds2482_w1_reset_bus - Sends a reset on the 1-wire interface
*
* @data: The ds2482 channel pointer
* Return: 0=Device present, 1=No device present or error
*/
static u8 ds2482_w1_reset_bus(void *data)
{
struct ds2482_w1_chan *pchan = data;
struct ds2482_data *pdev = pchan->pdev;
int err;
u8 retval = 1;
mutex_lock(&pdev->access_lock);
/* Select the channel */
ds2482_wait_1wire_idle(pdev);
if (pdev->w1_count > 1)
ds2482_set_channel(pdev, pchan->channel);
/* Send the reset command */
err = ds2482_send_cmd(pdev, DS2482_CMD_1WIRE_RESET);
if (err >= 0) {
/* Wait until the reset is complete */
err = ds2482_wait_1wire_idle(pdev);
retval = !(err & DS2482_REG_STS_PPD);
/* If the chip did reset since detect, re-config it */
if (err & DS2482_REG_STS_RST)
ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
ds2482_calculate_config(0x00));
}
mutex_unlock(&pdev->access_lock);
return retval;
}
static u8 ds2482_w1_set_pullup(void *data, int delay)
{
struct ds2482_w1_chan *pchan = data;
struct ds2482_data *pdev = pchan->pdev;
u8 retval = 1;
/* If delay is non-zero, activate the pullup. The strong pullup
 * will be automatically deactivated by the master, so do not
 * explicitly deactivate it here.
 */
if (delay) {
/* both waits are crucial, otherwise devices might not be
* powered long enough, causing e.g. a w1_therm sensor to
* provide wrong conversion results
*/
ds2482_wait_1wire_idle(pdev);
/* note: it seems like both SPU and APU have to be set! */
retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
ds2482_calculate_config(DS2482_REG_CFG_SPU |
DS2482_REG_CFG_APU));
ds2482_wait_1wire_idle(pdev);
}
return retval;
}
static int ds2482_probe(struct i2c_client *client)
{
struct ds2482_data *data;
int err = -ENODEV;
int temp1;
int idx;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
I2C_FUNC_SMBUS_BYTE))
return -ENODEV;
data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
data->client = client;
i2c_set_clientdata(client, data);
/* Reset the device (sets the read_ptr to status) */
if (ds2482_send_cmd(data, DS2482_CMD_RESET) < 0) {
dev_warn(&client->dev, "DS2482 reset failed.\n");
goto exit_free;
}
/* Sleep at least 525ns to allow the reset to complete */
ndelay(525);
/* Read the status byte - only reset bit and line should be set */
temp1 = i2c_smbus_read_byte(client);
if (temp1 != (DS2482_REG_STS_LL | DS2482_REG_STS_RST)) {
dev_warn(&client->dev, "DS2482 reset status "
"0x%02X - not a DS2482\n", temp1);
goto exit_free;
}
/* Detect the 8-port version */
data->w1_count = 1;
if (ds2482_set_channel(data, 7) == 0)
data->w1_count = 8;
/* Set all config items to 0 (off) */
ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
ds2482_calculate_config(0x00));
mutex_init(&data->access_lock);
/* Register 1-wire interface(s) */
for (idx = 0; idx < data->w1_count; idx++) {
data->w1_ch[idx].pdev = data;
data->w1_ch[idx].channel = idx;
/* Populate all the w1 bus master stuff */
data->w1_ch[idx].w1_bm.data = &data->w1_ch[idx];
data->w1_ch[idx].w1_bm.read_byte = ds2482_w1_read_byte;
data->w1_ch[idx].w1_bm.write_byte = ds2482_w1_write_byte;
data->w1_ch[idx].w1_bm.touch_bit = ds2482_w1_touch_bit;
data->w1_ch[idx].w1_bm.triplet = ds2482_w1_triplet;
data->w1_ch[idx].w1_bm.reset_bus = ds2482_w1_reset_bus;
data->w1_ch[idx].w1_bm.set_pullup = ds2482_w1_set_pullup;
err = w1_add_master_device(&data->w1_ch[idx].w1_bm);
if (err) {
data->w1_ch[idx].pdev = NULL;
goto exit_w1_remove;
}
}
return 0;
exit_w1_remove:
for (idx = 0; idx < data->w1_count; idx++) {
if (data->w1_ch[idx].pdev != NULL)
w1_remove_master_device(&data->w1_ch[idx].w1_bm);
}
exit_free:
kfree(data);
exit:
return err;
}
static void ds2482_remove(struct i2c_client *client)
{
struct ds2482_data *data = i2c_get_clientdata(client);
int idx;
/* Unregister the 1-wire bridge(s) */
for (idx = 0; idx < data->w1_count; idx++) {
if (data->w1_ch[idx].pdev != NULL)
w1_remove_master_device(&data->w1_ch[idx].w1_bm);
}
/* Free the memory */
kfree(data);
}
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id ds2482_id[] = {
{ "ds2482", 0 },
{ "ds2484", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds2482_id);
static struct i2c_driver ds2482_driver = {
.driver = {
.name = "ds2482",
},
.probe = ds2482_probe,
.remove = ds2482_remove,
.id_table = ds2482_id,
};
module_i2c_driver(ds2482_driver);
MODULE_AUTHOR("Ben Gardner <[email protected]>");
MODULE_DESCRIPTION("DS2482 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/w1/masters/ds2482.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2005-2008 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Luotao Fu, [email protected]
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/w1.h>
/*
* MXC W1 Register offsets
*/
#define MXC_W1_CONTROL 0x00
# define MXC_W1_CONTROL_RDST BIT(3)
# define MXC_W1_CONTROL_WR(x) BIT(5 - (x))
# define MXC_W1_CONTROL_PST BIT(6)
# define MXC_W1_CONTROL_RPP BIT(7)
#define MXC_W1_TIME_DIVIDER 0x02
#define MXC_W1_RESET 0x04
# define MXC_W1_RESET_RST BIT(0)
struct mxc_w1_device {
void __iomem *regs;
struct clk *clk;
struct w1_bus_master bus_master;
};
/*
* this is the low level routine to
* reset the device on the One Wire interface
* on the hardware
*/
static u8 mxc_w1_ds2_reset_bus(void *data)
{
struct mxc_w1_device *dev = data;
ktime_t timeout;
writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL);
/* Wait for reset sequence 511+512us, use 1500us for sure */
timeout = ktime_add_us(ktime_get(), 1500);
udelay(511 + 512);
do {
u8 ctrl = readb(dev->regs + MXC_W1_CONTROL);
/* PST bit is valid after the RPP bit is self-cleared */
if (!(ctrl & MXC_W1_CONTROL_RPP))
return !(ctrl & MXC_W1_CONTROL_PST);
} while (ktime_before(ktime_get(), timeout));
return 1;
}
/*
* this is the low level routine to read/write a bit on the One Wire
* interface on the hardware. It does write 0 if parameter bit is set
* to 0, otherwise a write 1/read.
*/
static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
{
struct mxc_w1_device *dev = data;
ktime_t timeout;
writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL);
/* Wait for read/write bit (60us, Max 120us), use 200us for sure */
timeout = ktime_add_us(ktime_get(), 200);
udelay(60);
do {
u8 ctrl = readb(dev->regs + MXC_W1_CONTROL);
/* RDST bit is valid after the WR1/RD bit is self-cleared */
if (!(ctrl & MXC_W1_CONTROL_WR(bit)))
return !!(ctrl & MXC_W1_CONTROL_RDST);
} while (ktime_before(ktime_get(), timeout));
return 0;
}
static int mxc_w1_probe(struct platform_device *pdev)
{
struct mxc_w1_device *mdev;
unsigned long clkrate;
unsigned int clkdiv;
int err;
mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
GFP_KERNEL);
if (!mdev)
return -ENOMEM;
mdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mdev->clk))
return PTR_ERR(mdev->clk);
err = clk_prepare_enable(mdev->clk);
if (err)
return err;
clkrate = clk_get_rate(mdev->clk);
if (clkrate < 10000000)
dev_warn(&pdev->dev,
"Low clock frequency causes improper function\n");
clkdiv = DIV_ROUND_CLOSEST(clkrate, 1000000);
clkrate /= clkdiv;
if ((clkrate < 980000) || (clkrate > 1020000))
dev_warn(&pdev->dev,
"Incorrect time base frequency %lu Hz\n", clkrate);
mdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->regs)) {
err = PTR_ERR(mdev->regs);
goto out_disable_clk;
}
/* Software reset 1-Wire module */
writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
writeb(0, mdev->regs + MXC_W1_RESET);
writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev->bus_master.data = mdev;
mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit;
platform_set_drvdata(pdev, mdev);
err = w1_add_master_device(&mdev->bus_master);
if (err)
goto out_disable_clk;
return 0;
out_disable_clk:
clk_disable_unprepare(mdev->clk);
return err;
}
/*
* disassociate the w1 device from the driver
*/
static int mxc_w1_remove(struct platform_device *pdev)
{
struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
w1_remove_master_device(&mdev->bus_master);
clk_disable_unprepare(mdev->clk);
return 0;
}
static const struct of_device_id mxc_w1_dt_ids[] = {
{ .compatible = "fsl,imx21-owire" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxc_w1_dt_ids);
static struct platform_driver mxc_w1_driver = {
.driver = {
.name = "mxc_w1",
.of_match_table = mxc_w1_dt_ids,
},
.probe = mxc_w1_probe,
.remove = mxc_w1_remove,
};
module_platform_driver(mxc_w1_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale Semiconductors Inc");
MODULE_DESCRIPTION("Driver for One-Wire on MXC");
| linux-master | drivers/w1/masters/mxc_w1.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sgi_w1.c - w1 master driver for one wire support in SGI ASICs
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/platform_data/sgi-w1.h>
#include <linux/w1.h>
#define MCR_RD_DATA BIT(0)
#define MCR_DONE BIT(1)
#define MCR_PACK(pulse, sample) (((pulse) << 10) | ((sample) << 2))
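/*
 * MCR_PACK() packs a pulse length (bits 10 and up) and a sample time
 * (bits 2 and up) into the MCR; the units are assumed to be
 * microseconds. For example, the bus reset below uses MCR_PACK(520, 65)
 * and a write-0 slot uses MCR_PACK(80, 30).
 */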
struct sgi_w1_device {
u32 __iomem *mcr;
struct w1_bus_master bus_master;
char dev_id[64];
};
static u8 sgi_w1_wait(u32 __iomem *mcr)
{
u32 mcr_val;
do {
mcr_val = readl(mcr);
} while (!(mcr_val & MCR_DONE));
return (mcr_val & MCR_RD_DATA) ? 1 : 0;
}
/*
* this is the low level routine to
* reset the device on the One Wire interface
* on the hardware
*/
static u8 sgi_w1_reset_bus(void *data)
{
struct sgi_w1_device *dev = data;
u8 ret;
writel(MCR_PACK(520, 65), dev->mcr);
ret = sgi_w1_wait(dev->mcr);
udelay(500); /* recovery time */
return ret;
}
/*
* this is the low level routine to read/write a bit on the One Wire
* interface on the hardware. It does write 0 if parameter bit is set
* to 0, otherwise a write 1/read.
*/
static u8 sgi_w1_touch_bit(void *data, u8 bit)
{
struct sgi_w1_device *dev = data;
u8 ret;
if (bit)
writel(MCR_PACK(6, 13), dev->mcr);
else
writel(MCR_PACK(80, 30), dev->mcr);
ret = sgi_w1_wait(dev->mcr);
if (bit)
udelay(100); /* recovery */
return ret;
}
static int sgi_w1_probe(struct platform_device *pdev)
{
struct sgi_w1_device *sdev;
struct sgi_w1_platform_data *pdata;
sdev = devm_kzalloc(&pdev->dev, sizeof(struct sgi_w1_device),
GFP_KERNEL);
if (!sdev)
return -ENOMEM;
sdev->mcr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->mcr))
return PTR_ERR(sdev->mcr);
sdev->bus_master.data = sdev;
sdev->bus_master.reset_bus = sgi_w1_reset_bus;
sdev->bus_master.touch_bit = sgi_w1_touch_bit;
pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
strscpy(sdev->dev_id, pdata->dev_id, sizeof(sdev->dev_id));
sdev->bus_master.dev_id = sdev->dev_id;
}
platform_set_drvdata(pdev, sdev);
return w1_add_master_device(&sdev->bus_master);
}
/*
* disassociate the w1 device from the driver
*/
static int sgi_w1_remove(struct platform_device *pdev)
{
struct sgi_w1_device *sdev = platform_get_drvdata(pdev);
w1_remove_master_device(&sdev->bus_master);
return 0;
}
static struct platform_driver sgi_w1_driver = {
.driver = {
.name = "sgi_w1",
},
.probe = sgi_w1_probe,
.remove = sgi_w1_remove,
};
module_platform_driver(sgi_w1_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Bogendoerfer");
MODULE_DESCRIPTION("Driver for One-Wire IP in SGI ASICs");
| linux-master | drivers/w1/masters/sgi_w1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* matrox_w1.c
*
* Copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <asm/types.h>
#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/w1.h>
/*
* Matrox G400 DDC registers.
*/
#define MATROX_G400_DDC_CLK (1<<4)
#define MATROX_G400_DDC_DATA (1<<1)
#define MATROX_BASE 0x3C00
#define MATROX_STATUS 0x1e14
#define MATROX_PORT_INDEX_OFFSET 0x00
#define MATROX_PORT_DATA_OFFSET 0x0A
#define MATROX_GET_CONTROL 0x2A
#define MATROX_GET_DATA 0x2B
#define MATROX_CURSOR_CTL 0x06
struct matrox_device {
void __iomem *base_addr;
void __iomem *port_index;
void __iomem *port_data;
u8 data_mask;
unsigned long phys_addr;
void __iomem *virt_addr;
unsigned long found;
struct w1_bus_master *bus_master;
};
/*
 * These functions read and write the DDC data bit.
 *
 * Tristate pins are used, since no open-drain pin could be found on the
 * whole motherboard. Unfortunately we can't connect to Intel's 82801xx
 * IO controller, since we don't know the motherboard schematic, which
 * has GPIOs that appear to be unused (or may not be).
 *
 * The PIIX is also said to have an open-drain pin.
 *
 * Port mapping.
 */
static inline u8 matrox_w1_read_reg(struct matrox_device *dev, u8 reg)
{
u8 ret;
writeb(reg, dev->port_index);
ret = readb(dev->port_data);
barrier();
return ret;
}
static inline void matrox_w1_write_reg(struct matrox_device *dev, u8 reg, u8 val)
{
writeb(reg, dev->port_index);
writeb(val, dev->port_data);
wmb();
}
static void matrox_w1_write_ddc_bit(void *data, u8 bit)
{
u8 ret;
struct matrox_device *dev = data;
if (bit)
bit = 0;
else
bit = dev->data_mask;
ret = matrox_w1_read_reg(dev, MATROX_GET_CONTROL);
matrox_w1_write_reg(dev, MATROX_GET_CONTROL, ((ret & ~dev->data_mask) | bit));
matrox_w1_write_reg(dev, MATROX_GET_DATA, 0x00);
}
static u8 matrox_w1_read_ddc_bit(void *data)
{
u8 ret;
struct matrox_device *dev = data;
ret = matrox_w1_read_reg(dev, MATROX_GET_DATA);
return ret;
}
static void matrox_w1_hw_init(struct matrox_device *dev)
{
matrox_w1_write_reg(dev, MATROX_GET_DATA, 0xFF);
matrox_w1_write_reg(dev, MATROX_GET_CONTROL, 0x00);
}
static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct matrox_device *dev;
int err;
if (pdev->vendor != PCI_VENDOR_ID_MATROX || pdev->device != PCI_DEVICE_ID_MATROX_G400)
return -ENODEV;
dev = kzalloc(sizeof(struct matrox_device) +
sizeof(struct w1_bus_master), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->bus_master = (struct w1_bus_master *)(dev + 1);
/*
 * True for the G400; for some others we need resource 0, see drivers/video/matrox/matroxfb_base.c
*/
dev->phys_addr = pci_resource_start(pdev, 1);
dev->virt_addr = ioremap(dev->phys_addr, 16384);
if (!dev->virt_addr) {
dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n",
__func__, dev->phys_addr, 16384);
err = -EIO;
goto err_out_free_device;
}
dev->base_addr = dev->virt_addr + MATROX_BASE;
dev->port_index = dev->base_addr + MATROX_PORT_INDEX_OFFSET;
dev->port_data = dev->base_addr + MATROX_PORT_DATA_OFFSET;
dev->data_mask = (MATROX_G400_DDC_DATA);
matrox_w1_hw_init(dev);
dev->bus_master->data = dev;
dev->bus_master->read_bit = &matrox_w1_read_ddc_bit;
dev->bus_master->write_bit = &matrox_w1_write_ddc_bit;
err = w1_add_master_device(dev->bus_master);
if (err)
goto err_out_free_device;
pci_set_drvdata(pdev, dev);
dev->found = 1;
dev_info(&pdev->dev, "Matrox G400 GPIO transport layer for 1-wire.\n");
return 0;
err_out_free_device:
if (dev->virt_addr)
iounmap(dev->virt_addr);
kfree(dev);
return err;
}
static void matrox_w1_remove(struct pci_dev *pdev)
{
struct matrox_device *dev = pci_get_drvdata(pdev);
if (dev->found) {
w1_remove_master_device(dev->bus_master);
iounmap(dev->virt_addr);
}
kfree(dev);
}
static struct pci_device_id matrox_w1_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
{ },
};
MODULE_DEVICE_TABLE(pci, matrox_w1_tbl);
static struct pci_driver matrox_w1_pci_driver = {
.name = "matrox_w1",
.id_table = matrox_w1_tbl,
.probe = matrox_w1_probe,
.remove = matrox_w1_remove,
};
module_pci_driver(matrox_w1_pci_driver);
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire protocol) over VGA DDC(matrox gpio).");
MODULE_LICENSE("GPL");
| linux-master | drivers/w1/masters/matrox_w1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ds2490.c USB to one wire bridge
*
* Copyright (c) 2004 Evgeniy Polyakov <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/w1.h>
/* USB Standard */
/* USB Control request vendor type */
#define VENDOR 0x40
/* COMMAND TYPE CODES */
#define CONTROL_CMD 0x00
#define COMM_CMD 0x01
#define MODE_CMD 0x02
/* CONTROL COMMAND CODES */
#define CTL_RESET_DEVICE 0x0000
#define CTL_START_EXE 0x0001
#define CTL_RESUME_EXE 0x0002
#define CTL_HALT_EXE_IDLE 0x0003
#define CTL_HALT_EXE_DONE 0x0004
#define CTL_FLUSH_COMM_CMDS 0x0007
#define CTL_FLUSH_RCV_BUFFER 0x0008
#define CTL_FLUSH_XMT_BUFFER 0x0009
#define CTL_GET_COMM_CMDS 0x000A
/* MODE COMMAND CODES */
#define MOD_PULSE_EN 0x0000
#define MOD_SPEED_CHANGE_EN 0x0001
#define MOD_1WIRE_SPEED 0x0002
#define MOD_STRONG_PU_DURATION 0x0003
#define MOD_PULLDOWN_SLEWRATE 0x0004
#define MOD_PROG_PULSE_DURATION 0x0005
#define MOD_WRITE1_LOWTIME 0x0006
#define MOD_DSOW0_TREC 0x0007
/* COMMUNICATION COMMAND CODES */
#define COMM_ERROR_ESCAPE 0x0601
#define COMM_SET_DURATION 0x0012
#define COMM_BIT_IO 0x0020
#define COMM_PULSE 0x0030
#define COMM_1_WIRE_RESET 0x0042
#define COMM_BYTE_IO 0x0052
#define COMM_MATCH_ACCESS 0x0064
#define COMM_BLOCK_IO 0x0074
#define COMM_READ_STRAIGHT 0x0080
#define COMM_DO_RELEASE 0x6092
#define COMM_SET_PATH 0x00A2
#define COMM_WRITE_SRAM_PAGE 0x00B2
#define COMM_WRITE_EPROM 0x00C4
#define COMM_READ_CRC_PROT_PAGE 0x00D4
#define COMM_READ_REDIRECT_PAGE_CRC 0x21E4
#define COMM_SEARCH_ACCESS 0x00F4
/* Communication command bits */
#define COMM_TYPE 0x0008
#define COMM_SE 0x0008
#define COMM_D 0x0008
#define COMM_Z 0x0008
#define COMM_CH 0x0008
#define COMM_SM 0x0008
#define COMM_R 0x0008
#define COMM_IM 0x0001
#define COMM_PS 0x4000
#define COMM_PST 0x4000
#define COMM_CIB 0x4000
#define COMM_RTS 0x4000
#define COMM_DT 0x2000
#define COMM_SPU 0x1000
#define COMM_F 0x0800
#define COMM_NTF 0x0400
#define COMM_ICP 0x0200
#define COMM_RST 0x0100
#define PULSE_PROG 0x01
#define PULSE_SPUE 0x02
#define BRANCH_MAIN 0xCC
#define BRANCH_AUX 0x33
/* Status flags */
#define ST_SPUA 0x01 /* Strong Pull-up is active */
#define ST_PRGA 0x02 /* 12V programming pulse is being generated */
#define ST_12VP 0x04 /* external 12V programming voltage is present */
#define ST_PMOD 0x08 /* DS2490 powered from USB and external sources */
#define ST_HALT 0x10 /* DS2490 is currently halted */
#define ST_IDLE 0x20 /* DS2490 is currently idle */
#define ST_EPOF 0x80
/* Status transfer size, 16 bytes status, 16 byte result flags */
#define ST_SIZE 0x20
/* Result Register flags */
#define RR_DETECT 0xA5 /* New device detected */
#define RR_NRS 0x01 /* Reset no presence or ... */
#define RR_SH 0x02 /* short on reset or set path */
#define RR_APP 0x04 /* alarming presence on reset */
#define RR_VPP 0x08 /* 12V expected not seen */
#define RR_CMP 0x10 /* compare error */
#define RR_CRC 0x20 /* CRC error detected */
#define RR_RDP 0x40 /* redirected page */
#define RR_EOS 0x80 /* end of search error */
#define SPEED_NORMAL 0x00
#define SPEED_FLEXIBLE 0x01
#define SPEED_OVERDRIVE 0x02
#define NUM_EP 4
#define EP_CONTROL 0
#define EP_STATUS 1
#define EP_DATA_OUT 2
#define EP_DATA_IN 3
struct ds_device {
struct list_head ds_entry;
struct usb_device *udev;
struct usb_interface *intf;
int ep[NUM_EP];
/* Strong PullUp
* 0: pullup not active, else duration in milliseconds
*/
int spu_sleep;
/* spu_bit contains COMM_SPU or 0 depending on if the strong pullup
* should be active or not for writes.
*/
u16 spu_bit;
u8 st_buf[ST_SIZE];
u8 byte_buf;
struct w1_bus_master master;
};
struct ds_status {
u8 enable;
u8 speed;
u8 pullup_dur;
u8 ppuls_dur;
u8 pulldown_slew;
u8 write1_time;
u8 write0_time;
u8 reserved0;
u8 status;
u8 command0;
u8 command1;
u8 command_buffer_status;
u8 data_out_buffer_status;
u8 data_in_buffer_status;
u8 reserved1;
u8 reserved2;
};
static LIST_HEAD(ds_devices);
static DEFINE_MUTEX(ds_mutex);
static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
{
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
dev_err(&dev->udev->dev,
"Failed to send command control message %x.%x: err=%d.\n",
value, index, err);
return err;
}
return err;
}
static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
{
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
dev_err(&dev->udev->dev,
"Failed to send mode control message %x.%x: err=%d.\n",
value, index, err);
return err;
}
return err;
}
static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
{
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
dev_err(&dev->udev->dev,
"Failed to send control message %x.%x: err=%d.\n",
value, index, err);
return err;
}
return err;
}
static void ds_dump_status(struct ds_device *ds_dev, unsigned char *buf, int count)
{
struct device *dev = &ds_dev->udev->dev;
int i;
dev_info(dev, "ep_status=0x%x, count=%d, status=%*phC",
ds_dev->ep[EP_STATUS], count, count, buf);
if (count >= 16) {
dev_dbg(dev, "enable flag: 0x%02x", buf[0]);
dev_dbg(dev, "1-wire speed: 0x%02x", buf[1]);
dev_dbg(dev, "strong pullup duration: 0x%02x", buf[2]);
dev_dbg(dev, "programming pulse duration: 0x%02x", buf[3]);
dev_dbg(dev, "pulldown slew rate control: 0x%02x", buf[4]);
dev_dbg(dev, "write-1 low time: 0x%02x", buf[5]);
dev_dbg(dev, "data sample offset/write-0 recovery time: 0x%02x", buf[6]);
dev_dbg(dev, "reserved (test register): 0x%02x", buf[7]);
dev_dbg(dev, "device status flags: 0x%02x", buf[8]);
dev_dbg(dev, "communication command byte 1: 0x%02x", buf[9]);
dev_dbg(dev, "communication command byte 2: 0x%02x", buf[10]);
dev_dbg(dev, "communication command buffer status: 0x%02x", buf[11]);
dev_dbg(dev, "1-wire data output buffer status: 0x%02x", buf[12]);
dev_dbg(dev, "1-wire data input buffer status: 0x%02x", buf[13]);
dev_dbg(dev, "reserved: 0x%02x", buf[14]);
dev_dbg(dev, "reserved: 0x%02x", buf[15]);
}
for (i = 16; i < count; ++i) {
if (buf[i] == RR_DETECT) {
dev_dbg(dev, "New device detect.\n");
continue;
}
dev_dbg(dev, "Result Register Value: 0x%02x", buf[i]);
if (buf[i] & RR_NRS)
dev_dbg(dev, "NRS: Reset no presence or ...\n");
if (buf[i] & RR_SH)
dev_dbg(dev, "SH: short on reset or set path\n");
if (buf[i] & RR_APP)
dev_dbg(dev, "APP: alarming presence on reset\n");
if (buf[i] & RR_VPP)
dev_dbg(dev, "VPP: 12V expected not seen\n");
if (buf[i] & RR_CMP)
dev_dbg(dev, "CMP: compare error\n");
if (buf[i] & RR_CRC)
dev_dbg(dev, "CRC: CRC error detected\n");
if (buf[i] & RR_RDP)
dev_dbg(dev, "RDP: redirected page\n");
if (buf[i] & RR_EOS)
dev_dbg(dev, "EOS: end of search error\n");
}
}
static int ds_recv_status(struct ds_device *dev, struct ds_status *st)
{
int count, err;
if (st)
memset(st, 0, sizeof(*st));
count = 0;
err = usb_interrupt_msg(dev->udev,
usb_rcvintpipe(dev->udev,
dev->ep[EP_STATUS]),
dev->st_buf, sizeof(dev->st_buf),
&count, 1000);
if (err < 0) {
dev_err(&dev->udev->dev,
"Failed to read 1-wire data from 0x%x: err=%d.\n",
dev->ep[EP_STATUS], err);
return err;
}
if (st && count >= sizeof(*st))
memcpy(st, dev->st_buf, sizeof(*st));
return count;
}
static void ds_reset_device(struct ds_device *dev)
{
ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
/* Always allow strong pullup which allow individual writes to use
* the strong pullup.
*/
if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE))
dev_err(&dev->udev->dev,
"%s: Error allowing strong pullup\n", __func__);
/* Chip strong pullup time was cleared. */
if (dev->spu_sleep) {
/* lower 4 bits are 0, see ds_set_pullup */
u8 del = dev->spu_sleep>>4;
if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del))
dev_err(&dev->udev->dev,
"%s: Error setting duration\n", __func__);
}
}
static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
{
int count, err;
/* Careful on size. If size is less than what is available in
* the input buffer, the device fails the bulk transfer and
* clears the input buffer. It could read the maximum size of
* the data buffer, but then do you return the first, last, or
* some set of the middle size bytes? As long as the rest of
* the code is correct there will be size bytes waiting. A
* call to ds_wait_status will wait until the device is idle
* and any data to be received would have been available.
*/
count = 0;
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
buf, size, &count, 1000);
if (err < 0) {
int recv_len;
dev_info(&dev->udev->dev, "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
/* status might tell us why endpoint is stuck? */
recv_len = ds_recv_status(dev, NULL);
if (recv_len >= 0)
ds_dump_status(dev, dev->st_buf, recv_len);
return err;
}
#if 0
{
int i;
printk("%s: count=%d: ", __func__, count);
for (i = 0; i < count; ++i)
printk("%02x ", buf[i]);
printk("\n");
}
#endif
return count;
}
static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len)
{
int count, err;
count = 0;
err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000);
if (err < 0) {
dev_err(&dev->udev->dev, "Failed to write 1-wire data to ep0x%x: "
"err=%d.\n", dev->ep[EP_DATA_OUT], err);
return err;
}
return err;
}
#if 0
int ds_stop_pulse(struct ds_device *dev, int limit)
{
struct ds_status st;
int count = 0, err = 0;
do {
err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
if (err)
break;
err = ds_send_control(dev, CTL_RESUME_EXE, 0);
if (err)
break;
err = ds_recv_status(dev, &st);
if (err)
break;
if ((st.status & ST_SPUA) == 0) {
err = ds_send_control_mode(dev, MOD_PULSE_EN, 0);
if (err)
break;
}
} while (++count < limit);
return err;
}
int ds_detect(struct ds_device *dev, struct ds_status *st)
{
int err;
err = ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
if (err)
return err;
err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM, 0);
if (err)
return err;
err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM | COMM_TYPE, 0x40);
if (err)
return err;
err = ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_PROG);
if (err)
return err;
err = ds_dump_status(dev, st);
return err;
}
#endif /* 0 */
static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
{
int err, count = 0;
do {
st->status = 0;
err = ds_recv_status(dev, st);
#if 0
if (err >= 0) {
int i;
printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
for (i = 0; i < err; ++i)
printk("%02x ", dev->st_buf[i]);
printk("\n");
}
#endif
} while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
if (err >= 16 && st->status & ST_EPOF) {
dev_info(&dev->udev->dev, "Resetting device after ST_EPOF.\n");
ds_reset_device(dev);
/* Always dump the device status. */
count = 101;
}
/* Dump the status for errors or if there is extended return data.
* The extended status includes new device detection (maybe someone
* can do something with it).
*/
if (err > 16 || count >= 100 || err < 0)
ds_dump_status(dev, dev->st_buf, err);
/* Extended data isn't an error. Well, a short is, but the dump
* would have already told the user that and we can't do anything
* about it in software anyway.
*/
if (count >= 100 || err < 0)
return -1;
else
return 0;
}
static int ds_reset(struct ds_device *dev)
{
int err;
	/* Other potentially interesting flags for reset.
*
* COMM_NTF: Return result register feedback. This could be used to
* detect some conditions such as short, alarming presence, or
* detect if a new device was detected.
*
* COMM_SE which allows SPEED_NORMAL, SPEED_FLEXIBLE, SPEED_OVERDRIVE:
* Select the data transfer rate.
*/
err = ds_send_control(dev, COMM_1_WIRE_RESET | COMM_IM, SPEED_NORMAL);
if (err)
return err;
return 0;
}
#if 0
static int ds_set_speed(struct ds_device *dev, int speed)
{
int err;
if (speed != SPEED_NORMAL && speed != SPEED_FLEXIBLE && speed != SPEED_OVERDRIVE)
return -EINVAL;
if (speed != SPEED_OVERDRIVE)
speed = SPEED_FLEXIBLE;
speed &= 0xff;
err = ds_send_control_mode(dev, MOD_1WIRE_SPEED, speed);
if (err)
return err;
return err;
}
#endif /* 0 */
static int ds_set_pullup(struct ds_device *dev, int delay)
{
int err = 0;
u8 del = 1 + (u8)(delay >> 4);
	/* Just storing delay would not get the truncation and round-up. */
int ms = del<<4;
/* Enable spu_bit if a delay is set. */
dev->spu_bit = delay ? COMM_SPU : 0;
/* If delay is zero, it has already been disabled, if the time is
* the same as the hardware was last programmed to, there is also
* nothing more to do. Compare with the recalculated value ms
* rather than del or delay which can have a different value.
*/
if (delay == 0 || ms == dev->spu_sleep)
return err;
err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del);
if (err)
return err;
dev->spu_sleep = ms;
return err;
}
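/*
 * Example: a requested delay of 10 ms gives del = 1 + (10 >> 4) = 1, so
 * the chip is programmed with 1 and spu_sleep is stored as 1 << 4 =
 * 16 ms - the value the driver will later msleep() for after a strong
 * pullup write.
 */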
static int ds_touch_bit(struct ds_device *dev, u8 bit, u8 *tbit)
{
int err;
struct ds_status st;
err = ds_send_control(dev, COMM_BIT_IO | COMM_IM | (bit ? COMM_D : 0),
0);
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_recv_data(dev, tbit, sizeof(*tbit));
if (err < 0)
return err;
return 0;
}
#if 0
static int ds_write_bit(struct ds_device *dev, u8 bit)
{
int err;
struct ds_status st;
/* Set COMM_ICP to write without a readback. Note, this will
* produce one time slot, a down followed by an up with COMM_D
	 * only determining the timing.
*/
err = ds_send_control(dev, COMM_BIT_IO | COMM_IM | COMM_ICP |
(bit ? COMM_D : 0), 0);
if (err)
return err;
ds_wait_status(dev, &st);
return 0;
}
#endif
static int ds_write_byte(struct ds_device *dev, u8 byte)
{
int err;
struct ds_status st;
err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte);
if (err)
return err;
if (dev->spu_bit)
msleep(dev->spu_sleep);
err = ds_wait_status(dev, &st);
if (err)
return err;
err = ds_recv_data(dev, &dev->byte_buf, 1);
if (err < 0)
return err;
return !(byte == dev->byte_buf);
}
static int ds_read_byte(struct ds_device *dev, u8 *byte)
{
int err;
struct ds_status st;
err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM, 0xff);
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_recv_data(dev, byte, sizeof(*byte));
if (err < 0)
return err;
return 0;
}
static int ds_read_block(struct ds_device *dev, u8 *buf, int len)
{
struct ds_status st;
int err;
if (len > 64*1024)
return -E2BIG;
memset(buf, 0xFF, len);
err = ds_send_data(dev, buf, len);
if (err < 0)
return err;
err = ds_send_control(dev, COMM_BLOCK_IO | COMM_IM, len);
if (err)
return err;
ds_wait_status(dev, &st);
memset(buf, 0x00, len);
err = ds_recv_data(dev, buf, len);
return err;
}
static int ds_write_block(struct ds_device *dev, u8 *buf, int len)
{
int err;
struct ds_status st;
err = ds_send_data(dev, buf, len);
if (err < 0)
return err;
err = ds_send_control(dev, COMM_BLOCK_IO | COMM_IM | dev->spu_bit, len);
if (err)
return err;
if (dev->spu_bit)
msleep(dev->spu_sleep);
ds_wait_status(dev, &st);
err = ds_recv_data(dev, buf, len);
if (err < 0)
return err;
return !(err == len);
}
static void ds9490r_search(void *data, struct w1_master *master,
u8 search_type, w1_slave_found_callback callback)
{
/* When starting with an existing id, the first id returned will
* be that device (if it is still on the bus most likely).
*
* If the number of devices found is less than or equal to the
* search_limit, that number of IDs will be returned. If there are
* more, search_limit IDs will be returned followed by a non-zero
	 * discrepancy value.
*/
struct ds_device *dev = data;
int err;
u16 value, index;
struct ds_status st;
int search_limit;
int found = 0;
int i;
/* DS18b20 spec, 13.16 ms per device, 75 per second, sleep for
* discovering 8 devices (1 bulk transfer and 1/2 FIFO size) at a time.
*/
const unsigned long jtime = msecs_to_jiffies(1000*8/75);
/* FIFO 128 bytes, bulk packet size 64, read a multiple of the
* packet size.
*/
const size_t bufsize = 2 * 64;
u64 *buf, *found_ids;
buf = kmalloc(bufsize, GFP_KERNEL);
if (!buf)
return;
/*
* We are holding the bus mutex during the scan, but adding devices via the
* callback needs the bus to be unlocked. So we queue up found ids here.
*/
found_ids = kmalloc_array(master->max_slave_count, sizeof(u64), GFP_KERNEL);
if (!found_ids) {
kfree(buf);
return;
}
mutex_lock(&master->bus_mutex);
/* address to start searching at */
if (ds_send_data(dev, (u8 *)&master->search_id, 8) < 0)
goto search_out;
master->search_id = 0;
value = COMM_SEARCH_ACCESS | COMM_IM | COMM_RST | COMM_SM | COMM_F |
COMM_RTS;
search_limit = master->max_slave_count;
if (search_limit > 255)
search_limit = 0;
index = search_type | (search_limit << 8);
if (ds_send_control(dev, value, index) < 0)
goto search_out;
do {
schedule_timeout(jtime);
err = ds_recv_status(dev, &st);
if (err < 0 || err < sizeof(st))
break;
if (st.data_in_buffer_status) {
/*
* Bulk in can receive partial ids, but when it does
* they fail crc and will be discarded anyway.
* That has only been seen when status in buffer
* is 0 and bulk is read anyway, so don't read
* bulk without first checking if status says there
* is data to read.
*/
err = ds_recv_data(dev, (u8 *)buf, bufsize);
if (err < 0)
break;
for (i = 0; i < err/8; ++i) {
found_ids[found++] = buf[i];
/*
* can't know if there will be a discrepancy
* value after until the next id
*/
if (found == search_limit) {
master->search_id = buf[i];
break;
}
}
}
if (test_bit(W1_ABORT_SEARCH, &master->flags))
break;
} while (!(st.status & (ST_IDLE | ST_HALT)));
/* only continue the search if some weren't found */
if (found <= search_limit) {
master->search_id = 0;
} else if (!test_bit(W1_WARN_MAX_COUNT, &master->flags)) {
/*
* Only max_slave_count will be scanned in a search,
* but it will start where it left off next search
* until all ids are identified and then it will start
* over. A continued search will report the previous
* last id as the first id (provided it is still on the
* bus).
*/
dev_info(&dev->udev->dev, "%s: max_slave_count %d reached, "
"will continue next search.\n", __func__,
master->max_slave_count);
set_bit(W1_WARN_MAX_COUNT, &master->flags);
}
search_out:
mutex_unlock(&master->bus_mutex);
kfree(buf);
for (i = 0; i < found; i++) /* run callback for all queued up IDs */
callback(master, found_ids[i]);
kfree(found_ids);
}
#if 0
/*
* FIXME: if this disabled code is ever used in the future all ds_send_data()
* calls must be changed to use a DMAable buffer.
*/
static int ds_match_access(struct ds_device *dev, u64 init)
{
int err;
struct ds_status st;
err = ds_send_data(dev, (unsigned char *)&init, sizeof(init));
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_send_control(dev, COMM_MATCH_ACCESS | COMM_IM | COMM_RST, 0x0055);
if (err)
return err;
ds_wait_status(dev, &st);
return 0;
}
static int ds_set_path(struct ds_device *dev, u64 init)
{
int err;
struct ds_status st;
u8 buf[9];
memcpy(buf, &init, 8);
buf[8] = BRANCH_MAIN;
err = ds_send_data(dev, buf, sizeof(buf));
if (err)
return err;
ds_wait_status(dev, &st);
err = ds_send_control(dev, COMM_SET_PATH | COMM_IM | COMM_RST, 0);
if (err)
return err;
ds_wait_status(dev, &st);
return 0;
}
#endif /* 0 */
static u8 ds9490r_touch_bit(void *data, u8 bit)
{
struct ds_device *dev = data;
if (ds_touch_bit(dev, bit, &dev->byte_buf))
return 0;
return dev->byte_buf;
}
#if 0
static void ds9490r_write_bit(void *data, u8 bit)
{
struct ds_device *dev = data;
ds_write_bit(dev, bit);
}
static u8 ds9490r_read_bit(void *data)
{
struct ds_device *dev = data;
int err;
err = ds_touch_bit(dev, 1, &dev->byte_buf);
if (err)
return 0;
return dev->byte_buf & 1;
}
#endif
static void ds9490r_write_byte(void *data, u8 byte)
{
struct ds_device *dev = data;
ds_write_byte(dev, byte);
}
static u8 ds9490r_read_byte(void *data)
{
struct ds_device *dev = data;
int err;
err = ds_read_byte(dev, &dev->byte_buf);
if (err)
return 0;
return dev->byte_buf;
}
static void ds9490r_write_block(void *data, const u8 *buf, int len)
{
struct ds_device *dev = data;
u8 *tbuf;
if (len <= 0)
return;
tbuf = kmemdup(buf, len, GFP_KERNEL);
if (!tbuf)
return;
ds_write_block(dev, tbuf, len);
kfree(tbuf);
}
static u8 ds9490r_read_block(void *data, u8 *buf, int len)
{
struct ds_device *dev = data;
int err;
u8 *tbuf;
if (len <= 0)
return 0;
tbuf = kmalloc(len, GFP_KERNEL);
if (!tbuf)
return 0;
err = ds_read_block(dev, tbuf, len);
if (err >= 0)
memcpy(buf, tbuf, len);
kfree(tbuf);
return err >= 0 ? len : 0;
}
static u8 ds9490r_reset(void *data)
{
struct ds_device *dev = data;
int err;
err = ds_reset(dev);
if (err)
return 1;
return 0;
}
static u8 ds9490r_set_pullup(void *data, int delay)
{
struct ds_device *dev = data;
if (ds_set_pullup(dev, delay))
return 1;
return 0;
}
static int ds_w1_init(struct ds_device *dev)
{
memset(&dev->master, 0, sizeof(struct w1_bus_master));
/* Reset the device as it can be in a bad state.
* This is necessary because a block write will wait for data
* to be placed in the output buffer and block any later
* commands which will keep accumulating and the device will
* not be idle. Another case is removing the ds2490 module
* while a bus search is in progress, somehow a few commands
* get through, but the input transfers fail leaving data in
* the input buffer. This will cause the next read to fail
* see the note in ds_recv_data.
*/
ds_reset_device(dev);
dev->master.data = dev;
dev->master.touch_bit = &ds9490r_touch_bit;
/* read_bit and write_bit in w1_bus_master are expected to set and
* sample the line level. For write_bit that means it is expected to
* set it to that value and leave it there. ds2490 only supports an
* individual time slot at the lowest level. The requirement from
* pulling the bus state down to reading the state is 15us, something
* that isn't realistic on the USB bus anyway.
dev->master.read_bit = &ds9490r_read_bit;
dev->master.write_bit = &ds9490r_write_bit;
*/
dev->master.read_byte = &ds9490r_read_byte;
dev->master.write_byte = &ds9490r_write_byte;
dev->master.read_block = &ds9490r_read_block;
dev->master.write_block = &ds9490r_write_block;
dev->master.reset_bus = &ds9490r_reset;
dev->master.set_pullup = &ds9490r_set_pullup;
dev->master.search = &ds9490r_search;
return w1_add_master_device(&dev->master);
}
static void ds_w1_fini(struct ds_device *dev)
{
w1_remove_master_device(&dev->master);
}
static int ds_probe(struct usb_interface *intf,
const struct usb_device_id *udev_id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_endpoint_descriptor *endpoint;
struct usb_host_interface *iface_desc;
struct ds_device *dev;
int i, err, alt;
dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->udev = usb_get_dev(udev);
if (!dev->udev) {
err = -ENOMEM;
goto err_out_free;
}
memset(dev->ep, 0, sizeof(dev->ep));
usb_set_intfdata(intf, dev);
err = usb_reset_configuration(dev->udev);
if (err) {
dev_err(&dev->udev->dev,
"Failed to reset configuration: err=%d.\n", err);
goto err_out_clear;
}
/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
alt = 3;
err = usb_set_interface(dev->udev,
intf->cur_altsetting->desc.bInterfaceNumber, alt);
if (err) {
dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
"for %d interface: err=%d.\n", alt,
intf->cur_altsetting->desc.bInterfaceNumber, err);
goto err_out_clear;
}
iface_desc = intf->cur_altsetting;
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
dev_err(&dev->udev->dev, "Num endpoints=%d. It is not DS9490R.\n",
iface_desc->desc.bNumEndpoints);
err = -EINVAL;
goto err_out_clear;
}
/*
	 * This loop doesn't cover control endpoint 0,
	 * so we only fill endpoint entries 1-3.
*/
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
dev->ep[i+1] = endpoint->bEndpointAddress;
#if 0
printk("%d: addr=%x, size=%d, dir=%s, type=%x\n",
i, endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize),
(endpoint->bEndpointAddress & USB_DIR_IN)?"IN":"OUT",
endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
#endif
}
err = ds_w1_init(dev);
if (err)
goto err_out_clear;
mutex_lock(&ds_mutex);
list_add_tail(&dev->ds_entry, &ds_devices);
mutex_unlock(&ds_mutex);
return 0;
err_out_clear:
usb_set_intfdata(intf, NULL);
usb_put_dev(dev->udev);
err_out_free:
kfree(dev);
return err;
}
static void ds_disconnect(struct usb_interface *intf)
{
struct ds_device *dev;
dev = usb_get_intfdata(intf);
if (!dev)
return;
mutex_lock(&ds_mutex);
list_del(&dev->ds_entry);
mutex_unlock(&ds_mutex);
ds_w1_fini(dev);
usb_set_intfdata(intf, NULL);
usb_put_dev(dev->udev);
kfree(dev);
}
static const struct usb_device_id ds_id_table[] = {
{ USB_DEVICE(0x04fa, 0x2490) },
{ },
};
MODULE_DEVICE_TABLE(usb, ds_id_table);
static struct usb_driver ds_driver = {
.name = "DS9490R",
.probe = ds_probe,
.disconnect = ds_disconnect,
.id_table = ds_id_table,
};
module_usb_driver(ds_driver);
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
MODULE_LICENSE("GPL");
| linux-master | drivers/w1/masters/ds2490.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007,2012 Texas Instruments, Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/w1.h>
#define MOD_NAME "OMAP_HDQ:"
#define OMAP_HDQ_REVISION 0x00
#define OMAP_HDQ_TX_DATA 0x04
#define OMAP_HDQ_RX_DATA 0x08
#define OMAP_HDQ_CTRL_STATUS 0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR BIT(1)
#define OMAP_HDQ_INT_STATUS 0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT BIT(0)
#define OMAP_HDQ_FLAG_CLEAR 0
#define OMAP_HDQ_FLAG_SET 1
#define OMAP_HDQ_TIMEOUT (HZ/5)
#define OMAP_HDQ_MAX_USER 4
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;
module_param(w1_id, int, 0400);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
struct hdq_data {
struct device *dev;
void __iomem *hdq_base;
/* lock read/write/break operations */
struct mutex hdq_mutex;
/* interrupt status and a lock for it */
u8 hdq_irqstatus;
spinlock_t hdq_spinlock;
/* mode: 0-HDQ 1-W1 */
int mode;
};
/* HDQ register I/O routines */
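/* Accesses are 32-bit wide; only the low byte of each register is used. */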
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
return __raw_readl(hdq_data->hdq_base + offset);
}
static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
__raw_writel(val, hdq_data->hdq_base + offset);
}
static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
u8 val, u8 mask)
{
u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
| (val & mask);
__raw_writel(new_val, hdq_data->hdq_base + offset);
return new_val;
}
/*
* Wait for one or more bits in the flag to change.
* HDQ_FLAG_SET: wait until any bit in the flag is set.
* HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
* return 0 on success and -ETIMEDOUT in the case of timeout.
*/
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
u8 flag, u8 flag_set, u8 *status)
{
int ret = 0;
unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
/* wait for the flag clear */
while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
&& time_before(jiffies, timeout)) {
schedule_timeout_uninterruptible(1);
}
if (*status & flag)
ret = -ETIMEDOUT;
} else if (flag_set == OMAP_HDQ_FLAG_SET) {
/* wait for the flag set */
while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
&& time_before(jiffies, timeout)) {
schedule_timeout_uninterruptible(1);
}
if (!(*status & flag))
ret = -ETIMEDOUT;
} else
return -EINVAL;
return ret;
}
/* Clear saved irqstatus after using an interrupt */
static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
{
unsigned long irqflags;
u8 status;
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
status = hdq_data->hdq_irqstatus;
/* this is a read-modify-write */
hdq_data->hdq_irqstatus &= ~bits;
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
return status;
}
/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
int ret;
u8 tmp_status;
ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
if (ret < 0) {
ret = -EINTR;
goto rtn;
}
if (hdq_data->hdq_irqstatus)
dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
hdq_data->hdq_irqstatus);
*status = 0;
hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
/* set the GO bit */
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
/* wait for the TXCOMPLETE bit */
ret = wait_event_timeout(hdq_wait_queue,
(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
OMAP_HDQ_TIMEOUT);
*status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
if (ret == 0) {
dev_dbg(hdq_data->dev, "TX wait elapsed\n");
ret = -ETIMEDOUT;
goto out;
}
/* check irqstatus */
if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
dev_dbg(hdq_data->dev, "timeout waiting for"
" TXCOMPLETE/RXCOMPLETE, %x\n", *status);
ret = -ETIMEDOUT;
goto out;
}
/* wait for the GO bit return to zero */
ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_GO,
OMAP_HDQ_FLAG_CLEAR, &tmp_status);
if (ret) {
dev_dbg(hdq_data->dev, "timeout waiting GO bit"
" return to zero, %x\n", tmp_status);
}
out:
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
}
/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
struct hdq_data *hdq_data = _hdq;
unsigned long irqflags;
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
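/* latch the status bits; waiters consume and clear them via hdq_reset_irqstatus() */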
hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
if (hdq_data->hdq_irqstatus &
(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
/* wake up sleeping process */
wake_up(&hdq_wait_queue);
}
return IRQ_HANDLED;
}
/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
u8 search_type, w1_slave_found_callback slave_found)
{
u64 module_id, rn_le, cs, id;
if (w1_id)
module_id = w1_id;
else
module_id = 0x1;
rn_le = cpu_to_le64(module_id);
/*
* HDQ devices might not truly follow the 1-wire spec,
* so calculate the CRC over the module-parameter id ourselves.
*/
cs = w1_calc_crc8((u8 *)&rn_le, 7);
id = (cs << 56) | module_id;
slave_found(master_dev, id);
}
/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
int ret = 0;
u8 tmp_status;
ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
if (ret < 0) {
dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
ret = -EINTR;
goto rtn;
}
if (hdq_data->hdq_irqstatus)
dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
hdq_data->hdq_irqstatus);
/* set the INIT and GO bit */
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
OMAP_HDQ_CTRL_STATUS_GO);
/* wait for the TIMEOUT bit */
ret = wait_event_timeout(hdq_wait_queue,
(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
OMAP_HDQ_TIMEOUT);
tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
if (ret == 0) {
dev_dbg(hdq_data->dev, "break wait elapsed\n");
ret = -EINTR;
goto out;
}
/* check irqstatus */
if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
tmp_status);
ret = -ETIMEDOUT;
goto out;
}
/*
* check for the presence detect bit to get
* set to show that the slave is responding
*/
if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
dev_dbg(hdq_data->dev, "Presence bit not set\n");
ret = -ETIMEDOUT;
goto out;
}
/*
* wait for both INIT and GO bits to return to zero.
* zero wait time expected for interrupt mode.
*/
ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
&tmp_status);
if (ret)
dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
" return to zero, %x\n", tmp_status);
out:
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
}
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
int ret = 0;
u8 status;
ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
if (ret < 0) {
ret = -EINTR;
goto rtn;
}
if (pm_runtime_suspended(hdq_data->dev)) {
ret = -EINVAL;
goto out;
}
if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
/*
* The RX comes immediately after TX.
*/
wait_event_timeout(hdq_wait_queue,
(hdq_data->hdq_irqstatus
& (OMAP_HDQ_INT_STATUS_RXCOMPLETE |
OMAP_HDQ_INT_STATUS_TIMEOUT)),
OMAP_HDQ_TIMEOUT);
status = hdq_reset_irqstatus(hdq_data,
OMAP_HDQ_INT_STATUS_RXCOMPLETE |
OMAP_HDQ_INT_STATUS_TIMEOUT);
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
OMAP_HDQ_CTRL_STATUS_DIR);
/* check irqstatus */
if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
dev_dbg(hdq_data->dev, "timeout waiting for"
" RXCOMPLETE, %x", status);
ret = -ETIMEDOUT;
goto out;
}
} else { /* interrupt had occurred before hdq_read_byte was called */
hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
}
/* the data is ready. Read it in! */
*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
}
/*
* W1 triplet callback function - used for searching ROM addresses.
* Registered only when controller is in 1-wire mode.
*/
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
u8 id_bit, comp_bit;
int err;
u8 ret = 0x3; /* no slaves responded */
struct hdq_data *hdq_data = _hdq;
u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;
err = pm_runtime_get_sync(hdq_data->dev);
if (err < 0) {
pm_runtime_put_noidle(hdq_data->dev);
return err;
}
err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
if (err < 0) {
dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
goto rtn;
}
/* read id_bit */
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
err = wait_event_timeout(hdq_wait_queue,
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
OMAP_HDQ_TIMEOUT);
/* Must clear irqstatus for another RXCOMPLETE interrupt */
hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
if (err == 0) {
dev_dbg(hdq_data->dev, "RX wait elapsed\n");
goto out;
}
id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
/* read comp_bit */
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
err = wait_event_timeout(hdq_wait_queue,
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
OMAP_HDQ_TIMEOUT);
/* Must clear irqstatus for another RXCOMPLETE interrupt */
hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
if (err == 0) {
dev_dbg(hdq_data->dev, "RX wait elapsed\n");
goto out;
}
comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
if (id_bit && comp_bit) {
ret = 0x03; /* no slaves responded */
goto out;
}
if (!id_bit && !comp_bit) {
/* Both bits are valid, take the direction given */
ret = bdir ? 0x04 : 0;
} else {
/* Only one bit is valid, take that direction */
bdir = id_bit;
ret = id_bit ? 0x05 : 0x02;
}
/* write bdir bit */
hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
err = wait_event_timeout(hdq_wait_queue,
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_TXCOMPLETE),
OMAP_HDQ_TIMEOUT);
/* Must clear irqstatus for another TXCOMPLETE interrupt */
hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
if (err == 0) {
dev_dbg(hdq_data->dev, "TX wait elapsed\n");
goto out;
}
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
OMAP_HDQ_CTRL_STATUS_SINGLE);
out:
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
pm_runtime_mark_last_busy(hdq_data->dev);
pm_runtime_put_autosuspend(hdq_data->dev);
return ret;
}
/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
struct hdq_data *hdq_data = _hdq;
int err;
err = pm_runtime_get_sync(hdq_data->dev);
if (err < 0) {
pm_runtime_put_noidle(hdq_data->dev);
return err;
}
omap_hdq_break(hdq_data);
pm_runtime_mark_last_busy(hdq_data->dev);
pm_runtime_put_autosuspend(hdq_data->dev);
return 0;
}
/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
struct hdq_data *hdq_data = _hdq;
u8 val = 0;
int ret;
ret = pm_runtime_get_sync(hdq_data->dev);
if (ret < 0) {
pm_runtime_put_noidle(hdq_data->dev);
return -1;
}
ret = hdq_read_byte(hdq_data, &val);
if (ret)
val = -1;
pm_runtime_mark_last_busy(hdq_data->dev);
pm_runtime_put_autosuspend(hdq_data->dev);
return val;
}
/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
struct hdq_data *hdq_data = _hdq;
int ret;
u8 status;
ret = pm_runtime_get_sync(hdq_data->dev);
if (ret < 0) {
pm_runtime_put_noidle(hdq_data->dev);
return;
}
/*
* We need to reset the slave before
* issuing the SKIP ROM command, else
* the slave will not work.
*/
if (byte == W1_SKIP_ROM)
omap_hdq_break(hdq_data);
ret = hdq_write_byte(hdq_data, byte, &status);
if (ret < 0) {
dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
goto out_err;
}
out_err:
pm_runtime_mark_last_busy(hdq_data->dev);
pm_runtime_put_autosuspend(hdq_data->dev);
}
static struct w1_bus_master omap_w1_master = {
.read_byte = omap_w1_read_byte,
.write_byte = omap_w1_write_byte,
.reset_bus = omap_w1_reset_bus,
};
static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
{
struct hdq_data *hdq_data = dev_get_drvdata(dev);
hdq_reg_out(hdq_data, 0, hdq_data->mode);
hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
return 0;
}
static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
{
struct hdq_data *hdq_data = dev_get_drvdata(dev);
/* select HDQ/1W mode & enable clocks */
hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
hdq_data->mode);
hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
return 0;
}
static const struct dev_pm_ops omap_hdq_pm_ops = {
SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
omap_hdq_runtime_resume, NULL)
};
static int omap_hdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdq_data *hdq_data;
int ret, irq;
u8 rev;
const char *mode;
hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
if (!hdq_data)
return -ENOMEM;
hdq_data->dev = dev;
platform_set_drvdata(pdev, hdq_data);
hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdq_data->hdq_base))
return PTR_ERR(hdq_data->hdq_base);
mutex_init(&hdq_data->hdq_mutex);
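/* Default to HDQ mode unless the "ti,mode" property selects 1-wire. */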
ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
if (ret < 0 || !strcmp(mode, "hdq")) {
hdq_data->mode = 0;
omap_w1_master.search = omap_w1_search_bus;
} else {
hdq_data->mode = 1;
omap_w1_master.triplet = omap_w1_triplet;
}
pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
goto err_w1;
}
rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
spin_lock_init(&hdq_data->hdq_spinlock);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
ret = irq;
goto err_irq;
}
ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
if (ret < 0) {
dev_dbg(&pdev->dev, "could not request irq\n");
goto err_irq;
}
omap_hdq_break(hdq_data);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
omap_w1_master.data = hdq_data;
ret = w1_add_master_device(&omap_w1_master);
if (ret) {
dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
goto err_w1;
}
return 0;
err_irq:
pm_runtime_put_sync(&pdev->dev);
err_w1:
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
static int omap_hdq_remove(struct platform_device *pdev)
{
int active;
active = pm_runtime_get_sync(&pdev->dev);
if (active < 0)
pm_runtime_put_noidle(&pdev->dev);
w1_remove_master_device(&omap_w1_master);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (active >= 0)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
static const struct of_device_id omap_hdq_dt_ids[] = {
{ .compatible = "ti,omap3-1w" },
{ .compatible = "ti,am4372-hdq" },
{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
static struct platform_driver omap_hdq_driver = {
.probe = omap_hdq_probe,
.remove = omap_hdq_remove,
.driver = {
.name = "omap_hdq",
.of_match_table = omap_hdq_dt_ids,
.pm = &omap_hdq_pm_ops,
},
};
module_platform_driver(omap_hdq_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");
| linux-master | drivers/w1/masters/omap_hdq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2011-2012 Calxeda, Inc.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "edac_module.h"
#define SR_CLR_SB_ECC_INTR 0x0
#define SR_CLR_DB_ECC_INTR 0x4
struct hb_l2_drvdata {
void __iomem *base;
int sb_irq;
int db_irq;
};
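/*
* Single-bit (correctable) and double-bit (uncorrectable) errors are
* signalled on separate interrupt lines; writing 1 to the matching
* clear register acknowledges the event.
*/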
static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id)
{
struct edac_device_ctl_info *dci = dev_id;
struct hb_l2_drvdata *drvdata = dci->pvt_info;
if (irq == drvdata->sb_irq) {
writel(1, drvdata->base + SR_CLR_SB_ECC_INTR);
edac_device_handle_ce(dci, 0, 0, dci->ctl_name);
}
if (irq == drvdata->db_irq) {
writel(1, drvdata->base + SR_CLR_DB_ECC_INTR);
edac_device_handle_ue(dci, 0, 0, dci->ctl_name);
}
return IRQ_HANDLED;
}
static const struct of_device_id hb_l2_err_of_match[] = {
{ .compatible = "calxeda,hb-sregs-l2-ecc", },
{},
};
MODULE_DEVICE_TABLE(of, hb_l2_err_of_match);
static int highbank_l2_err_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
struct edac_device_ctl_info *dci;
struct hb_l2_drvdata *drvdata;
struct resource *r;
int res = 0;
dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu",
1, "L", 1, 2, NULL, 0, 0);
if (!dci)
return -ENOMEM;
drvdata = dci->pvt_info;
dci->dev = &pdev->dev;
platform_set_drvdata(pdev, dci);
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "Unable to get mem resource\n");
res = -ENODEV;
goto err;
}
if (!devm_request_mem_region(&pdev->dev, r->start,
resource_size(r), dev_name(&pdev->dev))) {
dev_err(&pdev->dev, "Error while requesting mem region\n");
res = -EBUSY;
goto err;
}
drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!drvdata->base) {
dev_err(&pdev->dev, "Unable to map regs\n");
res = -ENOMEM;
goto err;
}
id = of_match_device(hb_l2_err_of_match, &pdev->dev);
dci->mod_name = pdev->dev.driver->name;
dci->ctl_name = id ? id->compatible : "unknown";
dci->dev_name = dev_name(&pdev->dev);
if (edac_device_add_device(dci))
goto err;
drvdata->db_irq = platform_get_irq(pdev, 0);
res = devm_request_irq(&pdev->dev, drvdata->db_irq,
highbank_l2_err_handler,
0, dev_name(&pdev->dev), dci);
if (res < 0)
goto err2;
drvdata->sb_irq = platform_get_irq(pdev, 1);
res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
highbank_l2_err_handler,
0, dev_name(&pdev->dev), dci);
if (res < 0)
goto err2;
devres_close_group(&pdev->dev, NULL);
return 0;
err2:
edac_device_del_device(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
edac_device_free_ctl_info(dci);
return res;
}
static int highbank_l2_err_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
return 0;
}
static struct platform_driver highbank_l2_edac_driver = {
.probe = highbank_l2_err_probe,
.remove = highbank_l2_err_remove,
.driver = {
.name = "hb_l2_edac",
.of_match_table = hb_l2_err_of_match,
},
};
module_platform_driver(highbank_l2_edac_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank L2 Cache");
| linux-master | drivers/edac/highbank_l2_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* EDAC PCI component
*
* Author: Dave Jiang <[email protected]>
*
* 2007 (c) MontaVista Software, Inc.
*/
#include <asm/page.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/timer.h>
#include "edac_pci.h"
#include "edac_module.h"
static DEFINE_MUTEX(edac_pci_ctls_mutex);
static LIST_HEAD(edac_pci_list);
static atomic_t pci_indexes = ATOMIC_INIT(0);
struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
const char *edac_pci_name)
{
struct edac_pci_ctl_info *pci;
edac_dbg(1, "\n");
pci = kzalloc(sizeof(struct edac_pci_ctl_info), GFP_KERNEL);
if (!pci)
return NULL;
if (sz_pvt) {
pci->pvt_info = kzalloc(sz_pvt, GFP_KERNEL);
if (!pci->pvt_info)
goto free;
}
pci->op_state = OP_ALLOC;
snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name);
return pci;
free:
kfree(pci);
return NULL;
}
EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
{
edac_dbg(1, "\n");
edac_pci_remove_sysfs(pci);
}
EXPORT_SYMBOL_GPL(edac_pci_free_ctl_info);
/*
* find_edac_pci_by_dev()
* scans the edac_pci list for a specific 'struct device *'
*
* return NULL if not found, or return control struct pointer
*/
static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
{
struct edac_pci_ctl_info *pci;
struct list_head *item;
edac_dbg(1, "\n");
list_for_each(item, &edac_pci_list) {
pci = list_entry(item, struct edac_pci_ctl_info, link);
if (pci->dev == dev)
return pci;
}
return NULL;
}
/*
* add_edac_pci_to_global_list
* Before calling this function, caller must assign a unique value to
* edac_dev->pci_idx.
* Return:
* 0 on success
* 1 on failure
*/
static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
{
struct list_head *item, *insert_before;
struct edac_pci_ctl_info *rover;
edac_dbg(1, "\n");
insert_before = &edac_pci_list;
/* Determine if already on the list */
rover = find_edac_pci_by_dev(pci->dev);
if (unlikely(rover != NULL))
goto fail0;
/* Insert in ascending order by 'pci_idx', so find position */
list_for_each(item, &edac_pci_list) {
rover = list_entry(item, struct edac_pci_ctl_info, link);
if (rover->pci_idx >= pci->pci_idx) {
if (unlikely(rover->pci_idx == pci->pci_idx))
goto fail1;
insert_before = item;
break;
}
}
list_add_tail_rcu(&pci->link, insert_before);
return 0;
fail0:
edac_printk(KERN_WARNING, EDAC_PCI,
"%s (%s) %s %s already assigned %d\n",
dev_name(rover->dev), edac_dev_name(rover),
rover->mod_name, rover->ctl_name, rover->pci_idx);
return 1;
fail1:
edac_printk(KERN_WARNING, EDAC_PCI,
"but in low-level driver: attempt to assign\n"
"\tduplicate pci_idx %d in %s()\n", rover->pci_idx,
__func__);
return 1;
}
/*
* del_edac_pci_from_global_list
*
* remove the PCI control struct from the global list
*/
static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
{
list_del_rcu(&pci->link);
/* these are for safe removal of devices from global list while
* NMI handlers may be traversing list
*/
synchronize_rcu();
INIT_LIST_HEAD(&pci->link);
}
/*
* edac_pci_workq_function()
*
* periodic function that performs the operation
* scheduled by a workq request, for a given PCI control struct
*/
static void edac_pci_workq_function(struct work_struct *work_req)
{
struct delayed_work *d_work = to_delayed_work(work_req);
struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
int msec;
unsigned long delay;
edac_dbg(3, "checking\n");
mutex_lock(&edac_pci_ctls_mutex);
if (pci->op_state != OP_RUNNING_POLL) {
mutex_unlock(&edac_pci_ctls_mutex);
return;
}
if (edac_pci_get_check_errors())
pci->edac_check(pci);
/* if we are on a one second period, then use round */
msec = edac_pci_get_poll_msec();
if (msec == 1000)
delay = round_jiffies_relative(msecs_to_jiffies(msec));
else
delay = msecs_to_jiffies(msec);
edac_queue_work(&pci->work, delay);
mutex_unlock(&edac_pci_ctls_mutex);
}
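/*
* edac_pci_alloc_index: Allocate a unique PCI index number
*
* Return:
*      allocated index number
*/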
int edac_pci_alloc_index(void)
{
return atomic_inc_return(&pci_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
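/*
* edac_pci_add_device()
*
* Insert the 'pci' control struct into the global list, create its
* sysfs entries and start the polling worker when an edac_check
* handler is provided. Returns 0 on success, 1 on failure.
*/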
int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
{
edac_dbg(0, "\n");
pci->pci_idx = edac_idx;
pci->start_time = jiffies;
mutex_lock(&edac_pci_ctls_mutex);
if (add_edac_pci_to_global_list(pci))
goto fail0;
if (edac_pci_create_sysfs(pci)) {
edac_pci_printk(pci, KERN_WARNING,
"failed to create sysfs pci\n");
goto fail1;
}
if (pci->edac_check) {
pci->op_state = OP_RUNNING_POLL;
INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
} else {
pci->op_state = OP_RUNNING_INTERRUPT;
}
edac_pci_printk(pci, KERN_INFO,
"Giving out device to module %s controller %s: DEV %s (%s)\n",
pci->mod_name, pci->ctl_name, pci->dev_name,
edac_op_state_to_string(pci->op_state));
mutex_unlock(&edac_pci_ctls_mutex);
return 0;
/* error unwind stack */
fail1:
del_edac_pci_from_global_list(pci);
fail0:
mutex_unlock(&edac_pci_ctls_mutex);
return 1;
}
EXPORT_SYMBOL_GPL(edac_pci_add_device);
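/*
* edac_pci_del_device()
*
* Remove the control struct for 'dev' from the global list and stop
* its polling work. Returns the control struct, or NULL if the device
* was not found.
*/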
struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
{
struct edac_pci_ctl_info *pci;
edac_dbg(0, "\n");
mutex_lock(&edac_pci_ctls_mutex);
/* ensure the control struct is on the global list
* if not, then leave
*/
pci = find_edac_pci_by_dev(dev);
if (pci == NULL) {
mutex_unlock(&edac_pci_ctls_mutex);
return NULL;
}
pci->op_state = OP_OFFLINE;
del_edac_pci_from_global_list(pci);
mutex_unlock(&edac_pci_ctls_mutex);
if (pci->edac_check)
edac_stop_work(&pci->work);
edac_printk(KERN_INFO, EDAC_PCI,
"Removed device %d for %s %s: DEV %s\n",
pci->pci_idx, pci->mod_name, pci->ctl_name, edac_dev_name(pci));
return pci;
}
EXPORT_SYMBOL_GPL(edac_pci_del_device);
/*
* edac_pci_generic_check
*
* a Generic parity check API
*/
static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
{
edac_dbg(4, "\n");
edac_pci_do_parity_check();
}
/* free running instance index counter */
static int edac_pci_idx;
#define EDAC_PCI_GENCTL_NAME "EDAC PCI controller"
struct edac_pci_gen_data {
int edac_idx;
};
struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
const char *mod_name)
{
struct edac_pci_ctl_info *pci;
struct edac_pci_gen_data *pdata;
pci = edac_pci_alloc_ctl_info(sizeof(*pdata), EDAC_PCI_GENCTL_NAME);
if (!pci)
return NULL;
pdata = pci->pvt_info;
pci->dev = dev;
dev_set_drvdata(pci->dev, pci);
pci->dev_name = pci_name(to_pci_dev(dev));
pci->mod_name = mod_name;
pci->ctl_name = EDAC_PCI_GENCTL_NAME;
if (edac_op_state == EDAC_OPSTATE_POLL)
pci->edac_check = edac_pci_generic_check;
pdata->edac_idx = edac_pci_idx++;
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
edac_dbg(3, "failed edac_pci_add_device()\n");
edac_pci_free_ctl_info(pci);
return NULL;
}
return pci;
}
EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
{
edac_dbg(0, "pci mod=%s\n", pci->mod_name);
edac_pci_del_device(pci->dev);
edac_pci_free_ctl_info(pci);
}
EXPORT_SYMBOL_GPL(edac_pci_release_generic_ctl);
| linux-master | drivers/edac/edac_pci.c |
/*
* Intel 3200/3210 Memory Controller kernel module
* Copyright (C) 2008-2009 Akamai Technologies, Inc.
* Portions by Hitoshi Mitake <[email protected]>.
*
* This file may be distributed under the terms of the
* GNU General Public License.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/io.h>
#include "edac_module.h"
#include <linux/io-64-nonatomic-lo-hi.h>
#define EDAC_MOD_STR "i3200_edac"
#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
#define I3200_DIMMS 4
#define I3200_RANKS 8
#define I3200_RANKS_PER_CHANNEL 4
#define I3200_CHANNELS 2
/* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */
#define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
#define I3200_MCHBAR_HIGH 0x4c
#define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
#define I3200_MMR_WINDOW_SIZE 16384
#define I3200_TOM 0xa0 /* Top of Memory (16b)
*
* 15:10 reserved
* 9:0 total populated physical memory
*/
#define I3200_TOM_MASK 0x3ff /* bits 9:0 */
#define I3200_TOM_SHIFT 26 /* 64MiB grain */
#define I3200_ERRSTS 0xc8 /* Error Status Register (16b)
*
* 15 reserved
* 14 Isochronous TBWRR Run Behind FIFO Full
* (ITCV)
* 13 Isochronous TBWRR Run Behind FIFO Put
* (ITSTV)
* 12 reserved
* 11 MCH Thermal Sensor Event
* for SMI/SCI/SERR (GTSE)
* 10 reserved
* 9 LOCK to non-DRAM Memory Flag (LCKF)
* 8 reserved
* 7 DRAM Throttle Flag (DTF)
* 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
#define I3200_ERRSTS_UE 0x0002
#define I3200_ERRSTS_CE 0x0001
#define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE)
/* Intel MMIO register space - device 0 function 0 - MMR space */
#define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
*
* 15:10 reserved
* 9:0 Channel 0 DRAM Rank Boundary Address
*/
#define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
#define I3200_DRB_MASK 0x3ff /* bits 9:0 */
#define I3200_DRB_SHIFT 26 /* 64MiB grain */
#define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
*
* 63:48 Error Column Address (ERRCOL)
* 47:32 Error Row Address (ERRROW)
* 31:29 Error Bank Address (ERRBANK)
* 28:27 Error Rank Address (ERRRANK)
* 26:24 reserved
* 23:16 Error Syndrome (ERRSYND)
* 15: 2 reserved
* 1 Multiple Bit Error Status (MERRSTS)
* 0 Correctable Error Status (CERRSTS)
*/
#define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */
#define I3200_ECCERRLOG_CE 0x1
#define I3200_ECCERRLOG_UE 0x2
#define I3200_ECCERRLOG_RANK_BITS 0x18000000
#define I3200_ECCERRLOG_RANK_SHIFT 27
#define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000
#define I3200_ECCERRLOG_SYNDROME_SHIFT 16
#define I3200_CAPID0 0xe0 /* P.95 of spec for details */
struct i3200_priv {
void __iomem *window;
};
static int nr_channels;
static int how_many_channels(struct pci_dev *pdev)
{
int n_channels;
unsigned char capid0_8b; /* 8th byte of CAPID0 */
pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
edac_dbg(0, "In single channel mode\n");
n_channels = 1;
} else {
edac_dbg(0, "In dual channel mode\n");
n_channels = 2;
}
if (capid0_8b & 0x10) /* check if both channels are filled */
edac_dbg(0, "2 DIMMS per channel disabled\n");
else
edac_dbg(0, "2 DIMMS per channel enabled\n");
return n_channels;
}
static unsigned long eccerrlog_syndrome(u64 log)
{
return (log & I3200_ECCERRLOG_SYNDROME_BITS) >>
I3200_ECCERRLOG_SYNDROME_SHIFT;
}
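/*
* Map the rank reported in the ECC error log to a csrow index:
* csrows are numbered channel-major, I3200_RANKS_PER_CHANNEL per channel.
*/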
static int eccerrlog_row(int channel, u64 log)
{
u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >>
I3200_ECCERRLOG_RANK_SHIFT);
return rank | (channel * I3200_RANKS_PER_CHANNEL);
}
enum i3200_chips {
I3200 = 0,
};
struct i3200_dev_info {
const char *ctl_name;
};
struct i3200_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[I3200_CHANNELS];
};
static const struct i3200_dev_info i3200_devs[] = {
[I3200] = {
.ctl_name = "i3200"
},
};
static struct pci_dev *mci_pdev;
static int i3200_registered = 1;
static void i3200_clear_error_info(struct mem_ctl_info *mci)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
I3200_ERRSTS_BITS);
}
static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
struct i3200_error_info *info)
{
struct pci_dev *pdev;
struct i3200_priv *priv = mci->pvt_info;
void __iomem *window = priv->window;
pdev = to_pci_dev(mci->pdev);
/*
* This is messy because there is no atomic way to read all the
* registers at once, and a CE can be overwritten by a UE while we
* are reading them.
*/
pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
if (!(info->errsts & I3200_ERRSTS_BITS))
return;
info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
if (nr_channels == 2)
info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);
/*
* If the error is the same for both reads then the first set
* of reads is valid. If there is a change then there is a CE
* with no info and the second set of reads is valid and
* should be UE info.
*/
if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
if (nr_channels == 2)
info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
}
i3200_clear_error_info(mci);
}
static void i3200_process_error_info(struct mem_ctl_info *mci,
struct i3200_error_info *info)
{
int channel;
u64 log;
if (!(info->errsts & I3200_ERRSTS_BITS))
return;
if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1, "UE overwrote CE", "");
info->errsts = info->errsts2;
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
if (log & I3200_ECCERRLOG_UE) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, 0,
eccerrlog_row(channel, log),
-1, -1,
"i3000 UE", "");
} else if (log & I3200_ECCERRLOG_CE) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, eccerrlog_syndrome(log),
eccerrlog_row(channel, log),
-1, -1,
"i3000 CE", "");
}
}
}
static void i3200_check(struct mem_ctl_info *mci)
{
struct i3200_error_info info;
i3200_get_and_clear_error_info(mci, &info);
i3200_process_error_info(mci, &info);
}
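/*
* Map the MCH memory-mapped register window; the 64-bit MCHBAR is split
* across two 32-bit config registers.
*/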
static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
{
union {
u64 mchbar;
struct {
u32 mchbar_low;
u32 mchbar_high;
};
} u;
void __iomem *window;
pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
u.mchbar &= I3200_MCHBAR_MASK;
if (u.mchbar != (resource_size_t)u.mchbar) {
printk(KERN_ERR
"i3200: mmio space beyond accessible range (0x%llx)\n",
(unsigned long long)u.mchbar);
return NULL;
}
window = ioremap(u.mchbar, I3200_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
return window;
}
static void i3200_get_drbs(void __iomem *window,
u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
{
int i;
for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
edac_dbg(0, "drb[0][%d] = %d, drb[1][%d] = %d\n", i, drbs[0][i], i, drbs[1][i]);
}
}
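/*
* Memory is "stacked" when the last rank boundary of the last channel
* reaches the top of memory.
*/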
static bool i3200_is_stacked(struct pci_dev *pdev,
u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
{
u16 tom;
pci_read_config_word(pdev, I3200_TOM, &tom);
tom &= I3200_TOM_MASK;
return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
}
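/*
* Convert the cumulative rank-boundary values into a page count for a
* single rank.
*/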
static unsigned long drb_to_nr_pages(
u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
int channel, int rank)
{
int n;
n = drbs[channel][rank];
if (!n)
return 0;
if (rank > 0)
n -= drbs[channel][rank - 1];
if (stacked && (channel == 1) &&
drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];
n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
return n;
}
static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
int i, j;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
struct i3200_priv *priv;
edac_dbg(0, "MC:\n");
window = i3200_map_mchbar(pdev);
if (!window)
return -ENODEV;
i3200_get_drbs(window, drbs);
nr_channels = how_many_channels(pdev);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = I3200_DIMMS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(struct i3200_priv));
if (!mci)
return -ENOMEM;
edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = i3200_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i3200_check;
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
stacked = i3200_is_stacked(pdev, drbs);
/*
* The dram rank boundary (DRB) reg values are boundary addresses
* for each DRAM rank with a granularity of 64MB. DRB regs are
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
for (i = 0; i < I3200_DIMMS; i++) {
unsigned long nr_pages;
for (j = 0; j < nr_channels; j++) {
struct dimm_info *dimm = edac_get_dimm(mci, i, j, 0);
nr_pages = drb_to_nr_pages(drbs, stacked, j, i);
if (nr_pages == 0)
continue;
edac_dbg(0, "csrow %d, channel %d%s, size = %ld MiB\n", i, j,
stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));
dimm->nr_pages = nr_pages;
dimm->grain = nr_pages << PAGE_SHIFT;
dimm->mtype = MEM_DDR2;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
}
i3200_clear_error_info(mci);
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
edac_dbg(3, "MC: success\n");
return 0;
fail:
iounmap(window);
if (mci)
edac_mc_free(mci);
return rc;
}
static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = i3200_probe1(pdev, ent->driver_data);
if (!mci_pdev)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void i3200_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i3200_priv *priv;
edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
priv = mci->pvt_info;
iounmap(priv->window);
edac_mc_free(mci);
pci_disable_device(pdev);
}
static const struct pci_device_id i3200_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I3200},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);
static struct pci_driver i3200_driver = {
.name = EDAC_MOD_STR,
.probe = i3200_init_one,
.remove = i3200_remove_one,
.id_table = i3200_pci_tbl,
};
static int __init i3200_init(void)
{
int pci_rc;
edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i3200_driver);
if (pci_rc < 0)
goto fail0;
if (!mci_pdev) {
i3200_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3200_HB, NULL);
if (!mci_pdev) {
edac_dbg(0, "i3200 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
if (pci_rc < 0) {
edac_dbg(0, "i3200 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&i3200_driver);
fail0:
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit i3200_exit(void)
{
edac_dbg(3, "MC:\n");
pci_unregister_driver(&i3200_driver);
if (!i3200_registered) {
i3200_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
}
}
module_init(i3200_init);
module_exit(i3200_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies, Inc.");
MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i3200_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Bluefield-specific EDAC driver.
*
* Copyright (c) 2019 Mellanox Technologies.
*/
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/edac.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "edac_module.h"
#define DRIVER_NAME "bluefield-edac"
/*
* Mellanox BlueField EMI (External Memory Interface) register definitions.
*/
#define MLXBF_ECC_CNT 0x340
#define MLXBF_ECC_CNT__SERR_CNT GENMASK(15, 0)
#define MLXBF_ECC_CNT__DERR_CNT GENMASK(31, 16)
#define MLXBF_ECC_ERR 0x348
#define MLXBF_ECC_ERR__SECC BIT(0)
#define MLXBF_ECC_ERR__DECC BIT(16)
#define MLXBF_ECC_LATCH_SEL 0x354
#define MLXBF_ECC_LATCH_SEL__START BIT(24)
#define MLXBF_ERR_ADDR_0 0x358
#define MLXBF_ERR_ADDR_1 0x37c
#define MLXBF_SYNDROM 0x35c
#define MLXBF_SYNDROM__DERR BIT(0)
#define MLXBF_SYNDROM__SERR BIT(1)
#define MLXBF_SYNDROM__SYN GENMASK(25, 16)
#define MLXBF_ADD_INFO 0x364
#define MLXBF_ADD_INFO__ERR_PRANK GENMASK(9, 8)
#define MLXBF_EDAC_MAX_DIMM_PER_MC 2
#define MLXBF_EDAC_ERROR_GRAIN 8
/*
* Request MLNX_SIP_GET_DIMM_INFO
*
* Retrieve information about DIMM on a certain slot.
*
* Call register usage:
* a0: MLNX_SIP_GET_DIMM_INFO
* a1: (Memory controller index) << 16 | (Dimm index in memory controller)
* a2-7: not used.
*
* Return status:
* a0: MLXBF_DIMM_INFO defined below describing the DIMM.
* a1-3: not used.
*/
#define MLNX_SIP_GET_DIMM_INFO 0x82000008
/* Format for the SMC response about the memory information */
#define MLXBF_DIMM_INFO__SIZE_GB GENMASK_ULL(15, 0)
#define MLXBF_DIMM_INFO__IS_RDIMM BIT(16)
#define MLXBF_DIMM_INFO__IS_LRDIMM BIT(17)
#define MLXBF_DIMM_INFO__IS_NVDIMM BIT(18)
#define MLXBF_DIMM_INFO__RANKS GENMASK_ULL(23, 21)
#define MLXBF_DIMM_INFO__PACKAGE_X GENMASK_ULL(31, 24)
struct bluefield_edac_priv {
int dimm_ranks[MLXBF_EDAC_MAX_DIMM_PER_MC];
void __iomem *emi_base;
int dimm_per_mc;
};
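/* Issue an SMC with a single argument and return the first result register. */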
static u64 smc_call1(u64 smc_op, u64 smc_arg)
{
struct arm_smccc_res res;
arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
return res.a0;
}
/*
* Gather the ECC information from the External Memory Interface registers
* and report it to the edac handler.
*/
static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
int error_cnt,
int is_single_ecc)
{
struct bluefield_edac_priv *priv = mci->pvt_info;
u32 dram_additional_info, err_prank, edea0, edea1;
u32 ecc_latch_select, dram_syndrom, serr, derr, syndrom;
enum hw_event_mc_err_type ecc_type;
u64 ecc_dimm_addr;
int ecc_dimm;
ecc_type = is_single_ecc ? HW_EVENT_ERR_CORRECTED :
HW_EVENT_ERR_UNCORRECTED;
/*
* Tell the External Memory Interface to populate the relevant
* registers with information about the last ECC error occurrence.
*/
ecc_latch_select = MLXBF_ECC_LATCH_SEL__START;
writel(ecc_latch_select, priv->emi_base + MLXBF_ECC_LATCH_SEL);
/*
* Verify that the ECC reported info in the registers is of the
* same type as the one asked to report. If not, just report the
* error without the detailed information.
*/
dram_syndrom = readl(priv->emi_base + MLXBF_SYNDROM);
serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
syndrom = FIELD_GET(MLXBF_SYNDROM__SYN, dram_syndrom);
if ((is_single_ecc && !serr) || (!is_single_ecc && !derr)) {
edac_mc_handle_error(ecc_type, mci, error_cnt, 0, 0, 0,
0, 0, -1, mci->ctl_name, "");
return;
}
dram_additional_info = readl(priv->emi_base + MLXBF_ADD_INFO);
err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
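/* Ranks 2-3 belong to the second DIMM unless the first DIMM has more than two ranks. */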
ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
edea0 = readl(priv->emi_base + MLXBF_ERR_ADDR_0);
edea1 = readl(priv->emi_base + MLXBF_ERR_ADDR_1);
ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
edac_mc_handle_error(ecc_type, mci, error_cnt,
PFN_DOWN(ecc_dimm_addr),
offset_in_page(ecc_dimm_addr),
syndrom, ecc_dimm, 0, 0, mci->ctl_name, "");
}
static void bluefield_edac_check(struct mem_ctl_info *mci)
{
struct bluefield_edac_priv *priv = mci->pvt_info;
u32 ecc_count, single_error_count, double_error_count, ecc_error = 0;
/*
* The memory controller might not be initialized by the firmware
* when there isn't memory, which may lead to bad register readings.
*/
if (mci->edac_cap == EDAC_FLAG_NONE)
return;
ecc_count = readl(priv->emi_base + MLXBF_ECC_CNT);
single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
if (single_error_count) {
ecc_error |= MLXBF_ECC_ERR__SECC;
bluefield_gather_report_ecc(mci, single_error_count, 1);
}
if (double_error_count) {
ecc_error |= MLXBF_ECC_ERR__DECC;
bluefield_gather_report_ecc(mci, double_error_count, 0);
}
/* Write to clear reported errors. */
if (ecc_count)
writel(ecc_error, priv->emi_base + MLXBF_ECC_ERR);
}
/* Initialize the DIMMs information for the given memory controller. */
static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
{
struct bluefield_edac_priv *priv = mci->pvt_info;
int mem_ctrl_idx = mci->mc_idx;
struct dimm_info *dimm;
u64 smc_info, smc_arg;
int is_empty = 1, i;
for (i = 0; i < priv->dimm_per_mc; i++) {
dimm = mci->dimms[i];
smc_arg = mem_ctrl_idx << 16 | i;
smc_info = smc_call1(MLNX_SIP_GET_DIMM_INFO, smc_arg);
if (!FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info)) {
dimm->mtype = MEM_EMPTY;
continue;
}
is_empty = 0;
dimm->edac_mode = EDAC_SECDED;
if (FIELD_GET(MLXBF_DIMM_INFO__IS_NVDIMM, smc_info))
dimm->mtype = MEM_NVDIMM;
else if (FIELD_GET(MLXBF_DIMM_INFO__IS_LRDIMM, smc_info))
dimm->mtype = MEM_LRDDR4;
else if (FIELD_GET(MLXBF_DIMM_INFO__IS_RDIMM, smc_info))
dimm->mtype = MEM_RDDR4;
else
dimm->mtype = MEM_DDR4;
dimm->nr_pages =
FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info) *
(SZ_1G / PAGE_SIZE);
dimm->grain = MLXBF_EDAC_ERROR_GRAIN;
/* Mem controller for BlueField only supports x4, x8 and x16 */
switch (FIELD_GET(MLXBF_DIMM_INFO__PACKAGE_X, smc_info)) {
case 4:
dimm->dtype = DEV_X4;
break;
case 8:
dimm->dtype = DEV_X8;
break;
case 16:
dimm->dtype = DEV_X16;
break;
default:
dimm->dtype = DEV_UNKNOWN;
}
priv->dimm_ranks[i] =
FIELD_GET(MLXBF_DIMM_INFO__RANKS, smc_info);
}
if (is_empty)
mci->edac_cap = EDAC_FLAG_NONE;
else
mci->edac_cap = EDAC_FLAG_SECDED;
}
static int bluefield_edac_mc_probe(struct platform_device *pdev)
{
struct bluefield_edac_priv *priv;
struct device *dev = &pdev->dev;
struct edac_mc_layer layers[1];
struct mem_ctl_info *mci;
struct resource *emi_res;
unsigned int mc_idx, dimm_count;
int rc, ret;
/* Read the MSS (Memory SubSystem) index from ACPI table. */
if (device_property_read_u32(dev, "mss_number", &mc_idx)) {
dev_warn(dev, "bf_edac: MSS number unknown\n");
return -EINVAL;
}
/* Read the DIMMs per MC from ACPI table. */
if (device_property_read_u32(dev, "dimm_per_mc", &dimm_count)) {
dev_warn(dev, "bf_edac: DIMMs per MC unknown\n");
return -EINVAL;
}
if (dimm_count > MLXBF_EDAC_MAX_DIMM_PER_MC) {
dev_warn(dev, "bf_edac: DIMMs per MC not valid\n");
return -EINVAL;
}
emi_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!emi_res)
return -EINVAL;
layers[0].type = EDAC_MC_LAYER_SLOT;
layers[0].size = dimm_count;
layers[0].is_virt_csrow = true;
mci = edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, sizeof(*priv));
if (!mci)
return -ENOMEM;
priv = mci->pvt_info;
priv->dimm_per_mc = dimm_count;
priv->emi_base = devm_ioremap_resource(dev, emi_res);
if (IS_ERR(priv->emi_base)) {
dev_err(dev, "failed to map EMI IO resource\n");
ret = PTR_ERR(priv->emi_base);
goto err;
}
mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
MEM_FLAG_LRDDR4 | MEM_FLAG_NVDIMM;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->mod_name = DRIVER_NAME;
mci->ctl_name = "BlueField_Memory_Controller";
mci->dev_name = dev_name(dev);
mci->edac_check = bluefield_edac_check;
/* Initialize mci with the actual populated DIMM information. */
bluefield_edac_init_dimms(mci);
platform_set_drvdata(pdev, mci);
/* Register with EDAC core */
rc = edac_mc_add_mc(mci);
if (rc) {
dev_err(dev, "failed to register with EDAC core\n");
ret = rc;
goto err;
}
/* Only POLL mode supported so far. */
edac_op_state = EDAC_OPSTATE_POLL;
return 0;
err:
edac_mc_free(mci);
return ret;
}
static int bluefield_edac_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
static const struct acpi_device_id bluefield_mc_acpi_ids[] = {
{"MLNXBF08", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, bluefield_mc_acpi_ids);
static struct platform_driver bluefield_edac_mc_driver = {
.driver = {
.name = DRIVER_NAME,
.acpi_match_table = bluefield_mc_acpi_ids,
},
.probe = bluefield_edac_mc_probe,
.remove = bluefield_edac_mc_remove,
};
module_platform_driver(bluefield_edac_mc_driver);
MODULE_DESCRIPTION("Mellanox BlueField memory edac driver");
MODULE_AUTHOR("Mellanox Technologies");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/edac/bluefield_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008 Nuovation System Designs, LLC
* Grant Erickson <[email protected]>
*/
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/types.h>
#include <asm/dcr.h>
#include "edac_module.h"
#include "ppc4xx_edac.h"
/*
* This file implements a driver for monitoring and handling events
* associated with the IBM DDR2 ECC controller found in the AMCC/IBM
* 405EX[r], 440SP, 440SPe, 460EX, 460GT and 460SX.
*
* As realized in the 405EX[r], this controller features:
*
* - Support for registered- and non-registered DDR1 and DDR2 memory.
* - 32-bit or 16-bit memory interface with optional ECC.
*
* o ECC support includes:
*
* - 4-bit SEC/DED
* - Aligned-nibble error detect
* - Bypass mode
*
* - Two (2) memory banks/ranks.
* - Up to 1 GiB per bank/rank in 32-bit mode and up to 512 MiB per
* bank/rank in 16-bit mode.
*
* As realized in the 440SP and 440SPe, this controller changes/adds:
*
* - 64-bit or 32-bit memory interface with optional ECC.
*
* o ECC support includes:
*
* - 8-bit SEC/DED
* - Aligned-nibble error detect
* - Bypass mode
*
* - Up to 4 GiB per bank/rank in 64-bit mode and up to 2 GiB
* per bank/rank in 32-bit mode.
*
* As realized in the 460EX and 460GT, this controller changes/adds:
*
* - 64-bit or 32-bit memory interface with optional ECC.
*
* o ECC support includes:
*
* - 8-bit SEC/DED
* - Aligned-nibble error detect
* - Bypass mode
*
* - Four (4) memory banks/ranks.
* - Up to 16 GiB per bank/rank in 64-bit mode and up to 8 GiB
* per bank/rank in 32-bit mode.
*
* At present, this driver has ONLY been tested against the controller
* realization in the 405EX[r] on the AMCC Kilauea and Haleakala
* boards (256 MiB w/o ECC memory soldered onto the board) and a
* proprietary board based on those designs (128 MiB ECC memory, also
* soldered onto the board).
*
* Dynamic feature detection and handling needs to be added for the
* other realizations of this controller listed above.
*
* Eventually, this driver will likely be adapted to the above variant
* realizations of this controller as well as broken apart to handle
* the other known ECC-capable controllers prevalent in other 4xx
* processors:
*
* - IBM SDRAM (405GP, 405CR and 405EP) "ibm,sdram-4xx"
* - IBM DDR1 (440GP, 440GX, 440EP and 440GR) "ibm,sdram-4xx-ddr"
* - Denali DDR1/DDR2 (440EPX and 440GRX) "denali,sdram-4xx-ddr2"
*
* For this controller, unfortunately, correctable errors report
* nothing more than the beat/cycle and byte/lane the correction
* occurred on and the check bit group that covered the error.
*
* In contrast, uncorrectable errors also report the failing address,
* the bus master and the transaction direction (i.e. read or write)
*
* Regardless of whether the error is a CE or a UE, we report the
* following pieces of information in the driver-unique message to the
* EDAC subsystem:
*
* - Device tree path
* - Bank(s)
* - Check bit error group
* - Beat(s)/lane(s)
*/
/* Preprocessor Definitions */
#define EDAC_OPSTATE_INT_STR "interrupt"
#define EDAC_OPSTATE_POLL_STR "polled"
#define EDAC_OPSTATE_UNKNOWN_STR "unknown"
#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac"
#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0"
#define PPC4XX_EDAC_MESSAGE_SIZE 256
/*
* Kernel logging without an EDAC instance
*/
#define ppc4xx_edac_printk(level, fmt, arg...) \
edac_printk(level, "PPC4xx MC", fmt, ##arg)
/*
* Kernel logging with an EDAC instance
*/
#define ppc4xx_edac_mc_printk(level, mci, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "PPC4xx", fmt, ##arg)
/*
* Macros to convert bank configuration size enumerations into MiB and
* page values.
*/
#define SDRAM_MBCF_SZ_MiB_MIN 4
#define SDRAM_MBCF_SZ_TO_MiB(n) (SDRAM_MBCF_SZ_MiB_MIN \
<< (SDRAM_MBCF_SZ_DECODE(n)))
#define SDRAM_MBCF_SZ_TO_PAGES(n) (SDRAM_MBCF_SZ_MiB_MIN \
<< (20 - PAGE_SHIFT + \
SDRAM_MBCF_SZ_DECODE(n)))
/*
* The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
* indirectly accessed and have a base and length defined by the
* device tree. The base can be anything; however, we expect the
* length to be precisely two registers, the first for the address
* window and the second for the data window.
*/
#define SDRAM_DCR_RESOURCE_LEN 2
#define SDRAM_DCR_ADDR_OFFSET 0
#define SDRAM_DCR_DATA_OFFSET 1
/*
* Device tree interrupt indices
*/
#define INTMAP_ECCDED_INDEX 0 /* Double-bit Error Detect */
#define INTMAP_ECCSEC_INDEX 1 /* Single-bit Error Correct */
/* Type Definitions */
/*
* PPC4xx SDRAM memory controller private instance data
*/
struct ppc4xx_edac_pdata {
dcr_host_t dcr_host; /* Indirect DCR address/data window mapping */
struct {
int sec; /* Single-bit correctable error IRQ assigned */
int ded; /* Double-bit detectable error IRQ assigned */
} irqs;
};
/*
* Various status data gathered and manipulated when checking and
* reporting ECC status.
*/
struct ppc4xx_ecc_status {
u32 ecces;
u32 besr;
u32 bearh;
u32 bearl;
u32 wmirq;
};
/* Global Variables */
/*
* Device tree node type and compatible tuples this driver can match
* on.
*/
static const struct of_device_id ppc4xx_edac_match[] = {
{
.compatible = "ibm,sdram-4xx-ddr2"
},
{ }
};
MODULE_DEVICE_TABLE(of, ppc4xx_edac_match);
/*
* TODO: The row and channel parameters likely need to be dynamically
* set based on the aforementioned variant controller realizations.
*/
static const unsigned ppc4xx_edac_nr_csrows = 2;
static const unsigned ppc4xx_edac_nr_chans = 1;
/*
* Strings associated with PLB master IDs capable of being posted in
* SDRAM_BESR or SDRAM_WMIRQ on uncorrectable ECC errors.
*/
static const char * const ppc4xx_plb_masters[9] = {
[SDRAM_PLB_M0ID_ICU] = "ICU",
[SDRAM_PLB_M0ID_PCIE0] = "PCI-E 0",
[SDRAM_PLB_M0ID_PCIE1] = "PCI-E 1",
[SDRAM_PLB_M0ID_DMA] = "DMA",
[SDRAM_PLB_M0ID_DCU] = "DCU",
[SDRAM_PLB_M0ID_OPB] = "OPB",
[SDRAM_PLB_M0ID_MAL] = "MAL",
[SDRAM_PLB_M0ID_SEC] = "SEC",
[SDRAM_PLB_M0ID_AHB] = "AHB"
};
/**
* mfsdram - read and return controller register data
* @dcr_host: A pointer to the DCR mapping.
* @idcr_n: The indirect DCR register to read.
*
* This routine reads and returns the data associated with the
* controller's specified indirect DCR register.
*
* Returns the read data.
*/
static inline u32
mfsdram(const dcr_host_t *dcr_host, unsigned int idcr_n)
{
return __mfdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
dcr_host->base + SDRAM_DCR_DATA_OFFSET,
idcr_n);
}
/**
* mtsdram - write controller register data
* @dcr_host: A pointer to the DCR mapping.
* @idcr_n: The indirect DCR register to write.
* @value: The data to write.
*
* This routine writes the provided data to the controller's specified
* indirect DCR register.
*/
static inline void
mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n, u32 value)
{
return __mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
dcr_host->base + SDRAM_DCR_DATA_OFFSET,
idcr_n,
value);
}
/**
* ppc4xx_edac_check_bank_error - check a bank for an ECC bank error
* @status: A pointer to the ECC status structure to check for an
* ECC bank error.
* @bank: The bank to check for an ECC error.
*
* This routine determines whether the specified bank has an ECC
* error.
*
* Returns true if the specified bank has an ECC error; otherwise,
* false.
*/
static bool
ppc4xx_edac_check_bank_error(const struct ppc4xx_ecc_status *status,
unsigned int bank)
{
switch (bank) {
case 0:
return status->ecces & SDRAM_ECCES_BK0ER;
case 1:
return status->ecces & SDRAM_ECCES_BK1ER;
default:
return false;
}
}
/**
* ppc4xx_edac_generate_bank_message - generate interpreted bank status message
* @mci: A pointer to the EDAC memory controller instance associated
* with the bank message being generated.
* @status: A pointer to the ECC status structure to generate the
* message from.
* @buffer: A pointer to the buffer in which to generate the
* message.
* @size: The size, in bytes, of space available in buffer.
*
* This routine generates to the provided buffer the portion of the
* driver-unique report message associated with the ECCES[BKNER]
* field of the specified ECC status.
*
* Returns the number of characters generated on success; otherwise, <
* 0 on error.
*/
static int
ppc4xx_edac_generate_bank_message(const struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status,
char *buffer,
size_t size)
{
int n, total = 0;
unsigned int row, rows;
n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name);
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
for (rows = 0, row = 0; row < mci->nr_csrows; row++) {
if (ppc4xx_edac_check_bank_error(status, row)) {
n = snprintf(buffer, size, "%s%u",
(rows++ ? ", " : ""), row);
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
}
}
n = snprintf(buffer, size, "%s; ", rows ? "" : "None");
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
fail:
return total;
}
/**
* ppc4xx_edac_generate_checkbit_message - generate interpreted checkbit message
* @mci: A pointer to the EDAC memory controller instance associated
* with the checkbit message being generated.
* @status: A pointer to the ECC status structure to generate the
* message from.
* @buffer: A pointer to the buffer in which to generate the
* message.
* @size: The size, in bytes, of space available in buffer.
*
* This routine generates to the provided buffer the portion of the
* driver-unique report message associated with the ECCES[CKBER]
* field of the specified ECC status.
*
* Returns the number of characters generated on success; otherwise, <
* 0 on error.
*/
static int
ppc4xx_edac_generate_checkbit_message(const struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status,
char *buffer,
size_t size)
{
const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
const char *ckber = NULL;
switch (status->ecces & SDRAM_ECCES_CKBER_MASK) {
case SDRAM_ECCES_CKBER_NONE:
ckber = "None";
break;
case SDRAM_ECCES_CKBER_32_ECC_0_3:
ckber = "ECC0:3";
break;
case SDRAM_ECCES_CKBER_32_ECC_4_8:
switch (mfsdram(&pdata->dcr_host, SDRAM_MCOPT1) &
SDRAM_MCOPT1_WDTH_MASK) {
case SDRAM_MCOPT1_WDTH_16:
ckber = "ECC0:3";
break;
case SDRAM_MCOPT1_WDTH_32:
ckber = "ECC4:8";
break;
default:
ckber = "Unknown";
break;
}
break;
case SDRAM_ECCES_CKBER_32_ECC_0_8:
ckber = "ECC0:8";
break;
default:
ckber = "Unknown";
break;
}
return snprintf(buffer, size, "Checkbit Error: %s", ckber);
}
/**
 * ppc4xx_edac_generate_lane_message - generate interpreted byte lane message
* @mci: A pointer to the EDAC memory controller instance associated
* with the byte lane message being generated.
* @status: A pointer to the ECC status structure to generate the
* message from.
* @buffer: A pointer to the buffer in which to generate the
* message.
* @size: The size, in bytes, of space available in buffer.
*
* This routine generates to the provided buffer the portion of the
 * driver-unique report message associated with the ECCES[BNCE]
* field of the specified ECC status.
*
* Returns the number of characters generated on success; otherwise, <
* 0 on error.
*/
static int
ppc4xx_edac_generate_lane_message(const struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status,
char *buffer,
size_t size)
{
int n, total = 0;
unsigned int lane, lanes;
const unsigned int first_lane = 0;
const unsigned int lane_count = 16;
n = snprintf(buffer, size, "; Byte Lane Errors: ");
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
for (lanes = 0, lane = first_lane; lane < lane_count; lane++) {
if ((status->ecces & SDRAM_ECCES_BNCE_ENCODE(lane)) != 0) {
n = snprintf(buffer, size,
"%s%u",
(lanes++ ? ", " : ""), lane);
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
}
}
n = snprintf(buffer, size, "%s; ", lanes ? "" : "None");
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
fail:
return total;
}
/**
 * ppc4xx_edac_generate_ecc_message - generate interpreted ECC status message
* @mci: A pointer to the EDAC memory controller instance associated
* with the ECCES message being generated.
* @status: A pointer to the ECC status structure to generate the
* message from.
* @buffer: A pointer to the buffer in which to generate the
* message.
* @size: The size, in bytes, of space available in buffer.
*
* This routine generates to the provided buffer the portion of the
 * driver-unique report message associated with the ECCES register of
* the specified ECC status.
*
* Returns the number of characters generated on success; otherwise, <
* 0 on error.
*/
static int
ppc4xx_edac_generate_ecc_message(const struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status,
char *buffer,
size_t size)
{
int n, total = 0;
n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size);
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size);
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size);
if (n < 0 || n >= size)
goto fail;
buffer += n;
size -= n;
total += n;
fail:
return total;
}
/**
 * ppc4xx_edac_generate_plb_message - generate interpreted PLB status message
* @mci: A pointer to the EDAC memory controller instance associated
* with the PLB message being generated.
* @status: A pointer to the ECC status structure to generate the
* message from.
* @buffer: A pointer to the buffer in which to generate the
* message.
* @size: The size, in bytes, of space available in buffer.
*
* This routine generates to the provided buffer the portion of the
* driver-unique report message associated with the PLB-related BESR
* and/or WMIRQ registers of the specified ECC status.
*
* Returns the number of characters generated on success; otherwise, <
* 0 on error.
*/
static int
ppc4xx_edac_generate_plb_message(const struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status,
char *buffer,
size_t size)
{
unsigned int master;
bool read;
if ((status->besr & SDRAM_BESR_MASK) == 0)
return 0;
if ((status->besr & SDRAM_BESR_M0ET_MASK) == SDRAM_BESR_M0ET_NONE)
return 0;
read = ((status->besr & SDRAM_BESR_M0RW_MASK) == SDRAM_BESR_M0RW_READ);
master = SDRAM_BESR_M0ID_DECODE(status->besr);
return snprintf(buffer, size,
"%s error w/ PLB master %u \"%s\"; ",
(read ? "Read" : "Write"),
master,
(((master >= SDRAM_PLB_M0ID_FIRST) &&
(master <= SDRAM_PLB_M0ID_LAST)) ?
ppc4xx_plb_masters[master] : "UNKNOWN"));
}
/**
 * ppc4xx_edac_generate_message - generate interpreted status message
* @mci: A pointer to the EDAC memory controller instance associated
* with the driver-unique message being generated.
* @status: A pointer to the ECC status structure to generate the
* message from.
* @buffer: A pointer to the buffer in which to generate the
* message.
* @size: The size, in bytes, of space available in buffer.
*
* This routine generates to the provided buffer the driver-unique
* EDAC report message from the specified ECC status.
*/
static void
ppc4xx_edac_generate_message(const struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status,
char *buffer,
size_t size)
{
int n;
if (buffer == NULL || size == 0)
return;
n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size);
if (n < 0 || n >= size)
return;
buffer += n;
size -= n;
ppc4xx_edac_generate_plb_message(mci, status, buffer, size);
}
#ifdef DEBUG
/**
* ppc4xx_ecc_dump_status - dump controller ECC status registers
* @mci: A pointer to the EDAC memory controller instance
* associated with the status being dumped.
* @status: A pointer to the ECC status structure to generate the
* dump from.
*
 * This routine dumps the raw and interpreted forms of the specified
 * ECC status to the kernel log buffer.
*/
static void
ppc4xx_ecc_dump_status(const struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status)
{
char message[PPC4XX_EDAC_MESSAGE_SIZE];
ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
ppc4xx_edac_mc_printk(KERN_INFO, mci,
"\n"
"\tECCES: 0x%08x\n"
"\tWMIRQ: 0x%08x\n"
"\tBESR: 0x%08x\n"
"\tBEAR: 0x%08x%08x\n"
"\t%s\n",
status->ecces,
status->wmirq,
status->besr,
status->bearh,
status->bearl,
message);
}
#endif /* DEBUG */
/**
* ppc4xx_ecc_get_status - get controller ECC status
* @mci: A pointer to the EDAC memory controller instance
* associated with the status being retrieved.
* @status: A pointer to the ECC status structure to populate the
* ECC status with.
*
* This routine reads and masks, as appropriate, all the relevant
* status registers that deal with ibm,sdram-4xx-ddr2 ECC errors.
* While we read all of them, for correctable errors, we only expect
* to deal with ECCES. For uncorrectable errors, we expect to deal
* with all of them.
*/
static void
ppc4xx_ecc_get_status(const struct mem_ctl_info *mci,
struct ppc4xx_ecc_status *status)
{
const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
const dcr_host_t *dcr_host = &pdata->dcr_host;
status->ecces = mfsdram(dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK;
status->wmirq = mfsdram(dcr_host, SDRAM_WMIRQ) & SDRAM_WMIRQ_MASK;
status->besr = mfsdram(dcr_host, SDRAM_BESR) & SDRAM_BESR_MASK;
status->bearl = mfsdram(dcr_host, SDRAM_BEARL);
status->bearh = mfsdram(dcr_host, SDRAM_BEARH);
}
/**
* ppc4xx_ecc_clear_status - clear controller ECC status
* @mci: A pointer to the EDAC memory controller instance
* associated with the status being cleared.
* @status: A pointer to the ECC status structure containing the
* values to write to clear the ECC status.
*
 * This routine clears the status registers that deal with
 * ibm,sdram-4xx-ddr2 ECC errors by writing the masked (as
 * appropriate) status values back to them.
*/
static void
ppc4xx_ecc_clear_status(const struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status)
{
const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
const dcr_host_t *dcr_host = &pdata->dcr_host;
mtsdram(dcr_host, SDRAM_ECCES, status->ecces & SDRAM_ECCES_MASK);
mtsdram(dcr_host, SDRAM_WMIRQ, status->wmirq & SDRAM_WMIRQ_MASK);
mtsdram(dcr_host, SDRAM_BESR, status->besr & SDRAM_BESR_MASK);
mtsdram(dcr_host, SDRAM_BEARL, 0);
mtsdram(dcr_host, SDRAM_BEARH, 0);
}
/**
* ppc4xx_edac_handle_ce - handle controller correctable ECC error (CE)
* @mci: A pointer to the EDAC memory controller instance
* associated with the correctable error being handled and reported.
* @status: A pointer to the ECC status structure associated with
* the correctable error being handled and reported.
*
* This routine handles an ibm,sdram-4xx-ddr2 controller ECC
* correctable error. Per the aforementioned discussion, there's not
* enough status available to use the full EDAC correctable error
 * interface, so we just pass a driver-unique message to the "no info"
* interface.
*/
static void
ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status)
{
int row;
char message[PPC4XX_EDAC_MESSAGE_SIZE];
ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, 0,
row, 0, -1,
message, "");
}
/**
* ppc4xx_edac_handle_ue - handle controller uncorrectable ECC error (UE)
* @mci: A pointer to the EDAC memory controller instance
* associated with the uncorrectable error being handled and
* reported.
* @status: A pointer to the ECC status structure associated with
* the uncorrectable error being handled and reported.
*
* This routine handles an ibm,sdram-4xx-ddr2 controller ECC
* uncorrectable error.
*/
static void
ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
const struct ppc4xx_ecc_status *status)
{
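	/*
	 * BEARH:BEARL hold the failing 64-bit address; split it into the
	 * page frame number and page offset expected by
	 * edac_mc_handle_error().
	 */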
const u64 bear = ((u64)status->bearh << 32 | status->bearl);
const unsigned long page = bear >> PAGE_SHIFT;
const unsigned long offset = bear & ~PAGE_MASK;
int row;
char message[PPC4XX_EDAC_MESSAGE_SIZE];
ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, offset, 0,
row, 0, -1,
message, "");
}
/**
* ppc4xx_edac_check - check controller for ECC errors
* @mci: A pointer to the EDAC memory controller instance
* associated with the ibm,sdram-4xx-ddr2 controller being
* checked.
*
* This routine is used to check and post ECC errors and is called by
* both the EDAC polling thread and this driver's CE and UE interrupt
* handler.
*/
static void
ppc4xx_edac_check(struct mem_ctl_info *mci)
{
#ifdef DEBUG
static unsigned int count;
#endif
struct ppc4xx_ecc_status status;
ppc4xx_ecc_get_status(mci, &status);
#ifdef DEBUG
if (count++ % 30 == 0)
ppc4xx_ecc_dump_status(mci, &status);
#endif
if (status.ecces & SDRAM_ECCES_UE)
ppc4xx_edac_handle_ue(mci, &status);
if (status.ecces & SDRAM_ECCES_CE)
ppc4xx_edac_handle_ce(mci, &status);
ppc4xx_ecc_clear_status(mci, &status);
}
/**
* ppc4xx_edac_isr - SEC (CE) and DED (UE) interrupt service routine
* @irq: The virtual interrupt number being serviced.
* @dev_id: A pointer to the EDAC memory controller instance
* associated with the interrupt being handled.
*
* This routine implements the interrupt handler for both correctable
* (CE) and uncorrectable (UE) ECC errors for the ibm,sdram-4xx-ddr2
* controller. It simply calls through to the same routine used during
* polling to check, report and clear the ECC status.
*
* Unconditionally returns IRQ_HANDLED.
*/
static irqreturn_t
ppc4xx_edac_isr(int irq, void *dev_id)
{
struct mem_ctl_info *mci = dev_id;
ppc4xx_edac_check(mci);
return IRQ_HANDLED;
}
/**
* ppc4xx_edac_get_dtype - return the controller memory width
* @mcopt1: The 32-bit Memory Controller Option 1 register value
* currently set for the controller, from which the width
* is derived.
*
* This routine returns the EDAC device type width appropriate for the
* current controller configuration.
*
* TODO: This needs to be conditioned dynamically through feature
* flags or some such when other controller variants are supported as
* the 405EX[r] is 16-/32-bit and the others are 32-/64-bit with the
* 16- and 64-bit field definition/value/enumeration (b1) overloaded
* among them.
*
* Returns a device type width enumeration.
*/
static enum dev_type ppc4xx_edac_get_dtype(u32 mcopt1)
{
switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) {
case SDRAM_MCOPT1_WDTH_16:
return DEV_X2;
case SDRAM_MCOPT1_WDTH_32:
return DEV_X4;
default:
return DEV_UNKNOWN;
}
}
/**
* ppc4xx_edac_get_mtype - return controller memory type
* @mcopt1: The 32-bit Memory Controller Option 1 register value
* currently set for the controller, from which the memory type
* is derived.
*
* This routine returns the EDAC memory type appropriate for the
* current controller configuration.
*
* Returns a memory type enumeration.
*/
static enum mem_type ppc4xx_edac_get_mtype(u32 mcopt1)
{
bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN);
switch (mcopt1 & SDRAM_MCOPT1_DDR_TYPE_MASK) {
case SDRAM_MCOPT1_DDR2_TYPE:
return rden ? MEM_RDDR2 : MEM_DDR2;
case SDRAM_MCOPT1_DDR1_TYPE:
return rden ? MEM_RDDR : MEM_DDR;
default:
return MEM_UNKNOWN;
}
}
/**
* ppc4xx_edac_init_csrows - initialize driver instance rows
* @mci: A pointer to the EDAC memory controller instance
* associated with the ibm,sdram-4xx-ddr2 controller for which
* the csrows (i.e. banks/ranks) are being initialized.
* @mcopt1: The 32-bit Memory Controller Option 1 register value
* currently set for the controller, from which bank width
 *		and memory type information is derived.
*
* This routine initializes the virtual "chip select rows" associated
* with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
* controller bank/rank is mapped to a row.
*
* Returns 0 if OK; otherwise, -EINVAL if the memory bank size
* configuration cannot be determined.
*/
static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
{
const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
int status = 0;
enum mem_type mtype;
enum dev_type dtype;
enum edac_type edac_mode;
int row, j;
u32 mbxcf, size, nr_pages;
/* Establish the memory type and width */
mtype = ppc4xx_edac_get_mtype(mcopt1);
dtype = ppc4xx_edac_get_dtype(mcopt1);
/* Establish EDAC mode */
if (mci->edac_cap & EDAC_FLAG_SECDED)
edac_mode = EDAC_SECDED;
else if (mci->edac_cap & EDAC_FLAG_EC)
edac_mode = EDAC_EC;
else
edac_mode = EDAC_NONE;
/*
	 * Initialize each chip select row structure, each of which
	 * corresponds 1:1 with a controller bank/rank.
*/
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *csi = mci->csrows[row];
/*
* Get the configuration settings for this
* row/bank/rank and skip disabled banks.
*/
mbxcf = mfsdram(&pdata->dcr_host, SDRAM_MBXCF(row));
if ((mbxcf & SDRAM_MBCF_BE_MASK) != SDRAM_MBCF_BE_ENABLE)
continue;
/* Map the bank configuration size setting to pages. */
size = mbxcf & SDRAM_MBCF_SZ_MASK;
switch (size) {
case SDRAM_MBCF_SZ_4MB:
case SDRAM_MBCF_SZ_8MB:
case SDRAM_MBCF_SZ_16MB:
case SDRAM_MBCF_SZ_32MB:
case SDRAM_MBCF_SZ_64MB:
case SDRAM_MBCF_SZ_128MB:
case SDRAM_MBCF_SZ_256MB:
case SDRAM_MBCF_SZ_512MB:
case SDRAM_MBCF_SZ_1GB:
case SDRAM_MBCF_SZ_2GB:
case SDRAM_MBCF_SZ_4GB:
case SDRAM_MBCF_SZ_8GB:
nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
break;
default:
ppc4xx_edac_mc_printk(KERN_ERR, mci,
"Unrecognized memory bank %d "
"size 0x%08x\n",
row, SDRAM_MBCF_SZ_DECODE(size));
status = -EINVAL;
goto done;
}
/*
* It's unclear exactly what grain should be set to
* here. The SDRAM_ECCES register allows resolution of
* an error down to a nibble which would potentially
* argue for a grain of '1' byte, even though we only
* know the associated address for uncorrectable
* errors. This value is not used at present for
* anything other than error reporting so getting it
* wrong should be of little consequence. Other
* possible values would be the PLB width (16), the
* page size (PAGE_SIZE) or the memory width (2 or 4).
*/
for (j = 0; j < csi->nr_channels; j++) {
struct dimm_info *dimm = csi->channels[j]->dimm;
dimm->nr_pages = nr_pages / csi->nr_channels;
dimm->grain = 1;
dimm->mtype = mtype;
dimm->dtype = dtype;
dimm->edac_mode = edac_mode;
}
}
done:
return status;
}
/**
* ppc4xx_edac_mc_init - initialize driver instance
* @mci: A pointer to the EDAC memory controller instance being
* initialized.
* @op: A pointer to the OpenFirmware device tree node associated
* with the controller this EDAC instance is bound to.
* @dcr_host: A pointer to the DCR data containing the DCR mapping
* for this controller instance.
* @mcopt1: The 32-bit Memory Controller Option 1 register value
* currently set for the controller, from which ECC capabilities
* and scrub mode are derived.
*
* This routine performs initialization of the EDAC memory controller
* instance and related driver-private data associated with the
* ibm,sdram-4xx-ddr2 memory controller the instance is bound to.
*
* Returns 0 if OK; otherwise, < 0 on error.
*/
static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
struct platform_device *op,
const dcr_host_t *dcr_host, u32 mcopt1)
{
int status = 0;
const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
struct ppc4xx_edac_pdata *pdata = NULL;
const struct device_node *np = op->dev.of_node;
if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL)
return -EINVAL;
	/* Initialize driver pointers and private data */
mci->pdev = &op->dev;
dev_set_drvdata(mci->pdev, mci);
pdata = mci->pvt_info;
pdata->dcr_host = *dcr_host;
/* Initialize controller capabilities and configuration */
mci->mtype_cap = (MEM_FLAG_DDR | MEM_FLAG_RDDR |
MEM_FLAG_DDR2 | MEM_FLAG_RDDR2);
mci->edac_ctl_cap = (EDAC_FLAG_NONE |
EDAC_FLAG_EC |
EDAC_FLAG_SECDED);
mci->scrub_cap = SCRUB_NONE;
mci->scrub_mode = SCRUB_NONE;
/*
	 * Update the actual capabilities based on the MCOPT1[MCHK]
* settings. Scrubbing is only useful if reporting is enabled.
*/
switch (memcheck) {
case SDRAM_MCOPT1_MCHK_CHK:
mci->edac_cap = EDAC_FLAG_EC;
break;
case SDRAM_MCOPT1_MCHK_CHK_REP:
mci->edac_cap = (EDAC_FLAG_EC | EDAC_FLAG_SECDED);
mci->scrub_mode = SCRUB_SW_SRC;
break;
default:
mci->edac_cap = EDAC_FLAG_NONE;
break;
}
/* Initialize strings */
mci->mod_name = PPC4XX_EDAC_MODULE_NAME;
mci->ctl_name = ppc4xx_edac_match->compatible;
mci->dev_name = np->full_name;
/* Initialize callbacks */
mci->edac_check = ppc4xx_edac_check;
mci->ctl_page_to_phys = NULL;
/* Initialize chip select rows */
status = ppc4xx_edac_init_csrows(mci, mcopt1);
if (status)
ppc4xx_edac_mc_printk(KERN_ERR, mci,
"Failed to initialize rows!\n");
return status;
}
/**
* ppc4xx_edac_register_irq - setup and register controller interrupts
* @op: A pointer to the OpenFirmware device tree node associated
* with the controller this EDAC instance is bound to.
* @mci: A pointer to the EDAC memory controller instance
* associated with the ibm,sdram-4xx-ddr2 controller for which
* interrupts are being registered.
*
* This routine parses the correctable (CE) and uncorrectable error (UE)
* interrupts from the device tree node and maps and assigns them to
* the associated EDAC memory controller instance.
*
* Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be
* mapped and assigned.
*/
static int ppc4xx_edac_register_irq(struct platform_device *op,
struct mem_ctl_info *mci)
{
int status = 0;
int ded_irq, sec_irq;
struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
struct device_node *np = op->dev.of_node;
ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
if (!ded_irq || !sec_irq) {
ppc4xx_edac_mc_printk(KERN_ERR, mci,
"Unable to map interrupts.\n");
status = -ENODEV;
goto fail;
}
status = request_irq(ded_irq,
ppc4xx_edac_isr,
0,
"[EDAC] MC ECCDED",
mci);
if (status < 0) {
ppc4xx_edac_mc_printk(KERN_ERR, mci,
"Unable to request irq %d for ECC DED",
ded_irq);
status = -ENODEV;
goto fail1;
}
status = request_irq(sec_irq,
ppc4xx_edac_isr,
0,
"[EDAC] MC ECCSEC",
mci);
if (status < 0) {
ppc4xx_edac_mc_printk(KERN_ERR, mci,
"Unable to request irq %d for ECC SEC",
sec_irq);
status = -ENODEV;
goto fail2;
}
ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCDED irq is %d\n", ded_irq);
ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCSEC irq is %d\n", sec_irq);
pdata->irqs.ded = ded_irq;
pdata->irqs.sec = sec_irq;
return 0;
fail2:
free_irq(sec_irq, mci);
fail1:
free_irq(ded_irq, mci);
fail:
return status;
}
/**
* ppc4xx_edac_map_dcrs - locate and map controller registers
* @np: A pointer to the device tree node containing the DCR
* resources to map.
* @dcr_host: A pointer to the DCR data to populate with the
* DCR mapping.
*
* This routine attempts to locate in the device tree and map the DCR
* register resources associated with the controller's indirect DCR
* address and data windows.
*
* Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on
* error.
*/
static int ppc4xx_edac_map_dcrs(const struct device_node *np,
dcr_host_t *dcr_host)
{
unsigned int dcr_base, dcr_len;
if (np == NULL || dcr_host == NULL)
return -EINVAL;
/* Get the DCR resource extent and sanity check the values. */
dcr_base = dcr_resource_start(np, 0);
dcr_len = dcr_resource_len(np, 0);
if (dcr_base == 0 || dcr_len == 0) {
ppc4xx_edac_printk(KERN_ERR,
"Failed to obtain DCR property.\n");
return -ENODEV;
}
if (dcr_len != SDRAM_DCR_RESOURCE_LEN) {
ppc4xx_edac_printk(KERN_ERR,
"Unexpected DCR length %d, expected %d.\n",
dcr_len, SDRAM_DCR_RESOURCE_LEN);
return -ENODEV;
}
/* Attempt to map the DCR extent. */
*dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(*dcr_host)) {
ppc4xx_edac_printk(KERN_INFO, "Failed to map DCRs.\n");
return -ENODEV;
}
return 0;
}
/**
* ppc4xx_edac_probe - check controller and bind driver
* @op: A pointer to the OpenFirmware device tree node associated
* with the controller being probed for driver binding.
*
* This routine probes a specific ibm,sdram-4xx-ddr2 controller
* instance for binding with the driver.
*
* Returns 0 if the controller instance was successfully bound to the
* driver; otherwise, < 0 on error.
*/
static int ppc4xx_edac_probe(struct platform_device *op)
{
int status = 0;
u32 mcopt1, memcheck;
dcr_host_t dcr_host;
const struct device_node *np = op->dev.of_node;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
static int ppc4xx_edac_instance;
/*
* At this point, we only support the controller realized on
* the AMCC PPC 405EX[r]. Reject anything else.
*/
if (!of_device_is_compatible(np, "ibm,sdram-405ex") &&
!of_device_is_compatible(np, "ibm,sdram-405exr")) {
ppc4xx_edac_printk(KERN_NOTICE,
"Only the PPC405EX[r] is supported.\n");
return -ENODEV;
}
/*
* Next, get the DCR property and attempt to map it so that we
* can probe the controller.
*/
status = ppc4xx_edac_map_dcrs(np, &dcr_host);
if (status)
return status;
/*
	 * Now determine whether ECC is enabled at all. If not,
* there is no useful checking or monitoring that can be done
* for this controller.
*/
mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1);
memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
if (memcheck == SDRAM_MCOPT1_MCHK_NON) {
ppc4xx_edac_printk(KERN_INFO, "%pOF: No ECC memory detected or "
"ECC is disabled.\n", np);
status = -ENODEV;
goto done;
}
/*
* At this point, we know ECC is enabled, allocate an EDAC
* controller instance and perform the appropriate
* initialization.
*/
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = ppc4xx_edac_nr_csrows;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = ppc4xx_edac_nr_chans;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
sizeof(struct ppc4xx_edac_pdata));
if (mci == NULL) {
ppc4xx_edac_printk(KERN_ERR, "%pOF: "
"Failed to allocate EDAC MC instance!\n",
np);
status = -ENOMEM;
goto done;
}
status = ppc4xx_edac_mc_init(mci, op, &dcr_host, mcopt1);
if (status) {
ppc4xx_edac_mc_printk(KERN_ERR, mci,
"Failed to initialize instance!\n");
goto fail;
}
/*
* We have a valid, initialized EDAC instance bound to the
* controller. Attempt to register it with the EDAC subsystem
* and, if necessary, register interrupts.
*/
if (edac_mc_add_mc(mci)) {
ppc4xx_edac_mc_printk(KERN_ERR, mci,
"Failed to add instance!\n");
status = -ENODEV;
goto fail;
}
if (edac_op_state == EDAC_OPSTATE_INT) {
status = ppc4xx_edac_register_irq(op, mci);
if (status)
goto fail1;
}
ppc4xx_edac_instance++;
return 0;
fail1:
edac_mc_del_mc(mci->pdev);
fail:
edac_mc_free(mci);
done:
return status;
}
/**
* ppc4xx_edac_remove - unbind driver from controller
* @op: A pointer to the OpenFirmware device tree node associated
* with the controller this EDAC instance is to be unbound/removed
* from.
*
* This routine unbinds the EDAC memory controller instance associated
* with the specified ibm,sdram-4xx-ddr2 controller described by the
* OpenFirmware device tree node passed as a parameter.
*
* Unconditionally returns 0.
*/
static int
ppc4xx_edac_remove(struct platform_device *op)
{
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
if (edac_op_state == EDAC_OPSTATE_INT) {
free_irq(pdata->irqs.sec, mci);
free_irq(pdata->irqs.ded, mci);
}
dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
edac_mc_del_mc(mci->pdev);
edac_mc_free(mci);
return 0;
}
/**
* ppc4xx_edac_opstate_init - initialize EDAC reporting method
*
* This routine ensures that the EDAC memory controller reporting
 * method is mapped to a sane value, as the EDAC core sets the value
 * to EDAC_OPSTATE_INVAL by default. We don't call the global
* opstate_init as that defaults to polling and we want interrupt as
* the default.
*/
static inline void __init
ppc4xx_edac_opstate_init(void)
{
switch (edac_op_state) {
case EDAC_OPSTATE_POLL:
case EDAC_OPSTATE_INT:
break;
default:
edac_op_state = EDAC_OPSTATE_INT;
break;
}
ppc4xx_edac_printk(KERN_INFO, "Reporting type: %s\n",
((edac_op_state == EDAC_OPSTATE_POLL) ?
EDAC_OPSTATE_POLL_STR :
((edac_op_state == EDAC_OPSTATE_INT) ?
EDAC_OPSTATE_INT_STR :
EDAC_OPSTATE_UNKNOWN_STR)));
}
static struct platform_driver ppc4xx_edac_driver = {
.probe = ppc4xx_edac_probe,
.remove = ppc4xx_edac_remove,
.driver = {
.name = PPC4XX_EDAC_MODULE_NAME,
.of_match_table = ppc4xx_edac_match,
},
};
/**
* ppc4xx_edac_init - driver/module insertion entry point
*
* This routine is the driver/module insertion entry point. It
* initializes the EDAC memory controller reporting state and
* registers the driver as an OpenFirmware device tree platform
* driver.
*/
static int __init
ppc4xx_edac_init(void)
{
ppc4xx_edac_printk(KERN_INFO, PPC4XX_EDAC_MODULE_REVISION "\n");
ppc4xx_edac_opstate_init();
return platform_driver_register(&ppc4xx_edac_driver);
}
/**
* ppc4xx_edac_exit - driver/module removal entry point
*
* This routine is the driver/module removal entry point. It
* unregisters the driver as an OpenFirmware device tree platform
* driver.
*/
static void __exit
ppc4xx_edac_exit(void)
{
platform_driver_unregister(&ppc4xx_edac_driver);
}
module_init(ppc4xx_edac_init);
module_exit(ppc4xx_edac_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grant Erickson <[email protected]>");
MODULE_DESCRIPTION("EDAC MC Driver for the PPC4xx IBM DDR2 Memory Controller");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting State: "
"0=" EDAC_OPSTATE_POLL_STR ", 2=" EDAC_OPSTATE_INT_STR);
| linux-master | drivers/edac/ppc4xx_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
*
* Copyright (c) 2008 Wind River Systems, Inc.
*
* Authors: Cao Qingtao <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include "edac_module.h"
#define CPC925_EDAC_REVISION " Ver: 1.0.0"
#define CPC925_EDAC_MOD_STR "cpc925_edac"
#define cpc925_printk(level, fmt, arg...) \
edac_printk(level, "CPC925", fmt, ##arg)
#define cpc925_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
/*
 * CPC925 registers are 32 bits wide, with bit 0 being the
 * most significant bit and bit 31 the least significant.
*/
#define CPC925_BITS_PER_REG 32
#define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr))
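/* For example, CPC925_BIT(0) is 0x80000000 and CPC925_BIT(31) is 0x00000001. */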
/*
* EDAC device names for the error detections of
* CPU Interface and Hypertransport Link.
*/
#define CPC925_CPU_ERR_DEV "cpu"
#define CPC925_HT_LINK_DEV "htlink"
/* Suppose the DDR refresh cycle is 15.6 microseconds */
#define CPC925_REF_FREQ 0xFA69
#define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */
#define CPC925_NR_CSROWS 8
/*
* All registers and bits definitions are taken from
* "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
*/
/*
* CPU and Memory Controller Registers
*/
/************************************************************
* Processor Interface Exception Mask Register (APIMASK)
************************************************************/
#define REG_APIMASK_OFFSET 0x30070
enum apimask_bits {
APIMASK_DART = CPC925_BIT(0), /* DART Exception */
APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
APIMASK_STAT = CPC925_BIT(3), /* Status Exception */
APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */
APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
/* BIT(7) Reserved */
APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
APIMASK_ADRS1),
ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
};
#define APIMASK_ADI(n) CPC925_BIT(((n)+1))
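/* APIMASK_ADI(0) and APIMASK_ADI(1) equal APIMASK_ADI0 and APIMASK_ADI1 above. */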
/************************************************************
* Processor Interface Exception Register (APIEXCP)
************************************************************/
#define REG_APIEXCP_OFFSET 0x30060
enum apiexcp_bits {
APIEXCP_DART = CPC925_BIT(0), /* DART Exception */
APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */
APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */
APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
/* BIT(7) Reserved */
APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
APIEXCP_ADRS1),
UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
};
/************************************************************
* Memory Bus Configuration Register (MBCR)
************************************************************/
#define REG_MBCR_OFFSET 0x2190
#define MBCR_64BITCFG_SHIFT 23
#define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT)
#define MBCR_64BITBUS_SHIFT 22
#define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT)
/************************************************************
* Memory Bank Mode Register (MBMR)
************************************************************/
#define REG_MBMR_OFFSET 0x21C0
#define MBMR_MODE_MAX_VALUE 0xF
#define MBMR_MODE_SHIFT 25
#define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
#define MBMR_BBA_SHIFT 24
#define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT)
/************************************************************
* Memory Bank Boundary Address Register (MBBAR)
************************************************************/
#define REG_MBBAR_OFFSET 0x21D0
#define MBBAR_BBA_MAX_VALUE 0xFF
#define MBBAR_BBA_SHIFT 24
#define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
/************************************************************
* Memory Scrub Control Register (MSCR)
************************************************************/
#define REG_MSCR_OFFSET 0x2400
#define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/
#define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */
#define MSCR_SI_SHIFT 16 /* si - bit8:15*/
#define MSCR_SI_MAX_VALUE 0xFF
#define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
/************************************************************
* Memory Scrub Range Start Register (MSRSR)
************************************************************/
#define REG_MSRSR_OFFSET 0x2410
/************************************************************
* Memory Scrub Range End Register (MSRER)
************************************************************/
#define REG_MSRER_OFFSET 0x2420
/************************************************************
* Memory Scrub Pattern Register (MSPR)
************************************************************/
#define REG_MSPR_OFFSET 0x2430
/************************************************************
* Memory Check Control Register (MCCR)
************************************************************/
#define REG_MCCR_OFFSET 0x2440
enum mccr_bits {
MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */
};
/************************************************************
* Memory Check Range End Register (MCRER)
************************************************************/
#define REG_MCRER_OFFSET 0x2450
/************************************************************
* Memory Error Address Register (MEAR)
************************************************************/
#define REG_MEAR_OFFSET 0x2460
#define MEAR_BCNT_MAX_VALUE 0x3
#define MEAR_BCNT_SHIFT 30
#define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
#define MEAR_RANK_MAX_VALUE 0x7
#define MEAR_RANK_SHIFT 27
#define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
#define MEAR_COL_MAX_VALUE 0x7FF
#define MEAR_COL_SHIFT 16
#define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
#define MEAR_BANK_MAX_VALUE 0x3
#define MEAR_BANK_SHIFT 14
#define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
#define MEAR_ROW_MASK 0x00003FFF
/************************************************************
* Memory Error Syndrome Register (MESR)
************************************************************/
#define REG_MESR_OFFSET 0x2470
#define MESR_ECC_SYN_H_MASK 0xFF00
#define MESR_ECC_SYN_L_MASK 0x00FF
/************************************************************
* Memory Mode Control Register (MMCR)
************************************************************/
#define REG_MMCR_OFFSET 0x2500
enum mmcr_bits {
MMCR_REG_DIMM_MODE = CPC925_BIT(3),
};
/*
* HyperTransport Link Registers
*/
/************************************************************
* Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
************************************************************/
#define REG_ERRCTRL_OFFSET 0x70140
enum errctrl_bits { /* nonfatal interrupts for */
ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */
ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */
ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */
ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */
ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */
ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */
ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */
ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
};
/************************************************************
* Link Configuration and Link Control Register (LINKCTRL)
************************************************************/
#define REG_LINKCTRL_OFFSET 0x70110
enum linkctrl_bits {
LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)),
LINKCTRL_LINK_FAIL = CPC925_BIT(27),
HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
};
/************************************************************
* Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
************************************************************/
#define REG_LINKERR_OFFSET 0x70120
enum linkerr_bits {
LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */
LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */
LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */
HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
LINKERR_PROT_ERR),
};
/************************************************************
* Bridge Control Register (BRGCTRL)
************************************************************/
#define REG_BRGCTRL_OFFSET 0x70300
enum brgctrl_bits {
BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
};
/* Private structure for edac memory controller */
struct cpc925_mc_pdata {
void __iomem *vbase;
unsigned long total_mem;
const char *name;
int edac_idx;
};
/* Private structure for common edac device */
struct cpc925_dev_info {
void __iomem *vbase;
struct platform_device *pdev;
char *ctl_name;
int edac_idx;
struct edac_device_ctl_info *edac_dev;
void (*init)(struct cpc925_dev_info *dev_info);
void (*exit)(struct cpc925_dev_info *dev_info);
void (*check)(struct edac_device_ctl_info *edac_dev);
};
/* Get total memory size from Open Firmware DTB */
static void get_total_mem(struct cpc925_mc_pdata *pdata)
{
struct device_node *np = NULL;
const unsigned int *reg, *reg_end;
int len, sw, aw;
unsigned long start, size;
np = of_find_node_by_type(NULL, "memory");
if (!np)
return;
aw = of_n_addr_cells(np);
sw = of_n_size_cells(np);
reg = (const unsigned int *)of_get_property(np, "reg", &len);
reg_end = reg + len/4;
pdata->total_mem = 0;
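	/*
	 * The "reg" property is a list of (address, size) cell pairs;
	 * sum the sizes to obtain the total amount of memory.
	 */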
do {
start = of_read_number(reg, aw);
reg += aw;
size = of_read_number(reg, sw);
reg += sw;
edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size);
pdata->total_mem += size;
} while (reg < reg_end);
of_node_put(np);
edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem);
}
static void cpc925_init_csrows(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
struct dimm_info *dimm;
enum dev_type dtype;
int index, j;
u32 mbmr, mbbar, bba, grain;
unsigned long row_size, nr_pages, last_nr_pages = 0;
get_total_mem(pdata);
for (index = 0; index < mci->nr_csrows; index++) {
mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
0x20 * index);
mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
				    0x20 * index);
bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
if (bba == 0)
continue; /* not populated */
csrow = mci->csrows[index];
row_size = bba * (1UL << 28); /* 256M */
csrow->first_page = last_nr_pages;
nr_pages = row_size >> PAGE_SHIFT;
csrow->last_page = csrow->first_page + nr_pages - 1;
last_nr_pages = csrow->last_page + 1;
switch (csrow->nr_channels) {
case 1: /* Single channel */
grain = 32; /* four-beat burst of 32 bytes */
break;
case 2: /* Dual channel */
default:
grain = 64; /* four-beat burst of 64 bytes */
break;
}
switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
case 6: /* 0110, no way to differentiate X8 VS X16 */
case 5: /* 0101 */
case 8: /* 1000 */
dtype = DEV_X16;
break;
case 7: /* 0111 */
case 9: /* 1001 */
dtype = DEV_X8;
break;
default:
dtype = DEV_UNKNOWN;
break;
}
for (j = 0; j < csrow->nr_channels; j++) {
dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / csrow->nr_channels;
dimm->mtype = MEM_RDDR;
dimm->edac_mode = EDAC_SECDED;
dimm->grain = grain;
dimm->dtype = dtype;
}
}
}
/* Enable memory controller ECC detection */
static void cpc925_mc_init(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
u32 apimask;
u32 mccr;
/* Enable various ECC error exceptions */
apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
if ((apimask & ECC_MASK_ENABLE) == 0) {
apimask |= ECC_MASK_ENABLE;
__raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
}
/* Enable ECC detection */
mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
if ((mccr & MCCR_ECC_EN) == 0) {
mccr |= MCCR_ECC_EN;
__raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
}
}
/* Disable memory controller ECC detection */
static void cpc925_mc_exit(struct mem_ctl_info *mci)
{
/*
* WARNING:
* We are supposed to clear the ECC error detection bits,
	 * and it would be no problem to do so. However, if they are
	 * cleared here and the CPC925 EDAC module is later re-installed,
	 * setting them up again in cpc925_mc_init() will trigger a
	 * machine check exception.
	 * Also, it is OK to leave the ECC error detection bits enabled,
	 * since they are reset to 1 by default or by the boot loader.
*/
return;
}
/*
* Revert DDR column/row/bank addresses into page frame number and
* offset in page.
*
* Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs),
* physical address(PA) bits to column address(CA) bits mappings are:
* CA 0 1 2 3 4 5 6 7 8 9 10
* PA 59 58 57 56 55 54 53 52 51 50 49
*
* physical address(PA) bits to bank address(BA) bits mappings are:
* BA 0 1
* PA 43 44
*
* physical address(PA) bits to row address(RA) bits mappings are:
* RA 0 1 2 3 4 5 6 7 8 9 10 11 12
* PA 36 35 34 48 47 46 45 40 41 42 39 38 37
*/
static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
unsigned long *pfn, unsigned long *offset, int *csrow)
{
u32 bcnt, rank, col, bank, row;
u32 c;
unsigned long pa;
int i;
bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
row = mear & MEAR_ROW_MASK;
*csrow = rank;
#ifdef CONFIG_EDAC_DEBUG
if (mci->csrows[rank]->first_page == 0) {
cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
"non-populated csrow, broken hardware?\n");
return;
}
#endif
/* Revert csrow number */
pa = mci->csrows[rank]->first_page << PAGE_SHIFT;
/* Revert column address */
col += bcnt;
for (i = 0; i < 11; i++) {
c = col & 0x1;
col >>= 1;
pa |= c << (14 - i);
}
/* Revert bank address */
pa |= bank << 19;
/* Revert row address, in 4 steps */
for (i = 0; i < 3; i++) {
c = row & 0x1;
row >>= 1;
pa |= c << (26 - i);
}
for (i = 0; i < 3; i++) {
c = row & 0x1;
row >>= 1;
pa |= c << (21 + i);
}
for (i = 0; i < 4; i++) {
c = row & 0x1;
row >>= 1;
pa |= c << (18 - i);
}
for (i = 0; i < 3; i++) {
c = row & 0x1;
row >>= 1;
pa |= c << (29 - i);
}
*offset = pa & (PAGE_SIZE - 1);
*pfn = pa >> PAGE_SHIFT;
edac_dbg(0, "ECC physical address 0x%lx\n", pa);
}
static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
{
if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
return 0;
if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
return 1;
cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
syndrome);
return 1;
}
/* Check memory controller registers for ECC errors */
static void cpc925_mc_check(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
u32 apiexcp;
u32 mear;
u32 mesr;
u16 syndrome;
unsigned long pfn = 0, offset = 0;
int csrow = 0, channel = 0;
/* APIEXCP is cleared when read */
apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
if ((apiexcp & ECC_EXCP_DETECTED) == 0)
return;
mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
	syndrome = mesr & (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
/* Revert column/row addresses into page frame number, etc */
cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
if (apiexcp & CECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
channel = cpc925_mc_find_channel(mci, syndrome);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
pfn, offset, syndrome,
csrow, channel, -1,
mci->ctl_name, "");
}
if (apiexcp & UECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
pfn, offset, 0,
csrow, -1, -1,
mci->ctl_name, "");
}
cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n",
__raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n",
apiexcp);
cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n",
__raw_readl(pdata->vbase + REG_MSCR_OFFSET));
cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n",
__raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n",
__raw_readl(pdata->vbase + REG_MSRER_OFFSET));
cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n",
__raw_readl(pdata->vbase + REG_MSPR_OFFSET));
cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n",
__raw_readl(pdata->vbase + REG_MCCR_OFFSET));
cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n",
__raw_readl(pdata->vbase + REG_MCRER_OFFSET));
cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n",
						mear);
cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n",
syndrome);
}
/******************** CPU err device********************************/
static u32 cpc925_cpu_mask_disabled(void)
{
struct device_node *cpunode;
static u32 mask = 0;
/* use cached value if available */
if (mask != 0)
return mask;
mask = APIMASK_ADI0 | APIMASK_ADI1;
for_each_of_cpu_node(cpunode) {
const u32 *reg = of_get_property(cpunode, "reg", NULL);
if (reg == NULL || *reg > 2) {
cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
continue;
}
mask &= ~APIMASK_ADI(*reg);
}
if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) {
		/* We assume that each CPU sits on its own PI and that
		 * for present CPUs the reg property equals the PI
* interface id */
cpc925_printk(KERN_WARNING,
"Assuming PI id is equal to CPU MPIC id!\n");
}
return mask;
}
/* Enable CPU Errors detection */
static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
{
u32 apimask;
u32 cpumask;
apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
cpumask = cpc925_cpu_mask_disabled();
if (apimask & cpumask) {
cpc925_printk(KERN_WARNING, "CPU(s) not present, "
"but enabled in APIMASK, disabling\n");
apimask &= ~cpumask;
}
if ((apimask & CPU_MASK_ENABLE) == 0)
apimask |= CPU_MASK_ENABLE;
__raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
}
/* Disable CPU Errors detection */
static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
{
/*
* WARNING:
* We are supposed to clear the CPU error detection bits,
	 * and it would be no problem to do so. However, if they are
	 * cleared here and the CPC925 EDAC module is later re-installed,
	 * setting them up again in cpc925_cpu_init() will trigger a
	 * machine check exception.
	 * Also, it is OK to leave the CPU error detection bits enabled,
	 * since they are reset to 1 by default.
*/
return;
}
/* Check for CPU Errors */
static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
{
struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
u32 apiexcp;
u32 apimask;
/* APIEXCP is cleared when read */
apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
if ((apiexcp & CPU_EXCP_DETECTED) == 0)
return;
if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0)
return;
apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
"Processor Interface register dump:\n");
cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask);
cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp);
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
/******************** HT Link err device****************************/
/* Enable HyperTransport Link Error detection */
static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
{
u32 ht_errctrl;
ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
ht_errctrl |= HT_ERRCTRL_ENABLE;
__raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
}
}
/* Disable HyperTransport Link Error detection */
static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
{
u32 ht_errctrl;
ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
ht_errctrl &= ~HT_ERRCTRL_ENABLE;
__raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
}
/* Check for HyperTransport Link errors */
static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
{
struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);
if (!((brgctrl & BRGCTRL_DETSERR) ||
(linkctrl & HT_LINKCTRL_DETECTED) ||
(errctrl & HT_ERRCTRL_DETECTED) ||
(linkerr & HT_LINKERR_DETECTED)))
return;
cpc925_printk(KERN_INFO, "HT Link Fault\n"
"HT register dump:\n");
cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n",
brgctrl);
cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n",
linkctrl);
cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n",
errctrl);
cpc925_printk(KERN_INFO, "Link Error 0x%08x\n",
linkerr);
/* Clear by write 1 */
if (brgctrl & BRGCTRL_DETSERR)
__raw_writel(BRGCTRL_DETSERR,
dev_info->vbase + REG_BRGCTRL_OFFSET);
if (linkctrl & HT_LINKCTRL_DETECTED)
__raw_writel(HT_LINKCTRL_DETECTED,
dev_info->vbase + REG_LINKCTRL_OFFSET);
/* Initiate Secondary Bus Reset to clear the chain failure */
if (errctrl & ERRCTRL_CHN_FAL)
__raw_writel(BRGCTRL_SECBUSRESET,
dev_info->vbase + REG_BRGCTRL_OFFSET);
if (errctrl & ERRCTRL_RSP_ERR)
__raw_writel(ERRCTRL_RSP_ERR,
dev_info->vbase + REG_ERRCTRL_OFFSET);
if (linkerr & HT_LINKERR_DETECTED)
__raw_writel(HT_LINKERR_DETECTED,
dev_info->vbase + REG_LINKERR_OFFSET);
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
}
static struct cpc925_dev_info cpc925_devs[] = {
{
.ctl_name = CPC925_CPU_ERR_DEV,
.init = cpc925_cpu_init,
.exit = cpc925_cpu_exit,
.check = cpc925_cpu_check,
},
{
.ctl_name = CPC925_HT_LINK_DEV,
.init = cpc925_htlink_init,
.exit = cpc925_htlink_exit,
.check = cpc925_htlink_check,
},
{ }
};
/*
* Add CPU Err detection and HyperTransport Link Err detection
 * as common "edac_device" instances; they have no corresponding device
 * nodes in the Open Firmware DTB, so platform devices are registered
 * for them here. They also share the MMIO region with the memory
 * controller.
*/
static void cpc925_add_edac_devices(void __iomem *vbase)
{
struct cpc925_dev_info *dev_info;
if (!vbase) {
cpc925_printk(KERN_ERR, "MMIO not established yet\n");
return;
}
for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
dev_info->vbase = vbase;
dev_info->pdev = platform_device_register_simple(
dev_info->ctl_name, 0, NULL, 0);
if (IS_ERR(dev_info->pdev)) {
cpc925_printk(KERN_ERR,
"Can't register platform device for %s\n",
dev_info->ctl_name);
continue;
}
/*
		 * There is no need to allocate a private structure here;
		 * the cpc925_devs[] entry is used instead.
*/
dev_info->edac_idx = edac_device_alloc_index();
dev_info->edac_dev =
edac_device_alloc_ctl_info(0, dev_info->ctl_name,
1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
if (!dev_info->edac_dev) {
cpc925_printk(KERN_ERR, "No memory for edac device\n");
goto err1;
}
dev_info->edac_dev->pvt_info = dev_info;
dev_info->edac_dev->dev = &dev_info->pdev->dev;
dev_info->edac_dev->ctl_name = dev_info->ctl_name;
dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
dev_info->edac_dev->edac_check = dev_info->check;
if (dev_info->init)
dev_info->init(dev_info);
if (edac_device_add_device(dev_info->edac_dev) > 0) {
cpc925_printk(KERN_ERR,
"Unable to add edac device for %s\n",
dev_info->ctl_name);
goto err2;
}
edac_dbg(0, "Successfully added edac device for %s\n",
dev_info->ctl_name);
continue;
err2:
if (dev_info->exit)
dev_info->exit(dev_info);
edac_device_free_ctl_info(dev_info->edac_dev);
err1:
platform_device_unregister(dev_info->pdev);
}
}
/*
* Delete the common "edac_device" for CPU Err Detection
* and HyperTransport Link Err Detection
*/
static void cpc925_del_edac_devices(void)
{
struct cpc925_dev_info *dev_info;
for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
if (dev_info->edac_dev) {
edac_device_del_device(dev_info->edac_dev->dev);
edac_device_free_ctl_info(dev_info->edac_dev);
platform_device_unregister(dev_info->pdev);
}
if (dev_info->exit)
dev_info->exit(dev_info);
edac_dbg(0, "Successfully deleted edac device for %s\n",
dev_info->ctl_name);
}
}
/* Convert current back-ground scrub rate into byte/sec bandwidth */
static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
int bw;
u32 mscr;
u8 si;
mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr);
if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
(si == 0)) {
cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
bw = 0;
} else
bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
return bw;
}
/* Return 0 for single channel; 1 for dual channel */
static int cpc925_mc_get_channels(void __iomem *vbase)
{
int dual = 0;
u32 mbcr;
mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
/*
* Dual channel only when 128-bit wide physical bus
* and 128-bit configuration.
*/
if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
((mbcr & MBCR_64BITBUS_MASK) == 0))
dual = 1;
edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single");
return dual;
}
static int cpc925_probe(struct platform_device *pdev)
{
static int edac_mc_idx;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
void __iomem *vbase;
struct cpc925_mc_pdata *pdata;
struct resource *r;
int res = 0, nr_channels;
edac_dbg(0, "%s platform device found!\n", pdev->name);
if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
res = -ENOMEM;
goto out;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
cpc925_printk(KERN_ERR, "Unable to get resource\n");
res = -ENOENT;
goto err1;
}
if (!devm_request_mem_region(&pdev->dev,
r->start,
resource_size(r),
pdev->name)) {
cpc925_printk(KERN_ERR, "Unable to request mem region\n");
res = -EBUSY;
goto err1;
}
vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!vbase) {
cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
res = -ENOMEM;
goto err2;
}
nr_channels = cpc925_mc_get_channels(vbase) + 1;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = CPC925_NR_CSROWS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
sizeof(struct cpc925_mc_pdata));
if (!mci) {
cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
res = -ENOMEM;
goto err2;
}
pdata = mci->pvt_info;
pdata->vbase = vbase;
pdata->edac_idx = edac_mc_idx++;
pdata->name = pdev->name;
mci->pdev = &pdev->dev;
platform_set_drvdata(pdev, mci);
mci->dev_name = dev_name(&pdev->dev);
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = CPC925_EDAC_MOD_STR;
mci->ctl_name = pdev->name;
if (edac_op_state == EDAC_OPSTATE_POLL)
mci->edac_check = cpc925_mc_check;
mci->ctl_page_to_phys = NULL;
mci->scrub_mode = SCRUB_SW_SRC;
mci->set_sdram_scrub_rate = NULL;
mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
cpc925_init_csrows(mci);
/* Setup memory controller registers */
cpc925_mc_init(mci);
if (edac_mc_add_mc(mci) > 0) {
cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
goto err3;
}
cpc925_add_edac_devices(vbase);
/* get this far and it's successful */
edac_dbg(0, "success\n");
res = 0;
goto out;
err3:
cpc925_mc_exit(mci);
edac_mc_free(mci);
err2:
devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
err1:
devres_release_group(&pdev->dev, cpc925_probe);
out:
return res;
}
static int cpc925_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
/*
* Delete common edac devices before edac mc, because
* the former share the MMIO of the latter.
*/
cpc925_del_edac_devices();
cpc925_mc_exit(mci);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
static struct platform_driver cpc925_edac_driver = {
.probe = cpc925_probe,
.remove = cpc925_remove,
.driver = {
.name = "cpc925_edac",
}
};
static int __init cpc925_edac_init(void)
{
int ret = 0;
printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
/* Only support POLL mode so far */
edac_op_state = EDAC_OPSTATE_POLL;
ret = platform_driver_register(&cpc925_edac_driver);
if (ret) {
printk(KERN_WARNING "Failed to register %s\n",
CPC925_EDAC_MOD_STR);
}
return ret;
}
static void __exit cpc925_edac_exit(void)
{
platform_driver_unregister(&cpc925_edac_driver);
}
module_init(cpc925_edac_init);
module_exit(cpc925_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <[email protected]>");
MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
| linux-master | drivers/edac/cpc925_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SiFive Platform EDAC Driver
*
* Copyright (C) 2018-2022 SiFive, Inc.
*
* This driver is partially based on octeon_edac-pc.c
*
*/
#include <linux/edac.h>
#include <linux/platform_device.h>
#include "edac_module.h"
#include <soc/sifive/sifive_ccache.h>
#define DRVNAME "sifive_edac"
struct sifive_edac_priv {
struct notifier_block notifier;
struct edac_device_ctl_info *dci;
};
/*
* EDAC error callback
*
 * @event: error type, SIFIVE_CCACHE_ERR_TYPE_UE or SIFIVE_CCACHE_ERR_TYPE_CE.
*/
static
int ecc_err_event(struct notifier_block *this, unsigned long event, void *ptr)
{
const char *msg = (char *)ptr;
struct sifive_edac_priv *p;
p = container_of(this, struct sifive_edac_priv, notifier);
if (event == SIFIVE_CCACHE_ERR_TYPE_UE)
edac_device_handle_ue(p->dci, 0, 0, msg);
else if (event == SIFIVE_CCACHE_ERR_TYPE_CE)
edac_device_handle_ce(p->dci, 0, 0, msg);
return NOTIFY_OK;
}
static int ecc_register(struct platform_device *pdev)
{
struct sifive_edac_priv *p;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->notifier.notifier_call = ecc_err_event;
platform_set_drvdata(pdev, p);
p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
1, 1, NULL, 0,
edac_device_alloc_index());
if (!p->dci)
return -ENOMEM;
p->dci->dev = &pdev->dev;
p->dci->mod_name = "Sifive ECC Manager";
p->dci->ctl_name = dev_name(&pdev->dev);
p->dci->dev_name = dev_name(&pdev->dev);
if (edac_device_add_device(p->dci)) {
dev_err(p->dci->dev, "failed to register with EDAC core\n");
goto err;
}
register_sifive_ccache_error_notifier(&p->notifier);
return 0;
err:
edac_device_free_ctl_info(p->dci);
return -ENXIO;
}
static int ecc_unregister(struct platform_device *pdev)
{
struct sifive_edac_priv *p = platform_get_drvdata(pdev);
unregister_sifive_ccache_error_notifier(&p->notifier);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(p->dci);
return 0;
}
static struct platform_device *sifive_pdev;
static int __init sifive_edac_init(void)
{
int ret;
sifive_pdev = platform_device_register_simple(DRVNAME, 0, NULL, 0);
if (IS_ERR(sifive_pdev))
return PTR_ERR(sifive_pdev);
ret = ecc_register(sifive_pdev);
if (ret)
platform_device_unregister(sifive_pdev);
return ret;
}
static void __exit sifive_edac_exit(void)
{
ecc_unregister(sifive_pdev);
platform_device_unregister(sifive_pdev);
}
module_init(sifive_edac_init);
module_exit(sifive_edac_exit);
MODULE_AUTHOR("SiFive Inc.");
MODULE_DESCRIPTION("SiFive platform EDAC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/edac/sifive_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Freescale Memory Controller kernel module
*
* Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
* ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
* split out from mpc85xx_edac EDAC driver.
*
* Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
*
* Author: Dave Jiang <[email protected]>
*
* 2006-2007 (c) MontaVista Software, Inc.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include "edac_module.h"
#include "fsl_ddr_edac.h"
#define EDAC_MOD_STR "fsl_ddr_edac"
static int edac_mc_idx;
static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;
static bool little_endian;
static inline u32 ddr_in32(void __iomem *addr)
{
return little_endian ? ioread32(addr) : ioread32be(addr);
}
static inline void ddr_out32(void __iomem *addr, u32 value)
{
if (little_endian)
iowrite32(value, addr);
else
iowrite32be(value, addr);
}
#ifdef CONFIG_EDAC_DEBUG
/************************ MC SYSFS parts ***********************************/
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
}
static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
}
static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
}
static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
unsigned long val;
int rc;
if (isdigit(*data)) {
rc = kstrtoul(data, 0, &val);
if (rc)
return rc;
ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
return count;
}
return 0;
}
static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
unsigned long val;
int rc;
if (isdigit(*data)) {
rc = kstrtoul(data, 0, &val);
if (rc)
return rc;
ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
return count;
}
return 0;
}
static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
unsigned long val;
int rc;
if (isdigit(*data)) {
rc = kstrtoul(data, 0, &val);
if (rc)
return rc;
ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
return count;
}
return 0;
}
static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
#endif /* CONFIG_EDAC_DEBUG */
static struct attribute *fsl_ddr_dev_attrs[] = {
#ifdef CONFIG_EDAC_DEBUG
&dev_attr_inject_data_hi.attr,
&dev_attr_inject_data_lo.attr,
&dev_attr_inject_ctrl.attr,
#endif
NULL
};
ATTRIBUTE_GROUPS(fsl_ddr_dev);
/**************************** MC Err device ***************************/
/*
* Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
* MPC8572 User's Manual. Each line represents a syndrome bit column as a
* 64-bit value, but split into an upper and lower 32-bit chunk. The labels
* below correspond to Freescale's manuals.
*/
static unsigned int ecc_table[16] = {
/* MSB LSB */
/* [0:31] [32:63] */
0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */
0x00ff00ff, 0x00fff0ff,
0x0f0f0f0f, 0x0f0fff00,
0x11113333, 0x7777000f,
0x22224444, 0x8888222f,
0x44448888, 0xffff4441,
0x8888ffff, 0x11118882,
0xffff1111, 0x22221114, /* Syndrome bit 0 */
};
/*
* Calculate the correct ECC value for a 64-bit value specified by high:low
*/
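/*
 * Each ECC bit is the even parity of the data bits selected by the
 * corresponding mask pair in ecc_table above: the first word of a pair
 * masks 'high', the second word masks 'low'.
 */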
static u8 calculate_ecc(u32 high, u32 low)
{
u32 mask_low;
u32 mask_high;
int bit_cnt;
u8 ecc = 0;
int i;
int j;
for (i = 0; i < 8; i++) {
mask_high = ecc_table[i * 2];
mask_low = ecc_table[i * 2 + 1];
bit_cnt = 0;
for (j = 0; j < 32; j++) {
if ((mask_high >> j) & 1)
bit_cnt ^= (high >> j) & 1;
if ((mask_low >> j) & 1)
bit_cnt ^= (low >> j) & 1;
}
ecc |= bit_cnt << i;
}
return ecc;
}
/*
* Create the syndrome code which is generated if the data line specified by
 * 'bit' failed, e.g. generate one of the 8-bit codes seen in Table 8-55 in the MPC8641
* User's Manual and 9-61 in the MPC8572 User's Manual.
*/
static u8 syndrome_from_bit(unsigned int bit)
{
int i;
u8 syndrome = 0;
/*
* Cycle through the upper or lower 32-bit portion of each value in
* ecc_table depending on if 'bit' is in the upper or lower half of
* 64-bit data.
*/
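	/*
	 * Bits 0-31 correspond to the odd (low-word) table entries, so the
	 * loop starts at index 1 for them and at index 0 for bits 32-63.
	 */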
for (i = bit < 32; i < 16; i += 2)
syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
return syndrome;
}
/*
* Decode data and ecc syndrome to determine what went wrong
* Note: This can only decode single-bit errors
*/
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
int *bad_data_bit, int *bad_ecc_bit)
{
int i;
u8 syndrome;
*bad_data_bit = -1;
*bad_ecc_bit = -1;
/*
* Calculate the ECC of the captured data and XOR it with the captured
* ECC to find an ECC syndrome value we can search for
*/
syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
/* Check if a data line is stuck... */
for (i = 0; i < 64; i++) {
if (syndrome == syndrome_from_bit(i)) {
*bad_data_bit = i;
return;
}
}
/* If data is correct, check ECC bits for errors... */
for (i = 0; i < 8; i++) {
if ((syndrome >> i) & 0x1) {
*bad_ecc_bit = i;
return;
}
}
}
#define make64(high, low) (((u64)(high) << 32) | (low))
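/* e.g. make64(0x1, 0x80000000) == 0x0000000180000000ULL */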
static void fsl_mc_check(struct mem_ctl_info *mci)
{
struct fsl_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
u32 bus_width;
u32 err_detect;
u32 syndrome;
u64 err_addr;
u32 pfn;
int row_index;
u32 cap_high;
u32 cap_low;
int bad_data_bit;
int bad_ecc_bit;
err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
if (!err_detect)
return;
fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
err_detect);
/* no more processing if not ECC bit errors */
if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
return;
}
syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);
/* Mask off appropriate bits of syndrome based on bus width */
bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
DSC_DBW_MASK) ? 32 : 64;
if (bus_width == 64)
syndrome &= 0xff;
else
syndrome &= 0xffff;
err_addr = make64(
ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
pfn = err_addr >> PAGE_SHIFT;
for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
csrow = mci->csrows[row_index];
if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
break;
}
cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);
/*
* Analyze single-bit errors on 64-bit wide buses
* TODO: Add support for 32-bit wide buses
*/
if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
sbe_ecc_decode(cap_high, cap_low, syndrome,
&bad_data_bit, &bad_ecc_bit);
if (bad_data_bit != -1)
fsl_mc_printk(mci, KERN_ERR,
"Faulty Data bit: %d\n", bad_data_bit);
if (bad_ecc_bit != -1)
fsl_mc_printk(mci, KERN_ERR,
"Faulty ECC bit: %d\n", bad_ecc_bit);
fsl_mc_printk(mci, KERN_ERR,
"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
cap_high ^ (1 << (bad_data_bit - 32)),
cap_low ^ (1 << bad_data_bit),
syndrome ^ (1 << bad_ecc_bit));
}
fsl_mc_printk(mci, KERN_ERR,
"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
cap_high, cap_low, syndrome);
fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
/* we are out of range */
if (row_index == mci->nr_csrows)
fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
if (err_detect & DDR_EDE_SBE)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
pfn, err_addr & ~PAGE_MASK, syndrome,
row_index, 0, -1,
mci->ctl_name, "");
if (err_detect & DDR_EDE_MBE)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
pfn, err_addr & ~PAGE_MASK, syndrome,
row_index, 0, -1,
mci->ctl_name, "");
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
}
static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
{
struct mem_ctl_info *mci = dev_id;
struct fsl_mc_pdata *pdata = mci->pvt_info;
u32 err_detect;
err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
if (!err_detect)
return IRQ_NONE;
fsl_mc_check(mci);
return IRQ_HANDLED;
}
static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
{
struct fsl_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
struct dimm_info *dimm;
u32 sdram_ctl;
u32 sdtype;
enum mem_type mtype;
u32 cs_bnds;
int index;
sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
sdtype = sdram_ctl & DSC_SDTYPE_MASK;
if (sdram_ctl & DSC_RD_EN) {
switch (sdtype) {
case 0x02000000:
mtype = MEM_RDDR;
break;
case 0x03000000:
mtype = MEM_RDDR2;
break;
case 0x07000000:
mtype = MEM_RDDR3;
break;
case 0x05000000:
mtype = MEM_RDDR4;
break;
default:
mtype = MEM_UNKNOWN;
break;
}
} else {
switch (sdtype) {
case 0x02000000:
mtype = MEM_DDR;
break;
case 0x03000000:
mtype = MEM_DDR2;
break;
case 0x07000000:
mtype = MEM_DDR3;
break;
case 0x05000000:
mtype = MEM_DDR4;
break;
default:
mtype = MEM_UNKNOWN;
break;
}
}
for (index = 0; index < mci->nr_csrows; index++) {
u32 start;
u32 end;
csrow = mci->csrows[index];
dimm = csrow->channels[0]->dimm;
cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
(index * FSL_MC_CS_BNDS_OFS));
start = (cs_bnds & 0xffff0000) >> 16;
end = (cs_bnds & 0x0000ffff);
if (start == end)
continue; /* not populated */
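		/*
		 * The CS_BNDS start/end fields are treated here as 16 MiB
		 * (1 << 24) granular addresses; convert them to page frame
		 * numbers and make 'end' inclusive.
		 */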
start <<= (24 - PAGE_SHIFT);
end <<= (24 - PAGE_SHIFT);
end |= (1 << (24 - PAGE_SHIFT)) - 1;
csrow->first_page = start;
csrow->last_page = end;
dimm->nr_pages = end + 1 - start;
dimm->grain = 8;
dimm->mtype = mtype;
dimm->dtype = DEV_UNKNOWN;
if (sdram_ctl & DSC_X32_EN)
dimm->dtype = DEV_X32;
dimm->edac_mode = EDAC_SECDED;
}
}
int fsl_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct fsl_mc_pdata *pdata;
struct resource r;
u32 sdram_ctl;
int res;
if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 4;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = 1;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
sizeof(*pdata));
if (!mci) {
devres_release_group(&op->dev, fsl_mc_err_probe);
return -ENOMEM;
}
pdata = mci->pvt_info;
pdata->name = "fsl_mc_err";
mci->pdev = &op->dev;
pdata->edac_idx = edac_mc_idx++;
dev_set_drvdata(mci->pdev, mci);
mci->ctl_name = pdata->name;
mci->dev_name = pdata->name;
/*
* Get the endianness of DDR controller registers.
* Default is big endian.
*/
little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
res = of_address_to_resource(op->dev.of_node, 0, &r);
if (res) {
pr_err("%s: Unable to get resource for MC err regs\n",
__func__);
goto err;
}
if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
pdata->name)) {
pr_err("%s: Error while requesting mem region\n",
__func__);
res = -EBUSY;
goto err;
}
pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
if (!pdata->mc_vbase) {
pr_err("%s: Unable to setup MC err regs\n", __func__);
res = -ENOMEM;
goto err;
}
sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
if (!(sdram_ctl & DSC_ECC_EN)) {
/* no ECC */
pr_warn("%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
goto err;
}
edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
if (edac_op_state == EDAC_OPSTATE_POLL)
mci->edac_check = fsl_mc_check;
mci->ctl_page_to_phys = NULL;
mci->scrub_mode = SCRUB_SW_SRC;
fsl_ddr_init_csrows(mci);
/* store the original error disable bits */
orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);
/* clear all error bits */
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
if (res) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto err;
}
if (edac_op_state == EDAC_OPSTATE_INT) {
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
DDR_EIE_MBEE | DDR_EIE_SBEE);
/* store the original error management threshold */
orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
FSL_MC_ERR_SBE) & 0xff0000;
/* set threshold to 1 error per interrupt */
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);
/* register interrupts */
pdata->irq = platform_get_irq(op, 0);
res = devm_request_irq(&op->dev, pdata->irq,
fsl_mc_isr,
IRQF_SHARED,
"[EDAC] MC err", mci);
if (res < 0) {
pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
__func__, pdata->irq);
res = -ENODEV;
goto err2;
}
pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
pdata->irq);
}
devres_remove_group(&op->dev, fsl_mc_err_probe);
edac_dbg(3, "success\n");
pr_info(EDAC_MOD_STR " MC err registered\n");
return 0;
err2:
edac_mc_del_mc(&op->dev);
err:
devres_release_group(&op->dev, fsl_mc_err_probe);
edac_mc_free(mci);
return res;
}
int fsl_mc_err_remove(struct platform_device *op)
{
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
edac_dbg(0, "\n");
if (edac_op_state == EDAC_OPSTATE_INT) {
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
}
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
orig_ddr_err_disable);
ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
edac_mc_del_mc(&op->dev);
edac_mc_free(mci);
return 0;
}
| linux-master | drivers/edac/fsl_ddr_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
*
* This driver supports the memory controllers found on the Intel
* processor family Sandy Bridge.
*
* Copyright (c) 2011 by:
* Mauro Carvalho Chehab
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include "edac_module.h"
/* Static vars */
static LIST_HEAD(sbridge_edac_list);
/*
* Alter this version for the module when modifications are made
*/
#define SBRIDGE_REVISION " Ver: 1.1.2 "
#define EDAC_MOD_STR "sb_edac"
/*
* Debug macros
*/
#define sbridge_printk(level, fmt, arg...) \
edac_printk(level, "sbridge", fmt, ##arg)
#define sbridge_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
/*
* Get a bit field at register value <v>, from bit <lo> to bit <hi>
*/
#define GET_BITFIELD(v, lo, hi) \
(((v) & GENMASK_ULL(hi, lo)) >> (lo))
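/* e.g. GET_BITFIELD(0xabcd, 4, 7) == 0xc */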
/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
static const u32 sbridge_dram_rule[] = {
0x80, 0x88, 0x90, 0x98, 0xa0,
0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
};
static const u32 ibridge_dram_rule[] = {
0x60, 0x68, 0x70, 0x78, 0x80,
0x88, 0x90, 0x98, 0xa0, 0xa8,
0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
};
static const u32 knl_dram_rule[] = {
0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
0x100, 0x108, 0x110, 0x118, /* 20-23 */
};
#define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
#define A7MODE(reg) GET_BITFIELD(reg, 26, 26)
static char *show_dram_attr(u32 attr)
{
switch (attr) {
case 0:
return "DRAM";
case 1:
return "MMCFG";
case 2:
return "NXM";
default:
return "unknown";
}
}
static const u32 sbridge_interleave_list[] = {
0x84, 0x8c, 0x94, 0x9c, 0xa4,
0xac, 0xb4, 0xbc, 0xc4, 0xcc,
};
static const u32 ibridge_interleave_list[] = {
0x64, 0x6c, 0x74, 0x7c, 0x84,
0x8c, 0x94, 0x9c, 0xa4, 0xac,
0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
0xdc, 0xe4, 0xec, 0xf4, 0xfc,
};
static const u32 knl_interleave_list[] = {
0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
0x104, 0x10c, 0x114, 0x11c, /* 20-23 */
};
#define MAX_INTERLEAVE \
(max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list), \
max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list), \
ARRAY_SIZE(knl_interleave_list))))
struct interleave_pkg {
unsigned char start;
unsigned char end;
};
static const struct interleave_pkg sbridge_interleave_pkg[] = {
{ 0, 2 },
{ 3, 5 },
{ 8, 10 },
{ 11, 13 },
{ 16, 18 },
{ 19, 21 },
{ 24, 26 },
{ 27, 29 },
};
static const struct interleave_pkg ibridge_interleave_pkg[] = {
{ 0, 3 },
{ 4, 7 },
{ 8, 11 },
{ 12, 15 },
{ 16, 19 },
{ 20, 23 },
{ 24, 27 },
{ 28, 31 },
};
static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
int interleave)
{
return GET_BITFIELD(reg, table[interleave].start,
table[interleave].end);
}
/* Devices 12 Function 7 */
#define TOLM 0x80
#define TOHM 0x84
#define HASWELL_TOLM 0xd0
#define HASWELL_TOHM_0 0xd4
#define HASWELL_TOHM_1 0xd8
#define KNL_TOLM 0xd0
#define KNL_TOHM_0 0xd4
#define KNL_TOHM_1 0xd8
#define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
#define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
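/*
 * Both macros return the last byte address covered by the range: the
 * registers hold only the upper address bits, so the low 26 bits are
 * filled with ones.
 */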
/* Device 13 Function 6 */
#define SAD_TARGET 0xf0
#define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
#define SOURCE_ID_KNL(reg) GET_BITFIELD(reg, 12, 14)
#define SAD_CONTROL 0xf4
/* Device 14 function 0 */
static const u32 tad_dram_rule[] = {
0x40, 0x44, 0x48, 0x4c,
0x50, 0x54, 0x58, 0x5c,
0x60, 0x64, 0x68, 0x6c,
};
#define MAX_TAD ARRAY_SIZE(tad_dram_rule)
#define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
#define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
#define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
#define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
#define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
#define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
#define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
/* Device 15, function 0 */
#define MCMTR 0x7c
#define KNL_MCMTR 0x624
#define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
#define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
#define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
/* Device 15, function 1 */
#define RASENABLES 0xac
#define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
/* Device 15, functions 2-5 */
static const int mtr_regs[] = {
0x80, 0x84, 0x88,
};
static const int knl_mtr_reg = 0xb60;
#define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
#define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
#define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
#define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
#define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
static const u32 tad_ch_nilv_offset[] = {
0x90, 0x94, 0x98, 0x9c,
0xa0, 0xa4, 0xa8, 0xac,
0xb0, 0xb4, 0xb8, 0xbc,
};
#define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
#define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
static const u32 rir_way_limit[] = {
0x108, 0x10c, 0x110, 0x114, 0x118,
};
#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
#define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
#define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
#define MAX_RIR_WAY 8
static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};
#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
/* Device 16, functions 2-7 */
/*
* FIXME: Implement the error count reads directly
*/
#define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
#if 0 /* Currently unused*/
static const u32 correrrcnt[] = {
0x104, 0x108, 0x10c, 0x110,
};
static const u32 correrrthrsld[] = {
0x11c, 0x120, 0x124, 0x128,
};
#endif
#define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
/* Device 17, function 0 */
#define SB_RANK_CFG_A 0x0328
#define IB_RANK_CFG_A 0x0320
/*
* sbridge structs
*/
#define NUM_CHANNELS 6 /* Max channels per MC */
#define MAX_DIMMS 3 /* Max DIMMS per channel */
#define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */
#define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */
#define KNL_MAX_EDCS 8 /* Embedded DRAM controllers */
#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
enum type {
SANDY_BRIDGE,
IVY_BRIDGE,
HASWELL,
BROADWELL,
KNIGHTS_LANDING,
};
enum domain {
IMC0 = 0,
IMC1,
SOCK,
};
enum mirroring_mode {
NON_MIRRORING,
ADDR_RANGE_MIRRORING,
FULL_MIRRORING,
};
struct sbridge_pvt;
struct sbridge_info {
enum type type;
u32 mcmtr;
u32 rankcfgr;
u64 (*get_tolm)(struct sbridge_pvt *pvt);
u64 (*get_tohm)(struct sbridge_pvt *pvt);
u64 (*rir_limit)(u32 reg);
u64 (*sad_limit)(u32 reg);
u32 (*interleave_mode)(u32 reg);
u32 (*dram_attr)(u32 reg);
const u32 *dram_rule;
const u32 *interleave_list;
const struct interleave_pkg *interleave_pkg;
u8 max_sad;
u8 (*get_node_id)(struct sbridge_pvt *pvt);
u8 (*get_ha)(u8 bank);
enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
struct pci_dev *pci_vtd;
};
struct sbridge_channel {
u32 ranks;
u32 dimms;
struct dimm {
u32 rowbits;
u32 colbits;
u32 bank_xor_enable;
u32 amap_fine;
} dimm[MAX_DIMMS];
};
struct pci_id_descr {
int dev_id;
int optional;
enum domain dom;
};
struct pci_id_table {
const struct pci_id_descr *descr;
int n_devs_per_imc;
int n_devs_per_sock;
int n_imcs_per_sock;
enum type type;
};
struct sbridge_dev {
struct list_head list;
int seg;
u8 bus, mc;
u8 node_id, source_id;
struct pci_dev **pdev;
enum domain dom;
int n_devs;
int i_devs;
struct mem_ctl_info *mci;
};
struct knl_pvt {
struct pci_dev *pci_cha[KNL_MAX_CHAS];
struct pci_dev *pci_channel[KNL_MAX_CHANNELS];
struct pci_dev *pci_mc0;
struct pci_dev *pci_mc1;
struct pci_dev *pci_mc0_misc;
struct pci_dev *pci_mc1_misc;
struct pci_dev *pci_mc_info; /* tolm, tohm */
};
struct sbridge_pvt {
/* Devices per socket */
struct pci_dev *pci_ddrio;
struct pci_dev *pci_sad0, *pci_sad1;
struct pci_dev *pci_br0, *pci_br1;
/* Devices per memory controller */
struct pci_dev *pci_ha, *pci_ta, *pci_ras;
struct pci_dev *pci_tad[NUM_CHANNELS];
struct sbridge_dev *sbridge_dev;
struct sbridge_info info;
struct sbridge_channel channel[NUM_CHANNELS];
/* Memory type detection */
bool is_cur_addr_mirrored, is_lockstep, is_close_pg;
bool is_chan_hash;
enum mirroring_mode mirror_mode;
/* Memory description */
u64 tolm, tohm;
struct knl_pvt knl;
};
#define PCI_DESCR(device_id, opt, domain) \
.dev_id = (device_id), \
.optional = opt, \
.dom = domain
static const struct pci_id_descr pci_dev_descr_sbridge[] = {
/* Processor Home Agent */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0, IMC0) },
/* Memory controller */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
/* System Address Decoder */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0, SOCK) },
/* Broadcast Registers */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0, SOCK) },
};
#define PCI_ID_TABLE_ENTRY(A, N, M, T) { \
.descr = A, \
.n_devs_per_imc = N, \
.n_devs_per_sock = ARRAY_SIZE(A), \
.n_imcs_per_sock = M, \
.type = T \
}
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
{0,} /* 0 terminated list. */
};
/* This changes depending if 1HA or 2HA:
* 1HA:
* 0x0eb8 (17.0) is DDRIO0
* 2HA:
* 0x0ebc (17.4) is DDRIO0
*/
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc
/* pci ids */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead
#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2 0x0e6c
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3 0x0e6d
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
/* Processor Home Agent */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
/* Memory controller */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
/* Optional, mode 2HA */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
/* System Address Decoder */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0, SOCK) },
/* Broadcast Registers */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0, SOCK) },
};
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
{0,} /* 0 terminated list. */
};
/* Haswell support */
/* EN processor:
* - 1 IMC
* - 3 DDR3 channels, 2 DPC per channel
* EP processor:
* - 1 or 2 IMC
* - 4 DDR4 channels, 3 DPC per channel
* EP 4S processor:
* - 2 IMC
* - 4 DDR4 channels, 3 DPC per channel
* EX processor:
* - 2 IMC
* - each IMC interfaces with a SMI 2 channel
* - each SMI channel interfaces with a scalable memory buffer
* - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
*/
#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
#define HASWELL_HASYSDEFEATURE2 0x84
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM 0x2f71
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM 0x2f79
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
static const struct pci_id_descr pci_dev_descr_haswell[] = {
/* first item must be the HA */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1, SOCK) },
};
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
{0,} /* 0 terminated list. */
};
/* Knight's Landing Support */
/*
* KNL's memory channels are swizzled between memory controllers.
* MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
*/
#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
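/* e.g. knl_channel_remap(0, 1) == 4 (MC0 owns CH3-5), knl_channel_remap(1, 1) == 1 */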
/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840
/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN 0x7843
/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844
/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0 0x782a
/* SAD target - 1-29-1 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1 0x782b
/* Caching / Home Agent */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA 0x782c
/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM 0x7810
/*
* KNL differs from SB, IB, and Haswell in that it has multiple
* instances of the same device with the same device ID, so we handle that
* by creating as many copies in the table as we expect to find.
 * (Devices with the same ID must be grouped together.)
*/
static const struct pci_id_descr pci_dev_descr_knl[] = {
[0 ... 1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0, IMC0)},
[2 ... 7] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN, 0, IMC0) },
[8] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0, IMC0) },
[9] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
[10] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0, SOCK) },
[11] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0, SOCK) },
[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0, SOCK) },
};
static const struct pci_id_table pci_dev_descr_knl_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
{0,}
};
/*
* Broadwell support
*
* DE processor:
* - 1 IMC
* - 2 DDR3 channels, 2 DPC per channel
* EP processor:
* - 1 or 2 IMC
* - 4 DDR4 channels, 3 DPC per channel
* EP 4S processor:
* - 2 IMC
* - 4 DDR4 channels, 3 DPC per channel
* EX processor:
* - 2 IMC
* - each IMC interfaces with a SMI 2 channel
* - each SMI channel interfaces with a scalable memory buffer
* - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
*/
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1 0x6f60
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM 0x6f71
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA 0x6f68
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM 0x6f79
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
static const struct pci_id_descr pci_dev_descr_broadwell[] = {
/* first item must be the HA */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1, SOCK) },
};
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
{0,} /* 0 terminated list. */
};
/****************************************************************************
Ancillary status routines
****************************************************************************/
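/*
 * The DIMMMTR fields decoded below are log2-encoded (rows and columns with
 * a fixed offset); each helper returns the actual count, or -EINVAL if the
 * raw field value is out of range.
 */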
static inline int numrank(enum type type, u32 mtr)
{
int ranks = (1 << RANK_CNT_BITS(mtr));
int max = 4;
if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
max = 8;
if (ranks > max) {
edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
return -EINVAL;
}
return ranks;
}
static inline int numrow(u32 mtr)
{
int rows = (RANK_WIDTH_BITS(mtr) + 12);
if (rows < 13 || rows > 18) {
edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n",
rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
return -EINVAL;
}
return 1 << rows;
}
static inline int numcol(u32 mtr)
{
int cols = (COL_WIDTH_BITS(mtr) + 10);
if (cols > 12) {
edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
return -EINVAL;
}
return 1 << cols;
}
static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
int multi_bus,
struct sbridge_dev *prev)
{
struct sbridge_dev *sbridge_dev;
/*
* If we have devices scattered across several busses that pertain
* to the same memory controller, we'll lump them all together.
*/
if (multi_bus) {
return list_first_entry_or_null(&sbridge_edac_list,
struct sbridge_dev, list);
}
sbridge_dev = list_entry(prev ? prev->list.next
: sbridge_edac_list.next, struct sbridge_dev, list);
list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
(dom == SOCK || dom == sbridge_dev->dom))
return sbridge_dev;
}
return NULL;
}
static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
const struct pci_id_table *table)
{
struct sbridge_dev *sbridge_dev;
sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
if (!sbridge_dev)
return NULL;
sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
sizeof(*sbridge_dev->pdev),
GFP_KERNEL);
if (!sbridge_dev->pdev) {
kfree(sbridge_dev);
return NULL;
}
sbridge_dev->seg = seg;
sbridge_dev->bus = bus;
sbridge_dev->dom = dom;
sbridge_dev->n_devs = table->n_devs_per_imc;
list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
return sbridge_dev;
}
static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
list_del(&sbridge_dev->list);
kfree(sbridge_dev->pdev);
kfree(sbridge_dev);
}
static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{
u32 reg;
/* Address range is 32:28 */
	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
return GET_TOLM(reg);
}
static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{
u32 reg;
	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
return GET_TOHM(reg);
}
static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{
u32 reg;
	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
return GET_TOLM(reg);
}
static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{
u32 reg;
	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
return GET_TOHM(reg);
}
static u64 rir_limit(u32 reg)
{
return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
}
static u64 sad_limit(u32 reg)
{
return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
}
static u32 interleave_mode(u32 reg)
{
return GET_BITFIELD(reg, 1, 1);
}
static u32 dram_attr(u32 reg)
{
return GET_BITFIELD(reg, 2, 3);
}
static u64 knl_sad_limit(u32 reg)
{
return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
}
static u32 knl_interleave_mode(u32 reg)
{
return GET_BITFIELD(reg, 1, 2);
}
static const char * const knl_intlv_mode[] = {
"[8:6]", "[10:8]", "[14:12]", "[32:30]"
};
static const char *get_intlv_mode_str(u32 reg, enum type t)
{
if (t == KNIGHTS_LANDING)
return knl_intlv_mode[knl_interleave_mode(reg)];
else
return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
}
static u32 dram_attr_knl(u32 reg)
{
return GET_BITFIELD(reg, 3, 4);
}
static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
{
u32 reg;
enum mem_type mtype;
if (pvt->pci_ddrio) {
pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
				      &reg);
if (GET_BITFIELD(reg, 11, 11))
/* FIXME: Can also be LRDIMM */
mtype = MEM_RDDR3;
else
mtype = MEM_DDR3;
} else
mtype = MEM_UNKNOWN;
return mtype;
}
static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
{
u32 reg;
bool registered = false;
enum mem_type mtype = MEM_UNKNOWN;
if (!pvt->pci_ddrio)
goto out;
pci_read_config_dword(pvt->pci_ddrio,
			      HASWELL_DDRCRCLKCONTROLS, &reg);
/* Is_Rdimm */
if (GET_BITFIELD(reg, 16, 16))
registered = true;
	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
if (GET_BITFIELD(reg, 14, 14)) {
if (registered)
mtype = MEM_RDDR4;
else
mtype = MEM_DDR4;
} else {
if (registered)
mtype = MEM_RDDR3;
else
mtype = MEM_DDR3;
}
out:
return mtype;
}
static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
/* for KNL value is fixed */
return DEV_X16;
}
static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
/* there's no way to figure out */
return DEV_UNKNOWN;
}
static enum dev_type __ibridge_get_width(u32 mtr)
{
enum dev_type type = DEV_UNKNOWN;
switch (mtr) {
case 2:
type = DEV_X16;
break;
case 1:
type = DEV_X8;
break;
case 0:
type = DEV_X4;
break;
}
return type;
}
static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
/*
* ddr3_width on the documentation but also valid for DDR4 on
* Haswell
*/
return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
}
static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
/* ddr3_width on the documentation but also valid for DDR4 */
return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
}
static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
{
/* DDR4 RDIMMS and LRDIMMS are supported */
return MEM_RDDR4;
}
static u8 get_node_id(struct sbridge_pvt *pvt)
{
u32 reg;
	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
return GET_BITFIELD(reg, 0, 2);
}
static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
{
u32 reg;
	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
return GET_BITFIELD(reg, 0, 3);
}
static u8 knl_get_node_id(struct sbridge_pvt *pvt)
{
u32 reg;
	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
return GET_BITFIELD(reg, 0, 2);
}
/*
* Use the reporting bank number to determine which memory
* controller (also known as "ha" for "home agent"). Sandy
* Bridge only has one memory controller per socket, so the
* answer is always zero.
*/
static u8 sbridge_get_ha(u8 bank)
{
return 0;
}
/*
* On Ivy Bridge, Haswell and Broadwell the error may be in a
* home agent bank (7, 8), or one of the per-channel memory
* controller banks (9 .. 16).
*/
static u8 ibridge_get_ha(u8 bank)
{
switch (bank) {
case 7 ... 8:
return bank - 7;
case 9 ... 16:
return (bank - 9) / 4;
default:
return 0xff;
}
}
/* Not used, but included for safety/symmetry */
static u8 knl_get_ha(u8 bank)
{
return 0xff;
}
static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
u32 reg;
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}
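/*
 * On Haswell the TOHM value is split across two registers: bits 31:26 of
 * the first register form the low six bits of the field and the second
 * register supplies the upper bits; the low 26 address bits are then
 * filled with ones.
 */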
static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{
u64 rc;
u32 reg;
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
rc = GET_BITFIELD(reg, 26, 31);
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
rc = ((reg << 6) | rc) << 26;
return rc | 0x3ffffff;
}
static u64 knl_get_tolm(struct sbridge_pvt *pvt)
{
u32 reg;
	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}
static u64 knl_get_tohm(struct sbridge_pvt *pvt)
{
u64 rc;
u32 reg_lo, reg_hi;
	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
rc = ((u64)reg_hi << 32) | reg_lo;
return rc | 0x3ffffff;
}
static u64 haswell_rir_limit(u32 reg)
{
return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
}
static inline u8 sad_pkg_socket(u8 pkg)
{
/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
return ((pkg >> 3) << 2) | (pkg & 0x3);
}
static inline u8 sad_pkg_ha(u8 pkg)
{
return (pkg >> 2) & 0x1;
}
static int haswell_chan_hash(int idx, u64 addr)
{
int i;
/*
* XOR even bits from 12:26 to bit0 of idx,
* odd bits from 13:27 to bit1
*/
for (i = 12; i < 28; i += 2)
idx ^= (addr >> i) & 3;
return idx;
}
/* Low bits of TAD limit, and some metadata. */
static const u32 knl_tad_dram_limit_lo[] = {
0x400, 0x500, 0x600, 0x700,
0x800, 0x900, 0xa00, 0xb00,
};
/* Low bits of TAD offset. */
static const u32 knl_tad_dram_offset_lo[] = {
0x404, 0x504, 0x604, 0x704,
0x804, 0x904, 0xa04, 0xb04,
};
/* High 16 bits of TAD limit and offset. */
static const u32 knl_tad_dram_hi[] = {
0x408, 0x508, 0x608, 0x708,
0x808, 0x908, 0xa08, 0xb08,
};
/* Number of ways a tad entry is interleaved. */
static const u32 knl_tad_ways[] = {
8, 6, 4, 3, 2, 1,
};
/*
* Retrieve the n'th Target Address Decode table entry
* from the memory controller's TAD table.
*
* @pvt: driver private data
* @entry: which entry you want to retrieve
* @mc: which memory controller (0 or 1)
* @offset: output tad range offset
* @limit: output address of first byte above tad range
* @ways: output number of interleave ways
*
* The offset value has curious semantics. It's a sort of running total
* of the sizes of all the memory regions that aren't mapped in this
* tad table.
*/
static int knl_get_tad(const struct sbridge_pvt *pvt,
const int entry,
const int mc,
u64 *offset,
u64 *limit,
int *ways)
{
u32 reg_limit_lo, reg_offset_lo, reg_hi;
struct pci_dev *pci_mc;
int way_id;
switch (mc) {
case 0:
pci_mc = pvt->knl.pci_mc0;
break;
case 1:
pci_mc = pvt->knl.pci_mc1;
break;
default:
WARN_ON(1);
return -EINVAL;
}
pci_read_config_dword(pci_mc,
			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
pci_read_config_dword(pci_mc,
			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
pci_read_config_dword(pci_mc,
			knl_tad_dram_hi[entry], &reg_hi);
/* Is this TAD entry enabled? */
if (!GET_BITFIELD(reg_limit_lo, 0, 0))
return -ENODEV;
way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
if (way_id < ARRAY_SIZE(knl_tad_ways)) {
*ways = knl_tad_ways[way_id];
} else {
*ways = 0;
sbridge_printk(KERN_ERR,
"Unexpected value %d in mc_tad_limit_lo wayness field\n",
way_id);
return -ENODEV;
}
/*
* The least significant 6 bits of base and limit are truncated.
* For limit, we fill the missing bits with 1s.
*/
*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
((u64) GET_BITFIELD(reg_hi, 0, 15) << 32);
*limit = ((u64) GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 |
((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
return 0;
}
/* Determine which memory controller is responsible for a given channel. */
static int knl_channel_mc(int channel)
{
WARN_ON(channel < 0 || channel >= 6);
return channel < 3 ? 1 : 0;
}
/*
* Get the Nth entry from EDC_ROUTE_TABLE register.
* (This is the per-tile mapping of logical interleave targets to
* physical EDC modules.)
*
* entry 0: 0:2
* 1: 3:5
* 2: 6:8
* 3: 9:11
* 4: 12:14
* 5: 15:17
* 6: 18:20
* 7: 21:23
* reserved: 24:31
*/
static u32 knl_get_edc_route(int entry, u32 reg)
{
WARN_ON(entry >= KNL_MAX_EDCS);
return GET_BITFIELD(reg, entry*3, (entry*3)+2);
}
/*
* Get the Nth entry from MC_ROUTE_TABLE register.
* (This is the per-tile mapping of logical interleave targets to
* physical DRAM channels modules.)
*
* entry 0: mc 0:2 channel 18:19
* 1: mc 3:5 channel 20:21
* 2: mc 6:8 channel 22:23
* 3: mc 9:11 channel 24:25
* 4: mc 12:14 channel 26:27
* 5: mc 15:17 channel 28:29
* reserved: 30:31
*
* Though we have 3 bits to identify the MC, we should only see
* the values 0 or 1.
*/
static u32 knl_get_mc_route(int entry, u32 reg)
{
int mc, chan;
WARN_ON(entry >= KNL_MAX_CHANNELS);
mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
return knl_channel_remap(mc, chan);
}
/*
* Render the EDC_ROUTE register in human-readable form.
* Output string s should be at least KNL_MAX_EDCS*2 bytes.
*/
static void knl_show_edc_route(u32 reg, char *s)
{
int i;
for (i = 0; i < KNL_MAX_EDCS; i++) {
s[i*2] = knl_get_edc_route(i, reg) + '0';
s[i*2+1] = '-';
}
s[KNL_MAX_EDCS*2 - 1] = '\0';
}
/*
* Render the MC_ROUTE register in human-readable form.
* Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
*/
static void knl_show_mc_route(u32 reg, char *s)
{
int i;
for (i = 0; i < KNL_MAX_CHANNELS; i++) {
s[i*2] = knl_get_mc_route(i, reg) + '0';
s[i*2+1] = '-';
}
s[KNL_MAX_CHANNELS*2 - 1] = '\0';
}
#define KNL_EDC_ROUTE 0xb8
#define KNL_MC_ROUTE 0xb4
/* Is this dram rule backed by regular DRAM in flat mode? */
#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
/* Is this dram rule cached? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
/* Is this rule backed by edc ? */
#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
/* Is this rule backed by DRAM, cacheable in EDRAM? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
/* Is this rule mod3? */
#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
/*
* Figure out how big our RAM modules are.
*
* The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
* have to figure this out from the SAD rules, interleave lists, route tables,
* and TAD rules.
*
* SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
* inspect the TAD rules to figure out how large the SAD regions really are.
*
* When we know the real size of a SAD region and how many ways it's
* interleaved, we know the individual contribution of each channel to
* TAD is size/ways.
*
* Finally, we have to check whether each channel participates in each SAD
* region.
*
* Fortunately, KNL only supports one DIMM per channel, so once we know how
* much memory the channel uses, we know the DIMM is at least that large.
* (The BIOS might possibly choose not to map all available memory, in which
* case we will underreport the size of the DIMM.)
*
* In theory, we could try to determine the EDC sizes as well, but that would
* only work in flat mode, not in cache mode.
*
* @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
* elements)
*/
static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{
u64 sad_base, sad_limit = 0;
u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
int sad_rule = 0;
int tad_rule = 0;
int intrlv_ways, tad_ways;
u32 first_pkg, pkg;
int i;
u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
u32 dram_rule, interleave_reg;
u32 mc_route_reg[KNL_MAX_CHAS];
u32 edc_route_reg[KNL_MAX_CHAS];
int edram_only;
char edc_route_string[KNL_MAX_EDCS*2];
char mc_route_string[KNL_MAX_CHANNELS*2];
int cur_reg_start;
int mc;
int channel;
int participants[KNL_MAX_CHANNELS];
for (i = 0; i < KNL_MAX_CHANNELS; i++)
mc_sizes[i] = 0;
/* Read the EDC route table in each CHA. */
cur_reg_start = 0;
for (i = 0; i < KNL_MAX_CHAS; i++) {
pci_read_config_dword(pvt->knl.pci_cha[i],
KNL_EDC_ROUTE, &edc_route_reg[i]);
if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
knl_show_edc_route(edc_route_reg[i-1],
edc_route_string);
if (cur_reg_start == i-1)
edac_dbg(0, "edc route table for CHA %d: %s\n",
cur_reg_start, edc_route_string);
else
edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
cur_reg_start, i-1, edc_route_string);
cur_reg_start = i;
}
}
knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
if (cur_reg_start == i-1)
edac_dbg(0, "edc route table for CHA %d: %s\n",
cur_reg_start, edc_route_string);
else
edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
cur_reg_start, i-1, edc_route_string);
/* Read the MC route table in each CHA. */
cur_reg_start = 0;
for (i = 0; i < KNL_MAX_CHAS; i++) {
pci_read_config_dword(pvt->knl.pci_cha[i],
KNL_MC_ROUTE, &mc_route_reg[i]);
if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
if (cur_reg_start == i-1)
edac_dbg(0, "mc route table for CHA %d: %s\n",
cur_reg_start, mc_route_string);
else
edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
cur_reg_start, i-1, mc_route_string);
cur_reg_start = i;
}
}
knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
if (cur_reg_start == i-1)
edac_dbg(0, "mc route table for CHA %d: %s\n",
cur_reg_start, mc_route_string);
else
edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
cur_reg_start, i-1, mc_route_string);
/* Process DRAM rules */
for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
/* previous limit becomes the new base */
sad_base = sad_limit;
pci_read_config_dword(pvt->pci_sad0,
pvt->info.dram_rule[sad_rule], &dram_rule);
if (!DRAM_RULE_ENABLE(dram_rule))
break;
edram_only = KNL_EDRAM_ONLY(dram_rule);
sad_limit = pvt->info.sad_limit(dram_rule)+1;
pci_read_config_dword(pvt->pci_sad0,
pvt->info.interleave_list[sad_rule], &interleave_reg);
/*
* Find out how many ways this dram rule is interleaved.
* We stop when we see the first channel again.
*/
first_pkg = sad_pkg(pvt->info.interleave_pkg,
interleave_reg, 0);
for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
pkg = sad_pkg(pvt->info.interleave_pkg,
interleave_reg, intrlv_ways);
if ((pkg & 0x8) == 0) {
/*
* 0 bit means memory is non-local,
* which KNL doesn't support
*/
edac_dbg(0, "Unexpected interleave target %d\n",
pkg);
return -1;
}
if (pkg == first_pkg)
break;
}
if (KNL_MOD3(dram_rule))
intrlv_ways *= 3;
edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
sad_rule,
sad_base,
sad_limit,
intrlv_ways,
edram_only ? ", EDRAM" : "");
/*
* Find out how big the SAD region really is by iterating
* over TAD tables (SAD regions may contain holes).
* Each memory controller might have a different TAD table, so
* we have to look at both.
*
* Livespace is the memory that's mapped in this TAD table,
* deadspace is the holes (this could be the MMIO hole, or it
* could be memory that's mapped by the other TAD table but
* not this one).
*/
for (mc = 0; mc < 2; mc++) {
sad_actual_size[mc] = 0;
tad_livespace = 0;
for (tad_rule = 0;
tad_rule < ARRAY_SIZE(
knl_tad_dram_limit_lo);
tad_rule++) {
if (knl_get_tad(pvt,
tad_rule,
mc,
&tad_deadspace,
&tad_limit,
&tad_ways))
break;
tad_size = (tad_limit+1) -
(tad_livespace + tad_deadspace);
tad_livespace += tad_size;
tad_base = (tad_limit+1) - tad_size;
if (tad_base < sad_base) {
if (tad_limit > sad_base)
edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
} else if (tad_base < sad_limit) {
if (tad_limit+1 > sad_limit) {
edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
} else {
/* TAD region is completely inside SAD region */
edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
tad_rule, tad_base,
tad_limit, tad_size,
mc);
sad_actual_size[mc] += tad_size;
}
}
}
}
for (mc = 0; mc < 2; mc++) {
edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
mc, sad_actual_size[mc], sad_actual_size[mc]);
}
/* Ignore EDRAM rule */
if (edram_only)
continue;
/* Figure out which channels participate in interleave. */
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
participants[channel] = 0;
/* For each channel, does at least one CHA have
* this channel mapped to the given target?
*/
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
int target;
int cha;
for (target = 0; target < KNL_MAX_CHANNELS; target++) {
for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel
&& !participants[channel]) {
participants[channel] = 1;
break;
}
}
}
}
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
mc = knl_channel_mc(channel);
if (participants[channel]) {
edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
channel,
sad_actual_size[mc]/intrlv_ways,
sad_rule);
mc_sizes[channel] +=
sad_actual_size[mc]/intrlv_ways;
}
}
}
return 0;
}
static void get_source_id(struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
u32 reg;
if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
pvt->info.type == KNIGHTS_LANDING)
pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®);
else
pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®);
if (pvt->info.type == KNIGHTS_LANDING)
pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
else
pvt->sbridge_dev->source_id = SOURCE_ID(reg);
}
static int __populate_dimms(struct mem_ctl_info *mci,
u64 knl_mc_sizes[KNL_MAX_CHANNELS],
enum edac_type mode)
{
struct sbridge_pvt *pvt = mci->pvt_info;
int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
: NUM_CHANNELS;
unsigned int i, j, banks, ranks, rows, cols, npages;
struct dimm_info *dimm;
enum mem_type mtype;
u64 size;
mtype = pvt->info.get_memory_type(pvt);
if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
edac_dbg(0, "Memory is registered\n");
else if (mtype == MEM_UNKNOWN)
edac_dbg(0, "Cannot determine memory type\n");
else
edac_dbg(0, "Memory is unregistered\n");
if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
banks = 16;
else
banks = 8;
for (i = 0; i < channels; i++) {
u32 mtr, amap = 0;
int max_dimms_per_channel;
if (pvt->info.type == KNIGHTS_LANDING) {
max_dimms_per_channel = 1;
if (!pvt->knl.pci_channel[i])
continue;
} else {
max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
if (!pvt->pci_tad[i])
continue;
pci_read_config_dword(pvt->pci_tad[i], 0x8c, &amap);
}
for (j = 0; j < max_dimms_per_channel; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
if (pvt->info.type == KNIGHTS_LANDING) {
pci_read_config_dword(pvt->knl.pci_channel[i],
knl_mtr_reg, &mtr);
} else {
pci_read_config_dword(pvt->pci_tad[i],
mtr_regs[j], &mtr);
}
edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
if (IS_DIMM_PRESENT(mtr)) {
if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
pvt->sbridge_dev->source_id,
pvt->sbridge_dev->dom, i);
return -ENODEV;
}
pvt->channel[i].dimms++;
ranks = numrank(pvt->info.type, mtr);
if (pvt->info.type == KNIGHTS_LANDING) {
/* For DDR4, this is fixed. */
cols = 1 << 10;
rows = knl_mc_sizes[i] /
((u64) cols * ranks * banks * 8);
} else {
rows = numrow(mtr);
cols = numcol(mtr);
}
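				/*
				 * rows * cols * banks * ranks counts 8-byte
				 * locations; shifting by (20 - 3) converts
				 * that count to MiB (assuming 8 bytes per
				 * location).
				 */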
size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
size, npages,
banks, ranks, rows, cols);
dimm->nr_pages = npages;
dimm->grain = 32;
dimm->dtype = pvt->info.get_width(pvt, mtr);
dimm->mtype = mtype;
dimm->edac_mode = mode;
pvt->channel[i].dimm[j].rowbits = order_base_2(rows);
pvt->channel[i].dimm[j].colbits = order_base_2(cols);
pvt->channel[i].dimm[j].bank_xor_enable =
GET_BITFIELD(pvt->info.mcmtr, 9, 9);
pvt->channel[i].dimm[j].amap_fine = GET_BITFIELD(amap, 0, 0);
snprintf(dimm->label, sizeof(dimm->label),
"CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
}
}
}
return 0;
}
static int get_dimm_config(struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
u64 knl_mc_sizes[KNL_MAX_CHANNELS];
enum edac_type mode;
u32 reg;
pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
pvt->sbridge_dev->mc,
pvt->sbridge_dev->node_id,
pvt->sbridge_dev->source_id);
/* KNL doesn't support mirroring or lockstep,
* and is always closed page
*/
if (pvt->info.type == KNIGHTS_LANDING) {
mode = EDAC_S4ECD4ED;
pvt->mirror_mode = NON_MIRRORING;
pvt->is_cur_addr_mirrored = false;
if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
return -1;
if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
edac_dbg(0, "Failed to read KNL_MCMTR register\n");
return -ENODEV;
}
} else {
if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, ®)) {
edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
return -ENODEV;
}
pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
if (GET_BITFIELD(reg, 28, 28)) {
pvt->mirror_mode = ADDR_RANGE_MIRRORING;
edac_dbg(0, "Address range partial memory mirroring is enabled\n");
goto next;
}
}
if (pci_read_config_dword(pvt->pci_ras, RASENABLES, ®)) {
edac_dbg(0, "Failed to read RASENABLES register\n");
return -ENODEV;
}
if (IS_MIRROR_ENABLED(reg)) {
pvt->mirror_mode = FULL_MIRRORING;
edac_dbg(0, "Full memory mirroring is enabled\n");
} else {
pvt->mirror_mode = NON_MIRRORING;
edac_dbg(0, "Memory mirroring is disabled\n");
}
next:
if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
edac_dbg(0, "Failed to read MCMTR register\n");
return -ENODEV;
}
if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
edac_dbg(0, "Lockstep is enabled\n");
mode = EDAC_S8ECD8ED;
pvt->is_lockstep = true;
} else {
edac_dbg(0, "Lockstep is disabled\n");
mode = EDAC_S4ECD4ED;
pvt->is_lockstep = false;
}
if (IS_CLOSE_PG(pvt->info.mcmtr)) {
edac_dbg(0, "address map is on closed page mode\n");
pvt->is_close_pg = true;
} else {
edac_dbg(0, "address map is on open page mode\n");
pvt->is_close_pg = false;
}
}
return __populate_dimms(mci, knl_mc_sizes, mode);
}
static void get_memory_layout(const struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
int i, j, k, n_sads, n_tads, sad_interl;
u32 reg;
u64 limit, prv = 0;
u64 tmp_mb;
u32 gb, mb;
u32 rir_way;
/*
* Step 1) Get TOLM/TOHM ranges
*/
pvt->tolm = pvt->info.get_tolm(pvt);
tmp_mb = (1 + pvt->tolm) >> 20;
gb = div_u64_rem(tmp_mb, 1024, &mb);
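	/*
	 * Sizes are printed as GiB with an approximate millesimal fraction:
	 * gb is tmp_mb / 1024 and (mb * 1000) / 1024 scales the remainder
	 * to 0..999.
	 */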
edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
gb, (mb*1000)/1024, (u64)pvt->tolm);
/* Address range is already 45:25 */
pvt->tohm = pvt->info.get_tohm(pvt);
tmp_mb = (1 + pvt->tohm) >> 20;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
gb, (mb*1000)/1024, (u64)pvt->tohm);
/*
* Step 2) Get SAD range and SAD Interleave list
* TAD registers contain the interleave wayness. However, it
* seems simpler to just discover it indirectly, with the
	 * algorithm below.
*/
prv = 0;
for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
/* SAD_LIMIT Address range is 45:26 */
pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
®);
limit = pvt->info.sad_limit(reg);
if (!DRAM_RULE_ENABLE(reg))
continue;
if (limit <= prv)
break;
tmp_mb = (limit + 1) >> 20;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
n_sads,
show_dram_attr(pvt->info.dram_attr(reg)),
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
get_intlv_mode_str(reg, pvt->info.type),
reg);
prv = limit;
pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
®);
sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
for (j = 0; j < 8; j++) {
u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
if (j > 0 && sad_interl == pkg)
break;
edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
n_sads, j, pkg);
}
}
if (pvt->info.type == KNIGHTS_LANDING)
return;
/*
* Step 3) Get TAD range
*/
prv = 0;
for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], ®);
limit = TAD_LIMIT(reg);
if (limit <= prv)
break;
tmp_mb = (limit + 1) >> 20;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)(1 << TAD_SOCK(reg)),
(u32)TAD_CH(reg) + 1,
(u32)TAD_TGT0(reg),
(u32)TAD_TGT1(reg),
(u32)TAD_TGT2(reg),
(u32)TAD_TGT3(reg),
reg);
prv = limit;
}
/*
	 * Step 4) Get TAD offsets, for each channel
*/
for (i = 0; i < NUM_CHANNELS; i++) {
if (!pvt->channel[i].dimms)
continue;
for (j = 0; j < n_tads; j++) {
pci_read_config_dword(pvt->pci_tad[i],
tad_ch_nilv_offset[j],
®);
tmp_mb = TAD_OFFSET(reg) >> 20;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
i, j,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
reg);
}
}
/*
	 * Step 5) Get RIR wayness/limit, for each channel
*/
for (i = 0; i < NUM_CHANNELS; i++) {
if (!pvt->channel[i].dimms)
continue;
for (j = 0; j < MAX_RIR_RANGES; j++) {
pci_read_config_dword(pvt->pci_tad[i],
rir_way_limit[j],
®);
if (!IS_RIR_VALID(reg))
continue;
tmp_mb = pvt->info.rir_limit(reg) >> 20;
rir_way = 1 << RIR_WAY(reg);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
i, j,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
rir_way,
reg);
for (k = 0; k < rir_way; k++) {
pci_read_config_dword(pvt->pci_tad[i],
rir_offset[j][k],
®);
tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)RIR_RNK_TGT(pvt->info.type, reg),
reg);
}
}
}
}
static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
{
struct sbridge_dev *sbridge_dev;
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
return sbridge_dev->mci;
}
return NULL;
}
static u8 sb_close_row[] = {
15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
};
static u8 sb_close_column[] = {
3, 4, 5, 14, 19, 23, 24, 25, 26, 27
};
static u8 sb_open_row[] = {
14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
};
static u8 sb_open_column[] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 12
};
static u8 sb_open_fine_column[] = {
3, 4, 5, 7, 8, 9, 10, 11, 12, 13
};
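/*
 * Gather the rank-address bits listed in @bits (LSB first) into a
 * contiguous value. For example (values assumed for illustration),
 * with bits[] = {3, 4, 5} and addr = 0x18 (bits 3 and 4 set), the
 * result is 0b011 = 3.
 */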
static int sb_bits(u64 addr, int nbits, u8 *bits)
{
int i, res = 0;
for (i = 0; i < nbits; i++)
res |= ((addr >> bits[i]) & 1) << i;
return res;
}
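/*
 * Build a 2-bit bank (or bank group) number from address bits b0/b1,
 * optionally XORed with bits x0/x1 when bank XOR is enabled.
 */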
static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
{
int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
if (do_xor)
ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
return ret;
}
static bool sb_decode_ddr4(struct mem_ctl_info *mci, int ch, u8 rank,
u64 rank_addr, char *msg)
{
int dimmno = 0;
int row, col, bank_address, bank_group;
struct sbridge_pvt *pvt;
u32 bg0 = 0, rowbits = 0, colbits = 0;
u32 amap_fine = 0, bank_xor_enable = 0;
dimmno = (rank < 12) ? rank / 4 : 2;
pvt = mci->pvt_info;
amap_fine = pvt->channel[ch].dimm[dimmno].amap_fine;
bg0 = amap_fine ? 6 : 13;
rowbits = pvt->channel[ch].dimm[dimmno].rowbits;
colbits = pvt->channel[ch].dimm[dimmno].colbits;
bank_xor_enable = pvt->channel[ch].dimm[dimmno].bank_xor_enable;
if (pvt->is_lockstep) {
pr_warn_once("LockStep row/column decode is not supported yet!\n");
msg[0] = '\0';
return false;
}
if (pvt->is_close_pg) {
row = sb_bits(rank_addr, rowbits, sb_close_row);
col = sb_bits(rank_addr, colbits, sb_close_column);
col |= 0x400; /* C10 is autoprecharge, always set */
bank_address = sb_bank_bits(rank_addr, 8, 9, bank_xor_enable, 22, 28);
bank_group = sb_bank_bits(rank_addr, 6, 7, bank_xor_enable, 20, 21);
} else {
row = sb_bits(rank_addr, rowbits, sb_open_row);
if (amap_fine)
col = sb_bits(rank_addr, colbits, sb_open_fine_column);
else
col = sb_bits(rank_addr, colbits, sb_open_column);
bank_address = sb_bank_bits(rank_addr, 18, 19, bank_xor_enable, 22, 23);
bank_group = sb_bank_bits(rank_addr, bg0, 17, bank_xor_enable, 20, 21);
}
row &= (1u << rowbits) - 1;
sprintf(msg, "row:0x%x col:0x%x bank_addr:%d bank_group:%d",
row, col, bank_address, bank_group);
return true;
}
static bool sb_decode_ddr3(struct mem_ctl_info *mci, int ch, u8 rank,
u64 rank_addr, char *msg)
{
	pr_warn_once("DDR3 row/column decode not supported yet!\n");
msg[0] = '\0';
return false;
}
static int get_memory_error_data(struct mem_ctl_info *mci,
u64 addr,
u8 *socket, u8 *ha,
long *channel_mask,
u8 *rank,
char **area_type, char *msg)
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pci_ha;
int n_rir, n_sads, n_tads, sad_way, sck_xch;
int sad_interl, idx, base_ch;
int interleave_mode, shiftup = 0;
unsigned int sad_interleave[MAX_INTERLEAVE];
u32 reg, dram_rule;
u8 ch_way, sck_way, pkg, sad_ha = 0, rankid = 0;
u32 tad_offset;
u32 rir_way;
u32 mb, gb;
u64 ch_addr, offset, limit = 0, prv = 0;
u64 rank_addr;
enum mem_type mtype;
/*
* Step 0) Check if the address is at special memory ranges
* The check bellow is probably enough to fill all cases where
* the error is not inside a memory, except for the legacy
* range (e. g. VGA addresses). It is unlikely, however, that the
* memory controller would generate an error on that range.
*/
if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
return -EINVAL;
}
if (addr >= (u64)pvt->tohm) {
sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
return -EINVAL;
}
/*
* Step 1) Get socket
*/
for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
®);
if (!DRAM_RULE_ENABLE(reg))
continue;
limit = pvt->info.sad_limit(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory socket");
return -EINVAL;
}
if (addr <= limit)
break;
prv = limit;
}
if (n_sads == pvt->info.max_sad) {
sprintf(msg, "Can't discover the memory socket");
return -EINVAL;
}
dram_rule = reg;
*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
interleave_mode = pvt->info.interleave_mode(dram_rule);
pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
®);
if (pvt->info.type == SANDY_BRIDGE) {
sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
for (sad_way = 0; sad_way < 8; sad_way++) {
u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
if (sad_way > 0 && sad_interl == pkg)
break;
sad_interleave[sad_way] = pkg;
edac_dbg(0, "SAD interleave #%d: %d\n",
sad_way, sad_interleave[sad_way]);
}
edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
pvt->sbridge_dev->mc,
n_sads,
addr,
limit,
sad_way + 7,
!interleave_mode ? "" : "XOR[18:16]");
if (interleave_mode)
idx = ((addr >> 6) ^ (addr >> 16)) & 7;
else
idx = (addr >> 6) & 7;
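		/*
		 * Reduce the 3-bit index according to the discovered
		 * socket interleave wayness.
		 */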
switch (sad_way) {
case 1:
idx = 0;
break;
case 2:
idx = idx & 1;
break;
case 4:
idx = idx & 3;
break;
case 8:
break;
default:
sprintf(msg, "Can't discover socket interleave");
return -EINVAL;
}
*socket = sad_interleave[idx];
edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
idx, sad_way, *socket);
} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
int bits, a7mode = A7MODE(dram_rule);
if (a7mode) {
/* A7 mode swaps P9 with P6 */
bits = GET_BITFIELD(addr, 7, 8) << 1;
bits |= GET_BITFIELD(addr, 9, 9);
} else
bits = GET_BITFIELD(addr, 6, 8);
if (interleave_mode == 0) {
/* interleave mode will XOR {8,7,6} with {18,17,16} */
idx = GET_BITFIELD(addr, 16, 18);
idx ^= bits;
} else
idx = bits;
pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
*socket = sad_pkg_socket(pkg);
sad_ha = sad_pkg_ha(pkg);
if (a7mode) {
/* MCChanShiftUpEnable */
pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, ®);
shiftup = GET_BITFIELD(reg, 22, 22);
}
edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
idx, *socket, sad_ha, shiftup);
} else {
/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
idx = (addr >> 6) & 7;
pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
*socket = sad_pkg_socket(pkg);
sad_ha = sad_pkg_ha(pkg);
edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
idx, *socket, sad_ha);
}
*ha = sad_ha;
/*
* Move to the proper node structure, in order to access the
* right PCI registers
*/
new_mci = get_mci_for_node_id(*socket, sad_ha);
if (!new_mci) {
sprintf(msg, "Struct for socket #%u wasn't initialized",
*socket);
return -EINVAL;
}
mci = new_mci;
pvt = mci->pvt_info;
/*
* Step 2) Get memory channel
*/
prv = 0;
pci_ha = pvt->pci_ha;
for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®);
limit = TAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory channel");
return -EINVAL;
}
if (addr <= limit)
break;
prv = limit;
}
if (n_tads == MAX_TAD) {
sprintf(msg, "Can't discover the memory channel");
return -EINVAL;
}
ch_way = TAD_CH(reg) + 1;
sck_way = TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
else {
idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
if (pvt->is_chan_hash)
idx = haswell_chan_hash(idx, addr);
}
idx = idx % ch_way;
/*
* FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
*/
switch (idx) {
case 0:
base_ch = TAD_TGT0(reg);
break;
case 1:
base_ch = TAD_TGT1(reg);
break;
case 2:
base_ch = TAD_TGT2(reg);
break;
case 3:
base_ch = TAD_TGT3(reg);
break;
default:
sprintf(msg, "Can't discover the TAD target");
return -EINVAL;
}
*channel_mask = 1 << base_ch;
pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
if (pvt->mirror_mode == FULL_MIRRORING ||
(pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
*channel_mask |= 1 << ((base_ch + 2) % 4);
switch(ch_way) {
case 2:
case 4:
sck_xch = (1 << sck_way) * (ch_way >> 1);
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
return -EINVAL;
}
pvt->is_cur_addr_mirrored = true;
} else {
sck_xch = (1 << sck_way) * ch_way;
pvt->is_cur_addr_mirrored = false;
}
if (pvt->is_lockstep)
*channel_mask |= 1 << ((base_ch + 1) % 4);
offset = TAD_OFFSET(tad_offset);
edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
n_tads,
addr,
limit,
sck_way,
ch_way,
offset,
idx,
base_ch,
*channel_mask);
/* Calculate channel address */
/* Remove the TAD offset */
if (offset > addr) {
sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
offset, addr);
return -EINVAL;
}
ch_addr = addr - offset;
ch_addr >>= (6 + shiftup);
ch_addr /= sck_xch;
ch_addr <<= (6 + shiftup);
ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
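	/*
	 * Worked example (values assumed for illustration): with
	 * shiftup = 0, sck_xch = 2, offset = 0 and addr = 0x1040, bits
	 * [5:0] (0x00) are kept, bits [63:6] (0x41) are divided by 2,
	 * giving ch_addr = 0x20 << 6 = 0x800.
	 */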
/*
* Step 3) Decode rank
*/
for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], ®);
if (!IS_RIR_VALID(reg))
continue;
limit = pvt->info.rir_limit(reg);
gb = div_u64_rem(limit >> 20, 1024, &mb);
edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
n_rir,
gb, (mb*1000)/1024,
limit,
1 << RIR_WAY(reg));
if (ch_addr <= limit)
break;
}
if (n_rir == MAX_RIR_RANGES) {
sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
ch_addr);
return -EINVAL;
}
rir_way = RIR_WAY(reg);
if (pvt->is_close_pg)
idx = (ch_addr >> 6);
else
idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */
idx %= 1 << rir_way;
pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], ®);
*rank = RIR_RNK_TGT(pvt->info.type, reg);
if (pvt->info.type == BROADWELL) {
if (pvt->is_close_pg)
shiftup = 6;
else
shiftup = 13;
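		/*
		 * Undo the RIR interleave: keep the low shiftup bits,
		 * divide the remaining bits by the interleave ways, then
		 * subtract the RIR offset to get the rank-local address.
		 */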
rank_addr = ch_addr >> shiftup;
rank_addr /= (1 << rir_way);
rank_addr <<= shiftup;
rank_addr |= ch_addr & GENMASK_ULL(shiftup - 1, 0);
rank_addr -= RIR_OFFSET(pvt->info.type, reg);
mtype = pvt->info.get_memory_type(pvt);
rankid = *rank;
if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
sb_decode_ddr4(mci, base_ch, rankid, rank_addr, msg);
else
sb_decode_ddr3(mci, base_ch, rankid, rank_addr, msg);
} else {
msg[0] = '\0';
}
edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
n_rir,
ch_addr,
limit,
rir_way,
idx);
return 0;
}
static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
const struct mce *m, u8 *socket,
u8 *ha, long *channel_mask,
char *msg)
{
u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt;
struct pci_dev *pci_ha;
bool tad0;
if (channel >= NUM_CHANNELS) {
sprintf(msg, "Invalid channel 0x%x", channel);
return -EINVAL;
}
pvt = mci->pvt_info;
if (!pvt->info.get_ha) {
sprintf(msg, "No get_ha()");
return -EINVAL;
}
*ha = pvt->info.get_ha(m->bank);
if (*ha != 0 && *ha != 1) {
sprintf(msg, "Impossible bank %d", m->bank);
return -EINVAL;
}
*socket = m->socketid;
new_mci = get_mci_for_node_id(*socket, *ha);
if (!new_mci) {
strcpy(msg, "mci socket got corrupted!");
return -EINVAL;
}
pvt = new_mci->pvt_info;
pci_ha = pvt->pci_ha;
pci_read_config_dword(pci_ha, tad_dram_rule[0], ®);
tad0 = m->addr <= TAD_LIMIT(reg);
*channel_mask = 1 << channel;
if (pvt->mirror_mode == FULL_MIRRORING ||
(pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
*channel_mask |= 1 << ((channel + 2) % 4);
pvt->is_cur_addr_mirrored = true;
} else {
pvt->is_cur_addr_mirrored = false;
}
if (pvt->is_lockstep)
*channel_mask |= 1 << ((channel + 1) % 4);
return 0;
}
/****************************************************************************
Device initialization routines: put/get, init/exit
****************************************************************************/
/*
* sbridge_put_all_devices 'put' all the devices that we have
* reserved via 'get'
*/
static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
{
int i;
edac_dbg(0, "\n");
for (i = 0; i < sbridge_dev->n_devs; i++) {
struct pci_dev *pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
edac_dbg(0, "Removing dev %02x:%02x.%d\n",
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
pci_dev_put(pdev);
}
}
static void sbridge_put_all_devices(void)
{
struct sbridge_dev *sbridge_dev, *tmp;
list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
sbridge_put_devices(sbridge_dev);
free_sbridge_dev(sbridge_dev);
}
}
static int sbridge_get_onedevice(struct pci_dev **prev,
u8 *num_mc,
const struct pci_id_table *table,
const unsigned devno,
const int multi_bus)
{
struct sbridge_dev *sbridge_dev = NULL;
const struct pci_id_descr *dev_descr = &table->descr[devno];
struct pci_dev *pdev = NULL;
int seg = 0;
u8 bus = 0;
int i = 0;
sbridge_printk(KERN_DEBUG,
"Seeking for: PCI ID %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
dev_descr->dev_id, *prev);
if (!pdev) {
if (*prev) {
*prev = pdev;
return 0;
}
if (dev_descr->optional)
return 0;
/* if the HA wasn't found */
if (devno == 0)
return -ENODEV;
sbridge_printk(KERN_INFO,
"Device not found: %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/* End of list, leave */
return -ENODEV;
}
seg = pci_domain_nr(pdev->bus);
bus = pdev->bus->number;
next_imc:
sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
multi_bus, sbridge_dev);
if (!sbridge_dev) {
		/* If HA1 wasn't found, don't create a second EDAC memory controller */
if (dev_descr->dom == IMC1 && devno != 1) {
edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
pci_dev_put(pdev);
return 0;
}
if (dev_descr->dom == SOCK)
goto out_imc;
sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
if (!sbridge_dev) {
pci_dev_put(pdev);
return -ENOMEM;
}
(*num_mc)++;
}
if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
sbridge_printk(KERN_ERR,
"Duplicated device for %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
pci_dev_put(pdev);
return -ENODEV;
}
sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
/* pdev belongs to more than one IMC, do extra gets */
if (++i > 1)
pci_dev_get(pdev);
if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
goto next_imc;
out_imc:
/* Be sure that the device is enabled */
if (unlikely(pci_enable_device(pdev) < 0)) {
sbridge_printk(KERN_ERR,
"Couldn't enable %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
return -ENODEV;
}
edac_dbg(0, "Detected %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/*
	 * As stated in drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to walk the list of devices until NULL, we must take an extra
	 * reference on this device.
*/
pci_dev_get(pdev);
*prev = pdev;
return 0;
}
/*
* sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
* devices we want to reference for this driver.
* @num_mc: pointer to the memory controllers count, to be incremented in case
* of success.
* @table: model specific table
*
* returns 0 in case of success or error code
*/
static int sbridge_get_all_devices(u8 *num_mc,
const struct pci_id_table *table)
{
int i, rc;
struct pci_dev *pdev = NULL;
int allow_dups = 0;
int multi_bus = 0;
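	/*
	 * Knights Landing exposes several PCI devices with the same
	 * device ID spread across buses, so duplicate IDs and multi-bus
	 * lookups must be allowed there.
	 */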
if (table->type == KNIGHTS_LANDING)
allow_dups = multi_bus = 1;
while (table && table->descr) {
for (i = 0; i < table->n_devs_per_sock; i++) {
if (!allow_dups || i == 0 ||
table->descr[i].dev_id !=
table->descr[i-1].dev_id) {
pdev = NULL;
}
do {
rc = sbridge_get_onedevice(&pdev, num_mc,
table, i, multi_bus);
if (rc < 0) {
if (i == 0) {
i = table->n_devs_per_sock;
break;
}
sbridge_put_all_devices();
return -ENODEV;
}
} while (pdev && !allow_dups);
}
table++;
}
return 0;
}
/*
* Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
* the format: XXXa. So we can convert from a device to the corresponding
* channel like this
*/
#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
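/*
 * For example, a TAD0 device ID ending in 0xa maps to channel 0 and a
 * TAD3 device ID ending in 0xd maps to channel 3.
 */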
static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
u8 saw_chan_mask = 0;
int i;
for (i = 0; i < sbridge_dev->n_devs; i++) {
pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
pvt->pci_sad0 = pdev;
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
pvt->pci_sad1 = pdev;
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
pvt->pci_br0 = pdev;
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
pvt->pci_ha = pdev;
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
pvt->pci_ta = pdev;
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
pvt->pci_ras = pdev;
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
{
int id = TAD_DEV_TO_CHAN(pdev->device);
pvt->pci_tad[id] = pdev;
saw_chan_mask |= 1 << id;
}
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
pvt->pci_ddrio = pdev;
break;
default:
goto error;
}
edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
pdev->vendor, pdev->device,
sbridge_dev->bus,
pdev);
}
	/* Check if everything was registered */
if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
!pvt->pci_ras || !pvt->pci_ta)
goto enodev;
if (saw_chan_mask != 0x0f)
goto enodev;
return 0;
enodev:
sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
return -ENODEV;
error:
sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
PCI_VENDOR_ID_INTEL, pdev->device);
return -EINVAL;
}
static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
u8 saw_chan_mask = 0;
int i;
for (i = 0; i < sbridge_dev->n_devs; i++) {
pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
pvt->pci_ha = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
pvt->pci_ta = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
pvt->pci_ras = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
{
int id = TAD_DEV_TO_CHAN(pdev->device);
pvt->pci_tad[id] = pdev;
saw_chan_mask |= 1 << id;
}
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
pvt->pci_ddrio = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
pvt->pci_ddrio = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
pvt->pci_sad0 = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
pvt->pci_br0 = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
pvt->pci_br1 = pdev;
break;
default:
goto error;
}
edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
sbridge_dev->bus,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev);
}
	/* Check if everything was registered */
if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
!pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
goto enodev;
if (saw_chan_mask != 0x0f && /* -EN/-EX */
saw_chan_mask != 0x03) /* -EP */
goto enodev;
return 0;
enodev:
sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
return -ENODEV;
error:
sbridge_printk(KERN_ERR,
"Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
pdev->device);
return -EINVAL;
}
static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
u8 saw_chan_mask = 0;
int i;
/* there's only one device per system; not tied to any bus */
if (pvt->info.pci_vtd == NULL)
/* result will be checked later */
pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
NULL);
for (i = 0; i < sbridge_dev->n_devs; i++) {
pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
pvt->pci_sad0 = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
pvt->pci_sad1 = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
pvt->pci_ha = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
pvt->pci_ta = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
pvt->pci_ras = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
{
int id = TAD_DEV_TO_CHAN(pdev->device);
pvt->pci_tad[id] = pdev;
saw_chan_mask |= 1 << id;
}
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
if (!pvt->pci_ddrio)
pvt->pci_ddrio = pdev;
break;
default:
break;
}
edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
sbridge_dev->bus,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev);
}
	/* Check if everything was registered */
if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
!pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
goto enodev;
if (saw_chan_mask != 0x0f && /* -EN/-EX */
saw_chan_mask != 0x03) /* -EP */
goto enodev;
return 0;
enodev:
sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
return -ENODEV;
}
static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
u8 saw_chan_mask = 0;
int i;
/* there's only one device per system; not tied to any bus */
if (pvt->info.pci_vtd == NULL)
/* result will be checked later */
pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
NULL);
for (i = 0; i < sbridge_dev->n_devs; i++) {
pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
pvt->pci_sad0 = pdev;
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
pvt->pci_sad1 = pdev;
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
pvt->pci_ha = pdev;
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
pvt->pci_ta = pdev;
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
pvt->pci_ras = pdev;
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
{
int id = TAD_DEV_TO_CHAN(pdev->device);
pvt->pci_tad[id] = pdev;
saw_chan_mask |= 1 << id;
}
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
pvt->pci_ddrio = pdev;
break;
default:
break;
}
edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
sbridge_dev->bus,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev);
}
	/* Check if everything was registered */
if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
!pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
goto enodev;
if (saw_chan_mask != 0x0f && /* -EN/-EX */
saw_chan_mask != 0x03) /* -EP */
goto enodev;
return 0;
enodev:
sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
return -ENODEV;
}
static int knl_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
int dev, func;
int i;
int devidx;
for (i = 0; i < sbridge_dev->n_devs; i++) {
pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
/* Extract PCI device and function. */
dev = (pdev->devfn >> 3) & 0x1f;
func = pdev->devfn & 0x7;
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
if (dev == 8)
pvt->knl.pci_mc0 = pdev;
else if (dev == 9)
pvt->knl.pci_mc1 = pdev;
else {
sbridge_printk(KERN_ERR,
"Memory controller in unexpected place! (dev %d, fn %d)\n",
dev, func);
continue;
}
break;
case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
pvt->pci_sad0 = pdev;
break;
case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
pvt->pci_sad1 = pdev;
break;
case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
			/* There is one of these per tile, and they range from
* 1.14.0 to 1.18.5.
*/
devidx = ((dev-14)*8)+func;
if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
sbridge_printk(KERN_ERR,
"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
dev, func);
continue;
}
WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
pvt->knl.pci_cha[devidx] = pdev;
break;
case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
devidx = -1;
/*
* MC0 channels 0-2 are device 9 function 2-4,
* MC1 channels 3-5 are device 8 function 2-4.
*/
if (dev == 9)
devidx = func-2;
else if (dev == 8)
devidx = 3 + (func-2);
if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
sbridge_printk(KERN_ERR,
"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
dev, func);
continue;
}
WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
pvt->knl.pci_channel[devidx] = pdev;
break;
case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
pvt->knl.pci_mc_info = pdev;
break;
case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
pvt->pci_ta = pdev;
break;
default:
sbridge_printk(KERN_ERR, "Unexpected device %d\n",
pdev->device);
break;
}
}
if (!pvt->knl.pci_mc0 || !pvt->knl.pci_mc1 ||
!pvt->pci_sad0 || !pvt->pci_sad1 ||
!pvt->pci_ta) {
goto enodev;
}
for (i = 0; i < KNL_MAX_CHANNELS; i++) {
if (!pvt->knl.pci_channel[i]) {
sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
goto enodev;
}
}
for (i = 0; i < KNL_MAX_CHAS; i++) {
if (!pvt->knl.pci_cha[i]) {
sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
goto enodev;
}
}
return 0;
enodev:
sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
return -ENODEV;
}
/****************************************************************************
Error check routines
****************************************************************************/
/*
 * While Sandy Bridge has error count registers, the SMI BIOS reads values
 * from and resets the counters, so they are not reliable for the OS to
 * read. We have no option but to trust whatever the MCE is telling us
 * about the errors.
*/
static void sbridge_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
enum hw_event_mc_err_type tp_event;
char *optype, msg[256], msg_full[512];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
bool recoverable;
u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
u32 mscod = GET_BITFIELD(m->status, 16, 31);
u32 errcode = GET_BITFIELD(m->status, 0, 15);
u32 channel = GET_BITFIELD(m->status, 0, 3);
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
/*
* Bits 5-0 of MCi_MISC give the least significant bit that is valid.
	 * A value of 6 indicates a cache-line-aligned address; a value of 12
	 * indicates a page-aligned address reported by the patrol scrubber.
*/
u32 lsb = GET_BITFIELD(m->misc, 0, 5);
long channel_mask, first_channel;
u8 rank = 0xff, socket, ha;
int rc, dimm;
char *area_type = "DRAM";
if (pvt->info.type != SANDY_BRIDGE)
recoverable = true;
else
recoverable = GET_BITFIELD(m->status, 56, 56);
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
tp_event = HW_EVENT_ERR_UNCORRECTED;
} else {
tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
}
/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
* memory errors should fit in this mask:
* 000f 0000 1mmm cccc (binary)
* where:
* f = Correction Report Filtering Bit. If 1, subsequent errors
* won't be shown
* mmm = error type
* cccc = channel
* If the mask doesn't match, report an error to the parsing logic
*/
switch (optypenum) {
case 0:
optype = "generic undef request error";
break;
case 1:
optype = "memory read error";
break;
case 2:
optype = "memory write error";
break;
case 3:
optype = "addr/cmd error";
break;
case 4:
optype = "memory scrubbing error";
break;
default:
optype = "reserved";
break;
}
if (pvt->info.type == KNIGHTS_LANDING) {
if (channel == 14) {
edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable)
? " recoverable" : "",
mscod, errcode,
m->bank);
} else {
			char A = 'A';
/*
* Reported channel is in range 0-2, so we can't map it
			 * back to the mc. To figure out the mc we check the
			 * machine check bank register that reported this
			 * error: bank 15 means mc0 and bank 16 means mc1.
*/
channel = knl_channel_remap(m->bank == 16, channel);
channel_mask = 1 << channel;
snprintf(msg, sizeof(msg),
"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable)
? " recoverable" : " ",
mscod, errcode, channel, A + channel);
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
channel, 0, -1,
optype, msg);
}
return;
} else if (lsb < 12) {
rc = get_memory_error_data(mci, m->addr, &socket, &ha,
&channel_mask, &rank,
&area_type, msg);
} else {
rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
&channel_mask, msg);
}
if (rc < 0)
goto err_parsing;
new_mci = get_mci_for_node_id(socket, ha);
if (!new_mci) {
strcpy(msg, "Error: socket got corrupted!");
goto err_parsing;
}
mci = new_mci;
pvt = mci->pvt_info;
first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
if (rank == 0xff)
dimm = -1;
else if (rank < 4)
dimm = 0;
else if (rank < 8)
dimm = 1;
else
dimm = 2;
/*
* FIXME: On some memory configurations (mirror, lockstep), the
* Memory Controller can't point the error to a single DIMM. The
* EDAC core should be handling the channel mask, in order to point
	 * to the group of DIMMs where the error may be happening.
*/
if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
channel = first_channel;
snprintf(msg_full, sizeof(msg_full),
"%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
area_type,
mscod, errcode,
socket, ha,
channel_mask,
rank, msg);
edac_dbg(0, "%s\n", msg_full);
/* FIXME: need support for channel mask */
if (channel == CHANNEL_UNSPECIFIED)
channel = -1;
/* Call the helper to output message */
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
channel, dimm, -1,
optype, msg_full);
return;
err_parsing:
edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
-1, -1, -1,
msg, "");
}
/*
* Check that logging is enabled and that this is the right type
* of error for us to handle.
*/
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *mce = (struct mce *)data;
struct mem_ctl_info *mci;
char *type;
if (mce->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
/*
* Just let mcelog handle it if the error is
* outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
	 * Bit 12 has a special meaning.
*/
if ((mce->status & 0xefff) >> 7 != 1)
return NOTIFY_DONE;
/* Check ADDRV bit in STATUS */
if (!GET_BITFIELD(mce->status, 58, 58))
return NOTIFY_DONE;
/* Check MISCV bit in STATUS */
if (!GET_BITFIELD(mce->status, 59, 59))
return NOTIFY_DONE;
/* Check address type in MISC (physical address only) */
if (GET_BITFIELD(mce->misc, 6, 8) != 2)
return NOTIFY_DONE;
mci = get_mci_for_node_id(mce->socketid, IMC0);
if (!mci)
return NOTIFY_DONE;
if (mce->mcgstatus & MCG_STATUS_MCIP)
type = "Exception";
else
type = "Event";
sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
"Bank %d: %016Lx\n", mce->extcpu, type,
mce->mcgstatus, mce->bank, mce->status);
sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
"%u APIC %x\n", mce->cpuvendor, mce->cpuid,
mce->time, mce->socketid, mce->apicid);
sbridge_mce_output_error(mci, mce);
	/* Advise mcelog that the error was handled */
mce->kflags |= MCE_HANDLED_EDAC;
return NOTIFY_OK;
}
static struct notifier_block sbridge_mce_dec = {
.notifier_call = sbridge_mce_check_error,
.priority = MCE_PRIO_EDAC,
};
/****************************************************************************
EDAC register/unregister logic
****************************************************************************/
static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
{
struct mem_ctl_info *mci = sbridge_dev->mci;
if (unlikely(!mci || !mci->pvt_info)) {
edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
return;
}
edac_dbg(0, "MC: mci = %p, dev = %p\n",
mci, &sbridge_dev->pdev[0]->dev);
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->pdev);
edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
kfree(mci->ctl_name);
edac_mc_free(mci);
sbridge_dev->mci = NULL;
}
static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct sbridge_pvt *pvt;
struct pci_dev *pdev = sbridge_dev->pdev[0];
int rc;
/* allocate a new MC control structure */
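	/*
	 * Two EDAC layers are used: layer 0 enumerates the memory
	 * channels and layer 1 the DIMM slots per channel (a single
	 * slot on Knights Landing).
	 */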
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = type == KNIGHTS_LANDING ?
KNL_MAX_CHANNELS : NUM_CHANNELS;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
sizeof(*pvt));
if (unlikely(!mci))
return -ENOMEM;
edac_dbg(0, "MC: mci = %p, dev = %p\n",
mci, &pdev->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
/* Associate sbridge_dev and mci for future usage */
pvt->sbridge_dev = sbridge_dev;
sbridge_dev->mci = mci;
mci->mtype_cap = type == KNIGHTS_LANDING ?
MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
pvt->info.type = type;
switch (type) {
case IVY_BRIDGE:
pvt->info.rankcfgr = IB_RANK_CFG_A;
pvt->info.get_tolm = ibridge_get_tolm;
pvt->info.get_tohm = ibridge_get_tohm;
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = get_memory_type;
pvt->info.get_node_id = get_node_id;
pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
pvt->info.dram_attr = dram_attr;
pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
pvt->info.interleave_list = ibridge_interleave_list;
pvt->info.interleave_pkg = ibridge_interleave_pkg;
pvt->info.get_width = ibridge_get_width;
/* Store pci devices at mci for faster access */
rc = ibridge_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
get_source_id(mci);
mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
case SANDY_BRIDGE:
pvt->info.rankcfgr = SB_RANK_CFG_A;
pvt->info.get_tolm = sbridge_get_tolm;
pvt->info.get_tohm = sbridge_get_tohm;
pvt->info.dram_rule = sbridge_dram_rule;
pvt->info.get_memory_type = get_memory_type;
pvt->info.get_node_id = get_node_id;
pvt->info.get_ha = sbridge_get_ha;
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
pvt->info.dram_attr = dram_attr;
pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
pvt->info.interleave_list = sbridge_interleave_list;
pvt->info.interleave_pkg = sbridge_interleave_pkg;
pvt->info.get_width = sbridge_get_width;
/* Store pci devices at mci for faster access */
rc = sbridge_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
get_source_id(mci);
mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
case HASWELL:
/* rankcfgr isn't used */
pvt->info.get_tolm = haswell_get_tolm;
pvt->info.get_tohm = haswell_get_tohm;
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = haswell_get_memory_type;
pvt->info.get_node_id = haswell_get_node_id;
pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
pvt->info.dram_attr = dram_attr;
pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
pvt->info.interleave_list = ibridge_interleave_list;
pvt->info.interleave_pkg = ibridge_interleave_pkg;
pvt->info.get_width = ibridge_get_width;
/* Store pci devices at mci for faster access */
rc = haswell_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
get_source_id(mci);
mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
case BROADWELL:
/* rankcfgr isn't used */
pvt->info.get_tolm = haswell_get_tolm;
pvt->info.get_tohm = haswell_get_tohm;
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = haswell_get_memory_type;
pvt->info.get_node_id = haswell_get_node_id;
pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
pvt->info.dram_attr = dram_attr;
pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
pvt->info.interleave_list = ibridge_interleave_list;
pvt->info.interleave_pkg = ibridge_interleave_pkg;
pvt->info.get_width = broadwell_get_width;
/* Store pci devices at mci for faster access */
rc = broadwell_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
get_source_id(mci);
mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
case KNIGHTS_LANDING:
/* pvt->info.rankcfgr == ??? */
pvt->info.get_tolm = knl_get_tolm;
pvt->info.get_tohm = knl_get_tohm;
pvt->info.dram_rule = knl_dram_rule;
pvt->info.get_memory_type = knl_get_memory_type;
pvt->info.get_node_id = knl_get_node_id;
pvt->info.get_ha = knl_get_ha;
pvt->info.rir_limit = NULL;
pvt->info.sad_limit = knl_sad_limit;
pvt->info.interleave_mode = knl_interleave_mode;
pvt->info.dram_attr = dram_attr_knl;
pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
pvt->info.interleave_list = knl_interleave_list;
pvt->info.interleave_pkg = ibridge_interleave_pkg;
pvt->info.get_width = knl_get_width;
rc = knl_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
get_source_id(mci);
mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
}
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail0;
}
/* Get dimm basic config and the memory layout */
rc = get_dimm_config(mci);
if (rc < 0) {
edac_dbg(0, "MC: failed to get_dimm_config()\n");
goto fail;
}
get_memory_layout(mci);
/* record ptr to the generic device */
mci->pdev = &pdev->dev;
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
rc = -EINVAL;
goto fail;
}
return 0;
fail:
kfree(mci->ctl_name);
fail0:
edac_mc_free(mci);
sbridge_dev->mci = NULL;
return rc;
}
static const struct x86_cpu_id sbridge_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &pci_dev_descr_ibridge_table),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &pci_dev_descr_haswell_table),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &pci_dev_descr_broadwell_table),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &pci_dev_descr_broadwell_table),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &pci_dev_descr_knl_table),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
/*
* sbridge_probe Get all devices and register memory controllers
* present.
* return:
 *		0 if a device was found
* < 0 for error code
*/
static int sbridge_probe(const struct x86_cpu_id *id)
{
int rc;
u8 mc, num_mc = 0;
struct sbridge_dev *sbridge_dev;
struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
/* get the pci devices we want to reserve for our use */
rc = sbridge_get_all_devices(&num_mc, ptable);
if (unlikely(rc < 0)) {
edac_dbg(0, "couldn't get all devices\n");
goto fail0;
}
mc = 0;
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
edac_dbg(0, "Registering MC#%d (%d of %d)\n",
mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
rc = sbridge_register_mci(sbridge_dev, ptable->type);
if (unlikely(rc < 0))
goto fail1;
}
sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
return 0;
fail1:
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
sbridge_unregister_mci(sbridge_dev);
sbridge_put_all_devices();
fail0:
return rc;
}
/*
* sbridge_remove cleanup
*
*/
static void sbridge_remove(void)
{
struct sbridge_dev *sbridge_dev;
edac_dbg(0, "\n");
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
sbridge_unregister_mci(sbridge_dev);
/* Release PCI resources */
sbridge_put_all_devices();
}
/*
* sbridge_init Module entry function
* Try to initialize this module for its devices
*/
static int __init sbridge_init(void)
{
const struct x86_cpu_id *id;
const char *owner;
int rc;
edac_dbg(2, "\n");
if (ghes_get_devices())
return -EBUSY;
owner = edac_get_owner();
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
id = x86_match_cpu(sbridge_cpuids);
if (!id)
return -ENODEV;
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
rc = sbridge_probe(id);
if (rc >= 0) {
mce_register_decode_chain(&sbridge_mce_dec);
return 0;
}
sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
rc);
return rc;
}
/*
* sbridge_exit() Module exit function
* Unregister the driver
*/
static void __exit sbridge_exit(void)
{
edac_dbg(2, "\n");
sbridge_remove();
mce_unregister_decode_chain(&sbridge_mce_dec);
}
module_init(sbridge_init);
module_exit(sbridge_exit);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
SBRIDGE_REVISION);
| linux-master | drivers/edac/sb_edac.c |
/*
* edac_mc kernel module
* (C) 2005, 2006 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Thayne Harbaugh
* Based on work by Dan Hollis <goemon at anime dot net> and others.
* http://www.anime.net/~goemon/linux-ecc/
*
* Modified by Dave Peterson and Doug Thompson
*
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>
#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
#define edac_atomic_scrub(va, size) do { } while (0)
#endif
int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);
/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
/*
 * Used to lock EDAC MC to just one module, avoiding two drivers (e.g.
 * apei/ghes and i7core_edac) being used at the same time.
*/
static const char *edac_mc_owner;
static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)
{
return container_of(e, struct mem_ctl_info, error_desc);
}
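/*
 * Format the DIMM location as one "<layer name> <index>" pair per layer
 * (for example "channel 0 slot 1 "; exact layer names depend on
 * edac_layer_name[]) and return the number of characters written.
 */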
unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
unsigned int len)
{
struct mem_ctl_info *mci = dimm->mci;
int i, n, count = 0;
char *p = buf;
for (i = 0; i < mci->n_layers; i++) {
n = scnprintf(p, len, "%s %d ",
edac_layer_name[mci->layers[i].type],
dimm->location[i]);
p += n;
len -= n;
count += n;
}
return count;
}
#ifdef CONFIG_EDAC_DEBUG
static void edac_mc_dump_channel(struct rank_info *chan)
{
edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
edac_dbg(4, " channel = %p\n", chan);
edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}
static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
char location[80];
if (!dimm->nr_pages)
return;
edac_dimm_info_location(dimm, location, sizeof(location));
edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
dimm->mci->csbased ? "rank" : "dimm",
dimm->idx, location, dimm->csrow, dimm->cschannel);
edac_dbg(4, " dimm = %p\n", dimm);
edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
edac_dbg(4, " csrow = %p\n", csrow);
edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
}
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
edac_dbg(3, "\tmci = %p\n", mci);
edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
mci->nr_csrows, mci->csrows);
edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
mci->tot_dimms, mci->dimms);
edac_dbg(3, "\tdev = %p\n", mci->pdev);
edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
mci->mod_name, mci->ctl_name);
edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}
#endif /* CONFIG_EDAC_DEBUG */
const char * const edac_mem_types[] = {
[MEM_EMPTY] = "Empty",
[MEM_RESERVED] = "Reserved",
[MEM_UNKNOWN] = "Unknown",
[MEM_FPM] = "FPM",
[MEM_EDO] = "EDO",
[MEM_BEDO] = "BEDO",
[MEM_SDR] = "Unbuffered-SDR",
[MEM_RDR] = "Registered-SDR",
[MEM_DDR] = "Unbuffered-DDR",
[MEM_RDDR] = "Registered-DDR",
[MEM_RMBS] = "RMBS",
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
[MEM_RDDR2] = "Registered-DDR2",
[MEM_XDR] = "XDR",
[MEM_DDR3] = "Unbuffered-DDR3",
[MEM_RDDR3] = "Registered-DDR3",
[MEM_LRDDR3] = "Load-Reduced-DDR3-RAM",
[MEM_LPDDR3] = "Low-Power-DDR3-RAM",
[MEM_DDR4] = "Unbuffered-DDR4",
[MEM_RDDR4] = "Registered-DDR4",
[MEM_LPDDR4] = "Low-Power-DDR4-RAM",
[MEM_LRDDR4] = "Load-Reduced-DDR4-RAM",
[MEM_DDR5] = "Unbuffered-DDR5",
[MEM_RDDR5] = "Registered-DDR5",
[MEM_LRDDR5] = "Load-Reduced-DDR5-RAM",
[MEM_NVDIMM] = "Non-volatile-RAM",
[MEM_WIO2] = "Wide-IO-2",
[MEM_HBM2] = "High-bandwidth-memory-Gen2",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
static void _edac_mc_free(struct mem_ctl_info *mci)
{
put_device(&mci->dev);
}
static void mci_release(struct device *dev)
{
struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
struct csrow_info *csr;
int i, chn, row;
if (mci->dimms) {
for (i = 0; i < mci->tot_dimms; i++)
kfree(mci->dimms[i]);
kfree(mci->dimms);
}
if (mci->csrows) {
for (row = 0; row < mci->nr_csrows; row++) {
csr = mci->csrows[row];
if (!csr)
continue;
if (csr->channels) {
for (chn = 0; chn < mci->num_cschannel; chn++)
kfree(csr->channels[chn]);
kfree(csr->channels);
}
kfree(csr);
}
kfree(mci->csrows);
}
kfree(mci->pvt_info);
kfree(mci->layers);
kfree(mci);
}
static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
{
unsigned int tot_channels = mci->num_cschannel;
unsigned int tot_csrows = mci->nr_csrows;
unsigned int row, chn;
/*
	 * Allocate and fill the csrow/channel structs
*/
mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
return -ENOMEM;
for (row = 0; row < tot_csrows; row++) {
struct csrow_info *csr;
csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
if (!csr)
return -ENOMEM;
mci->csrows[row] = csr;
csr->csrow_idx = row;
csr->mci = mci;
csr->nr_channels = tot_channels;
csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
GFP_KERNEL);
if (!csr->channels)
return -ENOMEM;
for (chn = 0; chn < tot_channels; chn++) {
struct rank_info *chan;
chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
if (!chan)
return -ENOMEM;
csr->channels[chn] = chan;
chan->chan_idx = chn;
chan->csrow = csr;
}
}
return 0;
}
static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
{
unsigned int pos[EDAC_MAX_LAYERS];
unsigned int row, chn, idx;
int layer;
void *p;
/*
* Allocate and fill the dimm structs
*/
mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
if (!mci->dimms)
return -ENOMEM;
memset(&pos, 0, sizeof(pos));
row = 0;
chn = 0;
for (idx = 0; idx < mci->tot_dimms; idx++) {
struct dimm_info *dimm;
struct rank_info *chan;
int n, len;
chan = mci->csrows[row]->channels[chn];
dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
if (!dimm)
return -ENOMEM;
mci->dimms[idx] = dimm;
dimm->mci = mci;
dimm->idx = idx;
/*
* Copy DIMM location and initialize it.
*/
len = sizeof(dimm->label);
p = dimm->label;
n = scnprintf(p, len, "mc#%u", mci->mc_idx);
p += n;
len -= n;
for (layer = 0; layer < mci->n_layers; layer++) {
n = scnprintf(p, len, "%s#%u",
edac_layer_name[mci->layers[layer].type],
pos[layer]);
p += n;
len -= n;
dimm->location[layer] = pos[layer];
}
/* Link it to the csrows old API data */
chan->dimm = dimm;
dimm->csrow = row;
dimm->cschannel = chn;
/* Increment csrow location */
if (mci->layers[0].is_virt_csrow) {
chn++;
if (chn == mci->num_cschannel) {
chn = 0;
row++;
}
} else {
row++;
if (row == mci->nr_csrows) {
row = 0;
chn++;
}
}
/* Increment dimm location */
for (layer = mci->n_layers - 1; layer >= 0; layer--) {
pos[layer]++;
if (pos[layer] < mci->layers[layer].size)
break;
pos[layer] = 0;
}
}
return 0;
}
struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
unsigned int n_layers,
struct edac_mc_layer *layers,
unsigned int sz_pvt)
{
struct mem_ctl_info *mci;
struct edac_mc_layer *layer;
unsigned int idx, tot_dimms = 1;
unsigned int tot_csrows = 1, tot_channels = 1;
bool per_rank = false;
if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
return NULL;
/*
* Calculate the total amount of dimms and csrows/cschannels while
* in the old API emulation mode
*/
for (idx = 0; idx < n_layers; idx++) {
tot_dimms *= layers[idx].size;
if (layers[idx].is_virt_csrow)
tot_csrows *= layers[idx].size;
else
tot_channels *= layers[idx].size;
if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
per_rank = true;
}
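	/*
	 * Illustrative example (not from this file): two layers
	 * { CHANNEL: size 2, SLOT: size 4 with is_virt_csrow } give
	 * tot_dimms = 8, tot_csrows = 4 and tot_channels = 2, while
	 * per_rank stays false because no CHIP_SELECT layer is present.
	 */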
mci = kzalloc(sizeof(struct mem_ctl_info), GFP_KERNEL);
if (!mci)
return NULL;
mci->layers = kcalloc(n_layers, sizeof(struct edac_mc_layer), GFP_KERNEL);
if (!mci->layers)
goto error;
mci->pvt_info = kzalloc(sz_pvt, GFP_KERNEL);
if (!mci->pvt_info)
goto error;
mci->dev.release = mci_release;
device_initialize(&mci->dev);
/* setup index and various internal pointers */
mci->mc_idx = mc_num;
mci->tot_dimms = tot_dimms;
mci->n_layers = n_layers;
memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
mci->nr_csrows = tot_csrows;
mci->num_cschannel = tot_channels;
mci->csbased = per_rank;
if (edac_mc_alloc_csrows(mci))
goto error;
if (edac_mc_alloc_dimms(mci))
goto error;
mci->op_state = OP_ALLOC;
return mci;
error:
_edac_mc_free(mci);
return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
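/*
 * Illustrative usage sketch (not part of this file; the driver-private
 * struct "my_drv_priv" below is hypothetical): a typical driver describes
 * its topology with edac_mc_layer entries and then allocates the
 * mem_ctl_info:
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[0].size = 2;
 *	layers[0].is_virt_csrow = false;
 *	layers[1].type = EDAC_MC_LAYER_SLOT;
 *	layers[1].size = 4;
 *	layers[1].is_virt_csrow = true;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_drv_priv));
 *	if (!mci)
 *		return -ENOMEM;
 */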
void edac_mc_free(struct mem_ctl_info *mci)
{
edac_dbg(1, "\n");
_edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
bool edac_has_mcs(void)
{
bool ret;
mutex_lock(&mem_ctls_mutex);
ret = list_empty(&mc_devices);
mutex_unlock(&mem_ctls_mutex);
return !ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);
/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
struct mem_ctl_info *mci;
struct list_head *item;
edac_dbg(3, "\n");
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
if (mci->pdev == dev)
return mci;
}
return NULL;
}
/**
 * find_mci_by_dev - Scan list of controllers looking for the one that
 *	manages the 'dev' device.
 *
* @dev: pointer to a struct device related with the MCI
*/
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
struct mem_ctl_info *ret;
mutex_lock(&mem_ctls_mutex);
ret = __find_mci_by_dev(dev);
mutex_unlock(&mem_ctls_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);
/*
* edac_mc_workq_function
* performs the operation scheduled by a workq request
*/
static void edac_mc_workq_function(struct work_struct *work_req)
{
struct delayed_work *d_work = to_delayed_work(work_req);
struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
mutex_lock(&mem_ctls_mutex);
if (mci->op_state != OP_RUNNING_POLL) {
mutex_unlock(&mem_ctls_mutex);
return;
}
if (edac_op_state == EDAC_OPSTATE_POLL)
mci->edac_check(mci);
mutex_unlock(&mem_ctls_mutex);
/* Queue ourselves again. */
edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
* edac_mc_reset_delay_period(unsigned long value)
*
* user space has updated our poll period value, need to
* reset our workq delays
*/
void edac_mc_reset_delay_period(unsigned long value)
{
struct mem_ctl_info *mci;
struct list_head *item;
mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
if (mci->op_state == OP_RUNNING_POLL)
edac_mod_work(&mci->work, value);
}
mutex_unlock(&mem_ctls_mutex);
}
/* Return 0 on success, 1 on failure.
* Before calling this function, caller must
* assign a unique value to mci->mc_idx.
*
* locking model:
*
* called with the mem_ctls_mutex lock held
*/
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
struct list_head *item, *insert_before;
struct mem_ctl_info *p;
insert_before = &mc_devices;
p = __find_mci_by_dev(mci->pdev);
if (unlikely(p != NULL))
goto fail0;
list_for_each(item, &mc_devices) {
p = list_entry(item, struct mem_ctl_info, link);
if (p->mc_idx >= mci->mc_idx) {
if (unlikely(p->mc_idx == mci->mc_idx))
goto fail1;
insert_before = item;
break;
}
}
list_add_tail_rcu(&mci->link, insert_before);
return 0;
fail0:
edac_printk(KERN_WARNING, EDAC_MC,
"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
return 1;
fail1:
edac_printk(KERN_WARNING, EDAC_MC,
"bug in low-level driver: attempt to assign\n"
" duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
return 1;
}
static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
list_del_rcu(&mci->link);
/* these are for safe removal of devices from global list while
* NMI handlers may be traversing list
*/
synchronize_rcu();
INIT_LIST_HEAD(&mci->link);
return list_empty(&mc_devices);
}
struct mem_ctl_info *edac_mc_find(int idx)
{
struct mem_ctl_info *mci;
struct list_head *item;
mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
if (mci->mc_idx == idx)
goto unlock;
}
mci = NULL;
unlock:
mutex_unlock(&mem_ctls_mutex);
return mci;
}
EXPORT_SYMBOL(edac_mc_find);
const char *edac_get_owner(void)
{
return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
const struct attribute_group **groups)
{
int ret = -EINVAL;
edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
edac_mc_dump_mci(mci);
if (edac_debug_level >= 4) {
struct dimm_info *dimm;
int i;
for (i = 0; i < mci->nr_csrows; i++) {
struct csrow_info *csrow = mci->csrows[i];
u32 nr_pages = 0;
int j;
for (j = 0; j < csrow->nr_channels; j++)
nr_pages += csrow->channels[j]->dimm->nr_pages;
if (!nr_pages)
continue;
edac_mc_dump_csrow(csrow);
for (j = 0; j < csrow->nr_channels; j++)
if (csrow->channels[j]->dimm->nr_pages)
edac_mc_dump_channel(csrow->channels[j]);
}
mci_for_each_dimm(mci, dimm)
edac_mc_dump_dimm(dimm);
}
#endif
mutex_lock(&mem_ctls_mutex);
if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
ret = -EPERM;
goto fail0;
}
if (add_mc_to_global_list(mci))
goto fail0;
/* set load time so that error rate can be tracked */
mci->start_time = jiffies;
mci->bus = edac_get_sysfs_subsys();
if (edac_create_sysfs_mci_device(mci, groups)) {
edac_mc_printk(mci, KERN_WARNING,
"failed to create sysfs device\n");
goto fail1;
}
if (mci->edac_check) {
mci->op_state = OP_RUNNING_POLL;
INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
/* Report action taken */
edac_mc_printk(mci, KERN_INFO,
"Giving out device to module %s controller %s: DEV %s (%s)\n",
mci->mod_name, mci->ctl_name, mci->dev_name,
edac_op_state_to_string(mci->op_state));
edac_mc_owner = mci->mod_name;
mutex_unlock(&mem_ctls_mutex);
return 0;
fail1:
del_mc_from_global_list(mci);
fail0:
mutex_unlock(&mem_ctls_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
mutex_lock(&mem_ctls_mutex);
/* find the requested mci struct in the global list */
mci = __find_mci_by_dev(dev);
if (mci == NULL) {
mutex_unlock(&mem_ctls_mutex);
return NULL;
}
/* mark MCI offline: */
mci->op_state = OP_OFFLINE;
if (del_mc_from_global_list(mci))
edac_mc_owner = NULL;
mutex_unlock(&mem_ctls_mutex);
if (mci->edac_check)
edac_stop_work(&mci->work);
/* remove from sysfs */
edac_remove_sysfs_mci_device(mci);
edac_printk(KERN_INFO, EDAC_MC,
"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
mci->mod_name, mci->ctl_name, edac_dev_name(mci));
return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
u32 size)
{
struct page *pg;
void *virt_addr;
unsigned long flags = 0;
edac_dbg(3, "\n");
/* ECC error page was not in our memory. Ignore it. */
if (!pfn_valid(page))
return;
/* Find the actual page structure then map it and fix */
pg = pfn_to_page(page);
if (PageHighMem(pg))
local_irq_save(flags);
virt_addr = kmap_atomic(pg);
/* Perform architecture specific atomic scrub operation */
edac_atomic_scrub(virt_addr + offset, size);
/* Unmap and complete */
kunmap_atomic(virt_addr);
if (PageHighMem(pg))
local_irq_restore(flags);
}
/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
struct csrow_info **csrows = mci->csrows;
int row, i, j, n;
edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
row = -1;
for (i = 0; i < mci->nr_csrows; i++) {
struct csrow_info *csrow = csrows[i];
n = 0;
for (j = 0; j < csrow->nr_channels; j++) {
struct dimm_info *dimm = csrow->channels[j]->dimm;
n += dimm->nr_pages;
}
if (n == 0)
continue;
edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
mci->mc_idx,
csrow->first_page, page, csrow->last_page,
csrow->page_mask);
if ((page >= csrow->first_page) &&
(page <= csrow->last_page) &&
((page & csrow->page_mask) ==
(csrow->first_page & csrow->page_mask))) {
row = i;
break;
}
}
if (row == -1)
edac_mc_printk(mci, KERN_ERR,
"could not look up page error address %lx\n",
(unsigned long)page);
return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
const char *edac_layer_name[] = {
[EDAC_MC_LAYER_BRANCH] = "branch",
[EDAC_MC_LAYER_CHANNEL] = "channel",
[EDAC_MC_LAYER_SLOT] = "slot",
[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
[EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);
static void edac_inc_ce_error(struct edac_raw_error_desc *e)
{
int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
struct mem_ctl_info *mci = error_desc_to_mci(e);
struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);
mci->ce_mc += e->error_count;
if (dimm)
dimm->ce_count += e->error_count;
else
mci->ce_noinfo_count += e->error_count;
}
static void edac_inc_ue_error(struct edac_raw_error_desc *e)
{
int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
struct mem_ctl_info *mci = error_desc_to_mci(e);
struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);
mci->ue_mc += e->error_count;
if (dimm)
dimm->ue_count += e->error_count;
else
mci->ue_noinfo_count += e->error_count;
}
static void edac_ce_error(struct edac_raw_error_desc *e)
{
struct mem_ctl_info *mci = error_desc_to_mci(e);
unsigned long remapped_page;
if (edac_mc_get_log_ce()) {
edac_mc_printk(mci, KERN_WARNING,
"%d CE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx%s%s)\n",
e->error_count, e->msg,
*e->msg ? " " : "",
e->label, e->location, e->page_frame_number, e->offset_in_page,
e->grain, e->syndrome,
*e->other_detail ? " - " : "",
e->other_detail);
}
edac_inc_ce_error(e);
if (mci->scrub_mode == SCRUB_SW_SRC) {
/*
* Some memory controllers (called MCs below) can remap
* memory so that it is still available at a different
* address when PCI devices map into memory.
* MC's that can't do this, lose the memory where PCI
* devices are mapped. This mapping is MC-dependent
* and so we call back into the MC driver for it to
* map the MC page to a physical (CPU) page which can
* then be mapped to a virtual page - which can then
* be scrubbed.
*/
remapped_page = mci->ctl_page_to_phys ?
mci->ctl_page_to_phys(mci, e->page_frame_number) :
e->page_frame_number;
edac_mc_scrub_block(remapped_page, e->offset_in_page, e->grain);
}
}
static void edac_ue_error(struct edac_raw_error_desc *e)
{
struct mem_ctl_info *mci = error_desc_to_mci(e);
if (edac_mc_get_log_ue()) {
edac_mc_printk(mci, KERN_WARNING,
"%d UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
e->error_count, e->msg,
*e->msg ? " " : "",
e->label, e->location, e->page_frame_number, e->offset_in_page,
e->grain,
*e->other_detail ? " - " : "",
e->other_detail);
}
edac_inc_ue_error(e);
if (edac_mc_get_panic_on_ue()) {
panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
e->msg,
*e->msg ? " " : "",
e->label, e->location, e->page_frame_number, e->offset_in_page,
e->grain,
*e->other_detail ? " - " : "",
e->other_detail);
}
}
static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan)
{
struct mem_ctl_info *mci = error_desc_to_mci(e);
enum hw_event_mc_err_type type = e->type;
u16 count = e->error_count;
if (row < 0)
return;
edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
if (type == HW_EVENT_ERR_CORRECTED) {
mci->csrows[row]->ce_count += count;
if (chan >= 0)
mci->csrows[row]->channels[chan]->ce_count += count;
} else {
mci->csrows[row]->ue_count += count;
}
}
void edac_raw_mc_handle_error(struct edac_raw_error_desc *e)
{
struct mem_ctl_info *mci = error_desc_to_mci(e);
u8 grain_bits;
/* Sanity-check driver-supplied grain value. */
if (WARN_ON_ONCE(!e->grain))
e->grain = 1;
grain_bits = fls_long(e->grain - 1);
/* Report the error via the trace interface */
if (IS_ENABLED(CONFIG_RAS))
trace_mc_event(e->type, e->msg, e->label, e->error_count,
mci->mc_idx, e->top_layer, e->mid_layer,
e->low_layer,
(e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
grain_bits, e->syndrome, e->other_detail);
if (e->type == HW_EVENT_ERR_CORRECTED)
edac_ce_error(e);
else
edac_ue_error(e);
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
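/*
 * Worked example (illustrative): a driver-supplied grain of 32 bytes gives
 * grain_bits = fls_long(32 - 1) = 5, so the trace interface reports the
 * grain as 2^5 bytes; a zero grain is sanitized to 1 above.
 */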
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
const u16 error_count,
const unsigned long page_frame_number,
const unsigned long offset_in_page,
const unsigned long syndrome,
const int top_layer,
const int mid_layer,
const int low_layer,
const char *msg,
const char *other_detail)
{
struct dimm_info *dimm;
char *p, *end;
int row = -1, chan = -1;
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
int i, n_labels = 0;
struct edac_raw_error_desc *e = &mci->error_desc;
bool any_memory = true;
const char *prefix;
edac_dbg(3, "MC%d\n", mci->mc_idx);
/* Fills the error report buffer */
	memset(e, 0, sizeof(*e));
e->error_count = error_count;
e->type = type;
e->top_layer = top_layer;
e->mid_layer = mid_layer;
e->low_layer = low_layer;
e->page_frame_number = page_frame_number;
e->offset_in_page = offset_in_page;
e->syndrome = syndrome;
/* need valid strings here for both: */
e->msg = msg ?: "";
e->other_detail = other_detail ?: "";
/*
* Check if the event report is consistent and if the memory location is
* known. If it is, the DIMM(s) label info will be filled and the DIMM's
* error counters will be incremented.
*/
for (i = 0; i < mci->n_layers; i++) {
if (pos[i] >= (int)mci->layers[i].size) {
edac_mc_printk(mci, KERN_ERR,
"INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
edac_layer_name[mci->layers[i].type],
pos[i], mci->layers[i].size);
/*
* Instead of just returning it, let's use what's
* known about the error. The increment routines and
* the DIMM filter logic will do the right thing by
			 * pointing to the likely damaged DIMMs.
*/
pos[i] = -1;
}
if (pos[i] >= 0)
any_memory = false;
}
/*
* Get the dimm label/grain that applies to the match criteria.
* As the error algorithm may not be able to point to just one memory
* stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
* On FB-DIMM memory controllers, for uncorrected errors, it is common
* to have only the MC channel and the MC dimm (also called "branch")
* but the channel is not known, as the memory is arranged in pairs,
* where each memory belongs to a separate channel within the same
* branch.
*/
p = e->label;
*p = '\0';
end = p + sizeof(e->label);
prefix = "";
mci_for_each_dimm(mci, dimm) {
if (top_layer >= 0 && top_layer != dimm->location[0])
continue;
if (mid_layer >= 0 && mid_layer != dimm->location[1])
continue;
if (low_layer >= 0 && low_layer != dimm->location[2])
continue;
/* get the max grain, over the error match range */
if (dimm->grain > e->grain)
e->grain = dimm->grain;
/*
* If the error is memory-controller wide, there's no need to
* seek for the affected DIMMs because the whole channel/memory
* controller/... may be affected. Also, don't show errors for
* empty DIMM slots.
*/
if (!dimm->nr_pages)
continue;
n_labels++;
if (n_labels > EDAC_MAX_LABELS) {
p = e->label;
*p = '\0';
} else {
p += scnprintf(p, end - p, "%s%s", prefix, dimm->label);
prefix = OTHER_LABEL;
}
/*
* get csrow/channel of the DIMM, in order to allow
* incrementing the compat API counters
*/
edac_dbg(4, "%s csrows map: (%d,%d)\n",
mci->csbased ? "rank" : "dimm",
dimm->csrow, dimm->cschannel);
if (row == -1)
row = dimm->csrow;
else if (row >= 0 && row != dimm->csrow)
row = -2;
if (chan == -1)
chan = dimm->cschannel;
else if (chan >= 0 && chan != dimm->cschannel)
chan = -2;
}
if (any_memory)
strscpy(e->label, "any memory", sizeof(e->label));
else if (!*e->label)
strscpy(e->label, "unknown memory", sizeof(e->label));
edac_inc_csrow(e, row, chan);
/* Fill the RAM location data */
p = e->location;
end = p + sizeof(e->location);
prefix = "";
for (i = 0; i < mci->n_layers; i++) {
if (pos[i] < 0)
continue;
p += scnprintf(p, end - p, "%s%s:%d", prefix,
edac_layer_name[mci->layers[i].type], pos[i]);
prefix = " ";
}
edac_raw_mc_handle_error(e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
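/*
 * Illustrative call sketch (not part of this file; the page, offset and
 * layer indexes below are placeholders): a driver that decoded one
 * corrected error down to channel 1, slot 0 of a two-layer
 * {channel, slot} controller could report it as
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     0x12345, 0x40, 0,
 *			     1, 0, -1,
 *			     "read error", "");
 *
 * where the arguments after mci are error_count, page_frame_number,
 * offset_in_page, syndrome, the top/mid/low layer indexes and the message
 * strings.  Unknown layers are passed as -1 so the DIMM filter above
 * matches every DIMM behind the layers that are known.
 */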
| linux-master | drivers/edac/edac_mc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys DDR ECC Driver
 * This driver is based on the ppc4xx_edac.c driver.
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
*/
#include <linux/edac.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include "edac_module.h"
/* Number of cs_rows needed per memory controller */
#define SYNPS_EDAC_NR_CSROWS 1
/* Number of channels per memory controller */
#define SYNPS_EDAC_NR_CHANS 1
/* Granularity of reported error in bytes */
#define SYNPS_EDAC_ERR_GRAIN 1
#define SYNPS_EDAC_MSG_SIZE 256
#define SYNPS_EDAC_MOD_STRING "synps_edac"
#define SYNPS_EDAC_MOD_VER "1"
/* Synopsys DDR memory controller registers that are relevant to ECC */
#define CTRL_OFST 0x0
#define T_ZQ_OFST 0xA4
/* ECC control register */
#define ECC_CTRL_OFST 0xC4
/* ECC log register */
#define CE_LOG_OFST 0xC8
/* ECC address register */
#define CE_ADDR_OFST 0xCC
/* ECC data[31:0] register */
#define CE_DATA_31_0_OFST 0xD0
/* Uncorrectable error info registers */
#define UE_LOG_OFST 0xDC
#define UE_ADDR_OFST 0xE0
#define UE_DATA_31_0_OFST 0xE4
#define STAT_OFST 0xF0
#define SCRUB_OFST 0xF4
/* Control register bit field definitions */
#define CTRL_BW_MASK 0xC
#define CTRL_BW_SHIFT 2
#define DDRCTL_WDTH_16 1
#define DDRCTL_WDTH_32 0
/* ZQ register bit field definitions */
#define T_ZQ_DDRMODE_MASK 0x2
/* ECC control register bit field definitions */
#define ECC_CTRL_CLR_CE_ERR 0x2
#define ECC_CTRL_CLR_UE_ERR 0x1
/* ECC correctable/uncorrectable error log register definitions */
#define LOG_VALID 0x1
#define CE_LOG_BITPOS_MASK 0xFE
#define CE_LOG_BITPOS_SHIFT 1
/* ECC correctable/uncorrectable error address register definitions */
#define ADDR_COL_MASK 0xFFF
#define ADDR_ROW_MASK 0xFFFF000
#define ADDR_ROW_SHIFT 12
#define ADDR_BANK_MASK 0x70000000
#define ADDR_BANK_SHIFT 28
/* ECC statistic register definitions */
#define STAT_UECNT_MASK 0xFF
#define STAT_CECNT_MASK 0xFF00
#define STAT_CECNT_SHIFT 8
/* ECC scrub register definitions */
#define SCRUB_MODE_MASK 0x7
#define SCRUB_MODE_SECDED 0x4
/* DDR ECC Quirks */
#define DDR_ECC_INTR_SUPPORT BIT(0)
#define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
#define DDR_ECC_INTR_SELF_CLEAR BIT(2)
/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
/* ECC Configuration Registers */
#define ECC_CFG0_OFST 0x70
#define ECC_CFG1_OFST 0x74
/* ECC Status Register */
#define ECC_STAT_OFST 0x78
/* ECC Clear Register */
#define ECC_CLR_OFST 0x7C
/* ECC Error count Register */
#define ECC_ERRCNT_OFST 0x80
/* ECC Corrected Error Address Register */
#define ECC_CEADDR0_OFST 0x84
#define ECC_CEADDR1_OFST 0x88
/* ECC Syndrome Registers */
#define ECC_CSYND0_OFST 0x8C
#define ECC_CSYND1_OFST 0x90
#define ECC_CSYND2_OFST 0x94
/* ECC Bit Mask0 Address Register */
#define ECC_BITMASK0_OFST 0x98
#define ECC_BITMASK1_OFST 0x9C
#define ECC_BITMASK2_OFST 0xA0
/* ECC UnCorrected Error Address Register */
#define ECC_UEADDR0_OFST 0xA4
#define ECC_UEADDR1_OFST 0xA8
/* ECC Syndrome Registers */
#define ECC_UESYND0_OFST 0xAC
#define ECC_UESYND1_OFST 0xB0
#define ECC_UESYND2_OFST 0xB4
/* ECC Poison Address Reg */
#define ECC_POISON0_OFST 0xB8
#define ECC_POISON1_OFST 0xBC
#define ECC_ADDRMAP0_OFFSET 0x200
/* Control register bitfield definitions */
#define ECC_CTRL_BUSWIDTH_MASK 0x3000
#define ECC_CTRL_BUSWIDTH_SHIFT 12
#define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
#define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
/* DDR Control Register width definitions */
#define DDRCTL_EWDTH_16 2
#define DDRCTL_EWDTH_32 1
#define DDRCTL_EWDTH_64 0
/* ECC status register definitions */
#define ECC_STAT_UECNT_MASK 0xF0000
#define ECC_STAT_UECNT_SHIFT 16
#define ECC_STAT_CECNT_MASK 0xF00
#define ECC_STAT_CECNT_SHIFT 8
#define ECC_STAT_BITNUM_MASK 0x7F
/* ECC error count register definitions */
#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
#define ECC_ERRCNT_UECNT_SHIFT 16
#define ECC_ERRCNT_CECNT_MASK 0xFFFF
/* DDR QOS Interrupt register definitions */
#define DDR_QOS_IRQ_STAT_OFST 0x20200
#define DDR_QOSUE_MASK 0x4
#define DDR_QOSCE_MASK 0x2
#define ECC_CE_UE_INTR_MASK 0x6
#define DDR_QOS_IRQ_EN_OFST 0x20208
#define DDR_QOS_IRQ_DB_OFST 0x2020C
/* ECC CE/UE interrupt enable masks (controllers with self-clearing interrupts) */
#define DDR_UE_MASK BIT(9)
#define DDR_CE_MASK BIT(8)
/* ECC Corrected Error Register Mask and Shifts*/
#define ECC_CEADDR0_RW_MASK 0x3FFFF
#define ECC_CEADDR0_RNK_MASK BIT(24)
#define ECC_CEADDR1_BNKGRP_MASK 0x3000000
#define ECC_CEADDR1_BNKNR_MASK 0x70000
#define ECC_CEADDR1_BLKNR_MASK 0xFFF
#define ECC_CEADDR1_BNKGRP_SHIFT 24
#define ECC_CEADDR1_BNKNR_SHIFT 16
/* ECC Poison register shifts */
#define ECC_POISON0_RANK_SHIFT 24
#define ECC_POISON0_RANK_MASK BIT(24)
#define ECC_POISON0_COLUMN_SHIFT 0
#define ECC_POISON0_COLUMN_MASK 0xFFF
#define ECC_POISON1_BG_SHIFT 28
#define ECC_POISON1_BG_MASK 0x30000000
#define ECC_POISON1_BANKNR_SHIFT 24
#define ECC_POISON1_BANKNR_MASK 0x7000000
#define ECC_POISON1_ROW_SHIFT 0
#define ECC_POISON1_ROW_MASK 0x3FFFF
/* DDR Memory type defines */
#define MEM_TYPE_DDR3 0x1
#define MEM_TYPE_LPDDR3 0x8
#define MEM_TYPE_DDR2 0x4
#define MEM_TYPE_DDR4 0x10
#define MEM_TYPE_LPDDR4 0x20
/* DDRC Software control register */
#define DDRC_SWCTL 0x320
/* DDRC ECC CE & UE poison mask */
#define ECC_CEPOISON_MASK 0x3
#define ECC_UEPOISON_MASK 0x1
/* DDRC Device config masks */
#define DDRC_MSTR_CFG_MASK 0xC0000000
#define DDRC_MSTR_CFG_SHIFT 30
#define DDRC_MSTR_CFG_X4_MASK 0x0
#define DDRC_MSTR_CFG_X8_MASK 0x1
#define DDRC_MSTR_CFG_X16_MASK 0x2
#define DDRC_MSTR_CFG_X32_MASK 0x3
#define DDR_MAX_ROW_SHIFT 18
#define DDR_MAX_COL_SHIFT 14
#define DDR_MAX_BANK_SHIFT 3
#define DDR_MAX_BANKGRP_SHIFT 2
#define ROW_MAX_VAL_MASK 0xF
#define COL_MAX_VAL_MASK 0xF
#define BANK_MAX_VAL_MASK 0x1F
#define BANKGRP_MAX_VAL_MASK 0x1F
#define RANK_MAX_VAL_MASK 0x1F
#define ROW_B0_BASE 6
#define ROW_B1_BASE 7
#define ROW_B2_BASE 8
#define ROW_B3_BASE 9
#define ROW_B4_BASE 10
#define ROW_B5_BASE 11
#define ROW_B6_BASE 12
#define ROW_B7_BASE 13
#define ROW_B8_BASE 14
#define ROW_B9_BASE 15
#define ROW_B10_BASE 16
#define ROW_B11_BASE 17
#define ROW_B12_BASE 18
#define ROW_B13_BASE 19
#define ROW_B14_BASE 20
#define ROW_B15_BASE 21
#define ROW_B16_BASE 22
#define ROW_B17_BASE 23
#define COL_B2_BASE 2
#define COL_B3_BASE 3
#define COL_B4_BASE 4
#define COL_B5_BASE 5
#define COL_B6_BASE 6
#define COL_B7_BASE 7
#define COL_B8_BASE 8
#define COL_B9_BASE 9
#define COL_B10_BASE 10
#define COL_B11_BASE 11
#define COL_B12_BASE 12
#define COL_B13_BASE 13
#define BANK_B0_BASE 2
#define BANK_B1_BASE 3
#define BANK_B2_BASE 4
#define BANKGRP_B0_BASE 2
#define BANKGRP_B1_BASE 3
#define RANK_B0_BASE 6
/**
* struct ecc_error_info - ECC error log information.
* @row: Row number.
* @col: Column number.
* @bank: Bank number.
* @bitpos: Bit position.
* @data: Data causing the error.
* @bankgrpnr: Bank group number.
* @blknr: Block number.
*/
struct ecc_error_info {
u32 row;
u32 col;
u32 bank;
u32 bitpos;
u32 data;
u32 bankgrpnr;
u32 blknr;
};
/**
* struct synps_ecc_status - ECC status information to report.
* @ce_cnt: Correctable error count.
* @ue_cnt: Uncorrectable error count.
* @ceinfo: Correctable error log information.
* @ueinfo: Uncorrectable error log information.
*/
struct synps_ecc_status {
u32 ce_cnt;
u32 ue_cnt;
struct ecc_error_info ceinfo;
struct ecc_error_info ueinfo;
};
/**
* struct synps_edac_priv - DDR memory controller private instance data.
* @baseaddr: Base address of the DDR controller.
* @message: Buffer for framing the event specific info.
* @stat: ECC status information.
* @p_data: Platform data.
* @ce_cnt: Correctable Error count.
* @ue_cnt: Uncorrectable Error count.
* @poison_addr: Data poison address.
* @row_shift: Bit shifts for row bit.
* @col_shift: Bit shifts for column bit.
* @bank_shift: Bit shifts for bank bit.
* @bankgrp_shift: Bit shifts for bank group bit.
* @rank_shift: Bit shifts for rank bit.
*/
struct synps_edac_priv {
void __iomem *baseaddr;
char message[SYNPS_EDAC_MSG_SIZE];
struct synps_ecc_status stat;
const struct synps_platform_data *p_data;
u32 ce_cnt;
u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
ulong poison_addr;
u32 row_shift[18];
u32 col_shift[14];
u32 bank_shift[3];
u32 bankgrp_shift[2];
u32 rank_shift[1];
#endif
};
/**
* struct synps_platform_data - synps platform data structure.
* @get_error_info: Get EDAC error info.
 * @get_mtype: Get memory type.
 * @get_dtype: Get device type (data width).
 * @get_ecc_state: Get ECC enable/disable state.
 * @quirks: Quirk flags to differentiate IP versions.
*/
struct synps_platform_data {
int (*get_error_info)(struct synps_edac_priv *priv);
enum mem_type (*get_mtype)(const void __iomem *base);
enum dev_type (*get_dtype)(const void __iomem *base);
bool (*get_ecc_state)(void __iomem *base);
int quirks;
};
/**
* zynq_get_error_info - Get the current ECC error info.
* @priv: DDR memory controller private instance data.
*
* Return: one if there is no error, otherwise zero.
*/
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
struct synps_ecc_status *p;
u32 regval, clearval = 0;
void __iomem *base;
base = priv->baseaddr;
p = &priv->stat;
regval = readl(base + STAT_OFST);
if (!regval)
return 1;
p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
p->ue_cnt = regval & STAT_UECNT_MASK;
regval = readl(base + CE_LOG_OFST);
if (!(p->ce_cnt && (regval & LOG_VALID)))
goto ue_err;
p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
regval = readl(base + CE_ADDR_OFST);
p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
p->ceinfo.col = regval & ADDR_COL_MASK;
p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
p->ceinfo.data);
clearval = ECC_CTRL_CLR_CE_ERR;
ue_err:
regval = readl(base + UE_LOG_OFST);
if (!(p->ue_cnt && (regval & LOG_VALID)))
goto out;
regval = readl(base + UE_ADDR_OFST);
p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
p->ueinfo.col = regval & ADDR_COL_MASK;
p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
clearval |= ECC_CTRL_CLR_UE_ERR;
out:
writel(clearval, base + ECC_CTRL_OFST);
writel(0x0, base + ECC_CTRL_OFST);
return 0;
}
/**
* zynqmp_get_error_info - Get the current ECC error info.
* @priv: DDR memory controller private instance data.
*
 * Return: one if there is no error, otherwise zero.
*/
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
struct synps_ecc_status *p;
u32 regval, clearval = 0;
void __iomem *base;
base = priv->baseaddr;
p = &priv->stat;
regval = readl(base + ECC_ERRCNT_OFST);
p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
if (!p->ce_cnt)
goto ue_err;
regval = readl(base + ECC_STAT_OFST);
if (!regval)
return 1;
p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
regval = readl(base + ECC_CEADDR0_OFST);
p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
regval = readl(base + ECC_CEADDR1_OFST);
p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
ECC_CEADDR1_BNKNR_SHIFT;
p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
ECC_CEADDR1_BNKGRP_SHIFT;
p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
readl(base + ECC_CSYND2_OFST));
ue_err:
if (!p->ue_cnt)
goto out;
regval = readl(base + ECC_UEADDR0_OFST);
p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
regval = readl(base + ECC_UEADDR1_OFST);
p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
ECC_CEADDR1_BNKGRP_SHIFT;
p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
ECC_CEADDR1_BNKNR_SHIFT;
p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
writel(clearval, base + ECC_CLR_OFST);
writel(0x0, base + ECC_CLR_OFST);
return 0;
}
/**
* handle_error - Handle Correctable and Uncorrectable errors.
* @mci: EDAC memory controller instance.
* @p: Synopsys ECC status structure.
*
* Handles ECC correctable and uncorrectable errors.
*/
static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
{
struct synps_edac_priv *priv = mci->pvt_info;
struct ecc_error_info *pinf;
if (p->ce_cnt) {
pinf = &p->ceinfo;
if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
"DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
"CE", pinf->row, pinf->bank,
pinf->bankgrpnr, pinf->blknr,
pinf->bitpos, pinf->data);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
"DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
"CE", pinf->row, pinf->bank, pinf->col,
pinf->bitpos, pinf->data);
}
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
p->ce_cnt, 0, 0, 0, 0, 0, -1,
priv->message, "");
}
if (p->ue_cnt) {
pinf = &p->ueinfo;
if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
"DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
"UE", pinf->row, pinf->bank,
pinf->bankgrpnr, pinf->blknr);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
"DDR ECC error type :%s Row %d Bank %d Col %d ",
"UE", pinf->row, pinf->bank, pinf->col);
}
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
p->ue_cnt, 0, 0, 0, 0, 0, -1,
priv->message, "");
}
memset(p, 0, sizeof(*p));
}
static void enable_intr(struct synps_edac_priv *priv)
{
/* Enable UE/CE Interrupts */
if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
writel(DDR_UE_MASK | DDR_CE_MASK,
priv->baseaddr + ECC_CLR_OFST);
else
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
}
static void disable_intr(struct synps_edac_priv *priv)
{
/* Disable UE/CE Interrupts */
if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
writel(0x0, priv->baseaddr + ECC_CLR_OFST);
else
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
}
/**
* intr_handler - Interrupt Handler for ECC interrupts.
* @irq: IRQ number.
* @dev_id: Device ID.
*
 * Return: IRQ_NONE if the interrupt is not ours, IRQ_HANDLED otherwise.
*/
static irqreturn_t intr_handler(int irq, void *dev_id)
{
const struct synps_platform_data *p_data;
struct mem_ctl_info *mci = dev_id;
struct synps_edac_priv *priv;
int status, regval;
priv = mci->pvt_info;
p_data = priv->p_data;
/*
* v3.0 of the controller has the ce/ue bits cleared automatically,
* so this condition does not apply.
*/
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
if (!(regval & ECC_CE_UE_INTR_MASK))
return IRQ_NONE;
}
status = p_data->get_error_info(priv);
if (status)
return IRQ_NONE;
priv->ce_cnt += priv->stat.ce_cnt;
priv->ue_cnt += priv->stat.ue_cnt;
handle_error(mci, &priv->stat);
edac_dbg(3, "Total error count CE %d UE %d\n",
priv->ce_cnt, priv->ue_cnt);
/* v3.0 of the controller does not have this register */
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
else
enable_intr(priv);
return IRQ_HANDLED;
}
/**
* check_errors - Check controller for ECC errors.
* @mci: EDAC memory controller instance.
*
* Check and post ECC errors. Called by the polling thread.
*/
static void check_errors(struct mem_ctl_info *mci)
{
const struct synps_platform_data *p_data;
struct synps_edac_priv *priv;
int status;
priv = mci->pvt_info;
p_data = priv->p_data;
status = p_data->get_error_info(priv);
if (status)
return;
priv->ce_cnt += priv->stat.ce_cnt;
priv->ue_cnt += priv->stat.ue_cnt;
handle_error(mci, &priv->stat);
edac_dbg(3, "Total error count CE %d UE %d\n",
priv->ce_cnt, priv->ue_cnt);
}
/**
* zynq_get_dtype - Return the controller memory width.
* @base: DDR memory controller base address.
*
* Get the EDAC device type width appropriate for the current controller
* configuration.
*
* Return: a device type width enumeration.
*/
static enum dev_type zynq_get_dtype(const void __iomem *base)
{
enum dev_type dt;
u32 width;
width = readl(base + CTRL_OFST);
width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
switch (width) {
case DDRCTL_WDTH_16:
dt = DEV_X2;
break;
case DDRCTL_WDTH_32:
dt = DEV_X4;
break;
default:
dt = DEV_UNKNOWN;
}
return dt;
}
/**
* zynqmp_get_dtype - Return the controller memory width.
* @base: DDR memory controller base address.
*
* Get the EDAC device type width appropriate for the current controller
* configuration.
*
* Return: a device type width enumeration.
*/
static enum dev_type zynqmp_get_dtype(const void __iomem *base)
{
enum dev_type dt;
u32 width;
width = readl(base + CTRL_OFST);
width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
switch (width) {
case DDRCTL_EWDTH_16:
dt = DEV_X2;
break;
case DDRCTL_EWDTH_32:
dt = DEV_X4;
break;
case DDRCTL_EWDTH_64:
dt = DEV_X8;
break;
default:
dt = DEV_UNKNOWN;
}
return dt;
}
/**
* zynq_get_ecc_state - Return the controller ECC enable/disable status.
* @base: DDR memory controller base address.
*
* Get the ECC enable/disable status of the controller.
*
* Return: true if enabled, otherwise false.
*/
static bool zynq_get_ecc_state(void __iomem *base)
{
enum dev_type dt;
u32 ecctype;
dt = zynq_get_dtype(base);
if (dt == DEV_UNKNOWN)
return false;
ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
return true;
return false;
}
/**
* zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
* @base: DDR memory controller base address.
*
* Get the ECC enable/disable status for the controller.
*
 * Return: true if ECC is enabled, otherwise false.
*/
static bool zynqmp_get_ecc_state(void __iomem *base)
{
enum dev_type dt;
u32 ecctype;
dt = zynqmp_get_dtype(base);
if (dt == DEV_UNKNOWN)
return false;
ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
if ((ecctype == SCRUB_MODE_SECDED) &&
((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
return true;
return false;
}
/**
* get_memsize - Read the size of the attached memory device.
*
* Return: the memory size in bytes.
*/
static u32 get_memsize(void)
{
struct sysinfo inf;
si_meminfo(&inf);
return inf.totalram * inf.mem_unit;
}
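/*
 * Note (illustrative): si_meminfo() reports totalram as a page count and
 * mem_unit as the size of those pages in bytes, so the product above is
 * the total system RAM; since it is returned as a u32 it can wrap on very
 * large memory configurations and is best treated as an approximation.
 */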
/**
* zynq_get_mtype - Return the controller memory type.
 * @base: DDR memory controller base address.
*
* Get the EDAC memory type appropriate for the current controller
* configuration.
*
* Return: a memory type enumeration.
*/
static enum mem_type zynq_get_mtype(const void __iomem *base)
{
enum mem_type mt;
u32 memtype;
memtype = readl(base + T_ZQ_OFST);
if (memtype & T_ZQ_DDRMODE_MASK)
mt = MEM_DDR3;
else
mt = MEM_DDR2;
return mt;
}
/**
 * zynqmp_get_mtype - Return the controller memory type.
 * @base: DDR memory controller base address.
*
* Get the EDAC memory type appropriate for the current controller
* configuration.
*
* Return: a memory type enumeration.
*/
static enum mem_type zynqmp_get_mtype(const void __iomem *base)
{
enum mem_type mt;
u32 memtype;
memtype = readl(base + CTRL_OFST);
if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
mt = MEM_DDR3;
else if (memtype & MEM_TYPE_DDR2)
mt = MEM_RDDR2;
else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
mt = MEM_DDR4;
else
mt = MEM_EMPTY;
return mt;
}
/**
* init_csrows - Initialize the csrow data.
* @mci: EDAC memory controller instance.
*
* Initialize the chip select rows associated with the EDAC memory
* controller instance.
*/
static void init_csrows(struct mem_ctl_info *mci)
{
struct synps_edac_priv *priv = mci->pvt_info;
const struct synps_platform_data *p_data;
struct csrow_info *csi;
struct dimm_info *dimm;
u32 size, row;
int j;
p_data = priv->p_data;
for (row = 0; row < mci->nr_csrows; row++) {
csi = mci->csrows[row];
size = get_memsize();
for (j = 0; j < csi->nr_channels; j++) {
dimm = csi->channels[j]->dimm;
dimm->edac_mode = EDAC_SECDED;
dimm->mtype = p_data->get_mtype(priv->baseaddr);
dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
dimm->grain = SYNPS_EDAC_ERR_GRAIN;
dimm->dtype = p_data->get_dtype(priv->baseaddr);
}
}
}
/**
* mc_init - Initialize one driver instance.
* @mci: EDAC memory controller instance.
* @pdev: platform device.
*
* Perform initialization of the EDAC memory controller instance and
* related driver-private data associated with the memory controller the
* instance is bound to.
*/
static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
{
struct synps_edac_priv *priv;
mci->pdev = &pdev->dev;
priv = mci->pvt_info;
platform_set_drvdata(pdev, mci);
/* Initialize controller capabilities and configuration */
mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->scrub_cap = SCRUB_HW_SRC;
mci->scrub_mode = SCRUB_NONE;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->ctl_name = "synps_ddr_controller";
mci->dev_name = SYNPS_EDAC_MOD_STRING;
mci->mod_name = SYNPS_EDAC_MOD_VER;
if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
edac_op_state = EDAC_OPSTATE_INT;
} else {
edac_op_state = EDAC_OPSTATE_POLL;
mci->edac_check = check_errors;
}
mci->ctl_page_to_phys = NULL;
init_csrows(mci);
}
static int setup_irq(struct mem_ctl_info *mci,
struct platform_device *pdev)
{
struct synps_edac_priv *priv = mci->pvt_info;
int ret, irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
		edac_printk(KERN_ERR, EDAC_MC,
			    "Failed to get IRQ from DT: %d\n", irq);
return irq;
}
ret = devm_request_irq(&pdev->dev, irq, intr_handler,
0, dev_name(&pdev->dev), mci);
if (ret < 0) {
edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
return ret;
}
enable_intr(priv);
return 0;
}
static const struct synps_platform_data zynq_edac_def = {
.get_error_info = zynq_get_error_info,
.get_mtype = zynq_get_mtype,
.get_dtype = zynq_get_dtype,
.get_ecc_state = zynq_get_ecc_state,
.quirks = 0,
};
static const struct synps_platform_data zynqmp_edac_def = {
.get_error_info = zynqmp_get_error_info,
.get_mtype = zynqmp_get_mtype,
.get_dtype = zynqmp_get_dtype,
.get_ecc_state = zynqmp_get_ecc_state,
.quirks = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
| DDR_ECC_DATA_POISON_SUPPORT
#endif
),
};
static const struct synps_platform_data synopsys_edac_def = {
.get_error_info = zynqmp_get_error_info,
.get_mtype = zynqmp_get_mtype,
.get_dtype = zynqmp_get_dtype,
.get_ecc_state = zynqmp_get_ecc_state,
.quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
| DDR_ECC_DATA_POISON_SUPPORT
#endif
),
};
static const struct of_device_id synps_edac_match[] = {
{
.compatible = "xlnx,zynq-ddrc-a05",
.data = (void *)&zynq_edac_def
},
{
.compatible = "xlnx,zynqmp-ddrc-2.40a",
.data = (void *)&zynqmp_edac_def
},
{
.compatible = "snps,ddrc-3.80a",
.data = (void *)&synopsys_edac_def
},
{
/* end of table */
}
};
MODULE_DEVICE_TABLE(of, synps_edac_match);
#ifdef CONFIG_EDAC_DEBUG
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
/**
* ddr_poison_setup - Update poison registers.
* @priv: DDR memory controller private instance data.
*
* Update poison registers as per DDR mapping.
* Return: none.
*/
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
int index;
ulong hif_addr = 0;
hif_addr = priv->poison_addr >> 3;
for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
if (priv->row_shift[index])
row |= (((hif_addr >> priv->row_shift[index]) &
BIT(0)) << index);
else
break;
}
for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
if (priv->col_shift[index] || index < 3)
col |= (((hif_addr >> priv->col_shift[index]) &
BIT(0)) << index);
else
break;
}
for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
if (priv->bank_shift[index])
bank |= (((hif_addr >> priv->bank_shift[index]) &
BIT(0)) << index);
else
break;
}
for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
if (priv->bankgrp_shift[index])
bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
& BIT(0)) << index);
else
break;
}
if (priv->rank_shift[0])
rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);
regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
writel(regval, priv->baseaddr + ECC_POISON0_OFST);
regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}
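/*
 * Worked example (illustrative, assumes a hypothetical address map): the
 * poison address is first turned into a HIF address by dropping the low
 * three bits (one 64-bit data beat), so poison_addr = 0x1000 gives
 * hif_addr = 0x200.  Each row/col/bank/bankgrp/rank bit is then extracted
 * from hif_addr at the bit position recorded in the matching *_shift[]
 * table that setup_address_map() fills in below.
 */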
static ssize_t inject_data_error_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct synps_edac_priv *priv = mci->pvt_info;
return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
"Error injection Address: 0x%lx\n\r",
readl(priv->baseaddr + ECC_POISON0_OFST),
readl(priv->baseaddr + ECC_POISON1_OFST),
priv->poison_addr);
}
static ssize_t inject_data_error_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct synps_edac_priv *priv = mci->pvt_info;
if (kstrtoul(data, 0, &priv->poison_addr))
return -EINVAL;
ddr_poison_setup(priv);
return count;
}
static ssize_t inject_data_poison_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct synps_edac_priv *priv = mci->pvt_info;
return sprintf(data, "Data Poisoning: %s\n\r",
(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
? ("Correctable Error") : ("UnCorrectable Error"));
}
static ssize_t inject_data_poison_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct synps_edac_priv *priv = mci->pvt_info;
writel(0, priv->baseaddr + DDRC_SWCTL);
if (strncmp(data, "CE", 2) == 0)
writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
else
writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
writel(1, priv->baseaddr + DDRC_SWCTL);
return count;
}
static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
{
int rc;
rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
if (rc < 0)
return rc;
rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
if (rc < 0)
return rc;
return 0;
}
static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
{
device_remove_file(&mci->dev, &dev_attr_inject_data_error);
device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
}
static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
u32 addrmap_row_b2_10;
int index;
priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
priv->row_shift[1] = ((addrmap[5] >> 8) &
ROW_MAX_VAL_MASK) + ROW_B1_BASE;
addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
for (index = 2; index < 11; index++)
priv->row_shift[index] = addrmap_row_b2_10 +
index + ROW_B0_BASE;
} else {
priv->row_shift[2] = (addrmap[9] &
ROW_MAX_VAL_MASK) + ROW_B2_BASE;
priv->row_shift[3] = ((addrmap[9] >> 8) &
ROW_MAX_VAL_MASK) + ROW_B3_BASE;
priv->row_shift[4] = ((addrmap[9] >> 16) &
ROW_MAX_VAL_MASK) + ROW_B4_BASE;
priv->row_shift[5] = ((addrmap[9] >> 24) &
ROW_MAX_VAL_MASK) + ROW_B5_BASE;
priv->row_shift[6] = (addrmap[10] &
ROW_MAX_VAL_MASK) + ROW_B6_BASE;
priv->row_shift[7] = ((addrmap[10] >> 8) &
ROW_MAX_VAL_MASK) + ROW_B7_BASE;
priv->row_shift[8] = ((addrmap[10] >> 16) &
ROW_MAX_VAL_MASK) + ROW_B8_BASE;
priv->row_shift[9] = ((addrmap[10] >> 24) &
ROW_MAX_VAL_MASK) + ROW_B9_BASE;
priv->row_shift[10] = (addrmap[11] &
ROW_MAX_VAL_MASK) + ROW_B10_BASE;
}
priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
ROW_MAX_VAL_MASK) + ROW_B11_BASE);
priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
ROW_MAX_VAL_MASK) + ROW_B12_BASE);
priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
ROW_MAX_VAL_MASK) + ROW_B13_BASE);
priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
ROW_MAX_VAL_MASK) + ROW_B14_BASE);
priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
ROW_MAX_VAL_MASK) + ROW_B15_BASE);
priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
ROW_MAX_VAL_MASK) + ROW_B16_BASE);
priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
ROW_MAX_VAL_MASK) + ROW_B17_BASE);
}
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
u32 width, memtype;
int index;
memtype = readl(priv->baseaddr + CTRL_OFST);
width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
priv->col_shift[0] = 0;
priv->col_shift[1] = 1;
priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
priv->col_shift[3] = ((addrmap[2] >> 8) &
COL_MAX_VAL_MASK) + COL_B3_BASE;
priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
COL_MAX_VAL_MASK) + COL_B4_BASE);
priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
COL_MAX_VAL_MASK) + COL_B5_BASE);
priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
COL_MAX_VAL_MASK) + COL_B6_BASE);
priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
COL_MAX_VAL_MASK) + COL_B7_BASE);
priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
COL_MAX_VAL_MASK) + COL_B8_BASE);
priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
COL_MAX_VAL_MASK) + COL_B9_BASE);
if (width == DDRCTL_EWDTH_64) {
if (memtype & MEM_TYPE_LPDDR3) {
priv->col_shift[10] = ((addrmap[4] &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
((addrmap[4] & COL_MAX_VAL_MASK) +
COL_B10_BASE);
priv->col_shift[11] = (((addrmap[4] >> 8) &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
COL_B11_BASE);
} else {
priv->col_shift[11] = ((addrmap[4] &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
((addrmap[4] & COL_MAX_VAL_MASK) +
COL_B10_BASE);
priv->col_shift[13] = (((addrmap[4] >> 8) &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
COL_B11_BASE);
}
} else if (width == DDRCTL_EWDTH_32) {
if (memtype & MEM_TYPE_LPDDR3) {
priv->col_shift[10] = (((addrmap[3] >> 24) &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
COL_B9_BASE);
priv->col_shift[11] = ((addrmap[4] &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
((addrmap[4] & COL_MAX_VAL_MASK) +
COL_B10_BASE);
} else {
priv->col_shift[11] = (((addrmap[3] >> 24) &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
COL_B9_BASE);
priv->col_shift[13] = ((addrmap[4] &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
((addrmap[4] & COL_MAX_VAL_MASK) +
COL_B10_BASE);
}
} else {
if (memtype & MEM_TYPE_LPDDR3) {
priv->col_shift[10] = (((addrmap[3] >> 16) &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
COL_B8_BASE);
priv->col_shift[11] = (((addrmap[3] >> 24) &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
COL_B9_BASE);
priv->col_shift[13] = ((addrmap[4] &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
((addrmap[4] & COL_MAX_VAL_MASK) +
COL_B10_BASE);
} else {
priv->col_shift[11] = (((addrmap[3] >> 16) &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
COL_B8_BASE);
priv->col_shift[13] = (((addrmap[3] >> 24) &
COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
COL_B9_BASE);
}
}
if (width) {
for (index = 9; index > width; index--) {
priv->col_shift[index] = priv->col_shift[index - width];
priv->col_shift[index - width] = 0;
}
}
}
static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
priv->bank_shift[1] = ((addrmap[1] >> 8) &
BANK_MAX_VAL_MASK) + BANK_B1_BASE;
priv->bank_shift[2] = (((addrmap[1] >> 16) &
BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
(((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
BANK_B2_BASE);
}
static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
priv->bankgrp_shift[0] = (addrmap[8] &
BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
& BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
}
static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
RANK_MAX_VAL_MASK) + RANK_B0_BASE);
}
/**
* setup_address_map - Set Address Map by querying ADDRMAP registers.
* @priv: DDR memory controller private instance data.
*
* Set Address Map by querying ADDRMAP registers.
*
* Return: none.
*/
static void setup_address_map(struct synps_edac_priv *priv)
{
u32 addrmap[12];
int index;
for (index = 0; index < 12; index++) {
u32 addrmap_offset;
addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
addrmap[index] = readl(priv->baseaddr + addrmap_offset);
}
setup_row_address_map(priv, addrmap);
setup_column_address_map(priv, addrmap);
setup_bank_address_map(priv, addrmap);
setup_bg_address_map(priv, addrmap);
setup_rank_address_map(priv, addrmap);
}
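/*
 * Illustrative note: ADDRMAP0..ADDRMAP11 live at offsets 0x200..0x22C and
 * encode, per address bit, the extra shift to add to the per-field base
 * (ROW_Bn_BASE, COL_Bn_BASE, ...).  A field that reads back as the
 * corresponding *_MAX_VAL_MASK value marks the bit as unused, and the
 * matching *_shift[] entry is set to 0.
 */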
#endif /* CONFIG_EDAC_DEBUG */
/**
* mc_probe - Check controller and bind driver.
* @pdev: platform device.
*
* Probe a specific controller instance for binding with the driver.
*
* Return: 0 if the controller instance was successfully bound to the
* driver; otherwise, < 0 on error.
*/
static int mc_probe(struct platform_device *pdev)
{
const struct synps_platform_data *p_data;
struct edac_mc_layer layers[2];
struct synps_edac_priv *priv;
struct mem_ctl_info *mci;
void __iomem *baseaddr;
struct resource *res;
int rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
baseaddr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(baseaddr))
return PTR_ERR(baseaddr);
p_data = of_device_get_match_data(&pdev->dev);
if (!p_data)
return -ENODEV;
if (!p_data->get_ecc_state(baseaddr)) {
edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
return -ENXIO;
}
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = SYNPS_EDAC_NR_CSROWS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = SYNPS_EDAC_NR_CHANS;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(struct synps_edac_priv));
if (!mci) {
edac_printk(KERN_ERR, EDAC_MC,
"Failed memory allocation for mc instance\n");
return -ENOMEM;
}
priv = mci->pvt_info;
priv->baseaddr = baseaddr;
priv->p_data = p_data;
mc_init(mci, pdev);
if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
rc = setup_irq(mci, pdev);
if (rc)
goto free_edac_mc;
}
rc = edac_mc_add_mc(mci);
if (rc) {
edac_printk(KERN_ERR, EDAC_MC,
"Failed to register with EDAC core\n");
goto free_edac_mc;
}
#ifdef CONFIG_EDAC_DEBUG
if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
rc = edac_create_sysfs_attributes(mci);
if (rc) {
edac_printk(KERN_ERR, EDAC_MC,
"Failed to create sysfs entries\n");
goto free_edac_mc;
}
}
if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
setup_address_map(priv);
#endif
/*
* Start capturing the correctable and uncorrectable errors. A write of
* 0 starts the counters.
*/
if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
writel(0x0, baseaddr + ECC_CTRL_OFST);
return rc;
free_edac_mc:
edac_mc_free(mci);
return rc;
}
/**
* mc_remove - Unbind driver from controller.
* @pdev: Platform device.
*
* Return: Unconditionally 0
*/
static int mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct synps_edac_priv *priv = mci->pvt_info;
if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
disable_intr(priv);
#ifdef CONFIG_EDAC_DEBUG
if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
edac_remove_sysfs_attributes(mci);
#endif
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
static struct platform_driver synps_edac_mc_driver = {
.driver = {
.name = "synopsys-edac",
.of_match_table = synps_edac_match,
},
.probe = mc_probe,
.remove = mc_remove,
};
module_platform_driver(synps_edac_mc_driver);
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/edac/synopsys_edac.c |
/*
* Intel e752x Memory Controller kernel module
* (C) 2004 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Implement support for the E7520, E7525, E7320 and i3100 memory controllers.
*
* Datasheets:
* https://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
* ftp://download.intel.com/design/intarch/datashts/31345803.pdf
*
* Written by Tom Zimmerman
*
* Contributors:
* Thayne Harbaugh at realmsys.com (?)
* Wang Zhenyu at intel.com
* Dave Jiang at mvista.com
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "e752x_edac"
static int report_non_memory_errors;
static int force_function_unhide;
static int sysbus_parity = -1;
static struct edac_pci_ctl_info *e752x_pci;
#define e752x_printk(level, fmt, arg...) \
edac_printk(level, "e752x", fmt, ##arg)
#define e752x_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_7520_0
#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_7525_0
#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_7320_0
#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_3100_0
#define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
#endif /* PCI_DEVICE_ID_INTEL_3100_0 */
#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
#define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
#endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
#define E752X_NR_CSROWS 8 /* number of csrows */
/* E752X register addresses - device 0 function 0 */
#define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */
/*
* 6:5 Scrub Completion Count
* 3:2 Scrub Rate (i3100 only)
* 01=fast 10=normal
* 1:0 Scrub Mode enable
* 00=off 10=on
*/
#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
/*
* 31:30 Device width row 7
* 01=x8 10=x4 11=x8 DDR2
* 27:26 Device width row 6
* 23:22 Device width row 5
* 19:18 Device width row 4
* 15:14 Device width row 3
* 11:10 Device width row 2
* 7:6 Device width row 1
* 3:2 Device width row 0
*/
#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
/* FIXME:IS THIS RIGHT? */
/*
* 22 Number channels 0=1,1=2
* 19:18 DRB Granularity 32/64MB
*/
#define E752X_DRM 0x80 /* Dimm mapping register */
#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
/*
* 14:12 1 single A, 2 single B, 3 dual
*/
#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
/* E752X register addresses - device 0 function 1 */
#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
#define E752X_SYSBUS_FERR 0x60 /* System bus first error reg (16b) */
#define E752X_SYSBUS_NERR 0x62 /* System bus next error reg (16b) */
#define E752X_SYSBUS_ERRMASK 0x64 /* System bus error mask reg (16b) */
#define E752X_SYSBUS_SMICMD 0x6A /* System bus SMI command reg (16b) */
#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
/* error address register (32b) */
/*
* 31 Reserved
* 30:2 CE address (64 byte block 34:6)
* 1 Reserved
* 0 HiLoCS
*/
#define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
/* error address register (32b) */
/*
* 31 Reserved
* 30:2 CE address (64 byte block 34:6)
* 1 Reserved
* 0 HiLoCS
*/
#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
/* error address register (32b) */
/*
* 31 Reserved
* 30:2 CE address (64 byte block 34:6)
* 1 Reserved
* 0 HiLoCS
*/
#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
/* error address register (32b) */
/*
* 31 Reserved
* 30:2 CE address (64 byte block 34:6)
* 1 Reserved
* 0 HiLoCS
*/
#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
/* error syndrome register (16b) */
#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
/* error syndrome register (16b) */
#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
/* 3100 IMCH specific register addresses - device 0 function 1 */
#define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
#define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
#define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
#define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
/* ICH5R register addresses - device 30 function 0 */
#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
enum e752x_chips {
E7520 = 0,
E7525 = 1,
E7320 = 2,
I3100 = 3
};
/*
* These chips support single-rank and dual-rank memories only.
*
* On e752x chips, the odd rows are present only on dual-rank memories.
* Dividing the rank by two will provide the dimm#
*
* i3100 MC has a different mapping: it supports only 4 ranks.
*
* The mapping is (from 1 to n):
* slot single-ranked double-ranked
* dimm #1 -> rank #4 NA
* dimm #2 -> rank #3 NA
* dimm #3 -> rank #2 Ranks 2 and 3
* dimm #4 -> rank #1 Ranks 1 and 4
*
* FIXME: The current mapping for i3100 considers that it supports up to 8
* ranks/channel, but the datasheet says that the MC supports only 4 ranks.
*/
struct e752x_pvt {
struct pci_dev *dev_d0f0;
struct pci_dev *dev_d0f1;
u32 tolm;
u32 remapbase;
u32 remaplimit;
int mc_symmetric;
u8 map[8];
int map_type;
const struct e752x_dev_info *dev_info;
};
struct e752x_dev_info {
u16 err_dev;
u16 ctl_dev;
const char *ctl_name;
};
struct e752x_error_info {
u32 ferr_global;
u32 nerr_global;
u32 nsi_ferr; /* 3100 only */
u32 nsi_nerr; /* 3100 only */
u8 hi_ferr; /* all but 3100 */
u8 hi_nerr; /* all but 3100 */
u16 sysbus_ferr;
u16 sysbus_nerr;
u8 buf_ferr;
u8 buf_nerr;
u16 dram_ferr;
u16 dram_nerr;
u32 dram_sec1_add;
u32 dram_sec2_add;
u16 dram_sec1_syndrome;
u16 dram_sec2_syndrome;
u32 dram_ded_add;
u32 dram_scrb_add;
u32 dram_retr_add;
};
static const struct e752x_dev_info e752x_devs[] = {
[E7520] = {
.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
.ctl_name = "E7520"},
[E7525] = {
.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
.ctl_name = "E7525"},
[E7320] = {
.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
.ctl_name = "E7320"},
[I3100] = {
.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
.ctl_name = "3100"},
};
/* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
* map the scrubbing bandwidth to a hardware register value. The 'set'
* operation finds the 'matching or higher value'. Note that scrubbing
* on the e752x can only be enabled/disabled. The 3100 supports
* a normal and fast mode.
*/
#define SDRATE_EOT 0xFFFFFFFF
struct scrubrate {
u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */
u16 scrubval; /* register value for scrub rate */
};
/* Rate below assumes same performance as i3100 using PC3200 DDR2 in
* normal mode. e752x bridges don't support choosing normal or fast mode,
* so the scrubbing bandwidth value isn't all that important - scrubbing is
* either on or off.
*/
static const struct scrubrate scrubrates_e752x[] = {
{0, 0x00}, /* Scrubbing Off */
{500000, 0x02}, /* Scrubbing On */
{SDRATE_EOT, 0x00} /* End of Table */
};
/* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
* Normal mode: 125 (32000 / 256) times slower than fast mode.
*/
static const struct scrubrate scrubrates_i3100[] = {
{0, 0x00}, /* Scrubbing Off */
{500000, 0x0a}, /* Normal mode - 32k clocks */
{62500000, 0x06}, /* Fast mode - 256 clocks */
{SDRATE_EOT, 0x00} /* End of Table */
};
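/*
* Translate a controller page to a system page: pages below the top of
* low memory, and pages between 4GB and the remap base, pass through
* unchanged; pages in the TOLM..4GB hole are relocated into the remap
* window starting at remapbase.
*/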
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
unsigned long page)
{
u32 remap;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
edac_dbg(3, "\n");
if (page < pvt->tolm)
return page;
if ((page >= 0x100000) && (page < pvt->remapbase))
return page;
remap = (page - pvt->tolm) + pvt->remapbase;
if (remap < pvt->remaplimit)
return remap;
e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
return pvt->tolm - 1;
}
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
u32 sec1_add, u16 sec1_syndrome)
{
u32 page;
int row;
int channel;
int i;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
edac_dbg(3, "\n");
/* convert the addr to 4k page */
page = sec1_add >> (PAGE_SHIFT - 4);
/* FIXME - check for -1 */
if (pvt->mc_symmetric) {
/* chip selects are bits 14 & 13 */
row = ((page >> 1) & 3);
e752x_printk(KERN_WARNING,
"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
pvt->map[4], pvt->map[5], pvt->map[6],
pvt->map[7]);
/* test for channel remapping */
for (i = 0; i < 8; i++) {
if (pvt->map[i] == row)
break;
}
e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
if (i < 8)
row = i;
else
e752x_mc_printk(mci, KERN_WARNING,
"row %d not found in remap table\n",
row);
} else
row = edac_mc_find_csrow_by_page(mci, page);
/* 0 = channel A, 1 = channel B */
channel = !(error_one & 1);
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offset_in_page(sec1_add << 4), sec1_syndrome,
row, channel, -1,
"e752x CE", "");
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
u32 sec1_add, u16 sec1_syndrome, int *error_found,
int handle_error)
{
*error_found = 1;
if (handle_error)
do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
}
static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
u32 ded_add, u32 scrb_add)
{
u32 error_2b, block_page;
int row;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
edac_dbg(3, "\n");
if (error_one & 0x0202) {
error_2b = ded_add;
/* convert to 4k address */
block_page = error_2b >> (PAGE_SHIFT - 4);
row = pvt->mc_symmetric ?
/* chip selects are bits 14 & 13 */
((block_page >> 1) & 3) :
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
block_page,
offset_in_page(error_2b << 4), 0,
row, -1, -1,
"e752x UE from Read", "");
}
if (error_one & 0x0404) {
error_2b = scrb_add;
/* convert to 4k address */
block_page = error_2b >> (PAGE_SHIFT - 4);
row = pvt->mc_symmetric ?
/* chip selects are bits 14 & 13 */
((block_page >> 1) & 3) :
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
block_page,
offset_in_page(error_2b << 4), 0,
row, -1, -1,
"e752x UE from Scruber", "");
}
}
static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
u32 ded_add, u32 scrb_add, int *error_found,
int handle_error)
{
*error_found = 1;
if (handle_error)
do_process_ue(mci, error_one, ded_add, scrb_add);
}
static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
int *error_found, int handle_error)
{
*error_found = 1;
if (!handle_error)
return;
edac_dbg(3, "\n");
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1,
"e752x UE log memory write", "");
}
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
u32 retry_add)
{
u32 error_1b, page;
int row;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
error_1b = retry_add;
page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
/* chip selects are bits 14 & 13 */
row = pvt->mc_symmetric ? ((page >> 1) & 3) :
edac_mc_find_csrow_by_page(mci, page);
e752x_mc_printk(mci, KERN_WARNING,
"CE page 0x%lx, row %d : Memory read retry\n",
(long unsigned int)page, row);
}
static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
u32 retry_add, int *error_found,
int handle_error)
{
*error_found = 1;
if (handle_error)
do_process_ded_retry(mci, error, retry_add);
}
static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
int *error_found, int handle_error)
{
*error_found = 1;
if (handle_error)
e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
}
static char *global_message[11] = {
"PCI Express C1",
"PCI Express C",
"PCI Express B1",
"PCI Express B",
"PCI Express A1",
"PCI Express A",
"DMA Controller",
"HUB or NS Interface",
"System Bus",
"DRAM Controller", /* 9th entry */
"Internal Buffer"
};
#define DRAM_ENTRY 9
static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
static void do_global_error(int fatal, u32 errors)
{
int i;
for (i = 0; i < 11; i++) {
if (errors & (1 << i)) {
/* If the error is from DRAM Controller OR
* we are to report ALL errors, then
* report the error
*/
if ((i == DRAM_ENTRY) || report_non_memory_errors)
e752x_printk(KERN_WARNING, "%sError %s\n",
fatal_message[fatal],
global_message[i]);
}
}
}
static inline void global_error(int fatal, u32 errors, int *error_found,
int handle_error)
{
*error_found = 1;
if (handle_error)
do_global_error(fatal, errors);
}
static char *hub_message[7] = {
"HI Address or Command Parity", "HI Illegal Access",
"HI Internal Parity", "Out of Range Access",
"HI Data Parity", "Enhanced Config Access",
"Hub Interface Target Abort"
};
static void do_hub_error(int fatal, u8 errors)
{
int i;
for (i = 0; i < 7; i++) {
if (errors & (1 << i))
e752x_printk(KERN_WARNING, "%sError %s\n",
fatal_message[fatal], hub_message[i]);
}
}
static inline void hub_error(int fatal, u8 errors, int *error_found,
int handle_error)
{
*error_found = 1;
if (handle_error)
do_hub_error(fatal, errors);
}
#define NSI_FATAL_MASK 0x0c080081
#define NSI_NON_FATAL_MASK 0x23a0ba64
#define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
static char *nsi_message[30] = {
"NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
"", /* reserved */
"NSI Parity Error", /* bit 2, non-fatal */
"", /* reserved */
"", /* reserved */
"Correctable Error Message", /* bit 5, non-fatal */
"Non-Fatal Error Message", /* bit 6, non-fatal */
"Fatal Error Message", /* bit 7, fatal */
"", /* reserved */
"Receiver Error", /* bit 9, non-fatal */
"", /* reserved */
"Bad TLP", /* bit 11, non-fatal */
"Bad DLLP", /* bit 12, non-fatal */
"REPLAY_NUM Rollover", /* bit 13, non-fatal */
"", /* reserved */
"Replay Timer Timeout", /* bit 15, non-fatal */
"", /* reserved */
"", /* reserved */
"", /* reserved */
"Data Link Protocol Error", /* bit 19, fatal */
"", /* reserved */
"Poisoned TLP", /* bit 21, non-fatal */
"", /* reserved */
"Completion Timeout", /* bit 23, non-fatal */
"Completer Abort", /* bit 24, non-fatal */
"Unexpected Completion", /* bit 25, non-fatal */
"Receiver Overflow", /* bit 26, fatal */
"Malformed TLP", /* bit 27, fatal */
"", /* reserved */
"Unsupported Request" /* bit 29, non-fatal */
};
static void do_nsi_error(int fatal, u32 errors)
{
int i;
for (i = 0; i < 30; i++) {
if (errors & (1 << i))
printk(KERN_WARNING "%sError %s\n",
fatal_message[fatal], nsi_message[i]);
}
}
static inline void nsi_error(int fatal, u32 errors, int *error_found,
int handle_error)
{
*error_found = 1;
if (handle_error)
do_nsi_error(fatal, errors);
}
static char *membuf_message[4] = {
"Internal PMWB to DRAM parity",
"Internal PMWB to System Bus Parity",
"Internal System Bus or IO to PMWB Parity",
"Internal DRAM to PMWB Parity"
};
static void do_membuf_error(u8 errors)
{
int i;
for (i = 0; i < 4; i++) {
if (errors & (1 << i))
e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
membuf_message[i]);
}
}
static inline void membuf_error(u8 errors, int *error_found, int handle_error)
{
*error_found = 1;
if (handle_error)
do_membuf_error(errors);
}
static char *sysbus_message[10] = {
"Addr or Request Parity",
"Data Strobe Glitch",
"Addr Strobe Glitch",
"Data Parity",
"Addr Above TOM",
"Non DRAM Lock Error",
"MCERR", "BINIT",
"Memory Parity",
"IO Subsystem Parity"
};
static void do_sysbus_error(int fatal, u32 errors)
{
int i;
for (i = 0; i < 10; i++) {
if (errors & (1 << i))
e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
fatal_message[fatal], sysbus_message[i]);
}
}
static inline void sysbus_error(int fatal, u32 errors, int *error_found,
int handle_error)
{
*error_found = 1;
if (handle_error)
do_sysbus_error(fatal, errors);
}
static void e752x_check_hub_interface(struct e752x_error_info *info,
int *error_found, int handle_error)
{
u8 stat8;
//pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
stat8 = info->hi_ferr;
if (stat8 & 0x7f) { /* Error, so process */
stat8 &= 0x7f;
if (stat8 & 0x2b)
hub_error(1, stat8 & 0x2b, error_found, handle_error);
if (stat8 & 0x54)
hub_error(0, stat8 & 0x54, error_found, handle_error);
}
//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
stat8 = info->hi_nerr;
if (stat8 & 0x7f) { /* Error, so process */
stat8 &= 0x7f;
if (stat8 & 0x2b)
hub_error(1, stat8 & 0x2b, error_found, handle_error);
if (stat8 & 0x54)
hub_error(0, stat8 & 0x54, error_found, handle_error);
}
}
static void e752x_check_ns_interface(struct e752x_error_info *info,
int *error_found, int handle_error)
{
u32 stat32;
stat32 = info->nsi_ferr;
if (stat32 & NSI_ERR_MASK) { /* Error, so process */
if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
handle_error);
if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
handle_error);
}
stat32 = info->nsi_nerr;
if (stat32 & NSI_ERR_MASK) {
if (stat32 & NSI_FATAL_MASK)
nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
handle_error);
if (stat32 & NSI_NON_FATAL_MASK)
nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
handle_error);
}
}
static void e752x_check_sysbus(struct e752x_error_info *info,
int *error_found, int handle_error)
{
u32 stat32, error32;
//pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
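/* Combine the 16-bit registers: FERR in the low half, NERR in the high half. */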
stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
if (stat32 == 0)
return; /* no errors */
error32 = (stat32 >> 16) & 0x3ff;
stat32 = stat32 & 0x3ff;
if (stat32 & 0x087)
sysbus_error(1, stat32 & 0x087, error_found, handle_error);
if (stat32 & 0x378)
sysbus_error(0, stat32 & 0x378, error_found, handle_error);
if (error32 & 0x087)
sysbus_error(1, error32 & 0x087, error_found, handle_error);
if (error32 & 0x378)
sysbus_error(0, error32 & 0x378, error_found, handle_error);
}
static void e752x_check_membuf(struct e752x_error_info *info,
int *error_found, int handle_error)
{
u8 stat8;
stat8 = info->buf_ferr;
if (stat8 & 0x0f) { /* Error, so process */
stat8 &= 0x0f;
membuf_error(stat8, error_found, handle_error);
}
stat8 = info->buf_nerr;
if (stat8 & 0x0f) { /* Error, so process */
stat8 &= 0x0f;
membuf_error(stat8, error_found, handle_error);
}
}
static void e752x_check_dram(struct mem_ctl_info *mci,
struct e752x_error_info *info, int *error_found,
int handle_error)
{
u16 error_one, error_next;
error_one = info->dram_ferr;
error_next = info->dram_nerr;
/* decode and report errors */
if (error_one & 0x0101) /* check first error correctable */
process_ce(mci, error_one, info->dram_sec1_add,
info->dram_sec1_syndrome, error_found, handle_error);
if (error_next & 0x0101) /* check next error correctable */
process_ce(mci, error_next, info->dram_sec2_add,
info->dram_sec2_syndrome, error_found, handle_error);
if (error_one & 0x4040)
process_ue_no_info_wr(mci, error_found, handle_error);
if (error_next & 0x4040)
process_ue_no_info_wr(mci, error_found, handle_error);
if (error_one & 0x2020)
process_ded_retry(mci, error_one, info->dram_retr_add,
error_found, handle_error);
if (error_next & 0x2020)
process_ded_retry(mci, error_next, info->dram_retr_add,
error_found, handle_error);
if (error_one & 0x0808)
process_threshold_ce(mci, error_one, error_found, handle_error);
if (error_next & 0x0808)
process_threshold_ce(mci, error_next, error_found,
handle_error);
if (error_one & 0x0606)
process_ue(mci, error_one, info->dram_ded_add,
info->dram_scrb_add, error_found, handle_error);
if (error_next & 0x0606)
process_ue(mci, error_next, info->dram_ded_add,
info->dram_scrb_add, error_found, handle_error);
}
static void e752x_get_error_info(struct mem_ctl_info *mci,
struct e752x_error_info *info)
{
struct pci_dev *dev;
struct e752x_pvt *pvt;
memset(info, 0, sizeof(*info));
pvt = (struct e752x_pvt *)mci->pvt_info;
dev = pvt->dev_d0f1;
pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
if (info->ferr_global) {
if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
pci_read_config_dword(dev, I3100_NSI_FERR,
&info->nsi_ferr);
info->hi_ferr = 0;
} else {
pci_read_config_byte(dev, E752X_HI_FERR,
&info->hi_ferr);
info->nsi_ferr = 0;
}
pci_read_config_word(dev, E752X_SYSBUS_FERR,
&info->sysbus_ferr);
pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
&info->dram_sec1_add);
pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
&info->dram_sec1_syndrome);
pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
&info->dram_ded_add);
pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
&info->dram_scrb_add);
pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
&info->dram_retr_add);
/* ignore the reserved bits just in case */
if (info->hi_ferr & 0x7f)
pci_write_config_byte(dev, E752X_HI_FERR,
info->hi_ferr);
if (info->nsi_ferr & NSI_ERR_MASK)
pci_write_config_dword(dev, I3100_NSI_FERR,
info->nsi_ferr);
if (info->sysbus_ferr)
pci_write_config_word(dev, E752X_SYSBUS_FERR,
info->sysbus_ferr);
if (info->buf_ferr & 0x0f)
pci_write_config_byte(dev, E752X_BUF_FERR,
info->buf_ferr);
if (info->dram_ferr)
pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_FERR,
info->dram_ferr, info->dram_ferr);
pci_write_config_dword(dev, E752X_FERR_GLOBAL,
info->ferr_global);
}
pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
if (info->nerr_global) {
if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
pci_read_config_dword(dev, I3100_NSI_NERR,
&info->nsi_nerr);
info->hi_nerr = 0;
} else {
pci_read_config_byte(dev, E752X_HI_NERR,
&info->hi_nerr);
info->nsi_nerr = 0;
}
pci_read_config_word(dev, E752X_SYSBUS_NERR,
&info->sysbus_nerr);
pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
&info->dram_sec2_add);
pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
&info->dram_sec2_syndrome);
if (info->hi_nerr & 0x7f)
pci_write_config_byte(dev, E752X_HI_NERR,
info->hi_nerr);
if (info->nsi_nerr & NSI_ERR_MASK)
pci_write_config_dword(dev, I3100_NSI_NERR,
info->nsi_nerr);
if (info->sysbus_nerr)
pci_write_config_word(dev, E752X_SYSBUS_NERR,
info->sysbus_nerr);
if (info->buf_nerr & 0x0f)
pci_write_config_byte(dev, E752X_BUF_NERR,
info->buf_nerr);
if (info->dram_nerr)
pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_NERR,
info->dram_nerr, info->dram_nerr);
pci_write_config_dword(dev, E752X_NERR_GLOBAL,
info->nerr_global);
}
}
static int e752x_process_error_info(struct mem_ctl_info *mci,
struct e752x_error_info *info,
int handle_errors)
{
u32 error32, stat32;
int error_found;
error_found = 0;
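/*
* Bits 27:18 of the global registers are treated as fatal error sources
* and bits 14:4 as non-fatal ones; decode both FERR and NERR below.
*/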
error32 = (info->ferr_global >> 18) & 0x3ff;
stat32 = (info->ferr_global >> 4) & 0x7ff;
if (error32)
global_error(1, error32, &error_found, handle_errors);
if (stat32)
global_error(0, stat32, &error_found, handle_errors);
error32 = (info->nerr_global >> 18) & 0x3ff;
stat32 = (info->nerr_global >> 4) & 0x7ff;
if (error32)
global_error(1, error32, &error_found, handle_errors);
if (stat32)
global_error(0, stat32, &error_found, handle_errors);
e752x_check_hub_interface(info, &error_found, handle_errors);
e752x_check_ns_interface(info, &error_found, handle_errors);
e752x_check_sysbus(info, &error_found, handle_errors);
e752x_check_membuf(info, &error_found, handle_errors);
e752x_check_dram(mci, info, &error_found, handle_errors);
return error_found;
}
static void e752x_check(struct mem_ctl_info *mci)
{
struct e752x_error_info info;
e752x_get_error_info(mci, &info);
e752x_process_error_info(mci, &info, 1);
}
/* Program byte/sec bandwidth scrub rate to hardware */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
const struct scrubrate *scrubrates;
struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
struct pci_dev *pdev = pvt->dev_d0f0;
int i;
if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
scrubrates = scrubrates_i3100;
else
scrubrates = scrubrates_e752x;
/* Translate the desired scrub rate to a e752x/3100 register value.
* Search for the bandwidth that is equal or greater than the
* desired rate and program the corresponding register value.
*/
for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
if (scrubrates[i].bandwidth >= new_bw)
break;
if (scrubrates[i].bandwidth == SDRATE_EOT)
return -1;
pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
return scrubrates[i].bandwidth;
}
/* Convert current scrub rate value into byte/sec bandwidth */
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
const struct scrubrate *scrubrates;
struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
struct pci_dev *pdev = pvt->dev_d0f0;
u16 scrubval;
int i;
if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
scrubrates = scrubrates_i3100;
else
scrubrates = scrubrates_e752x;
/* Find the bandwidth matching the memory scrubber configuration */
pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
scrubval = scrubval & 0x0f;
for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
if (scrubrates[i].scrubval == scrubval)
break;
if (scrubrates[i].bandwidth == SDRATE_EOT) {
e752x_printk(KERN_WARNING,
"Invalid sdram scrub control value: 0x%x\n", scrubval);
return -1;
}
return scrubrates[i].bandwidth;
}
/* Return 1 if dual channel mode is active. Else return 0. */
static inline int dual_channel_active(u16 ddrcsr)
{
return (((ddrcsr >> 12) & 3) == 3);
}
/* Remap csrow index numbers if map_type is "reverse"
*/
static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
{
struct e752x_pvt *pvt = mci->pvt_info;
if (!pvt->map_type)
return (7 - index);
return (index);
}
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u16 ddrcsr)
{
struct csrow_info *csrow;
enum edac_type edac_mode;
unsigned long last_cumul_size;
int index, mem_dev, drc_chan;
int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u8 value;
u32 dra, drc, cumul_size, i, nr_pages;
dra = 0;
for (index = 0; index < 4; index++) {
u8 dra_reg;
pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
dra |= dra_reg << (index * 8);
}
pci_read_config_dword(pdev, E752X_DRC, &drc);
drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
drc_ddim = (drc >> 20) & 0x3;
/* The dram row boundary (DRB) reg values are boundary address for
* each DRAM row with a granularity of 64 or 128MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
* contain the total memory contained in all eight rows.
*/
for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
/* mem_dev 0=x8, 1=x4 */
mem_dev = (dra >> (index * 4 + 2)) & 0x3;
csrow = mci->csrows[remap_csrow_index(mci, index)];
mem_dev = (mem_dev == 2);
pci_read_config_byte(pdev, E752X_DRB + index, &value);
/* convert a 128 or 64 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
/*
* if single channel or x8 devices then SECDED
* if dual channel and x4 then S4ECD4ED
*/
if (drc_ddim) {
if (drc_chan && mem_dev) {
edac_mode = EDAC_S4ECD4ED;
mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
} else {
edac_mode = EDAC_SECDED;
mci->edac_cap |= EDAC_FLAG_SECDED;
}
} else
edac_mode = EDAC_NONE;
for (i = 0; i < csrow->nr_channels; i++) {
struct dimm_info *dimm = csrow->channels[i]->dimm;
edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
dimm->nr_pages = nr_pages / csrow->nr_channels;
dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
dimm->mtype = MEM_RDDR; /* only one type supported */
dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
dimm->edac_mode = edac_mode;
}
}
}
static void e752x_init_mem_map_table(struct pci_dev *pdev,
struct e752x_pvt *pvt)
{
int index;
u8 value, last, row;
last = 0;
row = 0;
for (index = 0; index < 8; index += 2) {
pci_read_config_byte(pdev, E752X_DRB + index, &value);
/* test if there is a dimm in this slot */
if (value == last) {
/* no dimm in the slot, so flag it as empty */
pvt->map[index] = 0xff;
pvt->map[index + 1] = 0xff;
} else { /* there is a dimm in the slot */
pvt->map[index] = row;
row++;
last = value;
/* test the next value to see if the dimm is double
* sided
*/
pci_read_config_byte(pdev, E752X_DRB + index + 1,
&value);
/* if the next DRB value matches, the dimm is single sided,
* so flag the second rank as empty; otherwise the dimm is
* double sided and this saves the next row number.
*/
pvt->map[index + 1] = (value == last) ? 0xff : row;
row++;
last = value;
}
}
}
/* Return 0 on success or 1 on failure. */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
struct e752x_pvt *pvt)
{
pvt->dev_d0f1 = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev, NULL);
if (pvt->dev_d0f1 == NULL) {
pvt->dev_d0f1 = pci_scan_single_device(pdev->bus,
PCI_DEVFN(0, 1));
pci_dev_get(pvt->dev_d0f1);
}
if (pvt->dev_d0f1 == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
"vendor %x device 0x%x (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
return 1;
}
pvt->dev_d0f0 = pci_get_device(PCI_VENDOR_ID_INTEL,
e752x_devs[dev_idx].ctl_dev,
NULL);
if (pvt->dev_d0f0 == NULL)
goto fail;
return 0;
fail:
pci_dev_put(pvt->dev_d0f1);
return 1;
}
/* Setup system bus parity mask register.
* Sysbus parity supported on:
* e7320/e7520/e7525 + Xeon
*/
static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
{
char *cpu_id = cpu_data(0).x86_model_id;
struct pci_dev *dev = pvt->dev_d0f1;
int enable = 1;
/* Allow module parameter override, else see if CPU supports parity */
if (sysbus_parity != -1) {
enable = sysbus_parity;
} else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
e752x_printk(KERN_INFO, "System Bus Parity not "
"supported by CPU, disabling\n");
enable = 0;
}
if (enable)
pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
else
pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
}
static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
{
struct pci_dev *dev;
dev = pvt->dev_d0f1;
/* Turn off error disable & SMI in case the BIOS turned it on */
if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
} else {
pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
}
e752x_init_sysbus_parity_mask(pvt);
pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
}
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
u16 pci_data;
u8 stat8;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct e752x_pvt *pvt;
u16 ddrcsr;
int drc_chan; /* Number of channels 0=1chan,1=2chan */
struct e752x_error_info discard;
edac_dbg(0, "mci\n");
edac_dbg(0, "Starting Probe1\n");
/* check to see if device 0 function 1 is enabled; if it isn't, we
* assume the BIOS has reserved it for a reason and is expecting
* exclusive access, we take care not to violate that assumption and
* fail the probe. */
pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
if (!force_function_unhide && !(stat8 & (1 << 5))) {
printk(KERN_INFO "Contact your BIOS vendor to see if the "
"E752x error registers can be safely un-hidden\n");
return -ENODEV;
}
stat8 |= (1 << 5);
pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
/* FIXME: should check >>12 or 0xf, true for all? */
/* Dual channel = 1, Single channel = 0 */
drc_chan = dual_channel_active(ddrcsr);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = E752X_NR_CSROWS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = drc_chan + 1;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR;
/* 3100 IMCH supports SECDED only */
mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->pdev = &pdev->dev;
edac_dbg(3, "init pvt\n");
pvt = (struct e752x_pvt *)mci->pvt_info;
pvt->dev_info = &e752x_devs[dev_idx];
pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
if (e752x_get_devs(pdev, dev_idx, pvt)) {
edac_mc_free(mci);
return -ENODEV;
}
edac_dbg(3, "more mci init\n");
mci->ctl_name = pvt->dev_info->ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = e752x_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
/* set the map type. 1 = normal, 0 = reversed
* Must be set before e752x_init_csrows in case csrow mapping
* is reversed.
*/
pci_read_config_byte(pdev, E752X_DRM, &stat8);
pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
e752x_init_csrows(mci, pdev, ddrcsr);
e752x_init_mem_map_table(pdev, pvt);
if (dev_idx == I3100)
mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
else
mci->edac_cap |= EDAC_FLAG_NONE;
edac_dbg(3, "tolm, remapbase, remaplimit\n");
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(pdev, E752X_TOLM, &pci_data);
pvt->tolm = ((u32) pci_data) << 4;
pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
pvt->remapbase = ((u32) pci_data) << 14;
pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
pvt->remaplimit = ((u32) pci_data) << 14;
e752x_printk(KERN_INFO,
"tolm = %x, remapbase = %x, remaplimit = %x\n",
pvt->tolm, pvt->remapbase, pvt->remaplimit);
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
e752x_init_error_reporting_regs(pvt);
e752x_get_error_info(mci, &discard); /* clear other MCH errors */
/* allocating generic PCI control info */
e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!e752x_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n", __func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
/* get this far and it's successful */
edac_dbg(3, "success\n");
return 0;
fail:
pci_dev_put(pvt->dev_d0f0);
pci_dev_put(pvt->dev_d0f1);
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
/* wake up and enable device */
if (pci_enable_device(pdev) < 0)
return -EIO;
return e752x_probe1(pdev, ent->driver_data);
}
static void e752x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct e752x_pvt *pvt;
edac_dbg(0, "\n");
if (e752x_pci)
edac_pci_release_generic_ctl(e752x_pci);
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
pvt = (struct e752x_pvt *)mci->pvt_info;
pci_dev_put(pvt->dev_d0f0);
pci_dev_put(pvt->dev_d0f1);
edac_mc_free(mci);
}
static const struct pci_device_id e752x_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7520},
{
PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7525},
{
PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7320},
{
PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I3100},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
static struct pci_driver e752x_driver = {
.name = EDAC_MOD_STR,
.probe = e752x_init_one,
.remove = e752x_remove_one,
.id_table = e752x_pci_tbl,
};
static int __init e752x_init(void)
{
int pci_rc;
edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&e752x_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
static void __exit e752x_exit(void)
{
edac_dbg(3, "\n");
pci_unregister_driver(&e752x_driver);
}
module_init(e752x_init);
module_exit(e752x_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
" 1=force unhide and hope BIOS doesn't fight driver for "
"Dev0:Fun1 access");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
module_param(sysbus_parity, int, 0444);
MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
" 1=enable system bus parity checking, default=auto-detect");
module_param(report_non_memory_errors, int, 0644);
MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
"reporting, 1=enable non-memory error reporting");
| linux-master | drivers/edac/e752x_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2018, Intel Corporation. All rights reserved
* Copyright Altera Corporation (C) 2014-2016. All rights reserved.
* Copyright 2011-2012 Calxeda, Inc.
*/
#include <asm/cacheflush.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/firmware/intel/stratix10-smc.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/mfd/altera-sysmgr.h>
#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "altera_edac.h"
#include "edac_module.h"
#define EDAC_MOD_STR "altera_edac"
#define EDAC_DEVICE "Altera"
#ifdef CONFIG_EDAC_ALTERA_SDRAM
static const struct altr_sdram_prv_data c5_data = {
.ecc_ctrl_offset = CV_CTLCFG_OFST,
.ecc_ctl_en_mask = CV_CTLCFG_ECC_AUTO_EN,
.ecc_stat_offset = CV_DRAMSTS_OFST,
.ecc_stat_ce_mask = CV_DRAMSTS_SBEERR,
.ecc_stat_ue_mask = CV_DRAMSTS_DBEERR,
.ecc_saddr_offset = CV_ERRADDR_OFST,
.ecc_daddr_offset = CV_ERRADDR_OFST,
.ecc_cecnt_offset = CV_SBECOUNT_OFST,
.ecc_uecnt_offset = CV_DBECOUNT_OFST,
.ecc_irq_en_offset = CV_DRAMINTR_OFST,
.ecc_irq_en_mask = CV_DRAMINTR_INTREN,
.ecc_irq_clr_offset = CV_DRAMINTR_OFST,
.ecc_irq_clr_mask = (CV_DRAMINTR_INTRCLR | CV_DRAMINTR_INTREN),
.ecc_cnt_rst_offset = CV_DRAMINTR_OFST,
.ecc_cnt_rst_mask = CV_DRAMINTR_INTRCLR,
.ce_ue_trgr_offset = CV_CTLCFG_OFST,
.ce_set_mask = CV_CTLCFG_GEN_SB_ERR,
.ue_set_mask = CV_CTLCFG_GEN_DB_ERR,
};
static const struct altr_sdram_prv_data a10_data = {
.ecc_ctrl_offset = A10_ECCCTRL1_OFST,
.ecc_ctl_en_mask = A10_ECCCTRL1_ECC_EN,
.ecc_stat_offset = A10_INTSTAT_OFST,
.ecc_stat_ce_mask = A10_INTSTAT_SBEERR,
.ecc_stat_ue_mask = A10_INTSTAT_DBEERR,
.ecc_saddr_offset = A10_SERRADDR_OFST,
.ecc_daddr_offset = A10_DERRADDR_OFST,
.ecc_irq_en_offset = A10_ERRINTEN_OFST,
.ecc_irq_en_mask = A10_ECC_IRQ_EN_MASK,
.ecc_irq_clr_offset = A10_INTSTAT_OFST,
.ecc_irq_clr_mask = (A10_INTSTAT_SBEERR | A10_INTSTAT_DBEERR),
.ecc_cnt_rst_offset = A10_ECCCTRL1_OFST,
.ecc_cnt_rst_mask = A10_ECC_CNT_RESET_MASK,
.ce_ue_trgr_offset = A10_DIAGINTTEST_OFST,
.ce_set_mask = A10_DIAGINT_TSERRA_MASK,
.ue_set_mask = A10_DIAGINT_TDERRA_MASK,
};
/*********************** EDAC Memory Controller Functions ****************/
/* The SDRAM controller uses the EDAC Memory Controller framework. */
static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
{
struct mem_ctl_info *mci = dev_id;
struct altr_sdram_mc_data *drvdata = mci->pvt_info;
const struct altr_sdram_prv_data *priv = drvdata->data;
u32 status, err_count = 1, err_addr;
regmap_read(drvdata->mc_vbase, priv->ecc_stat_offset, &status);
if (status & priv->ecc_stat_ue_mask) {
regmap_read(drvdata->mc_vbase, priv->ecc_daddr_offset,
&err_addr);
if (priv->ecc_uecnt_offset)
regmap_read(drvdata->mc_vbase, priv->ecc_uecnt_offset,
&err_count);
panic("\nEDAC: [%d Uncorrectable errors @ 0x%08X]\n",
err_count, err_addr);
}
if (status & priv->ecc_stat_ce_mask) {
regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
&err_addr);
if (priv->ecc_uecnt_offset)
regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
&err_count);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, 0,
0, 0, -1, mci->ctl_name, "");
/* Clear IRQ to resume */
regmap_write(drvdata->mc_vbase, priv->ecc_irq_clr_offset,
priv->ecc_irq_clr_mask);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct mem_ctl_info *mci = file->private_data;
struct altr_sdram_mc_data *drvdata = mci->pvt_info;
const struct altr_sdram_prv_data *priv = drvdata->data;
u32 *ptemp;
dma_addr_t dma_handle;
u32 reg, read_reg;
ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
if (!ptemp) {
edac_printk(KERN_ERR, EDAC_MC,
"Inject: Buffer Allocation error\n");
return -ENOMEM;
}
regmap_read(drvdata->mc_vbase, priv->ce_ue_trgr_offset,
&read_reg);
read_reg &= ~(priv->ce_set_mask | priv->ue_set_mask);
/* Errors are injected by writing a word while the SBE or DBE
* bit in the CTLCFG register is set. Reading the word will
* trigger the SBE or DBE error and the corresponding IRQ.
*/
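/*
* A 3-byte write to the debugfs node selects a double-bit injection;
* any other length selects a single-bit injection.
*/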
if (count == 3) {
edac_printk(KERN_ALERT, EDAC_MC,
"Inject Double bit error\n");
local_irq_disable();
regmap_write(drvdata->mc_vbase, priv->ce_ue_trgr_offset,
(read_reg | priv->ue_set_mask));
local_irq_enable();
} else {
edac_printk(KERN_ALERT, EDAC_MC,
"Inject Single bit error\n");
local_irq_disable();
regmap_write(drvdata->mc_vbase, priv->ce_ue_trgr_offset,
(read_reg | priv->ce_set_mask));
local_irq_enable();
}
ptemp[0] = 0x5A5A5A5A;
ptemp[1] = 0xA5A5A5A5;
/* Clear the error injection bits */
regmap_write(drvdata->mc_vbase, priv->ce_ue_trgr_offset, read_reg);
/* Ensure it has been written out */
wmb();
/*
* To trigger the error, we need to read the data back
* (the data was written with errors above).
* The READ_ONCE macros and printk are used to prevent the
* compiler from optimizing these reads out.
*/
reg = READ_ONCE(ptemp[0]);
read_reg = READ_ONCE(ptemp[1]);
/* Force Read */
rmb();
edac_printk(KERN_ALERT, EDAC_MC, "Read Data [0x%X, 0x%X]\n",
reg, read_reg);
dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
return count;
}
static const struct file_operations altr_sdr_mc_debug_inject_fops = {
.open = simple_open,
.write = altr_sdr_mc_err_inject_write,
.llseek = generic_file_llseek,
};
static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
{
if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
return;
if (!mci->debugfs)
return;
edac_debugfs_create_file("altr_trigger", S_IWUSR, mci->debugfs, mci,
&altr_sdr_mc_debug_inject_fops);
}
/* Get total memory size from Open Firmware DTB */
static unsigned long get_total_mem(void)
{
struct device_node *np = NULL;
struct resource res;
int ret;
unsigned long total_mem = 0;
for_each_node_by_type(np, "memory") {
ret = of_address_to_resource(np, 0, &res);
if (ret)
continue;
total_mem += resource_size(&res);
}
edac_dbg(0, "total_mem 0x%lx\n", total_mem);
return total_mem;
}
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
{ .compatible = "altr,sdram-edac", .data = &c5_data},
{ .compatible = "altr,sdram-edac-a10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
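/*
* Arria10-specific SDRAM setup: select single-bit interrupt mode and
* set the single-bit error trigger count to 1.
*/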
static int a10_init(struct regmap *mc_vbase)
{
if (regmap_update_bits(mc_vbase, A10_INTMODE_OFST,
A10_INTMODE_SB_INT, A10_INTMODE_SB_INT)) {
edac_printk(KERN_ERR, EDAC_MC,
"Error setting SB IRQ mode\n");
return -ENODEV;
}
if (regmap_write(mc_vbase, A10_SERRCNTREG_OFST, 1)) {
edac_printk(KERN_ERR, EDAC_MC,
"Error setting trigger count\n");
return -ENODEV;
}
return 0;
}
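/* Unmask the given bits via the system manager's interrupt-mask-clear register. */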
static int a10_unmask_irq(struct platform_device *pdev, u32 mask)
{
void __iomem *sm_base;
int ret = 0;
if (!request_mem_region(A10_SYMAN_INTMASK_CLR, sizeof(u32),
dev_name(&pdev->dev))) {
edac_printk(KERN_ERR, EDAC_MC,
"Unable to request mem region\n");
return -EBUSY;
}
sm_base = ioremap(A10_SYMAN_INTMASK_CLR, sizeof(u32));
if (!sm_base) {
edac_printk(KERN_ERR, EDAC_MC,
"Unable to ioremap device\n");
ret = -ENOMEM;
goto release;
}
iowrite32(mask, sm_base);
iounmap(sm_base);
release:
release_mem_region(A10_SYMAN_INTMASK_CLR, sizeof(u32));
return ret;
}
static int altr_sdram_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct altr_sdram_mc_data *drvdata;
const struct altr_sdram_prv_data *priv;
struct regmap *mc_vbase;
struct dimm_info *dimm;
u32 read_reg;
int irq, irq2, res = 0;
unsigned long mem_size, irqflags = 0;
id = of_match_device(altr_sdram_ctrl_of_match, &pdev->dev);
if (!id)
return -ENODEV;
/* Grab the register range from the sdr controller in device tree */
mc_vbase = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"altr,sdr-syscon");
if (IS_ERR(mc_vbase)) {
edac_printk(KERN_ERR, EDAC_MC,
"regmap for altr,sdr-syscon lookup failed.\n");
return -ENODEV;
}
/* Check specific dependencies for the module */
priv = of_match_node(altr_sdram_ctrl_of_match,
pdev->dev.of_node)->data;
/* Validate the SDRAM controller has ECC enabled */
if (regmap_read(mc_vbase, priv->ecc_ctrl_offset, &read_reg) ||
((read_reg & priv->ecc_ctl_en_mask) != priv->ecc_ctl_en_mask)) {
edac_printk(KERN_ERR, EDAC_MC,
"No ECC/ECC disabled [0x%08X]\n", read_reg);
return -ENODEV;
}
/* Grab memory size from device tree. */
mem_size = get_total_mem();
if (!mem_size) {
edac_printk(KERN_ERR, EDAC_MC, "Unable to calculate memory size\n");
return -ENODEV;
}
/* Ensure the SDRAM Interrupt is disabled */
if (regmap_update_bits(mc_vbase, priv->ecc_irq_en_offset,
priv->ecc_irq_en_mask, 0)) {
edac_printk(KERN_ERR, EDAC_MC,
"Error disabling SDRAM ECC IRQ\n");
return -ENODEV;
}
/* Toggle to clear the SDRAM Error count */
if (regmap_update_bits(mc_vbase, priv->ecc_cnt_rst_offset,
priv->ecc_cnt_rst_mask,
priv->ecc_cnt_rst_mask)) {
edac_printk(KERN_ERR, EDAC_MC,
"Error clearing SDRAM ECC count\n");
return -ENODEV;
}
if (regmap_update_bits(mc_vbase, priv->ecc_cnt_rst_offset,
priv->ecc_cnt_rst_mask, 0)) {
edac_printk(KERN_ERR, EDAC_MC,
"Error clearing SDRAM ECC count\n");
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
edac_printk(KERN_ERR, EDAC_MC,
"No irq %d in DT\n", irq);
return irq;
}
/* Arria10 has a 2nd IRQ */
irq2 = platform_get_irq(pdev, 1);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 1;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = 1;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(struct altr_sdram_mc_data));
if (!mci)
return -ENOMEM;
mci->pdev = &pdev->dev;
drvdata = mci->pvt_info;
drvdata->mc_vbase = mc_vbase;
drvdata->data = priv;
platform_set_drvdata(pdev, mci);
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
edac_printk(KERN_ERR, EDAC_MC,
"Unable to get managed device resource\n");
res = -ENOMEM;
goto free;
}
mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_SW_SRC;
mci->dev_name = dev_name(&pdev->dev);
dimm = *mci->dimms;
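/* Round the memory size up to a whole number of pages. */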
dimm->nr_pages = ((mem_size - 1) >> PAGE_SHIFT) + 1;
dimm->grain = 8;
dimm->dtype = DEV_X8;
dimm->mtype = MEM_DDR3;
dimm->edac_mode = EDAC_SECDED;
res = edac_mc_add_mc(mci);
if (res < 0)
goto err;
/* Only the Arria10 has separate IRQs */
if (of_machine_is_compatible("altr,socfpga-arria10")) {
/* Arria10 specific initialization */
res = a10_init(mc_vbase);
if (res < 0)
goto err2;
res = devm_request_irq(&pdev->dev, irq2,
altr_sdram_mc_err_handler,
IRQF_SHARED, dev_name(&pdev->dev), mci);
if (res < 0) {
edac_mc_printk(mci, KERN_ERR,
"Unable to request irq %d\n", irq2);
res = -ENODEV;
goto err2;
}
res = a10_unmask_irq(pdev, A10_DDR0_IRQ_MASK);
if (res < 0)
goto err2;
irqflags = IRQF_SHARED;
}
res = devm_request_irq(&pdev->dev, irq, altr_sdram_mc_err_handler,
irqflags, dev_name(&pdev->dev), mci);
if (res < 0) {
edac_mc_printk(mci, KERN_ERR,
"Unable to request irq %d\n", irq);
res = -ENODEV;
goto err2;
}
/* Infrastructure ready - enable the IRQ */
if (regmap_update_bits(drvdata->mc_vbase, priv->ecc_irq_en_offset,
priv->ecc_irq_en_mask, priv->ecc_irq_en_mask)) {
edac_mc_printk(mci, KERN_ERR,
"Error enabling SDRAM ECC IRQ\n");
res = -ENODEV;
goto err2;
}
altr_sdr_mc_create_debugfs_nodes(mci);
devres_close_group(&pdev->dev, NULL);
return 0;
err2:
edac_mc_del_mc(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
free:
edac_mc_free(mci);
edac_printk(KERN_ERR, EDAC_MC,
"EDAC Probe Failed; Error %d\n", res);
return res;
}
static int altr_sdram_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
platform_set_drvdata(pdev, NULL);
return 0;
}
/*
* If you want to suspend, you need to disable EDAC by removing it
* from the device tree or the defconfig.
*/
#ifdef CONFIG_PM
static int altr_sdram_prepare(struct device *dev)
{
pr_err("Suspend not allowed when EDAC is enabled.\n");
return -EPERM;
}
static const struct dev_pm_ops altr_sdram_pm_ops = {
.prepare = altr_sdram_prepare,
};
#endif
static struct platform_driver altr_sdram_edac_driver = {
.probe = altr_sdram_probe,
.remove = altr_sdram_remove,
.driver = {
.name = "altr_sdram_edac",
#ifdef CONFIG_PM
.pm = &altr_sdram_pm_ops,
#endif
.of_match_table = altr_sdram_ctrl_of_match,
},
};
module_platform_driver(altr_sdram_edac_driver);
#endif /* CONFIG_EDAC_ALTERA_SDRAM */
/************************* EDAC Parent Probe *************************/
static const struct of_device_id altr_edac_device_of_match[];
static const struct of_device_id altr_edac_of_match[] = {
{ .compatible = "altr,socfpga-ecc-manager" },
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_of_match);
static int altr_edac_probe(struct platform_device *pdev)
{
of_platform_populate(pdev->dev.of_node, altr_edac_device_of_match,
NULL, &pdev->dev);
return 0;
}
static struct platform_driver altr_edac_driver = {
.probe = altr_edac_probe,
.driver = {
.name = "socfpga_ecc_manager",
.of_match_table = altr_edac_of_match,
},
};
module_platform_driver(altr_edac_driver);
/************************* EDAC Device Functions *************************/
/*
* EDAC Device Functions (shared between various IPs).
* The discrete memories use the EDAC Device framework. The probe
* and error handling functions are very similar between memories
* so they are shared. The memory allocation and freeing for EDAC
* trigger testing are different for each memory.
*/
#ifdef CONFIG_EDAC_ALTERA_OCRAM
static const struct edac_device_prv_data ocramecc_data;
#endif
#ifdef CONFIG_EDAC_ALTERA_L2C
static const struct edac_device_prv_data l2ecc_data;
#endif
#ifdef CONFIG_EDAC_ALTERA_OCRAM
static const struct edac_device_prv_data a10_ocramecc_data;
#endif
#ifdef CONFIG_EDAC_ALTERA_L2C
static const struct edac_device_prv_data a10_l2ecc_data;
#endif
static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
{
irqreturn_t ret_value = IRQ_NONE;
struct edac_device_ctl_info *dci = dev_id;
struct altr_edac_device_dev *drvdata = dci->pvt_info;
const struct edac_device_prv_data *priv = drvdata->data;
if (irq == drvdata->sb_irq) {
if (priv->ce_clear_mask)
writel(priv->ce_clear_mask, drvdata->base);
edac_device_handle_ce(dci, 0, 0, drvdata->edac_dev_name);
ret_value = IRQ_HANDLED;
} else if (irq == drvdata->db_irq) {
if (priv->ue_clear_mask)
writel(priv->ue_clear_mask, drvdata->base);
edac_device_handle_ue(dci, 0, 0, drvdata->edac_dev_name);
panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
ret_value = IRQ_HANDLED;
} else {
WARN_ON(1);
}
return ret_value;
}
static ssize_t __maybe_unused
altr_edac_device_trig(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
u32 *ptemp, i, error_mask;
int result = 0;
u8 trig_type;
unsigned long flags;
struct edac_device_ctl_info *edac_dci = file->private_data;
struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
const struct edac_device_prv_data *priv = drvdata->data;
void *generic_ptr = edac_dci->dev;
if (!user_buf || get_user(trig_type, user_buf))
return -EFAULT;
if (!priv->alloc_mem)
return -ENOMEM;
/*
 * Note that generic_ptr is initialized to the device pointer, but in
 * some alloc functions this is overridden to return allocator data.
*/
ptemp = priv->alloc_mem(priv->trig_alloc_sz, &generic_ptr);
if (!ptemp) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Inject: Buffer Allocation error\n");
return -ENOMEM;
}
if (trig_type == ALTR_UE_TRIGGER_CHAR)
error_mask = priv->ue_set_mask;
else
error_mask = priv->ce_set_mask;
edac_printk(KERN_ALERT, EDAC_DEVICE,
"Trigger Error Mask (0x%X)\n", error_mask);
local_irq_save(flags);
/* write ECC corrupted data out. */
for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) {
/* Read data so we're in the correct state */
rmb();
if (READ_ONCE(ptemp[i]))
result = -1;
/* Toggle Error bit (it is latched), leave ECC enabled */
writel(error_mask, (drvdata->base + priv->set_err_ofst));
writel(priv->ecc_enable_mask, (drvdata->base +
priv->set_err_ofst));
ptemp[i] = i;
}
/* Ensure it has been written out */
wmb();
local_irq_restore(flags);
if (result)
edac_printk(KERN_ERR, EDAC_DEVICE, "Mem Not Cleared\n");
/* Read out written data. ECC error caused here */
for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++)
if (READ_ONCE(ptemp[i]) != i)
edac_printk(KERN_ERR, EDAC_DEVICE,
"Read doesn't match written data\n");
if (priv->free_mem)
priv->free_mem(ptemp, priv->trig_alloc_sz, generic_ptr);
return count;
}
static const struct file_operations altr_edac_device_inject_fops __maybe_unused = {
.open = simple_open,
.write = altr_edac_device_trig,
.llseek = generic_file_llseek,
};
static ssize_t __maybe_unused
altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos);
static const struct file_operations altr_edac_a10_device_inject_fops __maybe_unused = {
.open = simple_open,
.write = altr_edac_a10_device_trig,
.llseek = generic_file_llseek,
};
static ssize_t __maybe_unused
altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos);
static const struct file_operations altr_edac_a10_device_inject2_fops __maybe_unused = {
.open = simple_open,
.write = altr_edac_a10_device_trig2,
.llseek = generic_file_llseek,
};
static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
const struct edac_device_prv_data *priv)
{
struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
return;
drvdata->debugfs_dir = edac_debugfs_create_dir(drvdata->edac_dev_name);
if (!drvdata->debugfs_dir)
return;
if (!edac_debugfs_create_file("altr_trigger", S_IWUSR,
drvdata->debugfs_dir, edac_dci,
priv->inject_fops))
debugfs_remove_recursive(drvdata->debugfs_dir);
}
static const struct of_device_id altr_edac_device_of_match[] = {
#ifdef CONFIG_EDAC_ALTERA_L2C
{ .compatible = "altr,socfpga-l2-ecc", .data = &l2ecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_OCRAM
{ .compatible = "altr,socfpga-ocram-ecc", .data = &ocramecc_data },
#endif
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_device_of_match);
/*
* altr_edac_device_probe()
* This is a generic EDAC device driver that will support
* various Altera memory devices such as the L2 cache ECC and
* OCRAM ECC as well as the memories for other peripherals.
* Module specific initialization is done by passing the
* function index in the device tree.
*/
static int altr_edac_device_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci;
struct altr_edac_device_dev *drvdata;
struct resource *r;
int res = 0;
struct device_node *np = pdev->dev.of_node;
char *ecc_name = (char *)np->name;
static int dev_instance;
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to open devm\n");
return -ENOMEM;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to get mem resource\n");
res = -ENODEV;
goto fail;
}
if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
dev_name(&pdev->dev))) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s:Error requesting mem region\n", ecc_name);
res = -EBUSY;
goto fail;
}
dci = edac_device_alloc_ctl_info(sizeof(*drvdata), ecc_name,
1, ecc_name, 1, 0, NULL, 0,
dev_instance++);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: Unable to allocate EDAC device\n", ecc_name);
res = -ENOMEM;
goto fail;
}
drvdata = dci->pvt_info;
dci->dev = &pdev->dev;
platform_set_drvdata(pdev, dci);
drvdata->edac_dev_name = ecc_name;
drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!drvdata->base) {
res = -ENOMEM;
goto fail1;
}
/* Get driver specific data for this EDAC device */
drvdata->data = of_match_node(altr_edac_device_of_match, np)->data;
/* Check specific dependencies for the module */
if (drvdata->data->setup) {
res = drvdata->data->setup(drvdata);
if (res)
goto fail1;
}
drvdata->sb_irq = platform_get_irq(pdev, 0);
res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
altr_edac_device_handler,
0, dev_name(&pdev->dev), dci);
if (res)
goto fail1;
drvdata->db_irq = platform_get_irq(pdev, 1);
res = devm_request_irq(&pdev->dev, drvdata->db_irq,
altr_edac_device_handler,
0, dev_name(&pdev->dev), dci);
if (res)
goto fail1;
dci->mod_name = "Altera ECC Manager";
dci->dev_name = drvdata->edac_dev_name;
res = edac_device_add_device(dci);
if (res)
goto fail1;
altr_create_edacdev_dbgfs(dci, drvdata->data);
devres_close_group(&pdev->dev, NULL);
return 0;
fail1:
edac_device_free_ctl_info(dci);
fail:
devres_release_group(&pdev->dev, NULL);
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s:Error setting up EDAC device: %d\n", ecc_name, res);
return res;
}
static int altr_edac_device_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
struct altr_edac_device_dev *drvdata = dci->pvt_info;
debugfs_remove_recursive(drvdata->debugfs_dir);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
return 0;
}
static struct platform_driver altr_edac_device_driver = {
.probe = altr_edac_device_probe,
.remove = altr_edac_device_remove,
.driver = {
.name = "altr_edac_device",
.of_match_table = altr_edac_device_of_match,
},
};
module_platform_driver(altr_edac_device_driver);
/******************* Arria10 Device ECC Shared Functions *****************/
/*
* Test for memory's ECC dependencies upon entry because platform specific
* startup should have initialized the memory and enabled the ECC.
* Can't turn on ECC here because accessing un-initialized memory will
* cause CE/UE errors possibly causing an ABORT.
*/
static int __maybe_unused
altr_check_ecc_deps(struct altr_edac_device_dev *device)
{
void __iomem *base = device->base;
const struct edac_device_prv_data *prv = device->data;
if (readl(base + prv->ecc_en_ofst) & prv->ecc_enable_mask)
return 0;
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: No ECC present or ECC disabled.\n",
device->edac_dev_name);
return -ENODEV;
}
static irqreturn_t __maybe_unused altr_edac_a10_ecc_irq(int irq, void *dev_id)
{
struct altr_edac_device_dev *dci = dev_id;
void __iomem *base = dci->base;
if (irq == dci->sb_irq) {
writel(ALTR_A10_ECC_SERRPENA,
base + ALTR_A10_ECC_INTSTAT_OFST);
edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
return IRQ_HANDLED;
} else if (irq == dci->db_irq) {
writel(ALTR_A10_ECC_DERRPENA,
base + ALTR_A10_ECC_INTSTAT_OFST);
edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
if (dci->data->panic)
panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
return IRQ_HANDLED;
}
WARN_ON(1);
return IRQ_NONE;
}
/******************* Arria10 Memory Buffer Functions *********************/
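/*
 * a10_get_irq_mask() below returns the first cell of a child node's
 * "interrupts" property; callers use it as a bit index (BIT(irq)) into
 * the ECC manager's interrupt mask registers. Illustrative device-tree
 * fragment (values are assumptions, not taken from a real board file):
 *
 *	some-ecc-block {
 *		compatible = "altr,socfpga-eth-mac-ecc";
 *		interrupts = <4 1>;	// a10_get_irq_mask() returns 4
 *	};
 */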
static inline int a10_get_irq_mask(struct device_node *np)
{
int irq;
const u32 *handle = of_get_property(np, "interrupts", NULL);
if (!handle)
return -ENODEV;
irq = be32_to_cpup(handle);
return irq;
}
static inline void ecc_set_bits(u32 bit_mask, void __iomem *ioaddr)
{
u32 value = readl(ioaddr);
value |= bit_mask;
writel(value, ioaddr);
}
static inline void ecc_clear_bits(u32 bit_mask, void __iomem *ioaddr)
{
u32 value = readl(ioaddr);
value &= ~bit_mask;
writel(value, ioaddr);
}
static inline int ecc_test_bits(u32 bit_mask, void __iomem *ioaddr)
{
u32 value = readl(ioaddr);
return (value & bit_mask) ? 1 : 0;
}
/*
* This function uses the memory initialization block in the Arria10 ECC
* controller to initialize/clear the entire memory data and ECC data.
*/
static int __maybe_unused altr_init_memory_port(void __iomem *ioaddr, int port)
{
int limit = ALTR_A10_ECC_INIT_WATCHDOG_10US;
u32 init_mask, stat_mask, clear_mask;
int ret = 0;
if (port) {
init_mask = ALTR_A10_ECC_INITB;
stat_mask = ALTR_A10_ECC_INITCOMPLETEB;
clear_mask = ALTR_A10_ECC_ERRPENB_MASK;
} else {
init_mask = ALTR_A10_ECC_INITA;
stat_mask = ALTR_A10_ECC_INITCOMPLETEA;
clear_mask = ALTR_A10_ECC_ERRPENA_MASK;
}
ecc_set_bits(init_mask, (ioaddr + ALTR_A10_ECC_CTRL_OFST));
while (limit--) {
if (ecc_test_bits(stat_mask,
(ioaddr + ALTR_A10_ECC_INITSTAT_OFST)))
break;
udelay(1);
}
if (limit < 0)
ret = -EBUSY;
/* Clear any pending ECC interrupts */
writel(clear_mask, (ioaddr + ALTR_A10_ECC_INTSTAT_OFST));
return ret;
}
static __init int __maybe_unused
altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
u32 ecc_ctrl_en_mask, bool dual_port)
{
int ret = 0;
void __iomem *ecc_block_base;
struct regmap *ecc_mgr_map;
char *ecc_name;
struct device_node *np_eccmgr;
ecc_name = (char *)np->name;
/* Get the ECC Manager - parent of the device EDACs */
np_eccmgr = of_get_parent(np);
ecc_mgr_map =
altr_sysmgr_regmap_lookup_by_phandle(np_eccmgr,
"altr,sysmgr-syscon");
of_node_put(np_eccmgr);
if (IS_ERR(ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to get syscon altr,sysmgr-syscon\n");
return -ENODEV;
}
/* Map the ECC Block */
ecc_block_base = of_iomap(np, 0);
if (!ecc_block_base) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to map %s ECC block\n", ecc_name);
return -ENODEV;
}
/* Disable ECC */
regmap_write(ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, irq_mask);
writel(ALTR_A10_ECC_SERRINTEN,
(ecc_block_base + ALTR_A10_ECC_ERRINTENR_OFST));
ecc_clear_bits(ecc_ctrl_en_mask,
(ecc_block_base + ALTR_A10_ECC_CTRL_OFST));
/* Ensure all writes complete */
wmb();
/* Use HW initialization block to initialize memory for ECC */
ret = altr_init_memory_port(ecc_block_base, 0);
if (ret) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"ECC: cannot init %s PORTA memory\n", ecc_name);
goto out;
}
if (dual_port) {
ret = altr_init_memory_port(ecc_block_base, 1);
if (ret) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"ECC: cannot init %s PORTB memory\n",
ecc_name);
goto out;
}
}
/* Interrupt mode set to every SBERR */
regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
ALTR_A10_ECC_INTMODE);
/* Enable ECC */
ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
ALTR_A10_ECC_CTRL_OFST));
writel(ALTR_A10_ECC_SERRINTEN,
(ecc_block_base + ALTR_A10_ECC_ERRINTENS_OFST));
regmap_write(ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_CLR_OFST, irq_mask);
/* Ensure all writes complete */
wmb();
out:
iounmap(ecc_block_base);
return ret;
}
static int validate_parent_available(struct device_node *np);
static const struct of_device_id altr_edac_a10_device_of_match[];
static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
{
int irq;
struct device_node *child, *np;
np = of_find_compatible_node(NULL, NULL,
"altr,socfpga-a10-ecc-manager");
if (!np) {
edac_printk(KERN_ERR, EDAC_DEVICE, "ECC Manager not found\n");
return -ENODEV;
}
for_each_child_of_node(np, child) {
const struct of_device_id *pdev_id;
const struct edac_device_prv_data *prv;
if (!of_device_is_available(child))
continue;
if (!of_device_is_compatible(child, compat))
continue;
if (validate_parent_available(child))
continue;
irq = a10_get_irq_mask(child);
if (irq < 0)
continue;
/* Get matching node and check for valid result */
pdev_id = of_match_node(altr_edac_a10_device_of_match, child);
if (IS_ERR_OR_NULL(pdev_id))
continue;
/* Validate private data pointer before dereferencing */
prv = pdev_id->data;
if (!prv)
continue;
altr_init_a10_ecc_block(child, BIT(irq),
prv->ecc_enable_mask, 0);
}
of_node_put(np);
return 0;
}
/*********************** SDRAM EDAC Device Functions *********************/
#ifdef CONFIG_EDAC_ALTERA_SDRAM
/*
* A legacy U-Boot bug only enabled memory mapped access to the ECC Enable
* register if ECC is enabled. Linux checks the ECC Enable register to
* determine ECC status.
* Use an SMC call (which always works) to determine ECC enablement.
*/
static int altr_s10_sdram_check_ecc_deps(struct altr_edac_device_dev *device)
{
const struct edac_device_prv_data *prv = device->data;
unsigned long sdram_ecc_addr;
struct arm_smccc_res result;
struct device_node *np;
phys_addr_t sdram_addr;
u32 read_reg;
int ret;
np = of_find_compatible_node(NULL, NULL, "altr,sdr-ctl");
if (!np)
goto sdram_err;
sdram_addr = of_translate_address(np, of_get_address(np, 0,
NULL, NULL));
of_node_put(np);
sdram_ecc_addr = (unsigned long)sdram_addr + prv->ecc_en_ofst;
arm_smccc_smc(INTEL_SIP_SMC_REG_READ, sdram_ecc_addr,
0, 0, 0, 0, 0, 0, &result);
read_reg = (unsigned int)result.a1;
ret = (int)result.a0;
if (!ret && (read_reg & prv->ecc_enable_mask))
return 0;
sdram_err:
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: No ECC present or ECC disabled.\n",
device->edac_dev_name);
return -ENODEV;
}
static const struct edac_device_prv_data s10_sdramecc_data = {
.setup = altr_s10_sdram_check_ecc_deps,
.ce_clear_mask = ALTR_S10_ECC_SERRPENA,
.ue_clear_mask = ALTR_S10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_S10_ECC_EN,
.ecc_en_ofst = ALTR_S10_ECC_CTRL_SDRAM_OFST,
.ce_set_mask = ALTR_S10_ECC_TSERRA,
.ue_set_mask = ALTR_S10_ECC_TDERRA,
.set_err_ofst = ALTR_S10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_SDRAM */
/*********************** OCRAM EDAC Device Functions *********************/
#ifdef CONFIG_EDAC_ALTERA_OCRAM
static void *ocram_alloc_mem(size_t size, void **other)
{
struct device_node *np;
struct gen_pool *gp;
void *sram_addr;
np = of_find_compatible_node(NULL, NULL, "altr,socfpga-ocram-ecc");
if (!np)
return NULL;
gp = of_gen_pool_get(np, "iram", 0);
of_node_put(np);
if (!gp)
return NULL;
sram_addr = (void *)gen_pool_alloc(gp, size);
if (!sram_addr)
return NULL;
memset(sram_addr, 0, size);
/* Ensure data is written out */
wmb();
/* Remember this handle for freeing later */
*other = gp;
return sram_addr;
}
static void ocram_free_mem(void *p, size_t size, void *other)
{
gen_pool_free((struct gen_pool *)other, (unsigned long)p, size);
}
static const struct edac_device_prv_data ocramecc_data = {
.setup = altr_check_ecc_deps,
.ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
.ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
.alloc_mem = ocram_alloc_mem,
.free_mem = ocram_free_mem,
.ecc_enable_mask = ALTR_OCR_ECC_EN,
.ecc_en_ofst = ALTR_OCR_ECC_REG_OFFSET,
.ce_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJS),
.ue_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJD),
.set_err_ofst = ALTR_OCR_ECC_REG_OFFSET,
.trig_alloc_sz = ALTR_TRIG_OCRAM_BYTE_SIZE,
.inject_fops = &altr_edac_device_inject_fops,
};
static int __maybe_unused
altr_check_ocram_deps_init(struct altr_edac_device_dev *device)
{
void __iomem *base = device->base;
int ret;
ret = altr_check_ecc_deps(device);
if (ret)
return ret;
/* Verify OCRAM has been initialized */
if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
(base + ALTR_A10_ECC_INITSTAT_OFST)))
return -ENODEV;
/* Enable IRQ on Single Bit Error */
writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST));
/* Ensure all writes complete */
wmb();
return 0;
}
static const struct edac_device_prv_data a10_ocramecc_data = {
.setup = altr_check_ocram_deps_init,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.irq_status_mask = A10_SYSMGR_ECC_INTSTAT_OCRAM,
.ecc_enable_mask = ALTR_A10_OCRAM_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRA,
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject2_fops,
/*
* OCRAM panic on uncorrectable error because sleep/resume
* functions and FPGA contents are stored in OCRAM. Prefer
* a kernel panic over executing/loading corrupted data.
*/
.panic = true,
};
#endif /* CONFIG_EDAC_ALTERA_OCRAM */
/********************* L2 Cache EDAC Device Functions ********************/
#ifdef CONFIG_EDAC_ALTERA_L2C
static void *l2_alloc_mem(size_t size, void **other)
{
struct device *dev = *other;
void *ptemp = devm_kzalloc(dev, size, GFP_KERNEL);
if (!ptemp)
return NULL;
/* Make sure everything is written out */
wmb();
/*
* Clean all cache levels up to LoC (includes L2)
* This ensures the corrupted data is written into
* L2 cache for readback test (which causes ECC error).
*/
flush_cache_all();
return ptemp;
}
static void l2_free_mem(void *p, size_t size, void *other)
{
struct device *dev = other;
if (dev && p)
devm_kfree(dev, p);
}
/*
* altr_l2_check_deps()
* Test for L2 cache ECC dependencies upon entry because
* platform specific startup should have initialized the L2
* memory and enabled the ECC.
* Bail if ECC is not enabled.
* Note that L2 Cache Enable is forced at build time.
*/
static int altr_l2_check_deps(struct altr_edac_device_dev *device)
{
void __iomem *base = device->base;
const struct edac_device_prv_data *prv = device->data;
if ((readl(base) & prv->ecc_enable_mask) ==
prv->ecc_enable_mask)
return 0;
edac_printk(KERN_ERR, EDAC_DEVICE,
"L2: No ECC present, or ECC disabled\n");
return -ENODEV;
}
static irqreturn_t altr_edac_a10_l2_irq(int irq, void *dev_id)
{
struct altr_edac_device_dev *dci = dev_id;
if (irq == dci->sb_irq) {
regmap_write(dci->edac->ecc_mgr_map,
A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
A10_SYSGMR_MPU_CLEAR_L2_ECC_SB);
edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
return IRQ_HANDLED;
} else if (irq == dci->db_irq) {
regmap_write(dci->edac->ecc_mgr_map,
A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
A10_SYSGMR_MPU_CLEAR_L2_ECC_MB);
edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
return IRQ_HANDLED;
}
WARN_ON(1);
return IRQ_NONE;
}
static const struct edac_device_prv_data l2ecc_data = {
.setup = altr_l2_check_deps,
.ce_clear_mask = 0,
.ue_clear_mask = 0,
.alloc_mem = l2_alloc_mem,
.free_mem = l2_free_mem,
.ecc_enable_mask = ALTR_L2_ECC_EN,
.ce_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJS),
.ue_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJD),
.set_err_ofst = ALTR_L2_ECC_REG_OFFSET,
.trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
.inject_fops = &altr_edac_device_inject_fops,
};
static const struct edac_device_prv_data a10_l2ecc_data = {
.setup = altr_l2_check_deps,
.ce_clear_mask = ALTR_A10_L2_ECC_SERR_CLR,
.ue_clear_mask = ALTR_A10_L2_ECC_MERR_CLR,
.irq_status_mask = A10_SYSMGR_ECC_INTSTAT_L2,
.alloc_mem = l2_alloc_mem,
.free_mem = l2_free_mem,
.ecc_enable_mask = ALTR_A10_L2_ECC_EN_CTL,
.ce_set_mask = ALTR_A10_L2_ECC_CE_INJ_MASK,
.ue_set_mask = ALTR_A10_L2_ECC_UE_INJ_MASK,
.set_err_ofst = ALTR_A10_L2_ECC_INJ_OFST,
.ecc_irq_handler = altr_edac_a10_l2_irq,
.trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
.inject_fops = &altr_edac_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_L2C */
/********************* Ethernet Device Functions ********************/
#ifdef CONFIG_EDAC_ALTERA_ETHERNET
static int __init socfpga_init_ethernet_ecc(struct altr_edac_device_dev *dev)
{
int ret;
ret = altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc");
if (ret)
return ret;
return altr_check_ecc_deps(dev);
}
static const struct edac_device_prv_data a10_enetecc_data = {
.setup = socfpga_init_ethernet_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRA,
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject2_fops,
};
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
/********************** NAND Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_NAND
static int __init socfpga_init_nand_ecc(struct altr_edac_device_dev *device)
{
int ret;
ret = altr_init_a10_ecc_device_type("altr,socfpga-nand-ecc");
if (ret)
return ret;
return altr_check_ecc_deps(device);
}
static const struct edac_device_prv_data a10_nandecc_data = {
.setup = socfpga_init_nand_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRA,
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_NAND */
/********************** DMA Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_DMA
static int __init socfpga_init_dma_ecc(struct altr_edac_device_dev *device)
{
int ret;
ret = altr_init_a10_ecc_device_type("altr,socfpga-dma-ecc");
if (ret)
return ret;
return altr_check_ecc_deps(device);
}
static const struct edac_device_prv_data a10_dmaecc_data = {
.setup = socfpga_init_dma_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRA,
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_DMA */
/********************** USB Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_USB
static int __init socfpga_init_usb_ecc(struct altr_edac_device_dev *device)
{
int ret;
ret = altr_init_a10_ecc_device_type("altr,socfpga-usb-ecc");
if (ret)
return ret;
return altr_check_ecc_deps(device);
}
static const struct edac_device_prv_data a10_usbecc_data = {
.setup = socfpga_init_usb_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRA,
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject2_fops,
};
#endif /* CONFIG_EDAC_ALTERA_USB */
/********************** QSPI Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_QSPI
static int __init socfpga_init_qspi_ecc(struct altr_edac_device_dev *device)
{
int ret;
ret = altr_init_a10_ecc_device_type("altr,socfpga-qspi-ecc");
if (ret)
return ret;
return altr_check_ecc_deps(device);
}
static const struct edac_device_prv_data a10_qspiecc_data = {
.setup = socfpga_init_qspi_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRA,
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_QSPI */
/********************* SDMMC Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_SDMMC
static const struct edac_device_prv_data a10_sdmmceccb_data;
static int altr_portb_setup(struct altr_edac_device_dev *device)
{
struct edac_device_ctl_info *dci;
struct altr_edac_device_dev *altdev;
char *ecc_name = "sdmmcb-ecc";
int edac_idx, rc;
struct device_node *np;
const struct edac_device_prv_data *prv = &a10_sdmmceccb_data;
rc = altr_check_ecc_deps(device);
if (rc)
return rc;
np = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
if (!np) {
edac_printk(KERN_WARNING, EDAC_DEVICE, "SDMMC node not found\n");
return -ENODEV;
}
/* Create the PortB EDAC device */
edac_idx = edac_device_alloc_index();
dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name, 1,
ecc_name, 1, 0, NULL, 0, edac_idx);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: Unable to allocate PortB EDAC device\n",
ecc_name);
return -ENOMEM;
}
/* Initialize the PortB EDAC device structure from PortA structure */
altdev = dci->pvt_info;
*altdev = *device;
if (!devres_open_group(&altdev->ddev, altr_portb_setup, GFP_KERNEL))
return -ENOMEM;
/* Update PortB specific values */
altdev->edac_dev_name = ecc_name;
altdev->edac_idx = edac_idx;
altdev->edac_dev = dci;
altdev->data = prv;
dci->dev = &altdev->ddev;
dci->ctl_name = "Altera ECC Manager";
dci->mod_name = ecc_name;
dci->dev_name = ecc_name;
/*
* Update the PortB IRQs - A10 has 4, S10 has 2, Index accordingly
*
* FIXME: Instead of ifdefs with different architectures the driver
* should properly use compatibles.
*/
#ifdef CONFIG_64BIT
altdev->sb_irq = irq_of_parse_and_map(np, 1);
#else
altdev->sb_irq = irq_of_parse_and_map(np, 2);
#endif
if (!altdev->sb_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error PortB SBIRQ alloc\n");
rc = -ENODEV;
goto err_release_group_1;
}
rc = devm_request_irq(&altdev->ddev, altdev->sb_irq,
prv->ecc_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
ecc_name, altdev);
if (rc) {
edac_printk(KERN_ERR, EDAC_DEVICE, "PortB SBERR IRQ error\n");
goto err_release_group_1;
}
#ifdef CONFIG_64BIT
/* Use IRQ to determine SError origin instead of assigning IRQ */
rc = of_property_read_u32_index(np, "interrupts", 1, &altdev->db_irq);
if (rc) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Error PortB DBIRQ alloc\n");
goto err_release_group_1;
}
#else
altdev->db_irq = irq_of_parse_and_map(np, 3);
if (!altdev->db_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error PortB DBIRQ alloc\n");
rc = -ENODEV;
goto err_release_group_1;
}
rc = devm_request_irq(&altdev->ddev, altdev->db_irq,
prv->ecc_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
ecc_name, altdev);
if (rc) {
edac_printk(KERN_ERR, EDAC_DEVICE, "PortB DBERR IRQ error\n");
goto err_release_group_1;
}
#endif
rc = edac_device_add_device(dci);
if (rc) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"edac_device_add_device portB failed\n");
rc = -ENOMEM;
goto err_release_group_1;
}
altr_create_edacdev_dbgfs(dci, prv);
list_add(&altdev->next, &altdev->edac->a10_ecc_devices);
devres_remove_group(&altdev->ddev, altr_portb_setup);
return 0;
err_release_group_1:
edac_device_free_ctl_info(dci);
devres_release_group(&altdev->ddev, altr_portb_setup);
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s:Error setting up EDAC device: %d\n", ecc_name, rc);
return rc;
}
static int __init socfpga_init_sdmmc_ecc(struct altr_edac_device_dev *device)
{
int rc = -ENODEV;
struct device_node *child;
child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
if (!child)
return -ENODEV;
if (!of_device_is_available(child))
goto exit;
if (validate_parent_available(child))
goto exit;
/* Init portB */
rc = altr_init_a10_ecc_block(child, ALTR_A10_SDMMC_IRQ_MASK,
a10_sdmmceccb_data.ecc_enable_mask, 1);
if (rc)
goto exit;
/* Setup portB */
return altr_portb_setup(device);
exit:
of_node_put(child);
return rc;
}
static irqreturn_t altr_edac_a10_ecc_irq_portb(int irq, void *dev_id)
{
struct altr_edac_device_dev *ad = dev_id;
void __iomem *base = ad->base;
const struct edac_device_prv_data *priv = ad->data;
if (irq == ad->sb_irq) {
writel(priv->ce_clear_mask,
base + ALTR_A10_ECC_INTSTAT_OFST);
edac_device_handle_ce(ad->edac_dev, 0, 0, ad->edac_dev_name);
return IRQ_HANDLED;
} else if (irq == ad->db_irq) {
writel(priv->ue_clear_mask,
base + ALTR_A10_ECC_INTSTAT_OFST);
edac_device_handle_ue(ad->edac_dev, 0, 0, ad->edac_dev_name);
return IRQ_HANDLED;
}
WARN_ONCE(1, "Unhandled IRQ%d on Port B.", irq);
return IRQ_NONE;
}
static const struct edac_device_prv_data a10_sdmmcecca_data = {
.setup = socfpga_init_sdmmc_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_SERRPENA,
.ue_set_mask = ALTR_A10_ECC_DERRPENA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject_fops,
};
static const struct edac_device_prv_data a10_sdmmceccb_data = {
.setup = socfpga_init_sdmmc_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENB,
.ue_clear_mask = ALTR_A10_ECC_DERRPENB,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRB,
.ue_set_mask = ALTR_A10_ECC_TDERRB,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq_portb,
.inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_SDMMC */
/********************* Arria10 EDAC Device Functions *************************/
static const struct of_device_id altr_edac_a10_device_of_match[] = {
#ifdef CONFIG_EDAC_ALTERA_L2C
{ .compatible = "altr,socfpga-a10-l2-ecc", .data = &a10_l2ecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_OCRAM
{ .compatible = "altr,socfpga-a10-ocram-ecc",
.data = &a10_ocramecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_ETHERNET
{ .compatible = "altr,socfpga-eth-mac-ecc",
.data = &a10_enetecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_NAND
{ .compatible = "altr,socfpga-nand-ecc", .data = &a10_nandecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_DMA
{ .compatible = "altr,socfpga-dma-ecc", .data = &a10_dmaecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_USB
{ .compatible = "altr,socfpga-usb-ecc", .data = &a10_usbecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_QSPI
{ .compatible = "altr,socfpga-qspi-ecc", .data = &a10_qspiecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_SDMMC
{ .compatible = "altr,socfpga-sdmmc-ecc", .data = &a10_sdmmcecca_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_SDRAM
{ .compatible = "altr,sdram-edac-s10", .data = &s10_sdramecc_data },
#endif
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match);
/*
* The Arria10 EDAC Device Functions differ from the Cyclone5/Arria5
 * because 2 IRQs are shared among all the ECC peripherals. The ECC
* manager manages the IRQs and the children.
* Based on xgene_edac.c peripheral code.
*/
static ssize_t __maybe_unused
altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dci = file->private_data;
struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
const struct edac_device_prv_data *priv = drvdata->data;
void __iomem *set_addr = (drvdata->base + priv->set_err_ofst);
unsigned long flags;
u8 trig_type;
if (!user_buf || get_user(trig_type, user_buf))
return -EFAULT;
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR)
writel(priv->ue_set_mask, set_addr);
else
writel(priv->ce_set_mask, set_addr);
/* Ensure the interrupt test bits are set */
wmb();
local_irq_restore(flags);
return count;
}
/*
* The Stratix10 EDAC Error Injection Functions differ from Arria10
* slightly. A few Arria10 peripherals can use this injection function.
* Inject the error into the memory and then readback to trigger the IRQ.
*/
static ssize_t __maybe_unused
altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dci = file->private_data;
struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
const struct edac_device_prv_data *priv = drvdata->data;
void __iomem *set_addr = (drvdata->base + priv->set_err_ofst);
unsigned long flags;
u8 trig_type;
if (!user_buf || get_user(trig_type, user_buf))
return -EFAULT;
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
writel(priv->ue_set_mask, set_addr);
} else {
/* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
/* Setup Address to 0 */
writel(0, drvdata->base + ECC_BLK_ADDRESS_OFST);
/* Setup accctrl to read & ecc & data override */
writel(ECC_READ_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
/* Kick it. */
writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
/* Setup write for single bit change */
writel(readl(drvdata->base + ECC_BLK_RDATA0_OFST) ^ 0x1,
drvdata->base + ECC_BLK_WDATA0_OFST);
writel(readl(drvdata->base + ECC_BLK_RDATA1_OFST),
drvdata->base + ECC_BLK_WDATA1_OFST);
writel(readl(drvdata->base + ECC_BLK_RDATA2_OFST),
drvdata->base + ECC_BLK_WDATA2_OFST);
writel(readl(drvdata->base + ECC_BLK_RDATA3_OFST),
drvdata->base + ECC_BLK_WDATA3_OFST);
/* Copy Read ECC to Write ECC */
writel(readl(drvdata->base + ECC_BLK_RECC0_OFST),
drvdata->base + ECC_BLK_WECC0_OFST);
writel(readl(drvdata->base + ECC_BLK_RECC1_OFST),
drvdata->base + ECC_BLK_WECC1_OFST);
/* Setup accctrl to write & ecc override & data override */
writel(ECC_WRITE_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
/* Kick it. */
writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
/* Setup accctrl to read & ecc overwrite & data overwrite */
writel(ECC_READ_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
/* Kick it. */
writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
}
/* Ensure the interrupt test bits are set */
wmb();
local_irq_restore(flags);
return count;
}
static void altr_edac_a10_irq_handler(struct irq_desc *desc)
{
int dberr, bit, sm_offset, irq_status;
struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
int irq = irq_desc_get_irq(desc);
unsigned long bits;
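	/*
	 * Demux the shared ECC manager interrupt: the double-bit parent
	 * IRQ maps to hwirqs 32..63 of the linear domain, the single-bit
	 * parent IRQ to hwirqs 0..31, one bit per peripheral in the
	 * SYSMGR interrupt status register read below.
	 */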
dberr = (irq == edac->db_irq) ? 1 : 0;
sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
A10_SYSMGR_ECC_INTSTAT_SERR_OFST;
chained_irq_enter(chip, desc);
regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
bits = irq_status;
for_each_set_bit(bit, &bits, 32)
generic_handle_domain_irq(edac->domain, dberr * 32 + bit);
chained_irq_exit(chip, desc);
}
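/*
 * validate_parent_available() below skips a peripheral ECC node whose
 * parent IP is disabled. Illustrative device-tree fragment (node and
 * phandle names are assumptions, shown only to document the
 * "altr,ecc-parent" relationship being checked):
 *
 *	emac0-rx-ecc {
 *		compatible = "altr,socfpga-eth-mac-ecc";
 *		altr,ecc-parent = <&gmac0>;
 *	};
 */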
static int validate_parent_available(struct device_node *np)
{
struct device_node *parent;
int ret = 0;
/* SDRAM must be present for Linux (implied parent) */
if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
return 0;
/* Ensure parent device is enabled if parent node exists */
parent = of_parse_phandle(np, "altr,ecc-parent", 0);
if (parent && !of_device_is_available(parent))
ret = -ENODEV;
of_node_put(parent);
return ret;
}
static int get_s10_sdram_edac_resource(struct device_node *np,
struct resource *res)
{
struct device_node *parent;
int ret;
parent = of_parse_phandle(np, "altr,sdr-syscon", 0);
if (!parent)
return -ENODEV;
ret = of_address_to_resource(parent, 0, res);
of_node_put(parent);
return ret;
}
static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
struct device_node *np)
{
struct edac_device_ctl_info *dci;
struct altr_edac_device_dev *altdev;
char *ecc_name = (char *)np->name;
struct resource res;
int edac_idx;
int rc = 0;
const struct edac_device_prv_data *prv;
/* Get matching node and check for valid result */
const struct of_device_id *pdev_id =
of_match_node(altr_edac_a10_device_of_match, np);
if (IS_ERR_OR_NULL(pdev_id))
return -ENODEV;
/* Get driver specific data for this EDAC device */
prv = pdev_id->data;
if (IS_ERR_OR_NULL(prv))
return -ENODEV;
if (validate_parent_available(np))
return -ENODEV;
if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
return -ENOMEM;
if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
rc = get_s10_sdram_edac_resource(np, &res);
else
rc = of_address_to_resource(np, 0, &res);
if (rc < 0) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: no resource address\n", ecc_name);
goto err_release_group;
}
edac_idx = edac_device_alloc_index();
dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name,
1, ecc_name, 1, 0, NULL, 0,
edac_idx);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: Unable to allocate EDAC device\n", ecc_name);
rc = -ENOMEM;
goto err_release_group;
}
altdev = dci->pvt_info;
dci->dev = edac->dev;
altdev->edac_dev_name = ecc_name;
altdev->edac_idx = edac_idx;
altdev->edac = edac;
altdev->edac_dev = dci;
altdev->data = prv;
altdev->ddev = *edac->dev;
dci->dev = &altdev->ddev;
dci->ctl_name = "Altera ECC Manager";
dci->mod_name = ecc_name;
dci->dev_name = ecc_name;
altdev->base = devm_ioremap_resource(edac->dev, &res);
if (IS_ERR(altdev->base)) {
rc = PTR_ERR(altdev->base);
goto err_release_group1;
}
/* Check specific dependencies for the module */
if (altdev->data->setup) {
rc = altdev->data->setup(altdev);
if (rc)
goto err_release_group1;
}
altdev->sb_irq = irq_of_parse_and_map(np, 0);
if (!altdev->sb_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating SBIRQ\n");
rc = -ENODEV;
goto err_release_group1;
}
rc = devm_request_irq(edac->dev, altdev->sb_irq, prv->ecc_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
ecc_name, altdev);
if (rc) {
edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
goto err_release_group1;
}
#ifdef CONFIG_64BIT
/* Use IRQ to determine SError origin instead of assigning IRQ */
rc = of_property_read_u32_index(np, "interrupts", 0, &altdev->db_irq);
if (rc) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to parse DB IRQ index\n");
goto err_release_group1;
}
#else
altdev->db_irq = irq_of_parse_and_map(np, 1);
if (!altdev->db_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating DBIRQ\n");
rc = -ENODEV;
goto err_release_group1;
}
rc = devm_request_irq(edac->dev, altdev->db_irq, prv->ecc_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
ecc_name, altdev);
if (rc) {
edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
goto err_release_group1;
}
#endif
rc = edac_device_add_device(dci);
if (rc) {
dev_err(edac->dev, "edac_device_add_device failed\n");
rc = -ENOMEM;
goto err_release_group1;
}
altr_create_edacdev_dbgfs(dci, prv);
list_add(&altdev->next, &edac->a10_ecc_devices);
devres_remove_group(edac->dev, altr_edac_a10_device_add);
return 0;
err_release_group1:
edac_device_free_ctl_info(dci);
err_release_group:
devres_release_group(edac->dev, NULL);
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s:Error setting up EDAC device: %d\n", ecc_name, rc);
return rc;
}
static void a10_eccmgr_irq_mask(struct irq_data *d)
{
struct altr_arria10_edac *edac = irq_data_get_irq_chip_data(d);
regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
BIT(d->hwirq));
}
static void a10_eccmgr_irq_unmask(struct irq_data *d)
{
struct altr_arria10_edac *edac = irq_data_get_irq_chip_data(d);
regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_CLR_OFST,
BIT(d->hwirq));
}
static int a10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct altr_arria10_edac *edac = d->host_data;
irq_set_chip_and_handler(irq, &edac->irq_chip, handle_simple_irq);
irq_set_chip_data(irq, edac);
irq_set_noprobe(irq);
return 0;
}
static const struct irq_domain_ops a10_eccmgr_ic_ops = {
.map = a10_eccmgr_irqdomain_map,
.xlate = irq_domain_xlate_twocell,
};
/************** Stratix 10 EDAC Double Bit Error Handler ************/
#define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
#ifdef CONFIG_64BIT
/* panic routine issues reboot on non-zero panic_timeout */
extern int panic_timeout;
/*
 * The double-bit error is handled through SError, which is fatal. This is
 * called as a panic notifier to print out ECC error info as part of the panic.
*/
static int s10_edac_dberr_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct altr_arria10_edac *edac = to_a10edac(this, panic_notifier);
int err_addr, dberror;
regmap_read(edac->ecc_mgr_map, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
&dberror);
regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, dberror);
if (dberror & S10_DBE_IRQ_MASK) {
struct list_head *position;
struct altr_edac_device_dev *ed;
struct arm_smccc_res result;
/* Find the matching DBE in the list of devices */
list_for_each(position, &edac->a10_ecc_devices) {
ed = list_entry(position, struct altr_edac_device_dev,
next);
if (!(BIT(ed->db_irq) & dberror))
continue;
writel(ALTR_A10_ECC_DERRPENA,
ed->base + ALTR_A10_ECC_INTSTAT_OFST);
err_addr = readl(ed->base + ALTR_S10_DERR_ADDRA_OFST);
regmap_write(edac->ecc_mgr_map,
S10_SYSMGR_UE_ADDR_OFST, err_addr);
edac_printk(KERN_ERR, EDAC_DEVICE,
"EDAC: [Fatal DBE on %s @ 0x%08X]\n",
ed->edac_dev_name, err_addr);
break;
}
/* Notify the System through SMC. Reboot delay = 1 second */
panic_timeout = 1;
arm_smccc_smc(INTEL_SIP_SMC_ECC_DBE, dberror, 0, 0, 0, 0,
0, 0, &result);
}
return NOTIFY_DONE;
}
#endif
/****************** Arria 10 EDAC Probe Function *********************/
static int altr_edac_a10_probe(struct platform_device *pdev)
{
struct altr_arria10_edac *edac;
struct device_node *child;
edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
if (!edac)
return -ENOMEM;
edac->dev = &pdev->dev;
platform_set_drvdata(pdev, edac);
INIT_LIST_HEAD(&edac->a10_ecc_devices);
edac->ecc_mgr_map =
altr_sysmgr_regmap_lookup_by_phandle(pdev->dev.of_node,
"altr,sysmgr-syscon");
if (IS_ERR(edac->ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to get syscon altr,sysmgr-syscon\n");
return PTR_ERR(edac->ecc_mgr_map);
}
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
&a10_eccmgr_ic_ops, edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
}
edac->sb_irq = platform_get_irq(pdev, 0);
if (edac->sb_irq < 0)
return edac->sb_irq;
irq_set_chained_handler_and_data(edac->sb_irq,
altr_edac_a10_irq_handler,
edac);
#ifdef CONFIG_64BIT
{
int dberror, err_addr;
edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
atomic_notifier_chain_register(&panic_notifier_list,
&edac->panic_notifier);
		/* Print out a message if an uncorrectable error occurred previously. */
regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST,
&dberror);
if (dberror) {
regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
&err_addr);
edac_printk(KERN_ERR, EDAC_DEVICE,
"Previous Boot UE detected[0x%X] @ 0x%X\n",
dberror, err_addr);
/* Reset the sticky registers */
regmap_write(edac->ecc_mgr_map,
S10_SYSMGR_UE_VAL_OFST, 0);
regmap_write(edac->ecc_mgr_map,
S10_SYSMGR_UE_ADDR_OFST, 0);
}
}
#else
edac->db_irq = platform_get_irq(pdev, 1);
if (edac->db_irq < 0)
return edac->db_irq;
irq_set_chained_handler_and_data(edac->db_irq,
altr_edac_a10_irq_handler, edac);
#endif
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
continue;
if (of_match_node(altr_edac_a10_device_of_match, child))
altr_edac_a10_device_add(edac, child);
#ifdef CONFIG_EDAC_ALTERA_SDRAM
else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
of_platform_populate(pdev->dev.of_node,
altr_sdram_ctrl_of_match,
NULL, &pdev->dev);
#endif
}
return 0;
}
static const struct of_device_id altr_edac_a10_of_match[] = {
{ .compatible = "altr,socfpga-a10-ecc-manager" },
{ .compatible = "altr,socfpga-s10-ecc-manager" },
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_a10_of_match);
static struct platform_driver altr_edac_a10_driver = {
.probe = altr_edac_a10_probe,
.driver = {
.name = "socfpga_a10_ecc_manager",
.of_match_table = altr_edac_a10_of_match,
},
};
module_platform_driver(altr_edac_a10_driver);
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
| linux-master | drivers/edac/altera_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "edac_module.h"
static struct dentry *edac_debugfs;
static ssize_t edac_fake_inject_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct device *dev = file->private_data;
struct mem_ctl_info *mci = to_mci(dev);
static enum hw_event_mc_err_type type;
u16 errcount = mci->fake_inject_count;
if (!errcount)
errcount = 1;
type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
: HW_EVENT_ERR_CORRECTED;
printk(KERN_DEBUG
"Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
errcount,
(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
errcount > 1 ? "s" : "",
mci->fake_inject_layer[0],
mci->fake_inject_layer[1],
mci->fake_inject_layer[2]
);
edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
mci->fake_inject_layer[0],
mci->fake_inject_layer[1],
mci->fake_inject_layer[2],
"FAKE ERROR", "for EDAC testing only");
return count;
}
static const struct file_operations debug_fake_inject_fops = {
.open = simple_open,
.write = edac_fake_inject_write,
.llseek = generic_file_llseek,
};
void __init edac_debugfs_init(void)
{
edac_debugfs = debugfs_create_dir("edac", NULL);
}
void edac_debugfs_exit(void)
{
debugfs_remove_recursive(edac_debugfs);
}
void edac_create_debugfs_nodes(struct mem_ctl_info *mci)
{
struct dentry *parent;
char name[80];
int i;
parent = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
for (i = 0; i < mci->n_layers; i++) {
sprintf(name, "fake_inject_%s",
edac_layer_name[mci->layers[i].type]);
debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
&mci->fake_inject_layer[i]);
}
debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
&mci->fake_inject_ue);
debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
&mci->fake_inject_count);
debugfs_create_file("fake_inject", S_IWUSR, parent, &mci->dev,
&debug_fake_inject_fops);
mci->debugfs = parent;
}
/* Create a toplevel dir under EDAC's debugfs hierarchy */
struct dentry *edac_debugfs_create_dir(const char *dirname)
{
if (!edac_debugfs)
return NULL;
return debugfs_create_dir(dirname, edac_debugfs);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_dir);
/* Create a toplevel dir under EDAC's debugfs hierarchy with parent @parent */
struct dentry *
edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent)
{
return debugfs_create_dir(dirname, parent);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_dir_at);
/*
* Create a file under EDAC's hierarchy or a sub-hierarchy:
*
* @name: file name
* @mode: file permissions
* @parent: parent dentry. If NULL, it becomes the toplevel EDAC dir
* @data: private data of caller
* @fops: file operations of this file
*/
struct dentry *
edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
void *data, const struct file_operations *fops)
{
if (!parent)
parent = edac_debugfs;
return debugfs_create_file(name, mode, parent, data, fops);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_file);
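/*
 * Minimal usage sketch (hypothetical caller; my_data and my_inject_fops
 * are assumptions, not part of this file). A NULL @parent passed to
 * edac_debugfs_create_file() defaults to the top-level "edac" directory
 * created in edac_debugfs_init():
 *
 *	struct dentry *d = edac_debugfs_create_dir("my_ecc_block");
 *
 *	if (d)
 *		edac_debugfs_create_file("inject", S_IWUSR, d, my_data,
 *					 &my_inject_fops);
 */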
/* Wrapper for debugfs_create_x8() */
void edac_debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value)
{
if (!parent)
parent = edac_debugfs;
debugfs_create_x8(name, mode, parent, value);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_x8);
/* Wrapper for debugfs_create_x16() */
void edac_debugfs_create_x16(const char *name, umode_t mode,
struct dentry *parent, u16 *value)
{
if (!parent)
parent = edac_debugfs;
debugfs_create_x16(name, mode, parent, value);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_x16);
/* Wrapper for debugfs_create_x32() */
void edac_debugfs_create_x32(const char *name, umode_t mode,
struct dentry *parent, u32 *value)
{
if (!parent)
parent = edac_debugfs;
debugfs_create_x32(name, mode, parent, value);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_x32);
| linux-master | drivers/edac/debugfs.c |
/*
* Intel 5400 class Memory Controllers kernel module (Seaburg)
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Copyright (c) 2008 by:
* Ben Woodard <[email protected]>
* Mauro Carvalho Chehab
*
* Red Hat Inc. https://www.redhat.com
*
* Forked and adapted from the i5000_edac driver which was
* written by Douglas Thompson Linux Networx <[email protected]>
*
* This module is based on the following document:
*
* Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
* http://developer.intel.com/design/chipsets/datashts/313070.htm
*
* This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
* 2 channels operating in lockstep no-mirror mode. Each channel can have up to
 * 4 DIMMs, each with up to 8GB.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include "edac_module.h"
/*
* Alter this version for the I5400 module when modifications are made
*/
#define I5400_REVISION " Ver: 1.0.0"
#define EDAC_MOD_STR "i5400_edac"
#define i5400_printk(level, fmt, arg...) \
edac_printk(level, "i5400", fmt, ##arg)
#define i5400_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
/* Limits for i5400 */
#define MAX_BRANCHES 2
#define CHANNELS_PER_BRANCH 2
#define DIMMS_PER_CHANNEL 4
#define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH)
/* Device 16,
* Function 0: System Address
* Function 1: Memory Branch Map, Control, Errors Register
* Function 2: FSB Error Registers
*
* All 3 functions of Device 16 (0,1,2) share the SAME DID and
 * use PCI_DEVICE_ID_INTEL_5400_ERR for device 16 (0,1,2),
* PCI_DEVICE_ID_INTEL_5400_FBD0 and PCI_DEVICE_ID_INTEL_5400_FBD1
* for device 21 (0,1).
*/
/* OFFSETS for Function 0 */
#define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
#define MAXCH 0x56 /* Max Channel Number */
#define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
/* OFFSETS for Function 1 */
#define TOLM 0x6C
#define REDMEMB 0x7C
#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0] indicate EVEN */
#define MIR0 0x80
#define MIR1 0x84
#define AMIR0 0x8c
#define AMIR1 0x90
/* Fatal error registers */
#define FERR_FAT_FBD 0x98 /* also called FERR_FAT_FB_DIMM in the datasheet */
#define FERR_FAT_FBDCHAN (3<<28) /* channel index where the highest-order error occurred */
#define NERR_FAT_FBD 0x9c
#define FERR_NF_FBD 0xa0 /* also called FERR_NFAT_FB_DIMM in the datasheet */
/* Non-fatal error register */
#define NERR_NF_FBD 0xa4
/* Enable error mask */
#define EMASK_FBD 0xa8
#define ERR0_FBD 0xac
#define ERR1_FBD 0xb0
#define ERR2_FBD 0xb4
#define MCERR_FBD 0xb8
/* No OFFSETS for Device 16 Function 2 */
/*
* Device 21,
* Function 0: Memory Map Branch 0
*
* Device 22,
* Function 0: Memory Map Branch 1
*/
/* OFFSETS for Function 0 */
#define AMBPRESENT_0 0x64
#define AMBPRESENT_1 0x66
#define MTR0 0x80
#define MTR1 0x82
#define MTR2 0x84
#define MTR3 0x86
/* OFFSETS for Function 1 */
#define NRECFGLOG 0x74
#define RECFGLOG 0x78
#define NRECMEMA 0xbe
#define NRECMEMB 0xc0
#define NRECFB_DIMMA 0xc4
#define NRECFB_DIMMB 0xc8
#define NRECFB_DIMMC 0xcc
#define NRECFB_DIMMD 0xd0
#define NRECFB_DIMME 0xd4
#define NRECFB_DIMMF 0xd8
#define REDMEMA 0xdC
#define RECMEMA 0xf0
#define RECMEMB 0xf4
#define RECFB_DIMMA 0xf8
#define RECFB_DIMMB 0xec
#define RECFB_DIMMC 0xf0
#define RECFB_DIMMD 0xf4
#define RECFB_DIMME 0xf8
#define RECFB_DIMMF 0xfC
/*
* Error indicator bits and masks
 * Error masks are according to Table 5-17 of the i5400 datasheet
*/
enum error_mask {
EMASK_M1 = 1<<0, /* Memory Write error on non-redundant retry */
EMASK_M2 = 1<<1, /* Memory or FB-DIMM configuration CRC read error */
EMASK_M3 = 1<<2, /* Reserved */
EMASK_M4 = 1<<3, /* Uncorrectable Data ECC on Replay */
EMASK_M5 = 1<<4, /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */
EMASK_M6 = 1<<5, /* Unsupported on i5400 */
EMASK_M7 = 1<<6, /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
EMASK_M8 = 1<<7, /* Aliased Uncorrectable Patrol Data ECC */
EMASK_M9 = 1<<8, /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */
EMASK_M10 = 1<<9, /* Unsupported on i5400 */
EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */
EMASK_M13 = 1<<12, /* Memory Write error on first attempt */
EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */
EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */
EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */
EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */
EMASK_M18 = 1<<17, /* Unsupported on i5400 */
EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */
EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */
EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */
EMASK_M22 = 1<<21, /* SPD protocol Error */
EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */
EMASK_M24 = 1<<23, /* Refresh error */
EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */
EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */
EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */
EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */
EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */
};
/*
* Names to translate bit error into something useful
*/
static const char *error_name[] = {
[0] = "Memory Write error on non-redundant retry",
[1] = "Memory or FB-DIMM configuration CRC read error",
/* Reserved */
[3] = "Uncorrectable Data ECC on Replay",
[4] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
/* M6 Unsupported on i5400 */
[6] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
[7] = "Aliased Uncorrectable Patrol Data ECC",
[8] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
/* M10 Unsupported on i5400 */
[10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
[11] = "Non-Aliased Uncorrectable Patrol Data ECC",
[12] = "Memory Write error on first attempt",
[13] = "FB-DIMM Configuration Write error on first attempt",
[14] = "Memory or FB-DIMM configuration CRC read error",
[15] = "Channel Failed-Over Occurred",
[16] = "Correctable Non-Mirrored Demand Data ECC",
/* M18 Unsupported on i5400 */
[18] = "Correctable Resilver- or Spare-Copy Data ECC",
[19] = "Correctable Patrol Data ECC",
[20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status",
[21] = "SPD protocol Error",
[22] = "Non-Redundant Fast Reset Timeout",
[23] = "Refresh error",
[24] = "Memory Write error on redundant retry",
[25] = "Redundant Fast Reset Timeout",
[26] = "Correctable Counter Threshold Exceeded",
[27] = "DIMM-Spare Copy Completed",
[28] = "DIMM-Isolation Completed",
};
/* Fatal errors */
#define ERROR_FAT_MASK (EMASK_M1 | \
EMASK_M2 | \
EMASK_M23)
/* Correctable errors */
#define ERROR_NF_CORRECTABLE (EMASK_M27 | \
EMASK_M20 | \
EMASK_M19 | \
EMASK_M18 | \
EMASK_M17 | \
EMASK_M16)
#define ERROR_NF_DIMM_SPARE (EMASK_M29 | \
EMASK_M28)
#define ERROR_NF_SPD_PROTOCOL (EMASK_M22)
#define ERROR_NF_NORTH_CRC (EMASK_M21)
/* Recoverable errors */
#define ERROR_NF_RECOVERABLE (EMASK_M26 | \
EMASK_M25 | \
EMASK_M24 | \
EMASK_M15 | \
EMASK_M14 | \
EMASK_M13 | \
EMASK_M12 | \
EMASK_M11 | \
EMASK_M9 | \
EMASK_M8 | \
EMASK_M7 | \
EMASK_M5)
/* uncorrectable errors */
#define ERROR_NF_UNCORRECTABLE (EMASK_M4)
/* mask to all non-fatal errors */
#define ERROR_NF_MASK (ERROR_NF_CORRECTABLE | \
ERROR_NF_UNCORRECTABLE | \
ERROR_NF_RECOVERABLE | \
ERROR_NF_DIMM_SPARE | \
ERROR_NF_SPD_PROTOCOL | \
ERROR_NF_NORTH_CRC)
/*
* Define error masks for the several registers
*/
/* Enable all fatal and non fatal errors */
#define ENABLE_EMASK_ALL (ERROR_FAT_MASK | ERROR_NF_MASK)
/* mask for fatal error registers */
#define FERR_FAT_MASK ERROR_FAT_MASK
/* masks for non-fatal error register */
static inline int to_nf_mask(unsigned int mask)
{
return (mask & EMASK_M29) | (mask >> 3);
}
static inline int from_nf_ferr(unsigned int mask)
{
return (mask & EMASK_M29) | /* Bit 28 */
(mask & ((1 << 28) - 1) << 3); /* Bits 0 to 27 */
}
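/*
 * Worked example (pure bit arithmetic on the helpers above, no datasheet
 * claim is added here): the EMASK bits are shifted down by 3 when forming
 * the non-fatal FERR/NERR masks, e.g. to_nf_mask(EMASK_M5) == BIT(1) and
 * to_nf_mask(EMASK_M20) == BIT(16); EMASK_M29 (bit 28) is additionally
 * kept in place by the (mask & EMASK_M29) term.
 */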
#define FERR_NF_MASK to_nf_mask(ERROR_NF_MASK)
#define FERR_NF_CORRECTABLE to_nf_mask(ERROR_NF_CORRECTABLE)
#define FERR_NF_DIMM_SPARE to_nf_mask(ERROR_NF_DIMM_SPARE)
#define FERR_NF_SPD_PROTOCOL to_nf_mask(ERROR_NF_SPD_PROTOCOL)
#define FERR_NF_NORTH_CRC to_nf_mask(ERROR_NF_NORTH_CRC)
#define FERR_NF_RECOVERABLE to_nf_mask(ERROR_NF_RECOVERABLE)
#define FERR_NF_UNCORRECTABLE to_nf_mask(ERROR_NF_UNCORRECTABLE)
/*
* Defines to extract the various fields from the
* MTRx - Memory Technology Registers
*/
#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 10))
#define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 9))
#define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 8)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
#define MTR_DIMM_RANK(mtr) (((mtr) >> 5) & 0x1)
#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
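/*
 * Illustrative decode of the macros above (the register value is an
 * assumption chosen for the example): mtr = 0x0545 -> DIMM present
 * (bit 10), x8 DRAM width (bit 8), 8 banks (bit 6), single rank
 * (bit 5 clear), ROWS field 1 -> 14 row address bits, COLS field 1 ->
 * 11 column address bits.
 */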
/* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */
static inline int extract_fbdchan_indx(u32 x)
{
return (x>>28) & 0x3;
}
/* Device name and register DID (Device ID) */
struct i5400_dev_info {
const char *ctl_name; /* name for this device */
u16 fsb_mapping_errors; /* DID for the branchmap,control */
};
/* Table of devices attributes supported by this driver */
static const struct i5400_dev_info i5400_devs[] = {
{
.ctl_name = "I5400",
.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
},
};
struct i5400_dimm_info {
int megabytes; /* size, 0 means not present */
};
/* driver private data structure */
struct i5400_pvt {
struct pci_dev *system_address; /* 16.0 */
struct pci_dev *branchmap_werrors; /* 16.1 */
struct pci_dev *fsb_error_regs; /* 16.2 */
struct pci_dev *branch_0; /* 21.0 */
struct pci_dev *branch_1; /* 22.0 */
u16 tolm; /* top of low memory */
union {
u64 ambase; /* AMB BAR */
struct {
u32 ambase_bottom;
u32 ambase_top;
} u __packed;
};
u16 mir0, mir1;
u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
u16 b0_ambpresent1; /* Branch 0, Channel 1 */
u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 0 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
/* DIMM information matrix, allocating architecture maximums */
struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
/* Actual values for this controller */
int maxch; /* Max channels */
int maxdimmperch; /* Max DIMMs per channel */
};
/* I5400 MCH error information retrieved from Hardware */
struct i5400_error_info {
/* These registers are always read from the MC */
u32 ferr_fat_fbd; /* First Errors Fatal */
u32 nerr_fat_fbd; /* Next Errors Fatal */
u32 ferr_nf_fbd; /* First Errors Non-Fatal */
u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
/* These registers are input ONLY if there was a Recoverable Error */
u32 redmemb; /* Recoverable Mem Data Error log B */
u16 recmema; /* Recoverable Mem Error log A */
u32 recmemb; /* Recoverable Mem Error log B */
/* These registers are input ONLY if there was a Non-Rec Error */
u16 nrecmema; /* Non-Recoverable Mem log A */
u32 nrecmemb; /* Non-Recoverable Mem log B */
};
/* Note that nrec_rdwr() moved from NRECMEMA to NRECMEMB between the 5000 and
the 5400, so it is better to use an inline function than a macro here */
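/*
 * Field layout implied by the helpers below:
 *   NRECMEMA/RECMEMA: bits 14:12 bank, bits 11:8 rank,
 *                     bits 7:0 buffer ID (NRECMEMA only)
 *   NRECMEMB/RECMEMB: bit 31 read/write, bits 28:16 CAS, bits 15:0 RAS
 */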
static inline int nrec_bank(struct i5400_error_info *info)
{
return ((info->nrecmema) >> 12) & 0x7;
}
static inline int nrec_rank(struct i5400_error_info *info)
{
return ((info->nrecmema) >> 8) & 0xf;
}
static inline int nrec_buf_id(struct i5400_error_info *info)
{
return ((info->nrecmema)) & 0xff;
}
static inline int nrec_rdwr(struct i5400_error_info *info)
{
return (info->nrecmemb) >> 31;
}
/* This applies to both the NREC and REC registers, so it can be used with
nrec_rdwr() and rec_rdwr() */
static inline const char *rdwr_str(int rdwr)
{
return rdwr ? "Write" : "Read";
}
static inline int nrec_cas(struct i5400_error_info *info)
{
return ((info->nrecmemb) >> 16) & 0x1fff;
}
static inline int nrec_ras(struct i5400_error_info *info)
{
return (info->nrecmemb) & 0xffff;
}
static inline int rec_bank(struct i5400_error_info *info)
{
return ((info->recmema) >> 12) & 0x7;
}
static inline int rec_rank(struct i5400_error_info *info)
{
return ((info->recmema) >> 8) & 0xf;
}
static inline int rec_rdwr(struct i5400_error_info *info)
{
return (info->recmemb) >> 31;
}
static inline int rec_cas(struct i5400_error_info *info)
{
return ((info->recmemb) >> 16) & 0x1fff;
}
static inline int rec_ras(struct i5400_error_info *info)
{
return (info->recmemb) & 0xffff;
}
static struct edac_pci_ctl_info *i5400_pci;
/*
* i5400_get_error_info Retrieve the hardware error information from
* the hardware and cache it in the 'info'
* structure
*/
static void i5400_get_error_info(struct mem_ctl_info *mci,
struct i5400_error_info *info)
{
struct i5400_pvt *pvt;
u32 value;
pvt = mci->pvt_info;
/* read in the 1st FATAL error register */
pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
/* Mask only the bits that the doc says are valid */
value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
/* If there is an error, then read in the
NEXT FATAL error register and the Memory Error Log Register A
*/
if (value & FERR_FAT_MASK) {
info->ferr_fat_fbd = value;
/* harvest the various error data we need */
pci_read_config_dword(pvt->branchmap_werrors,
NERR_FAT_FBD, &info->nerr_fat_fbd);
pci_read_config_word(pvt->branchmap_werrors,
NRECMEMA, &info->nrecmema);
pci_read_config_dword(pvt->branchmap_werrors,
NRECMEMB, &info->nrecmemb);
/* Clear the error bits, by writing them back */
pci_write_config_dword(pvt->branchmap_werrors,
FERR_FAT_FBD, value);
} else {
info->ferr_fat_fbd = 0;
info->nerr_fat_fbd = 0;
info->nrecmema = 0;
info->nrecmemb = 0;
}
/* read in the 1st NON-FATAL error register */
pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
/* If there is an error, then read in the NEXT NON-FATAL error
 * register and the recoverable error log registers as well */
if (value & FERR_NF_MASK) {
info->ferr_nf_fbd = value;
/* harvest the various error data we need */
pci_read_config_dword(pvt->branchmap_werrors,
NERR_NF_FBD, &info->nerr_nf_fbd);
pci_read_config_word(pvt->branchmap_werrors,
RECMEMA, &info->recmema);
pci_read_config_dword(pvt->branchmap_werrors,
RECMEMB, &info->recmemb);
pci_read_config_dword(pvt->branchmap_werrors,
REDMEMB, &info->redmemb);
/* Clear the error bits, by writing them back */
pci_write_config_dword(pvt->branchmap_werrors,
FERR_NF_FBD, value);
} else {
info->ferr_nf_fbd = 0;
info->nerr_nf_fbd = 0;
info->recmema = 0;
info->recmemb = 0;
info->redmemb = 0;
}
}
/*
* i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
* struct i5400_error_info *info,
* int handle_errors);
*
* handle the Intel FATAL and unrecoverable errors, if any
*/
static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
struct i5400_error_info *info,
unsigned long allErrors)
{
char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
int branch;
int channel;
int bank;
int buf_id;
int rank;
int rdwr;
int ras, cas;
int errnum;
char *type = NULL;
enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
if (!allErrors)
return; /* if no error, return now */
if (allErrors & ERROR_FAT_MASK) {
type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
} else if (allErrors & FERR_NF_UNCORRECTABLE)
type = "NON-FATAL uncorrected";
else
type = "NON-FATAL recoverable";
/* ONLY ONE of the possible error bits will be set, as per the docs */
branch = extract_fbdchan_indx(info->ferr_fat_fbd);
channel = branch;
/* Use the NON-Recoverable macros to extract data */
bank = nrec_bank(info);
rank = nrec_rank(info);
buf_id = nrec_buf_id(info);
rdwr = nrec_rdwr(info);
ras = nrec_ras(info);
cas = nrec_cas(info);
edac_dbg(0, "\t\t%s DIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
type, rank, channel, channel + 1, branch >> 1, bank,
buf_id, rdwr_str(rdwr), ras, cas);
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
/* Form out message */
snprintf(msg, sizeof(msg),
"Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
bank, buf_id, ras, cas, allErrors, error_name[errnum]);
edac_mc_handle_error(tp_event, mci, 1, 0, 0, 0,
branch >> 1, -1, rank,
rdwr ? "Write error" : "Read error",
msg);
}
/*
* i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
* struct i5400_error_info *info,
* int handle_errors);
*
* handle the Intel NON-FATAL errors, if any
*/
static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
struct i5400_error_info *info)
{
char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
unsigned long allErrors;
int branch;
int channel;
int bank;
int rank;
int rdwr;
int ras, cas;
int errnum;
/* mask off the Error bits that are possible */
allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK);
if (!allErrors)
return; /* if no error, return now */
/* ONLY ONE of the possible error bits will be set, as per the docs */
if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) {
i5400_proccess_non_recoverable_info(mci, info, allErrors);
return;
}
/* Correctable errors */
if (allErrors & ERROR_NF_CORRECTABLE) {
edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors);
branch = extract_fbdchan_indx(info->ferr_nf_fbd);
channel = 0;
if (REC_ECC_LOCATOR_ODD(info->redmemb))
channel = 1;
/* Convert the branch-relative channel number into an
 * absolute (zero-based) channel number */
channel += branch;
bank = rec_bank(info);
rank = rec_rank(info);
rdwr = rec_rdwr(info);
ras = rec_ras(info);
cas = rec_cas(info);
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
edac_dbg(0, "\t\tDIMM= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, branch >> 1, bank,
rdwr_str(rdwr), ras, cas);
/* Form out message */
snprintf(msg, sizeof(msg),
"Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s "
"RAS=%d CAS=%d, CE Err=0x%lx (%s))",
branch >> 1, bank, rdwr_str(rdwr), ras, cas,
allErrors, error_name[errnum]);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
branch >> 1, channel % 2, rank,
rdwr ? "Write error" : "Read error",
msg);
return;
}
/* Miscellaneous errors */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
branch = extract_fbdchan_indx(info->ferr_nf_fbd);
i5400_mc_printk(mci, KERN_EMERG,
"Non-Fatal misc error (Branch=%d Err=%#lx (%s))",
branch >> 1, allErrors, error_name[errnum]);
}
/*
* i5400_process_error_info Process the error info that is
* in the 'info' structure, previously retrieved from hardware
*/
static void i5400_process_error_info(struct mem_ctl_info *mci,
struct i5400_error_info *info)
{
u32 allErrors;
/* First handle any fatal errors that occurred */
allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
i5400_proccess_non_recoverable_info(mci, info, allErrors);
/* now handle any non-fatal errors that occurred */
i5400_process_nonfatal_error_info(mci, info);
}
/*
* i5400_clear_error Retrieve any error from the hardware
* but do NOT process that error.
* Used for 'clearing' out of previous errors
* Called by the Core module.
*/
static void i5400_clear_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
i5400_get_error_info(mci, &info);
}
/*
* i5400_check_error Retrieve and process errors reported by the
* hardware. Called by the Core module.
*/
static void i5400_check_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
i5400_get_error_info(mci, &info);
i5400_process_error_info(mci, &info);
}
/*
* i5400_put_devices 'put' all the devices that we have
* reserved via 'get'
*/
static void i5400_put_devices(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
pvt = mci->pvt_info;
/* Decrement usage count for devices */
pci_dev_put(pvt->branch_1);
pci_dev_put(pvt->branch_0);
pci_dev_put(pvt->fsb_error_regs);
pci_dev_put(pvt->branchmap_werrors);
}
/*
* i5400_get_devices Find and perform 'get' operation on the MCH's
* device/functions we want to reference for this driver
*
* Need to 'get' device 16 func 1 and func 2
*/
static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
{
struct i5400_pvt *pvt;
struct pci_dev *pdev;
pvt = mci->pvt_info;
pvt->branchmap_werrors = NULL;
pvt->fsb_error_regs = NULL;
pvt->branch_0 = NULL;
pvt->branch_1 = NULL;
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
while (1) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
if (!pdev) {
/* End of list, leave */
i5400_printk(KERN_ERR,
"'system address,Process Bus' "
"device not found:"
"vendor 0x%x device 0x%x ERR func 1 "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_ERR);
return -ENODEV;
}
/* Store device 16 func 1 */
if (PCI_FUNC(pdev->devfn) == 1)
break;
}
pvt->branchmap_werrors = pdev;
pdev = NULL;
while (1) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
if (!pdev) {
/* End of list, leave */
i5400_printk(KERN_ERR,
"'system address,Process Bus' "
"device not found:"
"vendor 0x%x device 0x%x ERR func 2 "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_ERR);
pci_dev_put(pvt->branchmap_werrors);
return -ENODEV;
}
/* Store device 16 func 2 */
if (PCI_FUNC(pdev->devfn) == 2)
break;
}
pvt->fsb_error_regs = pdev;
edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->system_address),
pvt->system_address->vendor, pvt->system_address->device);
edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
pci_name(pvt->branchmap_werrors),
pvt->branchmap_werrors->vendor,
pvt->branchmap_werrors->device);
edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
pci_name(pvt->fsb_error_regs),
pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
if (!pvt->branch_0) {
i5400_printk(KERN_ERR,
"MC: 'BRANCH 0' device not found:"
"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0);
pci_dev_put(pvt->fsb_error_regs);
pci_dev_put(pvt->branchmap_werrors);
return -ENODEV;
}
/* If this device claims to have more than 2 channels then
* fetch Branch 1's information
*/
if (pvt->maxch < CHANNELS_PER_BRANCH)
return 0;
pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_FBD1, NULL);
if (!pvt->branch_1) {
i5400_printk(KERN_ERR,
"MC: 'BRANCH 1' device not found:"
"vendor 0x%x device 0x%x Func 0 "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_FBD1);
pci_dev_put(pvt->branch_0);
pci_dev_put(pvt->fsb_error_regs);
pci_dev_put(pvt->branchmap_werrors);
return -ENODEV;
}
return 0;
}
/*
* determine_amb_present
*
* the AMB-present information is kept in a separate register per
* channel; selecting the right one requires knowing which channel
* is in question
*
* 2 branches, each with 2 channels
* b0_ambpresent0 for channel '0'
* b0_ambpresent1 for channel '1'
* b1_ambpresent0 for channel '2'
* b1_ambpresent1 for channel '3'
*/
static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
{
int amb_present;
if (channel < CHANNELS_PER_BRANCH) {
if (channel & 0x1)
amb_present = pvt->b0_ambpresent1;
else
amb_present = pvt->b0_ambpresent0;
} else {
if (channel & 0x1)
amb_present = pvt->b1_ambpresent1;
else
amb_present = pvt->b1_ambpresent0;
}
return amb_present;
}
/*
* determine_mtr(pvt, dimm, channel)
*
* return the proper MTR register as determined by the dimm and desired channel
*/
static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
{
int mtr;
int n;
/* There is one MTR for each slot pair of FB-DIMMs,
Each slot pair may be at branch 0 or branch 1.
*/
n = dimm;
if (n >= DIMMS_PER_CHANNEL) {
edac_dbg(0, "ERROR: trying to access an invalid dimm: %d\n",
dimm);
return 0;
}
if (channel < CHANNELS_PER_BRANCH)
mtr = pvt->b0_mtr[n];
else
mtr = pvt->b1_mtr[n];
return mtr;
}
/*
 * decode_mtr	Decode and dump the fields of a single MTR register
 */
static void decode_mtr(int slot_row, u16 mtr)
{
int ans;
ans = MTR_DIMMS_PRESENT(mtr);
edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
slot_row, mtr, ans ? "" : "NOT ");
if (!ans)
return;
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
MTR_DIMM_RANK(mtr) ? "double" : "single");
edac_dbg(2, "\t\tNUMROW: %s\n",
MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
"65,536 - 16 rows");
edac_dbg(2, "\t\tNUMCOL: %s\n",
MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
"reserved");
}
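/*
 * handle_channel() below derives the DIMM size from the MTR fields:
 * capacity is 2^(bank + row + column + rank address bits) locations of
 * 64 bits (8 bytes) each, i.e. megabytes = 2^(addr_bits + 3 - 20).
 */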
static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
struct i5400_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
mtr = determine_mtr(pvt, dimm, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
/* Determine if there is a DIMM present in this DIMM slot */
if (amb_present_reg & (1 << dimm)) {
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
/* Add the number of ROW bits */
addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
/* add the number of COLUMN bits */
addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
/* add the number of RANK bits */
addrBits += MTR_DIMM_RANK(mtr);
addrBits += 6; /* add 64 bits per DIMM */
addrBits -= 20; /* divide by 2^20 */
addrBits -= 3; /* 8 bits per byte */
dinfo->megabytes = 1 << addrBits;
}
}
}
/*
* calculate_dimm_size
*
* also will output a DIMM matrix map, if debug is enabled, for viewing
* how the DIMMs are populated
*/
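/*
 * Roughly (illustrative only, spacing not exact), the debug matrix for a
 * fully populated two-branch system looks like:
 *
 *   ----------------------------------------------------------
 *   dimm  3  2048 MB | 2048 MB | 2048 MB | 2048 MB |
 *   dimm  2  2048 MB | 2048 MB | 2048 MB | 2048 MB |
 *   ----------------------------------------------------------
 *   dimm  1  2048 MB | 2048 MB | 2048 MB | 2048 MB |
 *   dimm  0  2048 MB | 2048 MB | 2048 MB | 2048 MB |
 *   ----------------------------------------------------------
 *            channel 0 | channel 1 | channel 2 | channel 3 |
 *                   branch 0       |        branch 1       |
 */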
static void calculate_dimm_size(struct i5400_pvt *pvt)
{
struct i5400_dimm_info *dinfo;
int dimm, max_dimms;
char *p, *mem_buffer;
int space, n;
int channel, branch;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
mem_buffer = p = kmalloc(space, GFP_KERNEL);
if (p == NULL) {
i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
__FILE__, __func__);
return;
}
/* Scan all the actual DIMMS
* and calculate the information for each DIMM
* Start with the highest dimm first, to display it first
* and work toward the 0th dimm
*/
max_dimms = pvt->maxdimmperch;
for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
/* on an odd dimm, first output a 'boundary' marker,
* then reset the message buffer */
if (dimm & 0x1) {
n = snprintf(p, space, "---------------------------"
"-------------------------------");
p += n;
space -= n;
edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
n = snprintf(p, space, "dimm %2d ", dimm);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
dinfo = &pvt->dimm_info[dimm][channel];
handle_channel(pvt, dimm, channel, dinfo);
n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
p += n;
space -= n;
}
edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
/* Output the last bottom 'boundary' marker */
n = snprintf(p, space, "---------------------------"
"-------------------------------");
p += n;
space -= n;
edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
/* now output the 'channel' labels */
n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
n = snprintf(p, space, "channel %d | ", channel);
p += n;
space -= n;
}
space -= n;
edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
n = snprintf(p, space, " ");
p += n;
for (branch = 0; branch < MAX_BRANCHES; branch++) {
n = snprintf(p, space, " branch %d | ", branch);
p += n;
space -= n;
}
/* output the last message and free buffer */
edac_dbg(2, "%s\n", mem_buffer);
kfree(mem_buffer);
}
/*
* i5400_get_mc_regs reads in the necessary registers and
* caches them locally
*
* Fills in the private data members
*/
static void i5400_get_mc_regs(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
u32 actual_tolm;
u16 limit;
int slot_row;
int way0, way1;
pvt = mci->pvt_info;
pci_read_config_dword(pvt->system_address, AMBASE,
&pvt->u.ambase_bottom);
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
&pvt->u.ambase_top);
edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
/* Get the Branch Map regs */
pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
edac_dbg(2, "\nTOLM (number of 256M regions) =%u (0x%x)\n",
pvt->tolm, pvt->tolm);
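/*
 * Each TOLM unit is a 256 MB region (0.25 GB), so multiplying by 1000
 * and shifting right by two gives the top-of-low-memory in thousandths
 * of a GB for the debug print below.
 */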
actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
/* Get the MIR[0-1] regs */
limit = (pvt->mir0 >> 4) & 0x0fff;
way0 = pvt->mir0 & 0x1;
way1 = pvt->mir0 & 0x2;
edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
limit, way1, way0);
limit = (pvt->mir1 >> 4) & 0xfff;
way0 = pvt->mir1 & 0x1;
way1 = pvt->mir1 & 0x2;
edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
limit, way1, way0);
/* Get the set of MTR[0-3] regs by each branch */
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
int where = MTR0 + (slot_row * sizeof(u16));
/* Branch 0 set of MTR registers */
pci_read_config_word(pvt->branch_0, where,
&pvt->b0_mtr[slot_row]);
edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
slot_row, where, pvt->b0_mtr[slot_row]);
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_mtr[slot_row] = 0;
continue;
}
/* Branch 1 set of MTR registers */
pci_read_config_word(pvt->branch_1, where,
&pvt->b1_mtr[slot_row]);
edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
slot_row, where, pvt->b1_mtr[slot_row]);
}
/* Read and dump branch 0's MTRs */
edac_dbg(2, "Memory Technology Registers:\n");
edac_dbg(2, " Branch 0:\n");
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
&pvt->b0_ambpresent0);
edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
&pvt->b0_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
/* Only if we have 2 branches (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent0 = 0;
pvt->b1_ambpresent1 = 0;
} else {
/* Read and dump branch 1's MTRs */
edac_dbg(2, " Branch 1:\n");
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
&pvt->b1_ambpresent0);
edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
pvt->b1_ambpresent0);
pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
&pvt->b1_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
pvt->b1_ambpresent1);
}
/* Go and determine the size of each DIMM and place in an
* orderly matrix */
calculate_dimm_size(pvt);
}
/*
* i5400_init_dimms Initialize the 'dimms' table within
* the mci control structure with the
* addressing of memory.
*
* return:
* 0 success
* 1 no actual memory found on this MC
*/
static int i5400_init_dimms(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
struct dimm_info *dimm;
int ndimms;
int mtr;
int size_mb;
int channel, slot;
pvt = mci->pvt_info;
ndimms = 0;
/*
* FIXME: remove pvt->dimm_info[slot][channel] and use the 3
* layers here.
*/
for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
channel++) {
for (slot = 0; slot < mci->layers[2].size; slot++) {
mtr = determine_mtr(pvt, slot, channel);
/* if no DIMMS on this slot, continue */
if (!MTR_DIMMS_PRESENT(mtr))
continue;
dimm = edac_get_dimm(mci, channel / 2, channel % 2, slot);
size_mb = pvt->dimm_info[slot][channel].megabytes;
edac_dbg(2, "dimm (branch %d channel %d slot %d): %d.%03d GB\n",
channel / 2, channel % 2, slot,
size_mb / 1000, size_mb % 1000);
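/*
 * size_mb is in megabytes; assuming 4 KiB pages this is
 * size_mb * 2^20 / 2^12 = size_mb << 8 pages.
 */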
dimm->nr_pages = size_mb << 8;
dimm->grain = 8;
dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
DEV_X8 : DEV_X4;
dimm->mtype = MEM_FB_DDR2;
/*
* The ECC mechanism is SDDC (aka SECC), which
* is similar to Chipkill.
*/
dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
EDAC_S8ECD8ED : EDAC_S4ECD4ED;
ndimms++;
}
}
/*
* When just one DIMM is provided, it should be at location (0,0,0).
* In such single-DIMM mode, the SDDC algorithm degrades to SECDED.
*/
if (ndimms == 1)
mci->dimms[0]->edac_mode = EDAC_SECDED;
return (ndimms == 0);
}
/*
* i5400_enable_error_reporting
* Turn on the memory reporting features of the hardware
*/
static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
u32 fbd_error_mask;
pvt = mci->pvt_info;
/* Read the FBD Error Mask Register */
pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
&fbd_error_mask);
/* Enable with a '0' */
fbd_error_mask &= ~(ENABLE_EMASK_ALL);
pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
fbd_error_mask);
}
/*
* i5400_probe1 Probe for ONE instance of device to see if it is
* present.
* return:
* 0 for FOUND a device
* < 0 for error code
*/
static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct i5400_pvt *pvt;
struct edac_mc_layer layers[3];
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
/*
* allocate a new MC control structure
*
* This driver uses the DIMM slot as "csrow" and the rest as "channel".
*/
layers[0].type = EDAC_MC_LAYER_BRANCH;
layers[0].size = MAX_BRANCHES;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = CHANNELS_PER_BRANCH;
layers[1].is_virt_csrow = false;
layers[2].type = EDAC_MC_LAYER_SLOT;
layers[2].size = DIMMS_PER_CHANNEL;
layers[2].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
edac_dbg(0, "MC: mci = %p\n", mci);
mci->pdev = &pdev->dev; /* record ptr to the generic device */
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
pvt->maxch = MAX_CHANNELS;
pvt->maxdimmperch = DIMMS_PER_CHANNEL;
/* 'get' the pci devices we want to reserve for our use */
if (i5400_get_devices(mci, dev_idx))
goto fail0;
/* Time to get serious */
i5400_get_mc_regs(mci); /* retrieve the hardware registers */
mci->mc_idx = 0;
mci->mtype_cap = MEM_FLAG_FB_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i5400_edac.c";
mci->ctl_name = i5400_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
/* Set the function pointer to an actual operation function */
mci->edac_check = i5400_check_error;
/* initialize the MC control structure 'dimms' table
* with the mapping and control information */
if (i5400_init_dimms(mci)) {
edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5400_init_dimms() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
} else {
edac_dbg(1, "MC: Enable error reporting now\n");
i5400_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
goto fail1;
}
i5400_clear_error(mci);
/* allocating generic PCI control info */
i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i5400_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
return 0;
/* Error exit unwinding stack */
fail1:
i5400_put_devices(mci);
fail0:
edac_mc_free(mci);
return -ENODEV;
}
/*
* i5400_init_one constructor for one instance of device
*
* returns:
* negative on error
* count (>= 0)
*/
static int i5400_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
edac_dbg(0, "MC:\n");
/* wake up device */
rc = pci_enable_device(pdev);
if (rc)
return rc;
/* now probe and enable the device */
return i5400_probe1(pdev, id->driver_data);
}
/*
* i5400_remove_one destructor for one instance of device
*
*/
static void i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
if (i5400_pci)
edac_pci_release_generic_ctl(i5400_pci);
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
/* retrieve references to resources, and free those resources */
i5400_put_devices(mci);
pci_disable_device(pdev);
edac_mc_free(mci);
}
/*
* pci_device_id table for which devices we are looking for
*
* The "E500P" device is the first device supported.
*/
static const struct pci_device_id i5400_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
{0,} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);
/*
* i5400_driver pci_driver structure for this module
*
*/
static struct pci_driver i5400_driver = {
.name = "i5400_edac",
.probe = i5400_init_one,
.remove = i5400_remove_one,
.id_table = i5400_pci_tbl,
};
/*
* i5400_init Module entry function
* Try to initialize this module for its devices
*/
static int __init i5400_init(void)
{
int pci_rc;
edac_dbg(2, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i5400_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
/*
* i5400_exit() Module exit function
* Unregister the driver
*/
static void __exit i5400_exit(void)
{
edac_dbg(2, "MC:\n");
pci_unregister_driver(&i5400_driver);
}
module_init(i5400_init);
module_exit(i5400_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Woodard <[email protected]>");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
I5400_REVISION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i5400_edac.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* APM X-Gene SoC EDAC (error detection and correction)
*
* Copyright (c) 2015, Applied Micro Circuits Corporation
* Author: Feng Kan <[email protected]>
* Loc Ho <[email protected]>
*/
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include "edac_module.h"
#define EDAC_MOD_STR "xgene_edac"
/* Global error configuration status registers (CSR) */
#define PCPHPERRINTSTS 0x0000
#define PCPHPERRINTMSK 0x0004
#define MCU_CTL_ERR_MASK BIT(12)
#define IOB_PA_ERR_MASK BIT(11)
#define IOB_BA_ERR_MASK BIT(10)
#define IOB_XGIC_ERR_MASK BIT(9)
#define IOB_RB_ERR_MASK BIT(8)
#define L3C_UNCORR_ERR_MASK BIT(5)
#define MCU_UNCORR_ERR_MASK BIT(4)
#define PMD3_MERR_MASK BIT(3)
#define PMD2_MERR_MASK BIT(2)
#define PMD1_MERR_MASK BIT(1)
#define PMD0_MERR_MASK BIT(0)
#define PCPLPERRINTSTS 0x0008
#define PCPLPERRINTMSK 0x000C
#define CSW_SWITCH_TRACE_ERR_MASK BIT(2)
#define L3C_CORR_ERR_MASK BIT(1)
#define MCU_CORR_ERR_MASK BIT(0)
#define MEMERRINTSTS 0x0010
#define MEMERRINTMSK 0x0014
struct xgene_edac {
struct device *dev;
struct regmap *csw_map;
struct regmap *mcba_map;
struct regmap *mcbb_map;
struct regmap *efuse_map;
struct regmap *rb_map;
void __iomem *pcp_csr;
spinlock_t lock;
struct dentry *dfs;
struct list_head mcus;
struct list_head pmds;
struct list_head l3s;
struct list_head socs;
struct mutex mc_lock;
int mc_active_mask;
int mc_registered_mask;
};
static void xgene_edac_pcp_rd(struct xgene_edac *edac, u32 reg, u32 *val)
{
*val = readl(edac->pcp_csr + reg);
}
static void xgene_edac_pcp_clrbits(struct xgene_edac *edac, u32 reg,
u32 bits_mask)
{
u32 val;
spin_lock(&edac->lock);
val = readl(edac->pcp_csr + reg);
val &= ~bits_mask;
writel(val, edac->pcp_csr + reg);
spin_unlock(&edac->lock);
}
static void xgene_edac_pcp_setbits(struct xgene_edac *edac, u32 reg,
u32 bits_mask)
{
u32 val;
spin_lock(&edac->lock);
val = readl(edac->pcp_csr + reg);
val |= bits_mask;
writel(val, edac->pcp_csr + reg);
spin_unlock(&edac->lock);
}
/* Memory controller error CSR */
#define MCU_MAX_RANK 8
#define MCU_RANK_STRIDE 0x40
#define MCUGECR 0x0110
#define MCU_GECR_DEMANDUCINTREN_MASK BIT(0)
#define MCU_GECR_BACKUCINTREN_MASK BIT(1)
#define MCU_GECR_CINTREN_MASK BIT(2)
#define MUC_GECR_MCUADDRERREN_MASK BIT(9)
#define MCUGESR 0x0114
#define MCU_GESR_ADDRNOMATCH_ERR_MASK BIT(7)
#define MCU_GESR_ADDRMULTIMATCH_ERR_MASK BIT(6)
#define MCU_GESR_PHYP_ERR_MASK BIT(3)
#define MCUESRR0 0x0314
#define MCU_ESRR_MULTUCERR_MASK BIT(3)
#define MCU_ESRR_BACKUCERR_MASK BIT(2)
#define MCU_ESRR_DEMANDUCERR_MASK BIT(1)
#define MCU_ESRR_CERR_MASK BIT(0)
#define MCUESRRA0 0x0318
#define MCUEBLRR0 0x031c
#define MCU_EBLRR_ERRBANK_RD(src) (((src) & 0x00000007) >> 0)
#define MCUERCRR0 0x0320
#define MCU_ERCRR_ERRROW_RD(src) (((src) & 0xFFFF0000) >> 16)
#define MCU_ERCRR_ERRCOL_RD(src) ((src) & 0x00000FFF)
#define MCUSBECNT0 0x0324
#define MCU_SBECNT_COUNT(src) ((src) & 0xFFFF)
#define CSW_CSWCR 0x0000
#define CSW_CSWCR_DUALMCB_MASK BIT(0)
#define MCBADDRMR 0x0000
#define MCBADDRMR_MCU_INTLV_MODE_MASK BIT(3)
#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2)
#define MCBADDRMR_MCB_INTLV_MODE_MASK BIT(1)
#define MCBADDRMR_ADDRESS_MODE_MASK BIT(0)
struct xgene_edac_mc_ctx {
struct list_head next;
char *name;
struct mem_ctl_info *mci;
struct xgene_edac *edac;
void __iomem *mcu_csr;
u32 mcu_id;
};
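/*
 * debugfs error-injection hook: a write to the "inject_ctrl" node stores
 * the error bits into the per-rank MCUESRRA alias registers which, going
 * by the register naming, presumably latches the corresponding status
 * bits so the next check reports injected errors.
 */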
static ssize_t xgene_edac_mc_err_inject_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct mem_ctl_info *mci = file->private_data;
struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
int i;
for (i = 0; i < MCU_MAX_RANK; i++) {
writel(MCU_ESRR_MULTUCERR_MASK | MCU_ESRR_BACKUCERR_MASK |
MCU_ESRR_DEMANDUCERR_MASK | MCU_ESRR_CERR_MASK,
ctx->mcu_csr + MCUESRRA0 + i * MCU_RANK_STRIDE);
}
return count;
}
static const struct file_operations xgene_edac_mc_debug_inject_fops = {
.open = simple_open,
.write = xgene_edac_mc_err_inject_write,
.llseek = generic_file_llseek,
};
static void xgene_edac_mc_create_debugfs_node(struct mem_ctl_info *mci)
{
if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
return;
if (!mci->debugfs)
return;
edac_debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
&xgene_edac_mc_debug_inject_fops);
}
static void xgene_edac_mc_check(struct mem_ctl_info *mci)
{
struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
unsigned int pcp_hp_stat;
unsigned int pcp_lp_stat;
u32 reg;
u32 rank;
u32 bank;
u32 count;
u32 col_row;
xgene_edac_pcp_rd(ctx->edac, PCPHPERRINTSTS, &pcp_hp_stat);
xgene_edac_pcp_rd(ctx->edac, PCPLPERRINTSTS, &pcp_lp_stat);
if (!((MCU_UNCORR_ERR_MASK & pcp_hp_stat) ||
(MCU_CTL_ERR_MASK & pcp_hp_stat) ||
(MCU_CORR_ERR_MASK & pcp_lp_stat)))
return;
for (rank = 0; rank < MCU_MAX_RANK; rank++) {
reg = readl(ctx->mcu_csr + MCUESRR0 + rank * MCU_RANK_STRIDE);
/* Detect uncorrectable memory error */
if (reg & (MCU_ESRR_DEMANDUCERR_MASK |
MCU_ESRR_BACKUCERR_MASK)) {
/* Detected uncorrectable memory error */
edac_mc_chipset_printk(mci, KERN_ERR, "X-Gene",
"MCU uncorrectable error at rank %d\n", rank);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1, 0, 0, 0, 0, 0, -1, mci->ctl_name, "");
}
/* Detect correctable memory error */
if (reg & MCU_ESRR_CERR_MASK) {
bank = readl(ctx->mcu_csr + MCUEBLRR0 +
rank * MCU_RANK_STRIDE);
col_row = readl(ctx->mcu_csr + MCUERCRR0 +
rank * MCU_RANK_STRIDE);
count = readl(ctx->mcu_csr + MCUSBECNT0 +
rank * MCU_RANK_STRIDE);
edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
"MCU correctable error at rank %d bank %d column %d row %d count %d\n",
rank, MCU_EBLRR_ERRBANK_RD(bank),
MCU_ERCRR_ERRCOL_RD(col_row),
MCU_ERCRR_ERRROW_RD(col_row),
MCU_SBECNT_COUNT(count));
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1, 0, 0, 0, 0, 0, -1, mci->ctl_name, "");
}
/* Clear all error registers */
writel(0x0, ctx->mcu_csr + MCUEBLRR0 + rank * MCU_RANK_STRIDE);
writel(0x0, ctx->mcu_csr + MCUERCRR0 + rank * MCU_RANK_STRIDE);
writel(0x0, ctx->mcu_csr + MCUSBECNT0 +
rank * MCU_RANK_STRIDE);
writel(reg, ctx->mcu_csr + MCUESRR0 + rank * MCU_RANK_STRIDE);
}
/* Detect memory controller error */
reg = readl(ctx->mcu_csr + MCUGESR);
if (reg) {
if (reg & MCU_GESR_ADDRNOMATCH_ERR_MASK)
edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
"MCU address miss-match error\n");
if (reg & MCU_GESR_ADDRMULTIMATCH_ERR_MASK)
edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
"MCU address multi-match error\n");
writel(reg, ctx->mcu_csr + MCUGESR);
}
}
static void xgene_edac_mc_irq_ctl(struct mem_ctl_info *mci, bool enable)
{
struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
unsigned int val;
if (edac_op_state != EDAC_OPSTATE_INT)
return;
mutex_lock(&ctx->edac->mc_lock);
/*
* As there is only a single bit for the error enable and interrupt mask,
* we must only enable the top level interrupt after all MCUs are
* registered. Otherwise, if there is an error and the corresponding
* MCU has not registered, the interrupt will never get cleared. To
* determine that all MCUs have registered, we keep track of active
* MCUs and registered MCUs.
*/
if (enable) {
/* Set registered MCU bit */
ctx->edac->mc_registered_mask |= 1 << ctx->mcu_id;
/* Enable interrupt after all active MCU registered */
if (ctx->edac->mc_registered_mask ==
ctx->edac->mc_active_mask) {
/* Enable memory controller top level interrupt */
xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
MCU_UNCORR_ERR_MASK |
MCU_CTL_ERR_MASK);
xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
MCU_CORR_ERR_MASK);
}
/* Enable MCU interrupt and error reporting */
val = readl(ctx->mcu_csr + MCUGECR);
val |= MCU_GECR_DEMANDUCINTREN_MASK |
MCU_GECR_BACKUCINTREN_MASK |
MCU_GECR_CINTREN_MASK |
MUC_GECR_MCUADDRERREN_MASK;
writel(val, ctx->mcu_csr + MCUGECR);
} else {
/* Disable MCU interrupt */
val = readl(ctx->mcu_csr + MCUGECR);
val &= ~(MCU_GECR_DEMANDUCINTREN_MASK |
MCU_GECR_BACKUCINTREN_MASK |
MCU_GECR_CINTREN_MASK |
MUC_GECR_MCUADDRERREN_MASK);
writel(val, ctx->mcu_csr + MCUGECR);
/* Disable memory controller top level interrupt */
xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
MCU_UNCORR_ERR_MASK | MCU_CTL_ERR_MASK);
xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
MCU_CORR_ERR_MASK);
/* Clear registered MCU bit */
ctx->edac->mc_registered_mask &= ~(1 << ctx->mcu_id);
}
mutex_unlock(&ctx->edac->mc_lock);
}
static int xgene_edac_mc_is_active(struct xgene_edac_mc_ctx *ctx, int mc_idx)
{
unsigned int reg;
u32 mcu_mask;
if (regmap_read(ctx->edac->csw_map, CSW_CSWCR, ®))
return 0;
if (reg & CSW_CSWCR_DUALMCB_MASK) {
/*
* Dual MCB active - Determine if all 4 active or just MCU0
* and MCU2 active
*/
if (regmap_read(ctx->edac->mcbb_map, MCBADDRMR, ®))
return 0;
mcu_mask = (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
} else {
/*
* Single MCB active - Determine if MCU0/MCU1 or just MCU0
* active
*/
if (regmap_read(ctx->edac->mcba_map, MCBADDRMR, ®))
return 0;
mcu_mask = (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
}
/* Save the active MC mask if it hasn't been set already */
if (!ctx->edac->mc_active_mask)
ctx->edac->mc_active_mask = mcu_mask;
return (mcu_mask & (1 << mc_idx)) ? 1 : 0;
}
static int xgene_edac_mc_add(struct xgene_edac *edac, struct device_node *np)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct xgene_edac_mc_ctx tmp_ctx;
struct xgene_edac_mc_ctx *ctx;
struct resource res;
int rc;
memset(&tmp_ctx, 0, sizeof(tmp_ctx));
tmp_ctx.edac = edac;
if (!devres_open_group(edac->dev, xgene_edac_mc_add, GFP_KERNEL))
return -ENOMEM;
rc = of_address_to_resource(np, 0, &res);
if (rc < 0) {
dev_err(edac->dev, "no MCU resource address\n");
goto err_group;
}
tmp_ctx.mcu_csr = devm_ioremap_resource(edac->dev, &res);
if (IS_ERR(tmp_ctx.mcu_csr)) {
dev_err(edac->dev, "unable to map MCU resource\n");
rc = PTR_ERR(tmp_ctx.mcu_csr);
goto err_group;
}
/* Ignore non-active MCU */
if (of_property_read_u32(np, "memory-controller", &tmp_ctx.mcu_id)) {
dev_err(edac->dev, "no memory-controller property\n");
rc = -ENODEV;
goto err_group;
}
if (!xgene_edac_mc_is_active(&tmp_ctx, tmp_ctx.mcu_id)) {
rc = -ENODEV;
goto err_group;
}
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 4;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = 2;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(tmp_ctx.mcu_id, ARRAY_SIZE(layers), layers,
sizeof(*ctx));
if (!mci) {
rc = -ENOMEM;
goto err_group;
}
ctx = mci->pvt_info;
*ctx = tmp_ctx; /* Copy over resource value */
ctx->name = "xgene_edac_mc_err";
ctx->mci = mci;
mci->pdev = &mci->dev;
mci->ctl_name = ctx->name;
mci->dev_name = ctx->name;
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | MEM_FLAG_RDDR3 |
MEM_FLAG_DDR | MEM_FLAG_DDR2 | MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_page_to_phys = NULL;
mci->scrub_cap = SCRUB_FLAG_HW_SRC;
mci->scrub_mode = SCRUB_HW_SRC;
if (edac_op_state == EDAC_OPSTATE_POLL)
mci->edac_check = xgene_edac_mc_check;
if (edac_mc_add_mc(mci)) {
dev_err(edac->dev, "edac_mc_add_mc failed\n");
rc = -EINVAL;
goto err_free;
}
xgene_edac_mc_create_debugfs_node(mci);
list_add(&ctx->next, &edac->mcus);
xgene_edac_mc_irq_ctl(mci, true);
devres_remove_group(edac->dev, xgene_edac_mc_add);
dev_info(edac->dev, "X-Gene EDAC MC registered\n");
return 0;
err_free:
edac_mc_free(mci);
err_group:
devres_release_group(edac->dev, xgene_edac_mc_add);
return rc;
}
static int xgene_edac_mc_remove(struct xgene_edac_mc_ctx *mcu)
{
xgene_edac_mc_irq_ctl(mcu->mci, false);
edac_mc_del_mc(&mcu->mci->dev);
edac_mc_free(mcu->mci);
return 0;
}
/* CPU L1/L2 error CSR */
#define MAX_CPU_PER_PMD 2
#define CPU_CSR_STRIDE 0x00100000
#define CPU_L2C_PAGE 0x000D0000
#define CPU_MEMERR_L2C_PAGE 0x000E0000
#define CPU_MEMERR_CPU_PAGE 0x000F0000
#define MEMERR_CPU_ICFECR_PAGE_OFFSET 0x0000
#define MEMERR_CPU_ICFESR_PAGE_OFFSET 0x0004
#define MEMERR_CPU_ICFESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
#define MEMERR_CPU_ICFESR_ERRINDEX_RD(src) (((src) & 0x003F0000) >> 16)
#define MEMERR_CPU_ICFESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
#define MEMERR_CPU_ICFESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
#define MEMERR_CPU_ICFESR_MULTCERR_MASK BIT(2)
#define MEMERR_CPU_ICFESR_CERR_MASK BIT(0)
#define MEMERR_CPU_LSUESR_PAGE_OFFSET 0x000c
#define MEMERR_CPU_LSUESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
#define MEMERR_CPU_LSUESR_ERRINDEX_RD(src) (((src) & 0x003F0000) >> 16)
#define MEMERR_CPU_LSUESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
#define MEMERR_CPU_LSUESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
#define MEMERR_CPU_LSUESR_MULTCERR_MASK BIT(2)
#define MEMERR_CPU_LSUESR_CERR_MASK BIT(0)
#define MEMERR_CPU_LSUECR_PAGE_OFFSET 0x0008
#define MEMERR_CPU_MMUECR_PAGE_OFFSET 0x0010
#define MEMERR_CPU_MMUESR_PAGE_OFFSET 0x0014
#define MEMERR_CPU_MMUESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
#define MEMERR_CPU_MMUESR_ERRINDEX_RD(src) (((src) & 0x007F0000) >> 16)
#define MEMERR_CPU_MMUESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
#define MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK BIT(7)
#define MEMERR_CPU_MMUESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
#define MEMERR_CPU_MMUESR_MULTCERR_MASK BIT(2)
#define MEMERR_CPU_MMUESR_CERR_MASK BIT(0)
#define MEMERR_CPU_ICFESRA_PAGE_OFFSET 0x0804
#define MEMERR_CPU_LSUESRA_PAGE_OFFSET 0x080c
#define MEMERR_CPU_MMUESRA_PAGE_OFFSET 0x0814
#define MEMERR_L2C_L2ECR_PAGE_OFFSET 0x0000
#define MEMERR_L2C_L2ESR_PAGE_OFFSET 0x0004
#define MEMERR_L2C_L2ESR_ERRSYN_RD(src) (((src) & 0xFF000000) >> 24)
#define MEMERR_L2C_L2ESR_ERRWAY_RD(src) (((src) & 0x00FC0000) >> 18)
#define MEMERR_L2C_L2ESR_ERRCPU_RD(src) (((src) & 0x00020000) >> 17)
#define MEMERR_L2C_L2ESR_ERRGROUP_RD(src) (((src) & 0x0000E000) >> 13)
#define MEMERR_L2C_L2ESR_ERRACTION_RD(src) (((src) & 0x00001C00) >> 10)
#define MEMERR_L2C_L2ESR_ERRTYPE_RD(src) (((src) & 0x00000300) >> 8)
#define MEMERR_L2C_L2ESR_MULTUCERR_MASK BIT(3)
#define MEMERR_L2C_L2ESR_MULTICERR_MASK BIT(2)
#define MEMERR_L2C_L2ESR_UCERR_MASK BIT(1)
#define MEMERR_L2C_L2ESR_ERR_MASK BIT(0)
#define MEMERR_L2C_L2EALR_PAGE_OFFSET 0x0008
#define CPUX_L2C_L2RTOCR_PAGE_OFFSET 0x0010
#define MEMERR_L2C_L2EAHR_PAGE_OFFSET 0x000c
#define CPUX_L2C_L2RTOSR_PAGE_OFFSET 0x0014
#define MEMERR_L2C_L2RTOSR_MULTERR_MASK BIT(1)
#define MEMERR_L2C_L2RTOSR_ERR_MASK BIT(0)
#define CPUX_L2C_L2RTOALR_PAGE_OFFSET 0x0018
#define CPUX_L2C_L2RTOAHR_PAGE_OFFSET 0x001c
#define MEMERR_L2C_L2ESRA_PAGE_OFFSET 0x0804
/*
* Processor Module Domain (PMD) context - Context for a pair of processors.
* Each PMD consists of 2 CPUs and a shared L2 cache. Each CPU has
* its own L1 cache.
*/
struct xgene_edac_pmd_ctx {
struct list_head next;
struct device ddev;
char *name;
struct xgene_edac *edac;
struct edac_device_ctl_info *edac_dev;
void __iomem *pmd_csr;
u32 pmd;
int version;
};
static void xgene_edac_pmd_l1_check(struct edac_device_ctl_info *edac_dev,
int cpu_idx)
{
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
void __iomem *pg_f;
u32 val;
pg_f = ctx->pmd_csr + cpu_idx * CPU_CSR_STRIDE + CPU_MEMERR_CPU_PAGE;
val = readl(pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
if (!val)
goto chk_lsu;
dev_err(edac_dev->dev,
"CPU%d L1 memory error ICF 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
MEMERR_CPU_ICFESR_ERRWAY_RD(val),
MEMERR_CPU_ICFESR_ERRINDEX_RD(val),
MEMERR_CPU_ICFESR_ERRINFO_RD(val));
if (val & MEMERR_CPU_ICFESR_CERR_MASK)
dev_err(edac_dev->dev, "One or more correctable error\n");
if (val & MEMERR_CPU_ICFESR_MULTCERR_MASK)
dev_err(edac_dev->dev, "Multiple correctable error\n");
switch (MEMERR_CPU_ICFESR_ERRTYPE_RD(val)) {
case 1:
dev_err(edac_dev->dev, "L1 TLB multiple hit\n");
break;
case 2:
dev_err(edac_dev->dev, "Way select multiple hit\n");
break;
case 3:
dev_err(edac_dev->dev, "Physical tag parity error\n");
break;
case 4:
case 5:
dev_err(edac_dev->dev, "L1 data parity error\n");
break;
case 6:
dev_err(edac_dev->dev, "L1 pre-decode parity error\n");
break;
}
/* Clear any HW errors */
writel(val, pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
if (val & (MEMERR_CPU_ICFESR_CERR_MASK |
MEMERR_CPU_ICFESR_MULTCERR_MASK))
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
chk_lsu:
val = readl(pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
if (!val)
goto chk_mmu;
dev_err(edac_dev->dev,
"CPU%d memory error LSU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
MEMERR_CPU_LSUESR_ERRWAY_RD(val),
MEMERR_CPU_LSUESR_ERRINDEX_RD(val),
MEMERR_CPU_LSUESR_ERRINFO_RD(val));
if (val & MEMERR_CPU_LSUESR_CERR_MASK)
dev_err(edac_dev->dev, "One or more correctable error\n");
if (val & MEMERR_CPU_LSUESR_MULTCERR_MASK)
dev_err(edac_dev->dev, "Multiple correctable error\n");
switch (MEMERR_CPU_LSUESR_ERRTYPE_RD(val)) {
case 0:
dev_err(edac_dev->dev, "Load tag error\n");
break;
case 1:
dev_err(edac_dev->dev, "Load data error\n");
break;
case 2:
dev_err(edac_dev->dev, "WSL multihit error\n");
break;
case 3:
dev_err(edac_dev->dev, "Store tag error\n");
break;
case 4:
dev_err(edac_dev->dev,
"DTB multihit from load pipeline error\n");
break;
case 5:
dev_err(edac_dev->dev,
"DTB multihit from store pipeline error\n");
break;
}
/* Clear any HW errors */
writel(val, pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
if (val & (MEMERR_CPU_LSUESR_CERR_MASK |
MEMERR_CPU_LSUESR_MULTCERR_MASK))
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
chk_mmu:
val = readl(pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
if (!val)
return;
dev_err(edac_dev->dev,
"CPU%d memory error MMU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X %s\n",
ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
MEMERR_CPU_MMUESR_ERRWAY_RD(val),
MEMERR_CPU_MMUESR_ERRINDEX_RD(val),
MEMERR_CPU_MMUESR_ERRINFO_RD(val),
val & MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK ? "LSU" : "ICF");
if (val & MEMERR_CPU_MMUESR_CERR_MASK)
dev_err(edac_dev->dev, "One or more correctable error\n");
if (val & MEMERR_CPU_MMUESR_MULTCERR_MASK)
dev_err(edac_dev->dev, "Multiple correctable error\n");
switch (MEMERR_CPU_MMUESR_ERRTYPE_RD(val)) {
case 0:
dev_err(edac_dev->dev, "Stage 1 UTB hit error\n");
break;
case 1:
dev_err(edac_dev->dev, "Stage 1 UTB miss error\n");
break;
case 2:
dev_err(edac_dev->dev, "Stage 1 UTB allocate error\n");
break;
case 3:
dev_err(edac_dev->dev, "TMO operation single bank error\n");
break;
case 4:
dev_err(edac_dev->dev, "Stage 2 UTB error\n");
break;
case 5:
dev_err(edac_dev->dev, "Stage 2 UTB miss error\n");
break;
case 6:
dev_err(edac_dev->dev, "Stage 2 UTB allocate error\n");
break;
case 7:
dev_err(edac_dev->dev, "TMO operation multiple bank error\n");
break;
}
/* Clear any HW errors */
writel(val, pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
}
static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
void __iomem *pg_d;
void __iomem *pg_e;
u32 val_hi;
u32 val_lo;
u32 val;
/* Check L2 */
pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
val = readl(pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
if (!val)
goto chk_l2c;
val_lo = readl(pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET);
val_hi = readl(pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET);
dev_err(edac_dev->dev,
"PMD%d memory error L2C L2ESR 0x%08X @ 0x%08X.%08X\n",
ctx->pmd, val, val_hi, val_lo);
dev_err(edac_dev->dev,
"ErrSyndrome 0x%02X ErrWay 0x%02X ErrCpu %d ErrGroup 0x%02X ErrAction 0x%02X\n",
MEMERR_L2C_L2ESR_ERRSYN_RD(val),
MEMERR_L2C_L2ESR_ERRWAY_RD(val),
MEMERR_L2C_L2ESR_ERRCPU_RD(val),
MEMERR_L2C_L2ESR_ERRGROUP_RD(val),
MEMERR_L2C_L2ESR_ERRACTION_RD(val));
if (val & MEMERR_L2C_L2ESR_ERR_MASK)
dev_err(edac_dev->dev, "One or more correctable error\n");
if (val & MEMERR_L2C_L2ESR_MULTICERR_MASK)
dev_err(edac_dev->dev, "Multiple correctable error\n");
if (val & MEMERR_L2C_L2ESR_UCERR_MASK)
dev_err(edac_dev->dev, "One or more uncorrectable error\n");
if (val & MEMERR_L2C_L2ESR_MULTUCERR_MASK)
dev_err(edac_dev->dev, "Multiple uncorrectable error\n");
switch (MEMERR_L2C_L2ESR_ERRTYPE_RD(val)) {
case 0:
dev_err(edac_dev->dev, "Outbound SDB parity error\n");
break;
case 1:
dev_err(edac_dev->dev, "Inbound SDB parity error\n");
break;
case 2:
dev_err(edac_dev->dev, "Tag ECC error\n");
break;
case 3:
dev_err(edac_dev->dev, "Data ECC error\n");
break;
}
/* Clear any HW errors */
writel(val, pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
if (val & (MEMERR_L2C_L2ESR_ERR_MASK |
MEMERR_L2C_L2ESR_MULTICERR_MASK))
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
if (val & (MEMERR_L2C_L2ESR_UCERR_MASK |
MEMERR_L2C_L2ESR_MULTUCERR_MASK))
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
chk_l2c:
/* Check if any memory request timed out on L2 cache */
pg_d = ctx->pmd_csr + CPU_L2C_PAGE;
val = readl(pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET);
if (val) {
val_lo = readl(pg_d + CPUX_L2C_L2RTOALR_PAGE_OFFSET);
val_hi = readl(pg_d + CPUX_L2C_L2RTOAHR_PAGE_OFFSET);
dev_err(edac_dev->dev,
"PMD%d L2C error L2C RTOSR 0x%08X @ 0x%08X.%08X\n",
ctx->pmd, val, val_hi, val_lo);
writel(val, pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET);
}
}
static void xgene_edac_pmd_check(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
unsigned int pcp_hp_stat;
int i;
xgene_edac_pcp_rd(ctx->edac, PCPHPERRINTSTS, &pcp_hp_stat);
if (!((PMD0_MERR_MASK << ctx->pmd) & pcp_hp_stat))
return;
/* Check CPU L1 error */
for (i = 0; i < MAX_CPU_PER_PMD; i++)
xgene_edac_pmd_l1_check(edac_dev, i);
/* Check CPU L2 error */
xgene_edac_pmd_l2_check(edac_dev);
}
static void xgene_edac_pmd_cpu_hw_cfg(struct edac_device_ctl_info *edac_dev,
int cpu)
{
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
void __iomem *pg_f = ctx->pmd_csr + cpu * CPU_CSR_STRIDE +
CPU_MEMERR_CPU_PAGE;
/*
* Enable CPU memory error:
* MEMERR_CPU_ICFESRA, MEMERR_CPU_LSUESRA, and MEMERR_CPU_MMUESRA
*/
writel(0x00000301, pg_f + MEMERR_CPU_ICFECR_PAGE_OFFSET);
writel(0x00000301, pg_f + MEMERR_CPU_LSUECR_PAGE_OFFSET);
writel(0x00000101, pg_f + MEMERR_CPU_MMUECR_PAGE_OFFSET);
}
static void xgene_edac_pmd_hw_cfg(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
void __iomem *pg_d = ctx->pmd_csr + CPU_L2C_PAGE;
void __iomem *pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
/* Enable PMD memory error - MEMERR_L2C_L2ECR and L2C_L2RTOCR */
writel(0x00000703, pg_e + MEMERR_L2C_L2ECR_PAGE_OFFSET);
/* Configure L2C HW request time out feature if supported */
if (ctx->version > 1)
writel(0x00000119, pg_d + CPUX_L2C_L2RTOCR_PAGE_OFFSET);
}
static void xgene_edac_pmd_hw_ctl(struct edac_device_ctl_info *edac_dev,
bool enable)
{
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
int i;
/* Enable PMD error interrupt */
if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
if (enable)
xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
PMD0_MERR_MASK << ctx->pmd);
else
xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
PMD0_MERR_MASK << ctx->pmd);
}
if (enable) {
xgene_edac_pmd_hw_cfg(edac_dev);
/* Two CPUs per PMD */
for (i = 0; i < MAX_CPU_PER_PMD; i++)
xgene_edac_pmd_cpu_hw_cfg(edac_dev, i);
}
}
static ssize_t xgene_edac_pmd_l1_inject_ctrl_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dev = file->private_data;
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
void __iomem *cpux_pg_f;
int i;
for (i = 0; i < MAX_CPU_PER_PMD; i++) {
cpux_pg_f = ctx->pmd_csr + i * CPU_CSR_STRIDE +
CPU_MEMERR_CPU_PAGE;
writel(MEMERR_CPU_ICFESR_MULTCERR_MASK |
MEMERR_CPU_ICFESR_CERR_MASK,
cpux_pg_f + MEMERR_CPU_ICFESRA_PAGE_OFFSET);
writel(MEMERR_CPU_LSUESR_MULTCERR_MASK |
MEMERR_CPU_LSUESR_CERR_MASK,
cpux_pg_f + MEMERR_CPU_LSUESRA_PAGE_OFFSET);
writel(MEMERR_CPU_MMUESR_MULTCERR_MASK |
MEMERR_CPU_MMUESR_CERR_MASK,
cpux_pg_f + MEMERR_CPU_MMUESRA_PAGE_OFFSET);
}
return count;
}
static ssize_t xgene_edac_pmd_l2_inject_ctrl_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dev = file->private_data;
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
void __iomem *pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
writel(MEMERR_L2C_L2ESR_MULTUCERR_MASK |
MEMERR_L2C_L2ESR_MULTICERR_MASK |
MEMERR_L2C_L2ESR_UCERR_MASK |
MEMERR_L2C_L2ESR_ERR_MASK,
pg_e + MEMERR_L2C_L2ESRA_PAGE_OFFSET);
return count;
}
static const struct file_operations xgene_edac_pmd_debug_inject_fops[] = {
{
.open = simple_open,
.write = xgene_edac_pmd_l1_inject_ctrl_write,
.llseek = generic_file_llseek, },
{
.open = simple_open,
.write = xgene_edac_pmd_l2_inject_ctrl_write,
.llseek = generic_file_llseek, },
{ }
};
static void
xgene_edac_pmd_create_debugfs_nodes(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
struct dentry *dbgfs_dir;
char name[10];
if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !ctx->edac->dfs)
return;
snprintf(name, sizeof(name), "PMD%d", ctx->pmd);
dbgfs_dir = edac_debugfs_create_dir_at(name, ctx->edac->dfs);
if (!dbgfs_dir)
return;
edac_debugfs_create_file("l1_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
&xgene_edac_pmd_debug_inject_fops[0]);
edac_debugfs_create_file("l2_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
&xgene_edac_pmd_debug_inject_fops[1]);
}
static int xgene_edac_pmd_available(u32 efuse, int pmd)
{
return (efuse & (1 << pmd)) ? 0 : 1;
}
static int xgene_edac_pmd_add(struct xgene_edac *edac, struct device_node *np,
int version)
{
struct edac_device_ctl_info *edac_dev;
struct xgene_edac_pmd_ctx *ctx;
struct resource res;
char edac_name[10];
u32 pmd;
int rc;
u32 val;
if (!devres_open_group(edac->dev, xgene_edac_pmd_add, GFP_KERNEL))
return -ENOMEM;
/* Determine if this PMD is disabled */
if (of_property_read_u32(np, "pmd-controller", &pmd)) {
dev_err(edac->dev, "no pmd-controller property\n");
rc = -ENODEV;
goto err_group;
}
rc = regmap_read(edac->efuse_map, 0, &val);
if (rc)
goto err_group;
if (!xgene_edac_pmd_available(val, pmd)) {
rc = -ENODEV;
goto err_group;
}
snprintf(edac_name, sizeof(edac_name), "l2c%d", pmd);
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
edac_name, 1, "l2c", 1, 2, NULL,
0, edac_device_alloc_index());
if (!edac_dev) {
rc = -ENOMEM;
goto err_group;
}
ctx = edac_dev->pvt_info;
ctx->name = "xgene_pmd_err";
ctx->pmd = pmd;
ctx->edac = edac;
ctx->edac_dev = edac_dev;
ctx->ddev = *edac->dev;
ctx->version = version;
edac_dev->dev = &ctx->ddev;
edac_dev->ctl_name = ctx->name;
edac_dev->dev_name = ctx->name;
edac_dev->mod_name = EDAC_MOD_STR;
rc = of_address_to_resource(np, 0, &res);
if (rc < 0) {
dev_err(edac->dev, "no PMD resource address\n");
goto err_free;
}
ctx->pmd_csr = devm_ioremap_resource(edac->dev, &res);
if (IS_ERR(ctx->pmd_csr)) {
dev_err(edac->dev,
"devm_ioremap_resource failed for PMD resource address\n");
rc = PTR_ERR(ctx->pmd_csr);
goto err_free;
}
if (edac_op_state == EDAC_OPSTATE_POLL)
edac_dev->edac_check = xgene_edac_pmd_check;
xgene_edac_pmd_create_debugfs_nodes(edac_dev);
rc = edac_device_add_device(edac_dev);
if (rc > 0) {
dev_err(edac->dev, "edac_device_add_device failed\n");
rc = -ENOMEM;
goto err_free;
}
if (edac_op_state == EDAC_OPSTATE_INT)
edac_dev->op_state = OP_RUNNING_INTERRUPT;
list_add(&ctx->next, &edac->pmds);
xgene_edac_pmd_hw_ctl(edac_dev, 1);
devres_remove_group(edac->dev, xgene_edac_pmd_add);
dev_info(edac->dev, "X-Gene EDAC PMD%d registered\n", ctx->pmd);
return 0;
err_free:
edac_device_free_ctl_info(edac_dev);
err_group:
devres_release_group(edac->dev, xgene_edac_pmd_add);
return rc;
}
static int xgene_edac_pmd_remove(struct xgene_edac_pmd_ctx *pmd)
{
struct edac_device_ctl_info *edac_dev = pmd->edac_dev;
xgene_edac_pmd_hw_ctl(edac_dev, 0);
edac_device_del_device(edac_dev->dev);
edac_device_free_ctl_info(edac_dev);
return 0;
}
/* L3 Error device */
#define L3C_ESR (0x0A * 4)
#define L3C_ESR_DATATAG_MASK BIT(9)
#define L3C_ESR_MULTIHIT_MASK BIT(8)
#define L3C_ESR_UCEVICT_MASK BIT(6)
#define L3C_ESR_MULTIUCERR_MASK BIT(5)
#define L3C_ESR_MULTICERR_MASK BIT(4)
#define L3C_ESR_UCERR_MASK BIT(3)
#define L3C_ESR_CERR_MASK BIT(2)
#define L3C_ESR_UCERRINTR_MASK BIT(1)
#define L3C_ESR_CERRINTR_MASK BIT(0)
#define L3C_ECR (0x0B * 4)
#define L3C_ECR_UCINTREN BIT(3)
#define L3C_ECR_CINTREN BIT(2)
#define L3C_UCERREN BIT(1)
#define L3C_CERREN BIT(0)
#define L3C_ELR (0x0C * 4)
#define L3C_ELR_ERRSYN(src) ((src & 0xFF800000) >> 23)
#define L3C_ELR_ERRWAY(src) ((src & 0x007E0000) >> 17)
#define L3C_ELR_AGENTID(src) ((src & 0x0001E000) >> 13)
#define L3C_ELR_ERRGRP(src) ((src & 0x00000F00) >> 8)
#define L3C_ELR_OPTYPE(src) ((src & 0x000000F0) >> 4)
#define L3C_ELR_PADDRHIGH(src) (src & 0x0000000F)
#define L3C_AELR (0x0D * 4)
#define L3C_BELR (0x0E * 4)
#define L3C_BELR_BANK(src) (src & 0x0000000F)
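/* Context shared by the L3 and SoC EDAC error devices */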
struct xgene_edac_dev_ctx {
struct list_head next;
struct device ddev;
char *name;
struct xgene_edac *edac;
struct edac_device_ctl_info *edac_dev;
int edac_idx;
void __iomem *dev_csr;
int version;
};
/*
* Version 1 of the L3 controller has broken single bit correctable logic for
* certain error syndromes. Log them as uncorrectable in that case.
*/
static bool xgene_edac_l3_promote_to_uc_err(u32 l3cesr, u32 l3celr)
{
if (l3cesr & L3C_ESR_DATATAG_MASK) {
switch (L3C_ELR_ERRSYN(l3celr)) {
case 0x13C:
case 0x0B4:
case 0x007:
case 0x00D:
case 0x00E:
case 0x019:
case 0x01A:
case 0x01C:
case 0x04E:
case 0x041:
return true;
}
} else if (L3C_ELR_ERRWAY(l3celr) == 9)
return true;
return false;
}
static void xgene_edac_l3_check(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
u32 l3cesr;
u32 l3celr;
u32 l3caelr;
u32 l3cbelr;
l3cesr = readl(ctx->dev_csr + L3C_ESR);
if (!(l3cesr & (L3C_ESR_UCERR_MASK | L3C_ESR_CERR_MASK)))
return;
if (l3cesr & L3C_ESR_UCERR_MASK)
dev_err(edac_dev->dev, "L3C uncorrectable error\n");
if (l3cesr & L3C_ESR_CERR_MASK)
dev_warn(edac_dev->dev, "L3C correctable error\n");
l3celr = readl(ctx->dev_csr + L3C_ELR);
l3caelr = readl(ctx->dev_csr + L3C_AELR);
l3cbelr = readl(ctx->dev_csr + L3C_BELR);
if (l3cesr & L3C_ESR_MULTIHIT_MASK)
dev_err(edac_dev->dev, "L3C multiple hit error\n");
if (l3cesr & L3C_ESR_UCEVICT_MASK)
dev_err(edac_dev->dev,
"L3C dropped eviction of line with error\n");
if (l3cesr & L3C_ESR_MULTIUCERR_MASK)
dev_err(edac_dev->dev, "L3C multiple uncorrectable error\n");
if (l3cesr & L3C_ESR_DATATAG_MASK)
dev_err(edac_dev->dev,
"L3C data error syndrome 0x%X group 0x%X\n",
L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRGRP(l3celr));
else
dev_err(edac_dev->dev,
"L3C tag error syndrome 0x%X Way of Tag 0x%X Agent ID 0x%X Operation type 0x%X\n",
L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRWAY(l3celr),
L3C_ELR_AGENTID(l3celr), L3C_ELR_OPTYPE(l3celr));
/*
* NOTE: Address [41:38] in L3C_ELR_PADDRHIGH(l3celr).
* Address [37:6] in l3caelr. Lower 6 bits are zero.
*/
dev_err(edac_dev->dev, "L3C error address 0x%08X.%08X bank %d\n",
L3C_ELR_PADDRHIGH(l3celr) << 6 | (l3caelr >> 26),
(l3caelr & 0x3FFFFFFF) << 6, L3C_BELR_BANK(l3cbelr));
dev_err(edac_dev->dev,
"L3C error status register value 0x%X\n", l3cesr);
/* Clear L3C error interrupt */
writel(0, ctx->dev_csr + L3C_ESR);
if (ctx->version <= 1 &&
xgene_edac_l3_promote_to_uc_err(l3cesr, l3celr)) {
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
return;
}
if (l3cesr & L3C_ESR_CERR_MASK)
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
if (l3cesr & L3C_ESR_UCERR_MASK)
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
static void xgene_edac_l3_hw_init(struct edac_device_ctl_info *edac_dev,
bool enable)
{
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
u32 val;
val = readl(ctx->dev_csr + L3C_ECR);
val |= L3C_UCERREN | L3C_CERREN;
/* On disable, we only disable the interrupt but keep error detection enabled */
if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
if (enable)
val |= L3C_ECR_UCINTREN | L3C_ECR_CINTREN;
else
val &= ~(L3C_ECR_UCINTREN | L3C_ECR_CINTREN);
}
writel(val, ctx->dev_csr + L3C_ECR);
if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
/* Enable/disable L3 error top level interrupt */
if (enable) {
xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
L3C_UNCORR_ERR_MASK);
xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
L3C_CORR_ERR_MASK);
} else {
xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
L3C_UNCORR_ERR_MASK);
xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
L3C_CORR_ERR_MASK);
}
}
}
static ssize_t xgene_edac_l3_inject_ctrl_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dev = file->private_data;
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
/* Generate all errors */
writel(0xFFFFFFFF, ctx->dev_csr + L3C_ESR);
return count;
}
static const struct file_operations xgene_edac_l3_debug_inject_fops = {
.open = simple_open,
.write = xgene_edac_l3_inject_ctrl_write,
.llseek = generic_file_llseek
};
static void
xgene_edac_l3_create_debugfs_nodes(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
struct dentry *dbgfs_dir;
char name[10];
if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !ctx->edac->dfs)
return;
snprintf(name, sizeof(name), "l3c%d", ctx->edac_idx);
dbgfs_dir = edac_debugfs_create_dir_at(name, ctx->edac->dfs);
if (!dbgfs_dir)
return;
debugfs_create_file("l3_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
&xgene_edac_l3_debug_inject_fops);
}
static int xgene_edac_l3_add(struct xgene_edac *edac, struct device_node *np,
int version)
{
struct edac_device_ctl_info *edac_dev;
struct xgene_edac_dev_ctx *ctx;
struct resource res;
void __iomem *dev_csr;
int edac_idx;
int rc = 0;
if (!devres_open_group(edac->dev, xgene_edac_l3_add, GFP_KERNEL))
return -ENOMEM;
rc = of_address_to_resource(np, 0, &res);
if (rc < 0) {
dev_err(edac->dev, "no L3 resource address\n");
goto err_release_group;
}
dev_csr = devm_ioremap_resource(edac->dev, &res);
if (IS_ERR(dev_csr)) {
dev_err(edac->dev,
"devm_ioremap_resource failed for L3 resource address\n");
rc = PTR_ERR(dev_csr);
goto err_release_group;
}
edac_idx = edac_device_alloc_index();
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
"l3c", 1, "l3c", 1, 0, NULL, 0,
edac_idx);
if (!edac_dev) {
rc = -ENOMEM;
goto err_release_group;
}
ctx = edac_dev->pvt_info;
ctx->dev_csr = dev_csr;
ctx->name = "xgene_l3_err";
ctx->edac_idx = edac_idx;
ctx->edac = edac;
ctx->edac_dev = edac_dev;
ctx->ddev = *edac->dev;
ctx->version = version;
edac_dev->dev = &ctx->ddev;
edac_dev->ctl_name = ctx->name;
edac_dev->dev_name = ctx->name;
edac_dev->mod_name = EDAC_MOD_STR;
if (edac_op_state == EDAC_OPSTATE_POLL)
edac_dev->edac_check = xgene_edac_l3_check;
xgene_edac_l3_create_debugfs_nodes(edac_dev);
rc = edac_device_add_device(edac_dev);
if (rc > 0) {
dev_err(edac->dev, "failed edac_device_add_device()\n");
rc = -ENOMEM;
goto err_ctl_free;
}
if (edac_op_state == EDAC_OPSTATE_INT)
edac_dev->op_state = OP_RUNNING_INTERRUPT;
list_add(&ctx->next, &edac->l3s);
xgene_edac_l3_hw_init(edac_dev, 1);
devres_remove_group(edac->dev, xgene_edac_l3_add);
dev_info(edac->dev, "X-Gene EDAC L3 registered\n");
return 0;
err_ctl_free:
edac_device_free_ctl_info(edac_dev);
err_release_group:
devres_release_group(edac->dev, xgene_edac_l3_add);
return rc;
}
static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
{
struct edac_device_ctl_info *edac_dev = l3->edac_dev;
xgene_edac_l3_hw_init(edac_dev, 0);
edac_device_del_device(l3->edac->dev);
edac_device_free_ctl_info(edac_dev);
return 0;
}
/* SoC error device */
#define IOBAXIS0TRANSERRINTSTS 0x0000
#define IOBAXIS0_M_ILLEGAL_ACCESS_MASK BIT(1)
#define IOBAXIS0_ILLEGAL_ACCESS_MASK BIT(0)
#define IOBAXIS0TRANSERRINTMSK 0x0004
#define IOBAXIS0TRANSERRREQINFOL 0x0008
#define IOBAXIS0TRANSERRREQINFOH 0x000c
#define REQTYPE_RD(src) (((src) & BIT(0)))
#define ERRADDRH_RD(src) (((src) & 0xffc00000) >> 22)
#define IOBAXIS1TRANSERRINTSTS 0x0010
#define IOBAXIS1TRANSERRINTMSK 0x0014
#define IOBAXIS1TRANSERRREQINFOL 0x0018
#define IOBAXIS1TRANSERRREQINFOH 0x001c
#define IOBPATRANSERRINTSTS 0x0020
#define IOBPA_M_REQIDRAM_CORRUPT_MASK BIT(7)
#define IOBPA_REQIDRAM_CORRUPT_MASK BIT(6)
#define IOBPA_M_TRANS_CORRUPT_MASK BIT(5)
#define IOBPA_TRANS_CORRUPT_MASK BIT(4)
#define IOBPA_M_WDATA_CORRUPT_MASK BIT(3)
#define IOBPA_WDATA_CORRUPT_MASK BIT(2)
#define IOBPA_M_RDATA_CORRUPT_MASK BIT(1)
#define IOBPA_RDATA_CORRUPT_MASK BIT(0)
#define IOBBATRANSERRINTSTS 0x0030
#define M_ILLEGAL_ACCESS_MASK BIT(15)
#define ILLEGAL_ACCESS_MASK BIT(14)
#define M_WIDRAM_CORRUPT_MASK BIT(13)
#define WIDRAM_CORRUPT_MASK BIT(12)
#define M_RIDRAM_CORRUPT_MASK BIT(11)
#define RIDRAM_CORRUPT_MASK BIT(10)
#define M_TRANS_CORRUPT_MASK BIT(9)
#define TRANS_CORRUPT_MASK BIT(8)
#define M_WDATA_CORRUPT_MASK BIT(7)
#define WDATA_CORRUPT_MASK BIT(6)
#define M_RBM_POISONED_REQ_MASK BIT(5)
#define RBM_POISONED_REQ_MASK BIT(4)
#define M_XGIC_POISONED_REQ_MASK BIT(3)
#define XGIC_POISONED_REQ_MASK BIT(2)
#define M_WRERR_RESP_MASK BIT(1)
#define WRERR_RESP_MASK BIT(0)
#define IOBBATRANSERRREQINFOL 0x0038
#define IOBBATRANSERRREQINFOH 0x003c
#define REQTYPE_F2_RD(src) ((src) & BIT(0))
#define ERRADDRH_F2_RD(src) (((src) & 0xffc00000) >> 22)
#define IOBBATRANSERRCSWREQID 0x0040
#define XGICTRANSERRINTSTS 0x0050
#define M_WR_ACCESS_ERR_MASK BIT(3)
#define WR_ACCESS_ERR_MASK BIT(2)
#define M_RD_ACCESS_ERR_MASK BIT(1)
#define RD_ACCESS_ERR_MASK BIT(0)
#define XGICTRANSERRINTMSK 0x0054
#define XGICTRANSERRREQINFO 0x0058
#define REQTYPE_MASK BIT(26)
#define ERRADDR_RD(src) ((src) & 0x03ffffff)
#define GLBL_ERR_STS 0x0800
#define MDED_ERR_MASK BIT(3)
#define DED_ERR_MASK BIT(2)
#define MSEC_ERR_MASK BIT(1)
#define SEC_ERR_MASK BIT(0)
#define GLBL_SEC_ERRL 0x0810
#define GLBL_SEC_ERRH 0x0818
#define GLBL_MSEC_ERRL 0x0820
#define GLBL_MSEC_ERRH 0x0828
#define GLBL_DED_ERRL 0x0830
#define GLBL_DED_ERRLMASK 0x0834
#define GLBL_DED_ERRH 0x0838
#define GLBL_DED_ERRHMASK 0x083c
#define GLBL_MDED_ERRL 0x0840
#define GLBL_MDED_ERRLMASK 0x0844
#define GLBL_MDED_ERRH 0x0848
#define GLBL_MDED_ERRHMASK 0x084c
/* IO Bus Registers */
#define RBCSR 0x0000
#define STICKYERR_MASK BIT(0)
#define RBEIR 0x0008
#define AGENT_OFFLINE_ERR_MASK BIT(30)
#define UNIMPL_RBPAGE_ERR_MASK BIT(29)
#define WORD_ALIGNED_ERR_MASK BIT(28)
#define PAGE_ACCESS_ERR_MASK BIT(27)
#define WRITE_ACCESS_MASK BIT(26)
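/*
 * Names of the SoC memory blocks reported by MEMERRINTSTS on version 1
 * silicon; the bit position in the status register indexes this table.
 */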
static const char * const soc_mem_err_v1[] = {
"10GbE0",
"10GbE1",
"Security",
"SATA45",
"SATA23/ETH23",
"SATA01/ETH01",
"USB1",
"USB0",
"QML",
"QM0",
"QM1 (XGbE01)",
"PCIE4",
"PCIE3",
"PCIE2",
"PCIE1",
"PCIE0",
"CTX Manager",
"OCM",
"1GbE",
"CLE",
"AHBC",
"PktDMA",
"GFC",
"MSLIM",
"10GbE2",
"10GbE3",
"QM2 (XGbE23)",
"IOB",
"unknown",
"unknown",
"unknown",
"unknown",
};
static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
u32 err_addr_lo;
u32 err_addr_hi;
u32 reg;
u32 info;
/* GIC transaction error interrupt */
reg = readl(ctx->dev_csr + XGICTRANSERRINTSTS);
if (!reg)
goto chk_iob_err;
dev_err(edac_dev->dev, "XGIC transaction error\n");
if (reg & RD_ACCESS_ERR_MASK)
dev_err(edac_dev->dev, "XGIC read size error\n");
if (reg & M_RD_ACCESS_ERR_MASK)
dev_err(edac_dev->dev, "Multiple XGIC read size error\n");
if (reg & WR_ACCESS_ERR_MASK)
dev_err(edac_dev->dev, "XGIC write size error\n");
if (reg & M_WR_ACCESS_ERR_MASK)
dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
info);
writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
chk_iob_err:
/* IOB memory error */
reg = readl(ctx->dev_csr + GLBL_ERR_STS);
if (!reg)
return;
if (reg & SEC_ERR_MASK) {
err_addr_lo = readl(ctx->dev_csr + GLBL_SEC_ERRL);
err_addr_hi = readl(ctx->dev_csr + GLBL_SEC_ERRH);
dev_err(edac_dev->dev,
"IOB single-bit correctable memory at 0x%08X.%08X error\n",
err_addr_lo, err_addr_hi);
writel(err_addr_lo, ctx->dev_csr + GLBL_SEC_ERRL);
writel(err_addr_hi, ctx->dev_csr + GLBL_SEC_ERRH);
}
if (reg & MSEC_ERR_MASK) {
err_addr_lo = readl(ctx->dev_csr + GLBL_MSEC_ERRL);
err_addr_hi = readl(ctx->dev_csr + GLBL_MSEC_ERRH);
dev_err(edac_dev->dev,
"IOB multiple single-bit correctable memory at 0x%08X.%08X error\n",
err_addr_lo, err_addr_hi);
writel(err_addr_lo, ctx->dev_csr + GLBL_MSEC_ERRL);
writel(err_addr_hi, ctx->dev_csr + GLBL_MSEC_ERRH);
}
if (reg & (SEC_ERR_MASK | MSEC_ERR_MASK))
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
if (reg & DED_ERR_MASK) {
err_addr_lo = readl(ctx->dev_csr + GLBL_DED_ERRL);
err_addr_hi = readl(ctx->dev_csr + GLBL_DED_ERRH);
dev_err(edac_dev->dev,
"IOB double-bit uncorrectable memory at 0x%08X.%08X error\n",
err_addr_lo, err_addr_hi);
writel(err_addr_lo, ctx->dev_csr + GLBL_DED_ERRL);
writel(err_addr_hi, ctx->dev_csr + GLBL_DED_ERRH);
}
if (reg & MDED_ERR_MASK) {
err_addr_lo = readl(ctx->dev_csr + GLBL_MDED_ERRL);
err_addr_hi = readl(ctx->dev_csr + GLBL_MDED_ERRH);
dev_err(edac_dev->dev,
"Multiple IOB double-bit uncorrectable memory at 0x%08X.%08X error\n",
err_addr_lo, err_addr_hi);
writel(err_addr_lo, ctx->dev_csr + GLBL_MDED_ERRL);
writel(err_addr_hi, ctx->dev_csr + GLBL_MDED_ERRH);
}
if (reg & (DED_ERR_MASK | MDED_ERR_MASK))
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
u32 err_addr_lo;
u32 err_addr_hi;
u32 reg;
/* If the register bus resource isn't available, just skip it */
if (!ctx->edac->rb_map)
goto rb_skip;
/*
* Check RB access errors
* 1. Out of range
* 2. Un-implemented page
* 3. Un-aligned access
* 4. Offline slave IP
*/
if (regmap_read(ctx->edac->rb_map, RBCSR, &reg))
return;
if (reg & STICKYERR_MASK) {
bool write;
dev_err(edac_dev->dev, "IOB bus access error(s)\n");
if (regmap_read(ctx->edac->rb_map, RBEIR, &reg))
return;
write = reg & WRITE_ACCESS_MASK ? 1 : 0;
if (reg & AGENT_OFFLINE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to offline agent error\n",
write ? "write" : "read");
if (reg & UNIMPL_RBPAGE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to unimplemented page error\n",
write ? "write" : "read");
if (reg & WORD_ALIGNED_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s word aligned access error\n",
write ? "write" : "read");
if (reg & PAGE_ACCESS_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s to page out of range access error\n",
write ? "write" : "read");
if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
return;
if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
return;
}
rb_skip:
/* IOB Bridge agent transaction error interrupt */
reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
if (!reg)
return;
dev_err(edac_dev->dev, "IOB bridge agent (BA) transaction error\n");
if (reg & WRERR_RESP_MASK)
dev_err(edac_dev->dev, "IOB BA write response error\n");
if (reg & M_WRERR_RESP_MASK)
dev_err(edac_dev->dev,
"Multiple IOB BA write response error\n");
if (reg & XGIC_POISONED_REQ_MASK)
dev_err(edac_dev->dev, "IOB BA XGIC poisoned write error\n");
if (reg & M_XGIC_POISONED_REQ_MASK)
dev_err(edac_dev->dev,
"Multiple IOB BA XGIC poisoned write error\n");
if (reg & RBM_POISONED_REQ_MASK)
dev_err(edac_dev->dev, "IOB BA RBM poisoned write error\n");
if (reg & M_RBM_POISONED_REQ_MASK)
dev_err(edac_dev->dev,
"Multiple IOB BA RBM poisoned write error\n");
if (reg & WDATA_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB BA write error\n");
if (reg & M_WDATA_CORRUPT_MASK)
dev_err(edac_dev->dev, "Multiple IOB BA write error\n");
if (reg & TRANS_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB BA transaction error\n");
if (reg & M_TRANS_CORRUPT_MASK)
dev_err(edac_dev->dev, "Multiple IOB BA transaction error\n");
if (reg & RIDRAM_CORRUPT_MASK)
dev_err(edac_dev->dev,
"IOB BA RDIDRAM read transaction ID error\n");
if (reg & M_RIDRAM_CORRUPT_MASK)
dev_err(edac_dev->dev,
"Multiple IOB BA RDIDRAM read transaction ID error\n");
if (reg & WIDRAM_CORRUPT_MASK)
dev_err(edac_dev->dev,
"IOB BA RDIDRAM write transaction ID error\n");
if (reg & M_WIDRAM_CORRUPT_MASK)
dev_err(edac_dev->dev,
"Multiple IOB BA RDIDRAM write transaction ID error\n");
if (reg & ILLEGAL_ACCESS_MASK)
dev_err(edac_dev->dev,
"IOB BA XGIC/RB illegal access error\n");
if (reg & M_ILLEGAL_ACCESS_MASK)
dev_err(edac_dev->dev,
"Multiple IOB BA XGIC/RB illegal access error\n");
err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
if (reg & WRERR_RESP_MASK)
dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
readl(ctx->dev_csr + IOBBATRANSERRCSWREQID));
writel(reg, ctx->dev_csr + IOBBATRANSERRINTSTS);
}
static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
u32 err_addr_lo;
u32 err_addr_hi;
u32 reg;
/* IOB Processing agent transaction error interrupt */
reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
if (!reg)
goto chk_iob_axi0;
dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
if (reg & IOBPA_RDATA_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
dev_err(edac_dev->dev,
"Multiple IOB PA read data RAM error\n");
if (reg & IOBPA_WDATA_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA write data RAM error\n");
if (reg & IOBPA_M_WDATA_CORRUPT_MASK)
dev_err(edac_dev->dev,
"Multiple IOB PA write data RAM error\n");
if (reg & IOBPA_TRANS_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA transaction error\n");
if (reg & IOBPA_M_TRANS_CORRUPT_MASK)
dev_err(edac_dev->dev, "Multiple IOB PA transaction error\n");
if (reg & IOBPA_REQIDRAM_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA transaction ID RAM error\n");
if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK)
dev_err(edac_dev->dev,
"Multiple IOB PA transaction ID RAM error\n");
writel(reg, ctx->dev_csr + IOBPATRANSERRINTSTS);
chk_iob_axi0:
/* IOB AXI0 Error */
reg = readl(ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
if (!reg)
goto chk_iob_axi1;
err_addr_lo = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOL);
err_addr_hi = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOH);
dev_err(edac_dev->dev,
"%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
REQTYPE_RD(err_addr_hi) ? "read" : "write",
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
chk_iob_axi1:
/* IOB AXI1 Error */
reg = readl(ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
if (!reg)
return;
err_addr_lo = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOL);
err_addr_hi = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOH);
dev_err(edac_dev->dev,
"%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
REQTYPE_RD(err_addr_hi) ? "read" : "write",
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
}
static void xgene_edac_soc_check(struct edac_device_ctl_info *edac_dev)
{
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
const char * const *soc_mem_err = NULL;
u32 pcp_hp_stat;
u32 pcp_lp_stat;
u32 reg;
int i;
xgene_edac_pcp_rd(ctx->edac, PCPHPERRINTSTS, &pcp_hp_stat);
xgene_edac_pcp_rd(ctx->edac, PCPLPERRINTSTS, &pcp_lp_stat);
xgene_edac_pcp_rd(ctx->edac, MEMERRINTSTS, &reg);
if (!((pcp_hp_stat & (IOB_PA_ERR_MASK | IOB_BA_ERR_MASK |
IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK)) ||
(pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) || reg))
return;
if (pcp_hp_stat & IOB_XGIC_ERR_MASK)
xgene_edac_iob_gic_report(edac_dev);
if (pcp_hp_stat & (IOB_RB_ERR_MASK | IOB_BA_ERR_MASK))
xgene_edac_rb_report(edac_dev);
if (pcp_hp_stat & IOB_PA_ERR_MASK)
xgene_edac_pa_report(edac_dev);
if (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) {
dev_info(edac_dev->dev,
"CSW switch trace correctable memory parity error\n");
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
}
if (!reg)
return;
if (ctx->version == 1)
soc_mem_err = soc_mem_err_v1;
if (!soc_mem_err) {
dev_err(edac_dev->dev, "SoC memory parity error 0x%08X\n",
reg);
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
return;
}
for (i = 0; i < 31; i++) {
if (reg & (1 << i)) {
dev_err(edac_dev->dev, "%s memory parity error\n",
soc_mem_err[i]);
edac_device_handle_ue(edac_dev, 0, 0,
edac_dev->ctl_name);
}
}
}
static void xgene_edac_soc_hw_init(struct edac_device_ctl_info *edac_dev,
bool enable)
{
struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
/* Enable SoC IP error interrupt */
if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
if (enable) {
xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
IOB_PA_ERR_MASK |
IOB_BA_ERR_MASK |
IOB_XGIC_ERR_MASK |
IOB_RB_ERR_MASK);
xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
CSW_SWITCH_TRACE_ERR_MASK);
} else {
xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
IOB_PA_ERR_MASK |
IOB_BA_ERR_MASK |
IOB_XGIC_ERR_MASK |
IOB_RB_ERR_MASK);
xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
CSW_SWITCH_TRACE_ERR_MASK);
}
writel(enable ? 0x0 : 0xFFFFFFFF,
ctx->dev_csr + IOBAXIS0TRANSERRINTMSK);
writel(enable ? 0x0 : 0xFFFFFFFF,
ctx->dev_csr + IOBAXIS1TRANSERRINTMSK);
writel(enable ? 0x0 : 0xFFFFFFFF,
ctx->dev_csr + XGICTRANSERRINTMSK);
xgene_edac_pcp_setbits(ctx->edac, MEMERRINTMSK,
enable ? 0x0 : 0xFFFFFFFF);
}
}
static int xgene_edac_soc_add(struct xgene_edac *edac, struct device_node *np,
int version)
{
struct edac_device_ctl_info *edac_dev;
struct xgene_edac_dev_ctx *ctx;
void __iomem *dev_csr;
struct resource res;
int edac_idx;
int rc;
if (!devres_open_group(edac->dev, xgene_edac_soc_add, GFP_KERNEL))
return -ENOMEM;
rc = of_address_to_resource(np, 0, &res);
if (rc < 0) {
dev_err(edac->dev, "no SoC resource address\n");
goto err_release_group;
}
dev_csr = devm_ioremap_resource(edac->dev, &res);
if (IS_ERR(dev_csr)) {
dev_err(edac->dev,
"devm_ioremap_resource failed for soc resource address\n");
rc = PTR_ERR(dev_csr);
goto err_release_group;
}
edac_idx = edac_device_alloc_index();
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
"SOC", 1, "SOC", 1, 2, NULL, 0,
edac_idx);
if (!edac_dev) {
rc = -ENOMEM;
goto err_release_group;
}
ctx = edac_dev->pvt_info;
ctx->dev_csr = dev_csr;
ctx->name = "xgene_soc_err";
ctx->edac_idx = edac_idx;
ctx->edac = edac;
ctx->edac_dev = edac_dev;
ctx->ddev = *edac->dev;
ctx->version = version;
edac_dev->dev = &ctx->ddev;
edac_dev->ctl_name = ctx->name;
edac_dev->dev_name = ctx->name;
edac_dev->mod_name = EDAC_MOD_STR;
if (edac_op_state == EDAC_OPSTATE_POLL)
edac_dev->edac_check = xgene_edac_soc_check;
rc = edac_device_add_device(edac_dev);
if (rc > 0) {
dev_err(edac->dev, "failed edac_device_add_device()\n");
rc = -ENOMEM;
goto err_ctl_free;
}
if (edac_op_state == EDAC_OPSTATE_INT)
edac_dev->op_state = OP_RUNNING_INTERRUPT;
list_add(&ctx->next, &edac->socs);
xgene_edac_soc_hw_init(edac_dev, 1);
devres_remove_group(edac->dev, xgene_edac_soc_add);
dev_info(edac->dev, "X-Gene EDAC SoC registered\n");
return 0;
err_ctl_free:
edac_device_free_ctl_info(edac_dev);
err_release_group:
devres_release_group(edac->dev, xgene_edac_soc_add);
return rc;
}
static int xgene_edac_soc_remove(struct xgene_edac_dev_ctx *soc)
{
struct edac_device_ctl_info *edac_dev = soc->edac_dev;
xgene_edac_soc_hw_init(edac_dev, 0);
edac_device_del_device(soc->edac->dev);
edac_device_free_ctl_info(edac_dev);
return 0;
}
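/*
 * Shared PCP error interrupt handler: read the high/low priority error
 * status registers and run the MCU, PMD, L3 and SoC error checkers.
 */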
static irqreturn_t xgene_edac_isr(int irq, void *dev_id)
{
struct xgene_edac *ctx = dev_id;
struct xgene_edac_pmd_ctx *pmd;
struct xgene_edac_dev_ctx *node;
unsigned int pcp_hp_stat;
unsigned int pcp_lp_stat;
xgene_edac_pcp_rd(ctx, PCPHPERRINTSTS, &pcp_hp_stat);
xgene_edac_pcp_rd(ctx, PCPLPERRINTSTS, &pcp_lp_stat);
if ((MCU_UNCORR_ERR_MASK & pcp_hp_stat) ||
(MCU_CTL_ERR_MASK & pcp_hp_stat) ||
(MCU_CORR_ERR_MASK & pcp_lp_stat)) {
struct xgene_edac_mc_ctx *mcu;
list_for_each_entry(mcu, &ctx->mcus, next)
xgene_edac_mc_check(mcu->mci);
}
list_for_each_entry(pmd, &ctx->pmds, next) {
if ((PMD0_MERR_MASK << pmd->pmd) & pcp_hp_stat)
xgene_edac_pmd_check(pmd->edac_dev);
}
list_for_each_entry(node, &ctx->l3s, next)
xgene_edac_l3_check(node->edac_dev);
list_for_each_entry(node, &ctx->socs, next)
xgene_edac_soc_check(node->edac_dev);
return IRQ_HANDLED;
}
static int xgene_edac_probe(struct platform_device *pdev)
{
struct xgene_edac *edac;
struct device_node *child;
struct resource *res;
int rc;
edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
if (!edac)
return -ENOMEM;
edac->dev = &pdev->dev;
platform_set_drvdata(pdev, edac);
INIT_LIST_HEAD(&edac->mcus);
INIT_LIST_HEAD(&edac->pmds);
INIT_LIST_HEAD(&edac->l3s);
INIT_LIST_HEAD(&edac->socs);
spin_lock_init(&edac->lock);
mutex_init(&edac->mc_lock);
edac->csw_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"regmap-csw");
if (IS_ERR(edac->csw_map)) {
dev_err(edac->dev, "unable to get syscon regmap csw\n");
rc = PTR_ERR(edac->csw_map);
goto out_err;
}
edac->mcba_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"regmap-mcba");
if (IS_ERR(edac->mcba_map)) {
dev_err(edac->dev, "unable to get syscon regmap mcba\n");
rc = PTR_ERR(edac->mcba_map);
goto out_err;
}
edac->mcbb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"regmap-mcbb");
if (IS_ERR(edac->mcbb_map)) {
dev_err(edac->dev, "unable to get syscon regmap mcbb\n");
rc = PTR_ERR(edac->mcbb_map);
goto out_err;
}
edac->efuse_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"regmap-efuse");
if (IS_ERR(edac->efuse_map)) {
dev_err(edac->dev, "unable to get syscon regmap efuse\n");
rc = PTR_ERR(edac->efuse_map);
goto out_err;
}
/*
* NOTE: The register bus resource is optional for compatibility
* reasons.
*/
edac->rb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"regmap-rb");
if (IS_ERR(edac->rb_map)) {
dev_warn(edac->dev, "missing syscon regmap rb\n");
edac->rb_map = NULL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
edac->pcp_csr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(edac->pcp_csr)) {
dev_err(&pdev->dev, "no PCP resource address\n");
rc = PTR_ERR(edac->pcp_csr);
goto out_err;
}
if (edac_op_state == EDAC_OPSTATE_INT) {
int irq;
int i;
for (i = 0; i < 3; i++) {
irq = platform_get_irq_optional(pdev, i);
if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
rc = irq;
goto out_err;
}
rc = devm_request_irq(&pdev->dev, irq,
xgene_edac_isr, IRQF_SHARED,
dev_name(&pdev->dev), edac);
if (rc) {
dev_err(&pdev->dev,
"Could not request IRQ %d\n", irq);
goto out_err;
}
}
}
edac->dfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
continue;
if (of_device_is_compatible(child, "apm,xgene-edac-mc"))
xgene_edac_mc_add(edac, child);
if (of_device_is_compatible(child, "apm,xgene-edac-pmd"))
xgene_edac_pmd_add(edac, child, 1);
if (of_device_is_compatible(child, "apm,xgene-edac-pmd-v2"))
xgene_edac_pmd_add(edac, child, 2);
if (of_device_is_compatible(child, "apm,xgene-edac-l3"))
xgene_edac_l3_add(edac, child, 1);
if (of_device_is_compatible(child, "apm,xgene-edac-l3-v2"))
xgene_edac_l3_add(edac, child, 2);
if (of_device_is_compatible(child, "apm,xgene-edac-soc"))
xgene_edac_soc_add(edac, child, 0);
if (of_device_is_compatible(child, "apm,xgene-edac-soc-v1"))
xgene_edac_soc_add(edac, child, 1);
}
return 0;
out_err:
return rc;
}
static int xgene_edac_remove(struct platform_device *pdev)
{
struct xgene_edac *edac = dev_get_drvdata(&pdev->dev);
struct xgene_edac_mc_ctx *mcu;
struct xgene_edac_mc_ctx *temp_mcu;
struct xgene_edac_pmd_ctx *pmd;
struct xgene_edac_pmd_ctx *temp_pmd;
struct xgene_edac_dev_ctx *node;
struct xgene_edac_dev_ctx *temp_node;
list_for_each_entry_safe(mcu, temp_mcu, &edac->mcus, next)
xgene_edac_mc_remove(mcu);
list_for_each_entry_safe(pmd, temp_pmd, &edac->pmds, next)
xgene_edac_pmd_remove(pmd);
list_for_each_entry_safe(node, temp_node, &edac->l3s, next)
xgene_edac_l3_remove(node);
list_for_each_entry_safe(node, temp_node, &edac->socs, next)
xgene_edac_soc_remove(node);
return 0;
}
static const struct of_device_id xgene_edac_of_match[] = {
{ .compatible = "apm,xgene-edac" },
{},
};
MODULE_DEVICE_TABLE(of, xgene_edac_of_match);
static struct platform_driver xgene_edac_driver = {
.probe = xgene_edac_probe,
.remove = xgene_edac_remove,
.driver = {
.name = "xgene-edac",
.of_match_table = xgene_edac_of_match,
},
};
static int __init xgene_edac_init(void)
{
int rc;
if (ghes_get_devices())
return -EBUSY;
/* Make sure error reporting method is sane */
switch (edac_op_state) {
case EDAC_OPSTATE_POLL:
case EDAC_OPSTATE_INT:
break;
default:
edac_op_state = EDAC_OPSTATE_INT;
break;
}
rc = platform_driver_register(&xgene_edac_driver);
if (rc) {
edac_printk(KERN_ERR, EDAC_MOD_STR,
"EDAC fails to register\n");
goto reg_failed;
}
return 0;
reg_failed:
return rc;
}
module_init(xgene_edac_init);
static void __exit xgene_edac_exit(void)
{
platform_driver_unregister(&xgene_edac_driver);
}
module_exit(xgene_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Feng Kan <[email protected]>");
MODULE_DESCRIPTION("APM X-Gene EDAC driver");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
"EDAC error reporting state: 0=Poll, 2=Interrupt");
| linux-master | drivers/edac/xgene_edac.c |
/*
* Intel 82860 Memory Controller kernel module
* (C) 2005 Red Hat (http://www.redhat.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Ben Woodard <[email protected]>
* shamelessly copied from and based upon the edac_i82875 driver
* by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "i82860_edac"
#define i82860_printk(level, fmt, arg...) \
edac_printk(level, "i82860", fmt, ##arg)
#define i82860_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_82860_0
#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
#endif /* PCI_DEVICE_ID_INTEL_82860_0 */
#define I82860_MCHCFG 0x50
#define I82860_GBA 0x60
#define I82860_GBA_MASK 0x7FF
#define I82860_GBA_SHIFT 24
#define I82860_ERRSTS 0xC8
#define I82860_EAP 0xE4
#define I82860_DERRCTL_STS 0xE2
enum i82860_chips {
I82860 = 0,
};
struct i82860_dev_info {
const char *ctl_name;
};
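/* Snapshot of the i82860 error reporting registers */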
struct i82860_error_info {
u16 errsts;
u32 eap;
u16 derrsyn;
u16 errsts2;
};
static const struct i82860_dev_info i82860_devs[] = {
[I82860] = {
.ctl_name = "i82860"},
};
static struct pci_dev *mci_pdev; /* init dev: in case that AGP code
* has already registered driver
*/
static struct edac_pci_ctl_info *i82860_pci;
static void i82860_get_error_info(struct mem_ctl_info *mci,
struct i82860_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
* registers at once, and the registers can change under us (e.g. a CE
* being overwritten by a UE).
*/
pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts);
pci_read_config_dword(pdev, I82860_EAP, &info->eap);
pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2);
pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003);
/*
* If the error is the same for both reads then the first set of reads
* is valid. If there is a change then a CE occurred (with no captured
* info) and the second set of reads is valid and should be the UE info.
*/
if (!(info->errsts2 & 0x0003))
return;
if ((info->errsts ^ info->errsts2) & 0x0003) {
pci_read_config_dword(pdev, I82860_EAP, &info->eap);
pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
}
}
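/*
 * Decode a previously captured error and report it via the EDAC core.
 * Returns 1 if an error was present, 0 otherwise; with handle_errors == 0
 * only the presence of an error is reported.
 */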
static int i82860_process_error_info(struct mem_ctl_info *mci,
struct i82860_error_info *info,
int handle_errors)
{
struct dimm_info *dimm;
int row;
if (!(info->errsts2 & 0x0003))
return 0;
if (!handle_errors)
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1, "UE overwrote CE", "");
info->errsts = info->errsts2;
}
info->eap >>= PAGE_SHIFT;
row = edac_mc_find_csrow_by_page(mci, info->eap);
dimm = mci->csrows[row]->channels[0]->dimm;
if (info->errsts & 0x0002)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
info->eap, 0, 0,
dimm->location[0], dimm->location[1], -1,
"i82860 UE", "");
else
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
info->eap, 0, info->derrsyn,
dimm->location[0], dimm->location[1], -1,
"i82860 CE", "");
return 1;
}
static void i82860_check(struct mem_ctl_info *mci)
{
struct i82860_error_info info;
i82860_get_error_info(mci, &info);
i82860_process_error_info(mci, &info, 1);
}
static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
{
unsigned long last_cumul_size;
u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u16 value;
u32 cumul_size;
struct csrow_info *csrow;
struct dimm_info *dimm;
int index;
pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
mchcfg_ddim = mchcfg_ddim & 0x180;
last_cumul_size = 0;
/* The group row boundary (GRA) reg values are boundary address
* for each DRAM row with a granularity of 16MB. GRA regs are
* cumulative; therefore GRA15 will contain the total memory contained
* in all eight rows.
*/
for (index = 0; index < mci->nr_csrows; index++) {
csrow = mci->csrows[index];
dimm = csrow->channels[0]->dimm;
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
dimm->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
dimm->grain = 1 << 12; /* I82860_EAP has 4 KiB resolution */
dimm->mtype = MEM_RMBS;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
}
}
static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct i82860_error_info discard;
/*
* RDRAM has channels but these don't map onto the csrow abstraction.
* According with the datasheet, there are 2 Rambus channels, supporting
* up to 16 direct RDRAM devices.
* The device groups from the GRA registers seem to map reasonably
* well onto the notion of a chip select row.
* There are 16 GRA registers and since the name is associated with
* the channel and the GRA registers map to physical devices so we are
* going to make 1 channel for group.
*/
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = 2;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_SLOT;
layers[1].size = 8;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
edac_dbg(3, "init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
/* I"m not sure about this but I think that all RDRAM is SECDED */
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = i82860_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82860_check;
mci->ctl_page_to_phys = NULL;
i82860_init_csrows(mci, pdev);
i82860_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
/* allocating generic PCI control info */
i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i82860_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
/* get this far and it's successful */
edac_dbg(3, "success\n");
return 0;
fail:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int i82860_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc;
edac_dbg(0, "\n");
i82860_printk(KERN_INFO, "i82860 init one\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = i82860_probe1(pdev, ent->driver_data);
if (rc == 0)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void i82860_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
if (i82860_pci)
edac_pci_release_generic_ctl(i82860_pci);
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
edac_mc_free(mci);
}
static const struct pci_device_id i82860_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82860},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
static struct pci_driver i82860_driver = {
.name = EDAC_MOD_STR,
.probe = i82860_init_one,
.remove = i82860_remove_one,
.id_table = i82860_pci_tbl,
};
static int __init i82860_init(void)
{
int pci_rc;
edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
goto fail0;
if (!mci_pdev) {
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82860_0, NULL);
if (mci_pdev == NULL) {
edac_dbg(0, "860 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
if (pci_rc < 0) {
edac_dbg(0, "860 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&i82860_driver);
fail0:
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit i82860_exit(void)
{
edac_dbg(3, "\n");
pci_unregister_driver(&i82860_driver);
pci_dev_put(mci_pdev);
}
module_init(i82860_init);
module_exit(i82860_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) Ben Woodard <[email protected]>");
MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i82860_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "edac_module.h"
static struct workqueue_struct *wq;
bool edac_queue_work(struct delayed_work *work, unsigned long delay)
{
return queue_delayed_work(wq, work, delay);
}
EXPORT_SYMBOL_GPL(edac_queue_work);
bool edac_mod_work(struct delayed_work *work, unsigned long delay)
{
return mod_delayed_work(wq, work, delay);
}
EXPORT_SYMBOL_GPL(edac_mod_work);
bool edac_stop_work(struct delayed_work *work)
{
bool ret;
ret = cancel_delayed_work_sync(work);
flush_workqueue(wq);
return ret;
}
EXPORT_SYMBOL_GPL(edac_stop_work);
int edac_workqueue_setup(void)
{
wq = alloc_ordered_workqueue("edac-poller", WQ_MEM_RECLAIM);
if (!wq)
return -ENODEV;
else
return 0;
}
void edac_workqueue_teardown(void)
{
destroy_workqueue(wq);
wq = NULL;
}
| linux-master | drivers/edac/wq.c |
/*
* Freescale Memory Controller kernel module
*
* Author: York Sun <[email protected]>
*
* Copyright 2016 NXP Semiconductor
*
* Derived from mpc85xx_edac.c
* Author: Dave Jiang <[email protected]>
*
* 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "edac_module.h"
#include "fsl_ddr_edac.h"
static const struct of_device_id fsl_ddr_mc_err_of_match[] = {
{ .compatible = "fsl,qoriq-memory-controller", },
{},
};
MODULE_DEVICE_TABLE(of, fsl_ddr_mc_err_of_match);
static struct platform_driver fsl_ddr_mc_err_driver = {
.probe = fsl_mc_err_probe,
.remove = fsl_mc_err_remove,
.driver = {
.name = "fsl_ddr_mc_err",
.of_match_table = fsl_ddr_mc_err_of_match,
},
};
static int __init fsl_ddr_mc_init(void)
{
int res;
if (ghes_get_devices())
return -EBUSY;
/* make sure error reporting method is sane */
switch (edac_op_state) {
case EDAC_OPSTATE_POLL:
case EDAC_OPSTATE_INT:
break;
default:
edac_op_state = EDAC_OPSTATE_INT;
break;
}
res = platform_driver_register(&fsl_ddr_mc_err_driver);
if (res) {
pr_err("MC fails to register\n");
return res;
}
return 0;
}
module_init(fsl_ddr_mc_init);
static void __exit fsl_ddr_mc_exit(void)
{
platform_driver_unregister(&fsl_ddr_mc_err_driver);
}
module_exit(fsl_ddr_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("NXP Semiconductor");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
| linux-master | drivers/edac/layerscape_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Intel i7 core/Nehalem Memory Controller kernel module
*
* This driver supports the memory controllers found on the Intel
* processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
* Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
* and Westmere-EP.
*
* Copyright (c) 2009-2010 by:
* Mauro Carvalho Chehab
*
* Red Hat Inc. https://www.redhat.com
*
* Forked and adapted from the i5400_edac driver
*
* Based on the following public Intel datasheets:
* Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
* Datasheet, Volume 2:
* http://download.intel.com/design/processor/datashts/320835.pdf
* Intel Xeon Processor 5500 Series Datasheet Volume 2
* http://www.intel.com/Assets/PDF/datasheet/321322.pdf
* also available at:
* http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <asm/mce.h>
#include <asm/processor.h>
#include <asm/div64.h>
#include "edac_module.h"
/* Static vars */
static LIST_HEAD(i7core_edac_list);
static DEFINE_MUTEX(i7core_edac_lock);
static int probed;
static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/*
* This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
* registers start at bus 255, and are not reported by BIOS.
* We currently find devices with only 2 sockets. In order to support more QPI
* (Quick Path Interconnect) sockets, just increment this number.
*/
#define MAX_SOCKET_BUSES 2
/*
* Alter this version for the module when modifications are made
*/
#define I7CORE_REVISION " Ver: 1.0.0"
#define EDAC_MOD_STR "i7core_edac"
/*
* Debug macros
*/
#define i7core_printk(level, fmt, arg...) \
edac_printk(level, "i7core", fmt, ##arg)
#define i7core_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
/*
* i7core Memory Controller Registers
*/
/* OFFSETS for Device 0 Function 0 */
#define MC_CFG_CONTROL 0x90
#define MC_CFG_UNLOCK 0x02
#define MC_CFG_LOCK 0x00
/* OFFSETS for Device 3 Function 0 */
#define MC_CONTROL 0x48
#define MC_STATUS 0x4c
#define MC_MAX_DOD 0x64
/*
* OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
* http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
*/
#define MC_TEST_ERR_RCV1 0x60
#define DIMM2_COR_ERR(r) ((r) & 0x7fff)
#define MC_TEST_ERR_RCV0 0x64
#define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
#define DIMM0_COR_ERR(r) ((r) & 0x7fff)
/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
#define MC_SSRCONTROL 0x48
#define SSR_MODE_DISABLE 0x00
#define SSR_MODE_ENABLE 0x01
#define SSR_MODE_MASK 0x03
#define MC_SCRUB_CONTROL 0x4c
#define STARTSCRUB (1 << 24)
#define SCRUBINTERVAL_MASK 0xffffff
#define MC_COR_ECC_CNT_0 0x80
#define MC_COR_ECC_CNT_1 0x84
#define MC_COR_ECC_CNT_2 0x88
#define MC_COR_ECC_CNT_3 0x8c
#define MC_COR_ECC_CNT_4 0x90
#define MC_COR_ECC_CNT_5 0x94
#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
/* OFFSETS for Devices 4,5 and 6 Function 0 */
#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
#define THREE_DIMMS_PRESENT (1 << 24)
#define SINGLE_QUAD_RANK_PRESENT (1 << 23)
#define QUAD_RANK_PRESENT (1 << 22)
#define REGISTERED_DIMM (1 << 15)
#define MC_CHANNEL_MAPPER 0x60
#define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
#define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
#define MC_CHANNEL_RANK_PRESENT 0x7c
#define RANK_PRESENT_MASK 0xffff
#define MC_CHANNEL_ADDR_MATCH 0xf0
#define MC_CHANNEL_ERROR_MASK 0xf8
#define MC_CHANNEL_ERROR_INJECT 0xfc
#define INJECT_ADDR_PARITY 0x10
#define INJECT_ECC 0x08
#define MASK_CACHELINE 0x06
#define MASK_FULL_CACHELINE 0x06
#define MASK_MSB32_CACHELINE 0x04
#define MASK_LSB32_CACHELINE 0x02
#define NO_MASK_CACHELINE 0x00
#define REPEAT_EN 0x01
/* OFFSETS for Devices 4,5 and 6 Function 1 */
#define MC_DOD_CH_DIMM0 0x48
#define MC_DOD_CH_DIMM1 0x4c
#define MC_DOD_CH_DIMM2 0x50
#define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
#define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
#define DIMM_PRESENT_MASK (1 << 9)
#define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
#define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
#define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
#define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
#define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
#define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
#define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
#define MC_DOD_NUMCOL_MASK 3
#define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
#define MC_RANK_PRESENT 0x7c
#define MC_SAG_CH_0 0x80
#define MC_SAG_CH_1 0x84
#define MC_SAG_CH_2 0x88
#define MC_SAG_CH_3 0x8c
#define MC_SAG_CH_4 0x90
#define MC_SAG_CH_5 0x94
#define MC_SAG_CH_6 0x98
#define MC_SAG_CH_7 0x9c
#define MC_RIR_LIMIT_CH_0 0x40
#define MC_RIR_LIMIT_CH_1 0x44
#define MC_RIR_LIMIT_CH_2 0x48
#define MC_RIR_LIMIT_CH_3 0x4C
#define MC_RIR_LIMIT_CH_4 0x50
#define MC_RIR_LIMIT_CH_5 0x54
#define MC_RIR_LIMIT_CH_6 0x58
#define MC_RIR_LIMIT_CH_7 0x5C
#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
#define MC_RIR_WAY_CH 0x80
#define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
#define MC_RIR_WAY_RANK_MASK 0x7
/*
* i7core structs
*/
#define NUM_CHANS 3
#define MAX_DIMMS 3 /* Max DIMMS per channel */
#define MAX_MCR_FUNC 4
#define MAX_CHAN_FUNC 3
struct i7core_info {
u32 mc_control;
u32 mc_status;
u32 max_dod;
u32 ch_map;
};
struct i7core_inject {
int enable;
u32 section;
u32 type;
u32 eccmask;
/* Error address mask */
int channel, dimm, rank, bank, page, col;
};
struct i7core_channel {
bool is_3dimms_present;
bool is_single_4rank;
bool has_4rank;
u32 dimms;
};
struct pci_id_descr {
int dev;
int func;
int dev_id;
int optional;
};
struct pci_id_table {
const struct pci_id_descr *descr;
int n_devs;
};
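/* All PCI devices belonging to one socket's integrated memory controller */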
struct i7core_dev {
struct list_head list;
u8 socket;
struct pci_dev **pdev;
int n_devs;
struct mem_ctl_info *mci;
};
struct i7core_pvt {
struct device *addrmatch_dev, *chancounts_dev;
struct pci_dev *pci_noncore;
struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
struct i7core_dev *i7core_dev;
struct i7core_info info;
struct i7core_inject inject;
struct i7core_channel channel[NUM_CHANS];
int ce_count_available;
/* ECC corrected errors counts per udimm */
unsigned long udimm_ce_count[MAX_DIMMS];
int udimm_last_ce_count[MAX_DIMMS];
/* ECC corrected errors counts per rdimm */
unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
bool is_registered, enable_scrub;
/* DCLK Frequency used for computing scrub rate */
int dclk_freq;
/* Struct to control EDAC polling */
struct edac_pci_ctl_info *i7core_pci;
};
#define PCI_DESCR(device, function, device_id) \
.dev = (device), \
.func = (function), \
.dev_id = (device_id)
static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
/* Memory controller */
{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
/* Exists only for RDIMM */
{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
/* Channel 0 */
{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
/* Channel 1 */
{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
/* Channel 2 */
{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
/* Generic Non-core registers */
/*
* This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
* On Xeon 55xx, however, it has a different id (8086:2c40). So,
* the probing code needs to test for the other address in case of
* failure of this one
*/
{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
};
static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
/*
* This PCI device has an alternate address on some
* processors like the Core i7 860
*/
{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
};
static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
/* Memory controller */
{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
/* Exists only for RDIMM */
{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
/* Channel 0 */
{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
/* Channel 1 */
{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
/* Channel 2 */
{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
/* Generic Non-core registers */
{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
};
#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
static const struct pci_id_table pci_dev_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
{0,} /* 0 terminated list. */
};
/*
* pci_device_id table for which devices we are looking for
*/
static const struct pci_device_id i7core_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
{0,} /* 0 terminated list. */
};
/****************************************************************************
Ancillary status routines
****************************************************************************/
/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
/* MC_STATUS bits */
#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
/* MC_MAX_DOD read functions */
static inline int numdimms(u32 dimms)
{
return (dimms & 0x3) + 1;
}
static inline int numrank(u32 rank)
{
static const int ranks[] = { 1, 2, 4, -EINVAL };
return ranks[rank & 0x3];
}
static inline int numbank(u32 bank)
{
static const int banks[] = { 4, 8, 16, -EINVAL };
return banks[bank & 0x3];
}
static inline int numrow(u32 row)
{
static const int rows[] = {
1 << 12, 1 << 13, 1 << 14, 1 << 15,
1 << 16, -EINVAL, -EINVAL, -EINVAL,
};
return rows[row & 0x7];
}
static inline int numcol(u32 col)
{
static const int cols[] = {
1 << 10, 1 << 11, 1 << 12, -EINVAL,
};
return cols[col & 0x3];
}
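/* Look up the per-socket device bundle created during probing, if any */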
static struct i7core_dev *get_i7core_dev(u8 socket)
{
struct i7core_dev *i7core_dev;
list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
if (i7core_dev->socket == socket)
return i7core_dev;
}
return NULL;
}
static struct i7core_dev *alloc_i7core_dev(u8 socket,
const struct pci_id_table *table)
{
struct i7core_dev *i7core_dev;
i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
if (!i7core_dev)
return NULL;
i7core_dev->pdev = kcalloc(table->n_devs, sizeof(*i7core_dev->pdev),
GFP_KERNEL);
if (!i7core_dev->pdev) {
kfree(i7core_dev);
return NULL;
}
i7core_dev->socket = socket;
i7core_dev->n_devs = table->n_devs;
list_add_tail(&i7core_dev->list, &i7core_edac_list);
return i7core_dev;
}
static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
list_del(&i7core_dev->list);
kfree(i7core_dev->pdev);
kfree(i7core_dev);
}
/****************************************************************************
Memory check routines
****************************************************************************/
static int get_dimm_config(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
int i, j;
enum edac_type mode;
enum mem_type mtype;
struct dimm_info *dimm;
/* Get data from the MC register, function 0 */
pdev = pvt->pci_mcr[0];
if (!pdev)
return -ENODEV;
/* Device 3 function 0 reads */
pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
pvt->i7core_dev->socket, pvt->info.mc_control,
pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
if (ECC_ENABLED(pvt)) {
edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
if (ECCx8(pvt))
mode = EDAC_S8ECD8ED;
else
mode = EDAC_S4ECD4ED;
} else {
edac_dbg(0, "ECC disabled\n");
mode = EDAC_NONE;
}
/* FIXME: need to handle the error codes */
edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
numdimms(pvt->info.max_dod),
numrank(pvt->info.max_dod >> 2),
numbank(pvt->info.max_dod >> 4),
numrow(pvt->info.max_dod >> 6),
numcol(pvt->info.max_dod >> 9));
for (i = 0; i < NUM_CHANS; i++) {
u32 data, dimm_dod[3], value[8];
if (!pvt->pci_ch[i][0])
continue;
if (!CH_ACTIVE(pvt, i)) {
edac_dbg(0, "Channel %i is not active\n", i);
continue;
}
if (CH_DISABLED(pvt, i)) {
edac_dbg(0, "Channel %i is disabled\n", i);
continue;
}
/* Devices 4-6 function 0 */
pci_read_config_dword(pvt->pci_ch[i][0],
MC_CHANNEL_DIMM_INIT_PARAMS, &data);
if (data & THREE_DIMMS_PRESENT)
pvt->channel[i].is_3dimms_present = true;
if (data & SINGLE_QUAD_RANK_PRESENT)
pvt->channel[i].is_single_4rank = true;
if (data & QUAD_RANK_PRESENT)
pvt->channel[i].has_4rank = true;
if (data & REGISTERED_DIMM)
mtype = MEM_RDDR3;
else
mtype = MEM_DDR3;
/* Devices 4-6 function 1 */
pci_read_config_dword(pvt->pci_ch[i][1],
MC_DOD_CH_DIMM0, &dimm_dod[0]);
pci_read_config_dword(pvt->pci_ch[i][1],
MC_DOD_CH_DIMM1, &dimm_dod[1]);
pci_read_config_dword(pvt->pci_ch[i][1],
MC_DOD_CH_DIMM2, &dimm_dod[2]);
edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
i,
RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
data,
pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
pvt->channel[i].has_4rank ? "HAS_4R " : "",
(data & REGISTERED_DIMM) ? 'R' : 'U');
for (j = 0; j < 3; j++) {
u32 banks, ranks, rows, cols;
u32 size, npages;
if (!DIMM_PRESENT(dimm_dod[j]))
continue;
dimm = edac_get_dimm(mci, i, j, 0);
banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
edac_dbg(0, "\tdimm %d %d MiB offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
j, size,
RANKOFFSET(dimm_dod[j]),
banks, ranks, rows, cols);
npages = MiB_TO_PAGES(size);
dimm->nr_pages = npages;
switch (banks) {
case 4:
dimm->dtype = DEV_X4;
break;
case 8:
dimm->dtype = DEV_X8;
break;
case 16:
dimm->dtype = DEV_X16;
break;
default:
dimm->dtype = DEV_UNKNOWN;
}
snprintf(dimm->label, sizeof(dimm->label),
"CPU#%uChannel#%u_DIMM#%u",
pvt->i7core_dev->socket, i, j);
dimm->grain = 8;
dimm->edac_mode = mode;
dimm->mtype = mtype;
}
pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
for (j = 0; j < 8; j++)
edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
(value[j] >> 27) & 0x1,
(value[j] >> 24) & 0x7,
(value[j] & ((1 << 24) - 1)));
}
return 0;
}
/****************************************************************************
Error insertion routines
****************************************************************************/
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
/* The i7core has independent error injection features per channel.
   However, to keep the code simpler, we only allow enabling error injection
   on one channel at a time.
   Also, since a change to an inject parameter is only applied when injection
   is enabled, we disable error injection on every write to the sysfs nodes
   that control the error injection.
*/
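/*
 * Illustrative usage from user space (paths are illustrative and assume
 * the first memory controller, mc0; the exact node depends on the EDAC
 * mc index):
 *   echo 2 > /sys/devices/system/edac/mc/mc0/inject_type
 *   echo 1 > /sys/devices/system/edac/mc/mc0/inject_enable
 */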
static int disable_inject(const struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
pvt->inject.enable = 0;
if (!pvt->pci_ch[pvt->inject.channel][0])
return -ENODEV;
pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
MC_CHANNEL_ERROR_INJECT, 0);
return 0;
}
/*
* i7core inject.section
*
* accept and store error injection inject.section value
* bit 0 - refers to the lower 32-byte half cacheline
* bit 1 - refers to the upper 32-byte half cacheline
*/
static ssize_t i7core_inject_section_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
unsigned long value;
int rc;
if (pvt->inject.enable)
disable_inject(mci);
rc = kstrtoul(data, 10, &value);
if ((rc < 0) || (value > 3))
return -EIO;
pvt->inject.section = (u32) value;
return count;
}
static ssize_t i7core_inject_section_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
return sprintf(data, "0x%08x\n", pvt->inject.section);
}
/*
* i7core inject.type
*
* accept and store error injection inject.type value
* bit 0 - repeat enable - Enable error repetition
* bit 1 - inject ECC error
* bit 2 - inject parity error
*/
static ssize_t i7core_inject_type_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
unsigned long value;
int rc;
if (pvt->inject.enable)
disable_inject(mci);
rc = kstrtoul(data, 10, &value);
if ((rc < 0) || (value > 7))
return -EIO;
pvt->inject.type = (u32) value;
return count;
}
static ssize_t i7core_inject_type_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
return sprintf(data, "0x%08x\n", pvt->inject.type);
}
/*
* i7core_inject_eccmask_store
*
* The type of error (UE/CE) will depend on the inject.eccmask value:
* Any bits set to a 1 will flip the corresponding ECC bit
* Correctable errors can be injected by flipping 1 bit or the bits within
* a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
* 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
* uncorrectable error to be injected.
*/
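/*
 * Example, following the description above: writing 1 (decimal) flips
 * ECC bit 0 and injects a correctable error; writing a value with bits
 * set in two different symbol pairs, e.g. 65537 (0x00010001), injects
 * an uncorrectable error.
 */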
static ssize_t i7core_inject_eccmask_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
unsigned long value;
int rc;
if (pvt->inject.enable)
disable_inject(mci);
rc = kstrtoul(data, 10, &value);
if (rc < 0)
return -EIO;
pvt->inject.eccmask = (u32) value;
return count;
}
static ssize_t i7core_inject_eccmask_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}
/*
* i7core_addrmatch
*
* Address-match sysfs nodes: they select which channel, dimm, rank, bank,
* page and column an injected error must match. Writing "any" (stored
* internally as -1) makes the corresponding field be ignored when matching.
*/
#define DECLARE_ADDR_MATCH(param, limit) \
static ssize_t i7core_inject_store_##param( \
struct device *dev, \
struct device_attribute *mattr, \
const char *data, size_t count) \
{ \
struct mem_ctl_info *mci = dev_get_drvdata(dev); \
struct i7core_pvt *pvt; \
long value; \
int rc; \
\
edac_dbg(1, "\n"); \
pvt = mci->pvt_info; \
\
if (pvt->inject.enable) \
disable_inject(mci); \
\
if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
value = -1; \
else { \
rc = kstrtoul(data, 10, &value); \
if ((rc < 0) || (value >= limit)) \
return -EIO; \
} \
\
pvt->inject.param = value; \
\
return count; \
} \
\
static ssize_t i7core_inject_show_##param( \
struct device *dev, \
struct device_attribute *mattr, \
char *data) \
{ \
struct mem_ctl_info *mci = dev_get_drvdata(dev); \
struct i7core_pvt *pvt; \
\
pvt = mci->pvt_info; \
edac_dbg(1, "pvt=%p\n", pvt); \
if (pvt->inject.param < 0) \
return sprintf(data, "any\n"); \
else \
return sprintf(data, "%d\n", pvt->inject.param);\
}
#define ATTR_ADDR_MATCH(param) \
static DEVICE_ATTR(param, S_IRUGO | S_IWUSR, \
i7core_inject_show_##param, \
i7core_inject_store_##param)
DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);
ATTR_ADDR_MATCH(channel);
ATTR_ADDR_MATCH(dimm);
ATTR_ADDR_MATCH(rank);
ATTR_ADDR_MATCH(bank);
ATTR_ADDR_MATCH(page);
ATTR_ADDR_MATCH(col);
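/*
 * The declarations above create the channel, dimm, rank, bank, page and
 * col nodes that populate the "inject_addrmatch" sysfs directory set up
 * in i7core_create_sysfs_devices(). Writing "any" stores -1, which makes
 * the corresponding field be ignored when matching the injection address.
 */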
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
u32 read;
int count;
edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
where, val);
for (count = 0; count < 10; count++) {
if (count)
msleep(100);
pci_write_config_dword(dev, where, val);
pci_read_config_dword(dev, where, &read);
if (read == val)
return 0;
}
i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
"write=%08x. Read=%08x\n",
dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
where, val, read);
return -EINVAL;
}
/*
* This routine prepares the Memory Controller for error injection.
* The error will be injected when some process tries to write to the
* memory that matches the given criteria.
* The criteria can be set in terms of a mask where dimm, rank, bank, page
* and col can be specified.
* A -1 value for any of the mask items will make the MCU ignore
* that matching criterion for error injection.
*
* Note that the error will only happen after a write operation to memory
* that matches the condition. If REPEAT_EN is not enabled in the inject
* mask, just one error is produced. Otherwise, errors repeat until the
* inject mask is cleared.
*
* FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
* is reliable enough to check whether the MC is using the
* three channels. However, this is not clear in the datasheet.
*/
static ssize_t i7core_inject_enable_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
u32 injectmask;
u64 mask = 0;
int rc;
long enable;
if (!pvt->pci_ch[pvt->inject.channel][0])
return 0;
rc = kstrtoul(data, 10, &enable);
if ((rc < 0))
return 0;
if (enable) {
pvt->inject.enable = 1;
} else {
disable_inject(mci);
return count;
}
/* Sets pvt->inject.dimm mask */
if (pvt->inject.dimm < 0)
mask |= 1LL << 41;
else {
if (pvt->channel[pvt->inject.channel].dimms > 2)
mask |= (pvt->inject.dimm & 0x3LL) << 35;
else
mask |= (pvt->inject.dimm & 0x1LL) << 36;
}
/* Sets pvt->inject.rank mask */
if (pvt->inject.rank < 0)
mask |= 1LL << 40;
else {
if (pvt->channel[pvt->inject.channel].dimms > 2)
mask |= (pvt->inject.rank & 0x1LL) << 34;
else
mask |= (pvt->inject.rank & 0x3LL) << 34;
}
/* Sets pvt->inject.bank mask */
if (pvt->inject.bank < 0)
mask |= 1LL << 39;
else
mask |= (pvt->inject.bank & 0x15LL) << 30;
/* Sets pvt->inject.page mask */
if (pvt->inject.page < 0)
mask |= 1LL << 38;
else
mask |= (pvt->inject.page & 0xffff) << 14;
/* Sets pvt->inject.column mask */
if (pvt->inject.col < 0)
mask |= 1LL << 37;
else
mask |= (pvt->inject.col & 0x3fff);
/*
* bit 0: REPEAT_EN
* bits 1-2: MASK_HALF_CACHELINE
* bit 3: INJECT_ECC
* bit 4: INJECT_ADDR_PARITY
*/
injectmask = (pvt->inject.type & 1) |
(pvt->inject.section & 0x3) << 1 |
(pvt->inject.type & 0x6) << (3 - 1);
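/*
 * Example: with inject.type = 3 (repeat enable + inject ECC) and
 * inject.section = 1 (lower half cacheline), the computation above gives
 * injectmask = 1 | (1 << 1) | (2 << 2) = 0xb.
 */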
/* Unlock writes to registers - this register is write only */
pci_write_config_dword(pvt->pci_noncore,
MC_CFG_CONTROL, 0x2);
write_and_test(pvt->pci_ch[pvt->inject.channel][0],
MC_CHANNEL_ADDR_MATCH, mask);
write_and_test(pvt->pci_ch[pvt->inject.channel][0],
MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
write_and_test(pvt->pci_ch[pvt->inject.channel][0],
MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
write_and_test(pvt->pci_ch[pvt->inject.channel][0],
MC_CHANNEL_ERROR_INJECT, injectmask);
/*
* This is something undocumented, based on my tests
* Without writing 8 to this register, errors aren't injected. Not sure
* why.
*/
pci_write_config_dword(pvt->pci_noncore,
MC_CFG_CONTROL, 8);
edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
mask, pvt->inject.eccmask, injectmask);
return count;
}
static ssize_t i7core_inject_enable_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
u32 injectmask;
if (!pvt->pci_ch[pvt->inject.channel][0])
return 0;
pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
MC_CHANNEL_ERROR_INJECT, &injectmask);
edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);
if (injectmask & 0x0c)
pvt->inject.enable = 1;
return sprintf(data, "%d\n", pvt->inject.enable);
}
#define DECLARE_COUNTER(param) \
static ssize_t i7core_show_counter_##param( \
struct device *dev, \
struct device_attribute *mattr, \
char *data) \
{ \
struct mem_ctl_info *mci = dev_get_drvdata(dev); \
struct i7core_pvt *pvt = mci->pvt_info; \
\
edac_dbg(1, "\n"); \
if (!pvt->ce_count_available || (pvt->is_registered)) \
return sprintf(data, "data unavailable\n"); \
return sprintf(data, "%lu\n", \
pvt->udimm_ce_count[param]); \
}
#define ATTR_COUNTER(param) \
static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR, \
i7core_show_counter_##param, \
NULL)
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);
ATTR_COUNTER(0);
ATTR_COUNTER(1);
ATTR_COUNTER(2);
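/*
 * The declarations above create the read-only udimm0/udimm1/udimm2 nodes
 * grouped under the "all_channel_counts" sysfs device below; they expose
 * the corrected-error counters kept for unregistered (UDIMM) memory.
 */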
/*
* inject_addrmatch device sysfs struct
*/
static struct attribute *i7core_addrmatch_attrs[] = {
&dev_attr_channel.attr,
&dev_attr_dimm.attr,
&dev_attr_rank.attr,
&dev_attr_bank.attr,
&dev_attr_page.attr,
&dev_attr_col.attr,
NULL
};
static const struct attribute_group addrmatch_grp = {
.attrs = i7core_addrmatch_attrs,
};
static const struct attribute_group *addrmatch_groups[] = {
&addrmatch_grp,
NULL
};
static void addrmatch_release(struct device *device)
{
edac_dbg(1, "Releasing device %s\n", dev_name(device));
kfree(device);
}
static const struct device_type addrmatch_type = {
.groups = addrmatch_groups,
.release = addrmatch_release,
};
/*
* all_channel_counts sysfs struct
*/
static struct attribute *i7core_udimm_counters_attrs[] = {
&dev_attr_udimm0.attr,
&dev_attr_udimm1.attr,
&dev_attr_udimm2.attr,
NULL
};
static const struct attribute_group all_channel_counts_grp = {
.attrs = i7core_udimm_counters_attrs,
};
static const struct attribute_group *all_channel_counts_groups[] = {
&all_channel_counts_grp,
NULL
};
static void all_channel_counts_release(struct device *device)
{
edac_dbg(1, "Releasing device %s\n", dev_name(device));
kfree(device);
}
static const struct device_type all_channel_counts_type = {
.groups = all_channel_counts_groups,
.release = all_channel_counts_release,
};
/*
* inject sysfs attributes
*/
static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
i7core_inject_section_show, i7core_inject_section_store);
static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR,
i7core_inject_type_show, i7core_inject_type_store);
static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR,
i7core_inject_eccmask_show, i7core_inject_eccmask_store);
static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR,
i7core_inject_enable_show, i7core_inject_enable_store);
static struct attribute *i7core_dev_attrs[] = {
&dev_attr_inject_section.attr,
&dev_attr_inject_type.attr,
&dev_attr_inject_eccmask.attr,
&dev_attr_inject_enable.attr,
NULL
};
ATTRIBUTE_GROUPS(i7core_dev);
static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
int rc;
pvt->addrmatch_dev = kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL);
if (!pvt->addrmatch_dev)
return -ENOMEM;
pvt->addrmatch_dev->type = &addrmatch_type;
pvt->addrmatch_dev->bus = mci->dev.bus;
device_initialize(pvt->addrmatch_dev);
pvt->addrmatch_dev->parent = &mci->dev;
dev_set_name(pvt->addrmatch_dev, "inject_addrmatch");
dev_set_drvdata(pvt->addrmatch_dev, mci);
edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev));
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
goto err_put_addrmatch;
if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
rc = -ENOMEM;
goto err_del_addrmatch;
}
pvt->chancounts_dev->type = &all_channel_counts_type;
pvt->chancounts_dev->bus = mci->dev.bus;
device_initialize(pvt->chancounts_dev);
pvt->chancounts_dev->parent = &mci->dev;
dev_set_name(pvt->chancounts_dev, "all_channel_counts");
dev_set_drvdata(pvt->chancounts_dev, mci);
edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev));
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
goto err_put_chancounts;
}
return 0;
err_put_chancounts:
put_device(pvt->chancounts_dev);
err_del_addrmatch:
device_del(pvt->addrmatch_dev);
err_put_addrmatch:
put_device(pvt->addrmatch_dev);
return rc;
}
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
edac_dbg(1, "\n");
if (!pvt->is_registered) {
device_del(pvt->chancounts_dev);
put_device(pvt->chancounts_dev);
}
device_del(pvt->addrmatch_dev);
put_device(pvt->addrmatch_dev);
}
/****************************************************************************
Device initialization routines: put/get, init/exit
****************************************************************************/
/*
* i7core_put_all_devices 'put' all the devices that we have
* reserved via 'get'
*/
static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
int i;
edac_dbg(0, "\n");
for (i = 0; i < i7core_dev->n_devs; i++) {
struct pci_dev *pdev = i7core_dev->pdev[i];
if (!pdev)
continue;
edac_dbg(0, "Removing dev %02x:%02x.%d\n",
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
pci_dev_put(pdev);
}
}
static void i7core_put_all_devices(void)
{
struct i7core_dev *i7core_dev, *tmp;
list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
i7core_put_devices(i7core_dev);
free_i7core_dev(i7core_dev);
}
}
static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
struct pci_dev *pdev = NULL;
int i;
/*
* On Xeon 55xx, the Intel QuickPath Arch Generic Non-core PCI buses
* aren't announced by ACPI, so we need to use a legacy scan probe
* to detect them.
*/
while (table && table->descr) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
if (unlikely(!pdev)) {
for (i = 0; i < MAX_SOCKET_BUSES; i++)
pcibios_scan_specific_bus(255-i);
}
pci_dev_put(pdev);
table++;
}
}
static unsigned i7core_pci_lastbus(void)
{
int last_bus = 0, bus;
struct pci_bus *b = NULL;
while ((b = pci_find_next_bus(b)) != NULL) {
bus = b->number;
edac_dbg(0, "Found bus %d\n", bus);
if (bus > last_bus)
last_bus = bus;
}
edac_dbg(0, "Last bus %d\n", last_bus);
return last_bus;
}
/*
* i7core_get_all_devices Find and perform 'get' operation on the MCH's
* device/functions we want to reference for this driver
*
* Need to 'get' device 16 func 1 and func 2
*/
static int i7core_get_onedevice(struct pci_dev **prev,
const struct pci_id_table *table,
const unsigned devno,
const unsigned last_bus)
{
struct i7core_dev *i7core_dev;
const struct pci_id_descr *dev_descr = &table->descr[devno];
struct pci_dev *pdev = NULL;
u8 bus = 0;
u8 socket = 0;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
dev_descr->dev_id, *prev);
/*
* On Xeon 55xx, the Intel QuickPath Arch Generic Non-core device
* is at PCI ID 8086:2c40 instead of 8086:2c41, so we need to
* probe the alternate device ID in case of failure.
*/
if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
}
if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
!pdev) {
pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
*prev);
}
if (!pdev) {
if (*prev) {
*prev = pdev;
return 0;
}
if (dev_descr->optional)
return 0;
if (devno == 0)
return -ENODEV;
i7core_printk(KERN_INFO,
"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
dev_descr->dev, dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/* End of list, leave */
return -ENODEV;
}
bus = pdev->bus->number;
socket = last_bus - bus;
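/*
 * The per-socket uncore devices sit on the highest PCI bus numbers
 * (i7core_xeon_pci_fixup() scans buses downwards from 255 when needed),
 * so the distance from the last bus is used here as the socket number.
 */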
i7core_dev = get_i7core_dev(socket);
if (!i7core_dev) {
i7core_dev = alloc_i7core_dev(socket, table);
if (!i7core_dev) {
pci_dev_put(pdev);
return -ENOMEM;
}
}
if (i7core_dev->pdev[devno]) {
i7core_printk(KERN_ERR,
"Duplicated device for "
"dev %02x:%02x.%d PCI ID %04x:%04x\n",
bus, dev_descr->dev, dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
pci_dev_put(pdev);
return -ENODEV;
}
i7core_dev->pdev[devno] = pdev;
/* Sanity check */
if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
PCI_FUNC(pdev->devfn) != dev_descr->func)) {
i7core_printk(KERN_ERR,
"Device PCI ID %04x:%04x "
"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
bus, dev_descr->dev, dev_descr->func);
return -ENODEV;
}
/* Be sure that the device is enabled */
if (unlikely(pci_enable_device(pdev) < 0)) {
i7core_printk(KERN_ERR,
"Couldn't enable "
"dev %02x:%02x.%d PCI ID %04x:%04x\n",
bus, dev_descr->dev, dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
return -ENODEV;
}
edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
socket, bus, dev_descr->dev,
dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/*
* As stated in drivers/pci/search.c, the reference count of
* @from is always decremented if it is not %NULL. Since we keep
* iterating until pci_get_device() returns NULL, take an extra
* reference on the device here.
*/
pci_dev_get(pdev);
*prev = pdev;
return 0;
}
static int i7core_get_all_devices(void)
{
int i, rc, last_bus;
struct pci_dev *pdev = NULL;
const struct pci_id_table *table = pci_dev_table;
last_bus = i7core_pci_lastbus();
while (table && table->descr) {
for (i = 0; i < table->n_devs; i++) {
pdev = NULL;
do {
rc = i7core_get_onedevice(&pdev, table, i,
last_bus);
if (rc < 0) {
if (i == 0) {
i = table->n_devs;
break;
}
i7core_put_all_devices();
return -ENODEV;
}
} while (pdev);
}
table++;
}
return 0;
}
static int mci_bind_devs(struct mem_ctl_info *mci,
struct i7core_dev *i7core_dev)
{
struct i7core_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
int i, func, slot;
char *family;
pvt->is_registered = false;
pvt->enable_scrub = false;
for (i = 0; i < i7core_dev->n_devs; i++) {
pdev = i7core_dev->pdev[i];
if (!pdev)
continue;
func = PCI_FUNC(pdev->devfn);
slot = PCI_SLOT(pdev->devfn);
if (slot == 3) {
if (unlikely(func > MAX_MCR_FUNC))
goto error;
pvt->pci_mcr[func] = pdev;
} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
if (unlikely(func > MAX_CHAN_FUNC))
goto error;
pvt->pci_ch[slot - 4][func] = pdev;
} else if (!slot && !func) {
pvt->pci_noncore = pdev;
/* Detect the processor family */
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_I7_NONCORE:
family = "Xeon 35xx/ i7core";
pvt->enable_scrub = false;
break;
case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
family = "i7-800/i5-700";
pvt->enable_scrub = false;
break;
case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
family = "Xeon 34xx";
pvt->enable_scrub = false;
break;
case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
family = "Xeon 55xx";
pvt->enable_scrub = true;
break;
case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
family = "Xeon 56xx / i7-900";
pvt->enable_scrub = true;
break;
default:
family = "unknown";
pvt->enable_scrub = false;
}
edac_dbg(0, "Detected a processor type %s\n", family);
} else
goto error;
edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev, i7core_dev->socket);
if (PCI_SLOT(pdev->devfn) == 3 &&
PCI_FUNC(pdev->devfn) == 2)
pvt->is_registered = true;
}
return 0;
error:
i7core_printk(KERN_ERR, "Device %d, function %d "
"is out of the expected range\n",
slot, func);
return -EINVAL;
}
/****************************************************************************
Error check routines
****************************************************************************/
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
const int chan,
const int new0,
const int new1,
const int new2)
{
struct i7core_pvt *pvt = mci->pvt_info;
int add0 = 0, add1 = 0, add2 = 0;
/* Updates CE counters if it is not the first time here */
if (pvt->ce_count_available) {
/* Updates CE counters */
add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
if (add2 < 0)
add2 += 0x7fff;
pvt->rdimm_ce_count[chan][2] += add2;
if (add1 < 0)
add1 += 0x7fff;
pvt->rdimm_ce_count[chan][1] += add1;
if (add0 < 0)
add0 += 0x7fff;
pvt->rdimm_ce_count[chan][0] += add0;
} else
pvt->ce_count_available = 1;
/* Store the new values */
pvt->rdimm_last_ce_count[chan][2] = new2;
pvt->rdimm_last_ce_count[chan][1] = new1;
pvt->rdimm_last_ce_count[chan][0] = new0;
/* update the EDAC core */
if (add0 != 0)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0,
0, 0, 0,
chan, 0, -1, "error", "");
if (add1 != 0)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1,
0, 0, 0,
chan, 1, -1, "error", "");
if (add2 != 0)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2,
0, 0, 0,
chan, 2, -1, "error", "");
}
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
u32 rcv[3][2];
int i, new0, new1, new2;
/* Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly */
pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
&rcv[0][0]);
pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
&rcv[0][1]);
pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
&rcv[1][0]);
pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
&rcv[1][1]);
pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
&rcv[2][0]);
pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
&rcv[2][1]);
for (i = 0 ; i < 3; i++) {
edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
/* if the channel has 3 DIMMs */
if (pvt->channel[i].dimms > 2) {
new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
} else {
new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
DIMM_BOT_COR_ERR(rcv[i][0]);
new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
DIMM_BOT_COR_ERR(rcv[i][1]);
new2 = 0;
}
i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
}
}
/* This function is based on the device 3 function 4 registers as described on:
* Intel Xeon Processor 5500 Series Datasheet Volume 2
* http://www.intel.com/Assets/PDF/datasheet/321322.pdf
* also available at:
* http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
*/
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
u32 rcv1, rcv0;
int new0, new1, new2;
if (!pvt->pci_mcr[4]) {
edac_dbg(0, "MCR registers not found\n");
return;
}
/* Corrected test errors */
pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
/* Store the new values */
new2 = DIMM2_COR_ERR(rcv1);
new1 = DIMM1_COR_ERR(rcv0);
new0 = DIMM0_COR_ERR(rcv0);
/* Updates CE counters if it is not the first time here */
if (pvt->ce_count_available) {
/* Updates CE counters */
int add0, add1, add2;
add2 = new2 - pvt->udimm_last_ce_count[2];
add1 = new1 - pvt->udimm_last_ce_count[1];
add0 = new0 - pvt->udimm_last_ce_count[0];
if (add2 < 0)
add2 += 0x7fff;
pvt->udimm_ce_count[2] += add2;
if (add1 < 0)
add1 += 0x7fff;
pvt->udimm_ce_count[1] += add1;
if (add0 < 0)
add0 += 0x7fff;
pvt->udimm_ce_count[0] += add0;
if (add0 | add1 | add2)
i7core_printk(KERN_ERR, "New Corrected error(s): "
"dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
add0, add1, add2);
} else
pvt->ce_count_available = 1;
/* Store the new values */
pvt->udimm_last_ce_count[2] = new2;
pvt->udimm_last_ce_count[1] = new1;
pvt->udimm_last_ce_count[0] = new0;
}
/*
* According to tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and IA-32
* Architectures Software Developer’s Manual Volume 3B,
* Nehalem is defined as family 0x06, model 0x1a.
*
* The MCA registers used here are the following ones:
* struct mce field MCA Register
* m->status MSR_IA32_MC8_STATUS
* m->addr MSR_IA32_MC8_ADDR
* m->misc MSR_IA32_MC8_MISC
* In the case of Nehalem, the error information is encoded in the .status and
* .misc fields.
*/
static void i7core_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
char *optype, *err;
enum hw_event_mc_err_type tp_event;
unsigned long error = m->status & 0x1ff0000l;
bool uncorrected_error = m->mcgstatus & 1ll << 61;
bool ripv = m->mcgstatus & 1;
u32 optypenum = (m->status >> 4) & 0x07;
u32 core_err_cnt = (m->status >> 38) & 0x7fff;
u32 dimm = (m->misc >> 16) & 0x3;
u32 channel = (m->misc >> 18) & 0x3;
u32 syndrome = m->misc >> 32;
u32 errnum = find_first_bit(&error, 32);
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv)
tp_event = HW_EVENT_ERR_UNCORRECTED;
else
tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
}
switch (optypenum) {
case 0:
optype = "generic undef request";
break;
case 1:
optype = "read error";
break;
case 2:
optype = "write error";
break;
case 3:
optype = "addr/cmd error";
break;
case 4:
optype = "scrubbing error";
break;
default:
optype = "reserved";
break;
}
switch (errnum) {
case 16:
err = "read ECC error";
break;
case 17:
err = "RAS ECC error";
break;
case 18:
err = "write parity error";
break;
case 19:
err = "redundancy loss";
break;
case 20:
err = "reserved";
break;
case 21:
err = "memory range error";
break;
case 22:
err = "RTID out of range";
break;
case 23:
err = "address parity error";
break;
case 24:
err = "byte enable parity error";
break;
default:
err = "unknown";
}
/*
* Call the helper to output message
* FIXME: what to do if core_err_cnt > 1? Currently, it generates
* only one event
*/
if (uncorrected_error || !pvt->is_registered)
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT,
m->addr & ~PAGE_MASK,
syndrome,
channel, dimm, -1,
err, optype);
}
/*
* i7core_check_error Retrieve and process errors reported by the
* hardware. Called by the Core module.
*/
static void i7core_check_error(struct mem_ctl_info *mci, struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
i7core_mce_output_error(mci, m);
/*
* Now, let's increment CE error counts
*/
if (!pvt->is_registered)
i7core_udimm_check_mc_ecc_err(mci);
else
i7core_rdimm_check_mc_ecc_err(mci);
}
/*
* Check that logging is enabled and that this is the right type
* of error for us to handle.
*/
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *mce = (struct mce *)data;
struct i7core_dev *i7_dev;
struct mem_ctl_info *mci;
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev || (mce->kflags & MCE_HANDLED_CEC))
return NOTIFY_DONE;
mci = i7_dev->mci;
/*
* Just let mcelog handle it if the error is
* outside the memory controller
*/
if (((mce->status & 0xffff) >> 7) != 1)
return NOTIFY_DONE;
/* Bank 8 registers are the only ones that we know how to handle */
if (mce->bank != 8)
return NOTIFY_DONE;
i7core_check_error(mci, mce);
/* Advise mcelog that the errors were handled */
mce->kflags |= MCE_HANDLED_EDAC;
return NOTIFY_OK;
}
static struct notifier_block i7_mce_dec = {
.notifier_call = i7core_mce_check_error,
.priority = MCE_PRIO_EDAC,
};
struct memdev_dmi_entry {
u8 type;
u8 length;
u16 handle;
u16 phys_mem_array_handle;
u16 mem_err_info_handle;
u16 total_width;
u16 data_width;
u16 size;
u8 form;
u8 device_set;
u8 device_locator;
u8 bank_locator;
u8 memory_type;
u16 type_detail;
u16 speed;
u8 manufacturer;
u8 serial_number;
u8 asset_tag;
u8 part_number;
u8 attributes;
u32 extended_size;
u16 conf_mem_clk_speed;
} __attribute__((__packed__));
/*
* Decode the DRAM Clock Frequency, be paranoid, make sure that all
* memory devices show the same speed, and if they don't then consider
* all speeds to be invalid.
*/
static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
{
int *dclk_freq = _dclk_freq;
u16 dmi_mem_clk_speed;
if (*dclk_freq == -1)
return;
if (dh->type == DMI_ENTRY_MEM_DEVICE) {
struct memdev_dmi_entry *memdev_dmi_entry =
(struct memdev_dmi_entry *)dh;
unsigned long conf_mem_clk_speed_offset =
(unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
(unsigned long)&memdev_dmi_entry->type;
unsigned long speed_offset =
(unsigned long)&memdev_dmi_entry->speed -
(unsigned long)&memdev_dmi_entry->type;
/* Check that a DIMM is present */
if (memdev_dmi_entry->size == 0)
return;
/*
* Pick the configured speed if it's available, otherwise
* pick the DIMM speed, or we don't have a speed.
*/
if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
dmi_mem_clk_speed =
memdev_dmi_entry->conf_mem_clk_speed;
} else if (memdev_dmi_entry->length > speed_offset) {
dmi_mem_clk_speed = memdev_dmi_entry->speed;
} else {
*dclk_freq = -1;
return;
}
if (*dclk_freq == 0) {
/* First pass, speed was 0 */
if (dmi_mem_clk_speed > 0) {
/* Set speed if a valid speed is read */
*dclk_freq = dmi_mem_clk_speed;
} else {
/* Otherwise we don't have a valid speed */
*dclk_freq = -1;
}
} else if (*dclk_freq > 0 &&
*dclk_freq != dmi_mem_clk_speed) {
/*
* If we have a speed, check that all DIMMS are the same
* speed, otherwise set the speed as invalid.
*/
*dclk_freq = -1;
}
}
}
/*
* The default DCLK frequency is used as a fallback if we
* fail to find anything reliable in the DMI. The value
* is taken straight from the datasheet.
*/
#define DEFAULT_DCLK_FREQ 800
static int get_dclk_freq(void)
{
int dclk_freq = 0;
dmi_walk(decode_dclk, (void *)&dclk_freq);
if (dclk_freq < 1)
return DEFAULT_DCLK_FREQ;
return dclk_freq;
}
/*
* set_sdram_scrub_rate This routine sets the scrub rate (bytes/sec
* bandwidth) in hardware according to the
* SCRUBINTERVAL formula found in the datasheet.
*/
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
struct i7core_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
u32 dw_scrub;
u32 dw_ssr;
/* Get data from the MC register, function 2 */
pdev = pvt->pci_mcr[2];
if (!pdev)
return -ENODEV;
pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
if (new_bw == 0) {
/* Prepare to disable patrol scrub */
dw_scrub &= ~STARTSCRUB;
/* Stop the patrol scrub engine */
write_and_test(pdev, MC_SCRUB_CONTROL,
dw_scrub & ~SCRUBINTERVAL_MASK);
/* Get current status of scrub rate and set bit to disable */
pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
dw_ssr &= ~SSR_MODE_MASK;
dw_ssr |= SSR_MODE_DISABLE;
} else {
const int cache_line_size = 64;
const u32 freq_dclk_mhz = pvt->dclk_freq;
unsigned long long scrub_interval;
/*
* Translate the desired scrub rate to a register value and
* program the corresponding register value.
*/
scrub_interval = (unsigned long long)freq_dclk_mhz *
cache_line_size * 1000000;
do_div(scrub_interval, new_bw);
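/*
 * Worked example with illustrative numbers: dclk_freq = 800 MHz,
 * cache line = 64 bytes, requested bandwidth new_bw = 5000000 bytes/s:
 * scrub_interval = 800 * 64 * 1000000 / 5000000 = 10240, which is
 * programmed below as long as it fits within SCRUBINTERVAL_MASK.
 */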
if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
return -EINVAL;
dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
/* Start the patrol scrub engine */
pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
STARTSCRUB | dw_scrub);
/* Get current status of scrub rate and set bit to enable */
pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
dw_ssr &= ~SSR_MODE_MASK;
dw_ssr |= SSR_MODE_ENABLE;
}
/* Disable or enable scrubbing */
pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
return new_bw;
}
/*
* get_sdram_scrub_rate This routine converts the current scrub rate value
* into bytes/sec bandwidth according to the
* SCRUBINTERVAL formula found in the datasheet.
*/
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
const u32 cache_line_size = 64;
const u32 freq_dclk_mhz = pvt->dclk_freq;
unsigned long long scrub_rate;
u32 scrubval;
/* Get data from the MC register, function 2 */
pdev = pvt->pci_mcr[2];
if (!pdev)
return -ENODEV;
/* Get current scrub control data */
pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
/* Mask highest 8-bits to 0 */
scrubval &= SCRUBINTERVAL_MASK;
if (!scrubval)
return 0;
/* Calculate scrub rate value into byte/sec bandwidth */
scrub_rate = (unsigned long long)freq_dclk_mhz *
1000000 * cache_line_size;
do_div(scrub_rate, scrubval);
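/*
 * Inverse of the example in set_sdram_scrub_rate(): with scrubval = 10240
 * and dclk_freq = 800 MHz, scrub_rate = 800 * 1000000 * 64 / 10240 =
 * 5000000 bytes/s.
 */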
return (int)scrub_rate;
}
static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
u32 pci_lock;
/* Unlock writes to pci registers */
pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
pci_lock &= ~0x3;
pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
pci_lock | MC_CFG_UNLOCK);
mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
}
static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
u32 pci_lock;
/* Lock writes to pci registers */
pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
pci_lock &= ~0x3;
pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
pci_lock | MC_CFG_LOCK);
}
static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
{
pvt->i7core_pci = edac_pci_create_generic_ctl(
&pvt->i7core_dev->pdev[0]->dev,
EDAC_MOD_STR);
if (unlikely(!pvt->i7core_pci))
i7core_printk(KERN_WARNING,
"Unable to setup PCI error report via EDAC\n");
}
static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
{
if (likely(pvt->i7core_pci))
edac_pci_release_generic_ctl(pvt->i7core_pci);
else
i7core_printk(KERN_ERR,
"Couldn't find mem_ctl_info for socket %d\n",
pvt->i7core_dev->socket);
pvt->i7core_pci = NULL;
}
static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
{
struct mem_ctl_info *mci = i7core_dev->mci;
struct i7core_pvt *pvt;
if (unlikely(!mci || !mci->pvt_info)) {
edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);
i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
return;
}
pvt = mci->pvt_info;
edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
/* Disable scrubrate setting */
if (pvt->enable_scrub)
disable_sdram_scrub_setting(mci);
/* Disable EDAC polling */
i7core_pci_ctl_release(pvt);
/* Remove MC sysfs nodes */
i7core_delete_sysfs_devices(mci);
edac_mc_del_mc(mci->pdev);
edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
kfree(mci->ctl_name);
edac_mc_free(mci);
i7core_dev->mci = NULL;
}
static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
struct mem_ctl_info *mci;
struct i7core_pvt *pvt;
int rc;
struct edac_mc_layer layers[2];
/* allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = NUM_CHANS;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
layers[1].size = MAX_DIMMS;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
sizeof(*pvt));
if (unlikely(!mci))
return -ENOMEM;
edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
/* Associates i7core_dev and mci for future usage */
pvt->i7core_dev = i7core_dev;
i7core_dev->mci = mci;
/*
* FIXME: how to handle RDDR3 at MCI level? It is possible to have
* Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
* memory channels
*/
mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7core_edac.c";
mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket);
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail1;
}
mci->dev_name = pci_name(i7core_dev->pdev[0]);
mci->ctl_page_to_phys = NULL;
/* Store pci devices at mci for faster access */
rc = mci_bind_devs(mci, i7core_dev);
if (unlikely(rc < 0))
goto fail0;
/* Get dimm basic config */
get_dimm_config(mci);
/* record ptr to the generic device */
mci->pdev = &i7core_dev->pdev[0]->dev;
/* Enable scrubrate setting */
if (pvt->enable_scrub)
enable_sdram_scrub_setting(mci);
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc_with_groups(mci, i7core_dev_groups))) {
edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
rc = -EINVAL;
goto fail0;
}
if (i7core_create_sysfs_devices(mci)) {
edac_dbg(0, "MC: failed to create sysfs nodes\n");
edac_mc_del_mc(mci->pdev);
rc = -EINVAL;
goto fail0;
}
/* Default error mask is any memory */
pvt->inject.channel = 0;
pvt->inject.dimm = -1;
pvt->inject.rank = -1;
pvt->inject.bank = -1;
pvt->inject.page = -1;
pvt->inject.col = -1;
/* allocating generic PCI control info */
i7core_pci_ctl_create(pvt);
/* DCLK for scrub rate setting */
pvt->dclk_freq = get_dclk_freq();
return 0;
fail0:
kfree(mci->ctl_name);
fail1:
edac_mc_free(mci);
i7core_dev->mci = NULL;
return rc;
}
/*
* i7core_probe Probe for ONE instance of device to see if it is
* present.
* return:
* 0 for FOUND a device
* < 0 for error code
*/
static int i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc, count = 0;
struct i7core_dev *i7core_dev;
/* get the pci devices we want to reserve for our use */
mutex_lock(&i7core_edac_lock);
/*
* All memory controllers are allocated at the first pass.
*/
if (unlikely(probed >= 1)) {
mutex_unlock(&i7core_edac_lock);
return -ENODEV;
}
probed++;
rc = i7core_get_all_devices();
if (unlikely(rc < 0))
goto fail0;
list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
count++;
rc = i7core_register_mci(i7core_dev);
if (unlikely(rc < 0))
goto fail1;
}
/*
* Nehalem-EX uses a different memory controller. However, as the
* memory controller is not visible on some Nehalem/Nehalem-EP, we
* need to indirectly probe via an X58 PCI device. The same devices
* are found on (some) Nehalem-EX. So, on those machines, the
* probe routine needs to return -ENODEV, as the actual Memory
* Controller registers won't be detected.
*/
if (!count) {
rc = -ENODEV;
goto fail1;
}
i7core_printk(KERN_INFO,
"Driver loaded, %d memory controller(s) found.\n",
count);
mutex_unlock(&i7core_edac_lock);
return 0;
fail1:
list_for_each_entry(i7core_dev, &i7core_edac_list, list)
i7core_unregister_mci(i7core_dev);
i7core_put_all_devices();
fail0:
mutex_unlock(&i7core_edac_lock);
return rc;
}
/*
* i7core_remove destructor for one instance of device
*
*/
static void i7core_remove(struct pci_dev *pdev)
{
struct i7core_dev *i7core_dev;
edac_dbg(0, "\n");
/*
* There is a problem here: the pdev value for removal will be wrong, since
* it will point to the X58 device used to detect that the machine
* is a Nehalem or newer design. However, due to the way several PCI
* devices are grouped together to provide MC functionality, we need
* to use a different method for releasing the devices
*/
mutex_lock(&i7core_edac_lock);
if (unlikely(!probed)) {
mutex_unlock(&i7core_edac_lock);
return;
}
list_for_each_entry(i7core_dev, &i7core_edac_list, list)
i7core_unregister_mci(i7core_dev);
/* Release PCI resources */
i7core_put_all_devices();
probed--;
mutex_unlock(&i7core_edac_lock);
}
MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
/*
* i7core_driver pci_driver structure for this module
*
*/
static struct pci_driver i7core_driver = {
.name = "i7core_edac",
.probe = i7core_probe,
.remove = i7core_remove,
.id_table = i7core_pci_tbl,
};
/*
* i7core_init Module entry function
* Try to initialize this module for its devices
*/
static int __init i7core_init(void)
{
int pci_rc;
edac_dbg(2, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
if (use_pci_fixup)
i7core_xeon_pci_fixup(pci_dev_table);
pci_rc = pci_register_driver(&i7core_driver);
if (pci_rc >= 0) {
mce_register_decode_chain(&i7_mce_dec);
return 0;
}
i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
pci_rc);
return pci_rc;
}
/*
* i7core_exit() Module exit function
* Unregister the driver
*/
static void __exit i7core_exit(void)
{
edac_dbg(2, "\n");
pci_unregister_driver(&i7core_driver);
mce_unregister_decode_chain(&i7_mce_dec);
}
module_init(i7core_init);
module_exit(i7core_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
I7CORE_REVISION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i7core_edac.c |
/*
* Intel X38 Memory Controller kernel module
* Copyright (C) 2008 Cluster Computing, Inc.
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* This file is based on i3200_edac.c
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "edac_module.h"
#define EDAC_MOD_STR "x38_edac"
#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
#define X38_RANKS 8
#define X38_RANKS_PER_CHANNEL 4
#define X38_CHANNELS 2
/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
#define X38_MCHBAR_HIGH 0x4c
#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
#define X38_MMR_WINDOW_SIZE 16384
#define X38_TOM 0xa0 /* Top of Memory (16b)
*
* 15:10 reserved
* 9:0 total populated physical memory
*/
#define X38_TOM_MASK 0x3ff /* bits 9:0 */
#define X38_TOM_SHIFT 26 /* 64MiB grain */
#define X38_ERRSTS 0xc8 /* Error Status Register (16b)
*
* 15 reserved
* 14 Isochronous TBWRR Run Behind FIFO Full
* (ITCV)
* 13 Isochronous TBWRR Run Behind FIFO Put
* (ITSTV)
* 12 reserved
* 11 MCH Thermal Sensor Event
* for SMI/SCI/SERR (GTSE)
* 10 reserved
* 9 LOCK to non-DRAM Memory Flag (LCKF)
* 8 reserved
* 7 DRAM Throttle Flag (DTF)
* 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
#define X38_ERRSTS_UE 0x0002
#define X38_ERRSTS_CE 0x0001
#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)
/* Intel MMIO register space - device 0 function 0 - MMR space */
#define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
*
* 15:10 reserved
* 9:0 Channel 0 DRAM Rank Boundary Address
*/
#define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
#define X38_DRB_MASK 0x3ff /* bits 9:0 */
#define X38_DRB_SHIFT 26 /* 64MiB grain */
#define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
*
* 63:48 Error Column Address (ERRCOL)
* 47:32 Error Row Address (ERRROW)
* 31:29 Error Bank Address (ERRBANK)
* 28:27 Error Rank Address (ERRRANK)
* 26:24 reserved
* 23:16 Error Syndrome (ERRSYND)
* 15: 2 reserved
* 1 Multiple Bit Error Status (MERRSTS)
* 0 Correctable Error Status (CERRSTS)
*/
#define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */
#define X38_ECCERRLOG_CE 0x1
#define X38_ECCERRLOG_UE 0x2
#define X38_ECCERRLOG_RANK_BITS 0x18000000
#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000
#define X38_CAPID0 0xe0 /* see P.94 of spec for details */
static int x38_channel_num;
static int how_many_channel(struct pci_dev *pdev)
{
unsigned char capid0_8b; /* 8th byte of CAPID0 */
pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
edac_dbg(0, "In single channel mode\n");
x38_channel_num = 1;
} else {
edac_dbg(0, "In dual channel mode\n");
x38_channel_num = 2;
}
return x38_channel_num;
}
static unsigned long eccerrlog_syndrome(u64 log)
{
return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
}
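/*
 * ERRRANK lives in bits 28:27 of the ECC error log; adding the channel
 * offset (channel * X38_RANKS_PER_CHANNEL) maps it to the virtual csrow
 * index reported to the EDAC core.
 */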
static int eccerrlog_row(int channel, u64 log)
{
return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
(channel * X38_RANKS_PER_CHANNEL);
}
enum x38_chips {
X38 = 0,
};
struct x38_dev_info {
const char *ctl_name;
};
struct x38_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[X38_CHANNELS];
};
static const struct x38_dev_info x38_devs[] = {
[X38] = {
.ctl_name = "x38"},
};
static struct pci_dev *mci_pdev;
static int x38_registered = 1;
static void x38_clear_error_info(struct mem_ctl_info *mci)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
X38_ERRSTS_BITS);
}
static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
struct x38_error_info *info)
{
struct pci_dev *pdev;
void __iomem *window = mci->pvt_info;
pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
* registers at once, and a CE log can be overwritten by a UE between
* the reads.
*/
pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
if (!(info->errsts & X38_ERRSTS_BITS))
return;
info->eccerrlog[0] = lo_hi_readq(window + X38_C0ECCERRLOG);
if (x38_channel_num == 2)
info->eccerrlog[1] = lo_hi_readq(window + X38_C1ECCERRLOG);
pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
/*
* If the error is the same for both reads then the first set
* of reads is valid. If there is a change then there is a CE
* with no info and the second set of reads is valid and
* should be UE info.
*/
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
info->eccerrlog[0] = lo_hi_readq(window + X38_C0ECCERRLOG);
if (x38_channel_num == 2)
info->eccerrlog[1] =
lo_hi_readq(window + X38_C1ECCERRLOG);
}
x38_clear_error_info(mci);
}
static void x38_process_error_info(struct mem_ctl_info *mci,
struct x38_error_info *info)
{
int channel;
u64 log;
if (!(info->errsts & X38_ERRSTS_BITS))
return;
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1,
"UE overwrote CE", "");
info->errsts = info->errsts2;
}
for (channel = 0; channel < x38_channel_num; channel++) {
log = info->eccerrlog[channel];
if (log & X38_ECCERRLOG_UE) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, 0,
eccerrlog_row(channel, log),
-1, -1,
"x38 UE", "");
} else if (log & X38_ECCERRLOG_CE) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, eccerrlog_syndrome(log),
eccerrlog_row(channel, log),
-1, -1,
"x38 CE", "");
}
}
}
static void x38_check(struct mem_ctl_info *mci)
{
struct x38_error_info info;
x38_get_and_clear_error_info(mci, &info);
x38_process_error_info(mci, &info);
}
static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
{
union {
u64 mchbar;
struct {
u32 mchbar_low;
u32 mchbar_high;
};
} u;
void __iomem *window;
pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
u.mchbar &= X38_MCHBAR_MASK;
if (u.mchbar != (resource_size_t)u.mchbar) {
printk(KERN_ERR
"x38: mmio space beyond accessible range (0x%llx)\n",
(unsigned long long)u.mchbar);
return NULL;
}
window = ioremap(u.mchbar, X38_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
return window;
}
static void x38_get_drbs(void __iomem *window,
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
int i;
for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
}
}
static bool x38_is_stacked(struct pci_dev *pdev,
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
u16 tom;
pci_read_config_word(pdev, X38_TOM, &tom);
tom &= X38_TOM_MASK;
return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
}
static unsigned long drb_to_nr_pages(
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
bool stacked, int channel, int rank)
{
int n;
n = drbs[channel][rank];
if (rank > 0)
n -= drbs[channel][rank - 1];
if (stacked && (channel == 1) && drbs[channel][rank] ==
drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
}
n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
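/*
 * The shift above converts the 64 MiB DRB granularity (X38_DRB_SHIFT = 26)
 * into pages; with 4 KiB pages (PAGE_SHIFT = 12) each DRB unit corresponds
 * to 16384 pages.
 */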
return n;
}
static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
int i, j;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
edac_dbg(0, "MC:\n");
window = x38_map_mchbar(pdev);
if (!window)
return -ENODEV;
x38_get_drbs(window, drbs);
how_many_channel(pdev);
/* FIXME: unconventional pvt_info usage */
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = X38_RANKS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = x38_channel_num;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = x38_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = x38_check;
mci->ctl_page_to_phys = NULL;
mci->pvt_info = window;
stacked = x38_is_stacked(pdev, drbs);
/*
* The dram rank boundary (DRB) reg values are boundary addresses
* for each DRAM rank with a granularity of 64MB. DRB regs are
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = mci->csrows[i];
nr_pages = drb_to_nr_pages(drbs, stacked,
i / X38_RANKS_PER_CHANNEL,
i % X38_RANKS_PER_CHANNEL);
if (nr_pages == 0)
continue;
for (j = 0; j < x38_channel_num; j++) {
struct dimm_info *dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / x38_channel_num;
dimm->grain = nr_pages << PAGE_SHIFT;
dimm->mtype = MEM_DDR2;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
}
x38_clear_error_info(mci);
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
edac_dbg(3, "MC: success\n");
return 0;
fail:
iounmap(window);
if (mci)
edac_mc_free(mci);
return rc;
}
static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = x38_probe1(pdev, ent->driver_data);
if (!mci_pdev)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void x38_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
iounmap(mci->pvt_info);
edac_mc_free(mci);
}
static const struct pci_device_id x38_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
X38},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
static struct pci_driver x38_driver = {
.name = EDAC_MOD_STR,
.probe = x38_init_one,
.remove = x38_remove_one,
.id_table = x38_pci_tbl,
};
static int __init x38_init(void)
{
int pci_rc;
edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&x38_driver);
if (pci_rc < 0)
goto fail0;
if (!mci_pdev) {
x38_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_X38_HB, NULL);
if (!mci_pdev) {
edac_dbg(0, "x38 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
if (pci_rc < 0) {
edac_dbg(0, "x38 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&x38_driver);
fail0:
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit x38_exit(void)
{
edac_dbg(3, "MC:\n");
pci_unregister_driver(&x38_driver);
if (!x38_registered) {
x38_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
}
}
module_init(x38_init);
module_exit(x38_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/x38_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* GHES/EDAC Linux driver
*
* Copyright (c) 2013 by Mauro Carvalho Chehab
*
* Red Hat Inc. https://www.redhat.com
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_module.h"
#include <ras/ras_event.h>
#include <linux/notifier.h>
#define OTHER_DETAIL_LEN 400
struct ghes_pvt {
struct mem_ctl_info *mci;
/* Buffers for the error handling routine */
char other_detail[OTHER_DETAIL_LEN];
char msg[80];
};
static refcount_t ghes_refcount = REFCOUNT_INIT(0);
/*
* Access to ghes_pvt must be protected by ghes_lock. The spinlock
* also provides the necessary (implicit) memory barrier for the SMP
* case to make the pointer visible on another CPU.
*/
static struct ghes_pvt *ghes_pvt;
/*
* This driver's representation of the system hardware, as collected
* from DMI.
*/
static struct ghes_hw_desc {
int num_dimms;
struct dimm_info *dimms;
} ghes_hw;
/* GHES registration mutex */
static DEFINE_MUTEX(ghes_reg_mutex);
/*
* Sync with other, potentially concurrent callers of
* ghes_edac_report_mem_error(). We don't know what the
* "inventive" firmware would do.
*/
static DEFINE_SPINLOCK(ghes_lock);
static bool system_scanned;
static struct list_head *ghes_devs;
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
u8 type;
u8 length;
u16 handle;
u16 phys_mem_array_handle;
u16 mem_err_info_handle;
u16 total_width;
u16 data_width;
u16 size;
u8 form_factor;
u8 device_set;
u8 device_locator;
u8 bank_locator;
u8 memory_type;
u16 type_detail;
u16 speed;
u8 manufacturer;
u8 serial_number;
u8 asset_tag;
u8 part_number;
u8 attributes;
u32 extended_size;
u16 conf_mem_clk_speed;
} __attribute__((__packed__));
static struct dimm_info *find_dimm_by_handle(struct mem_ctl_info *mci, u16 handle)
{
struct dimm_info *dimm;
mci_for_each_dimm(mci, dimm) {
if (dimm->smbios_handle == handle)
return dimm;
}
return NULL;
}
static void dimm_setup_label(struct dimm_info *dimm, u16 handle)
{
const char *bank = NULL, *device = NULL;
dmi_memdev_name(handle, &bank, &device);
/*
* Set an empty string when both bank and device are missing. In this case,
* the label assigned by default will be preserved.
*/
snprintf(dimm->label, sizeof(dimm->label), "%s%s%s",
(bank && *bank) ? bank : "",
(bank && *bank && device && *device) ? " " : "",
(device && *device) ? device : "");
}
static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
{
u16 rdr_mask = BIT(7) | BIT(13);
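/*
 * SMBIOS Type 17 "Size" field, as handled below: 0xffff means the size is
 * unknown and 0x7fff means the real size is in the "Extended Size" field
 * (in MiB); otherwise bit 15 selects the size granularity per the SMBIOS
 * spec.
 */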
if (entry->size == 0xffff) {
pr_info("Can't get DIMM%i size\n", dimm->idx);
dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */
} else if (entry->size == 0x7fff) {
dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
} else {
if (entry->size & BIT(15))
dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10);
else
dimm->nr_pages = MiB_TO_PAGES(entry->size);
}
switch (entry->memory_type) {
case 0x12:
if (entry->type_detail & BIT(13))
dimm->mtype = MEM_RDDR;
else
dimm->mtype = MEM_DDR;
break;
case 0x13:
if (entry->type_detail & BIT(13))
dimm->mtype = MEM_RDDR2;
else
dimm->mtype = MEM_DDR2;
break;
case 0x14:
dimm->mtype = MEM_FB_DDR2;
break;
case 0x18:
if (entry->type_detail & BIT(12))
dimm->mtype = MEM_NVDIMM;
else if (entry->type_detail & BIT(13))
dimm->mtype = MEM_RDDR3;
else
dimm->mtype = MEM_DDR3;
break;
case 0x1a:
if (entry->type_detail & BIT(12))
dimm->mtype = MEM_NVDIMM;
else if (entry->type_detail & BIT(13))
dimm->mtype = MEM_RDDR4;
else
dimm->mtype = MEM_DDR4;
break;
default:
if (entry->type_detail & BIT(6))
dimm->mtype = MEM_RMBS;
else if ((entry->type_detail & rdr_mask) == rdr_mask)
dimm->mtype = MEM_RDR;
else if (entry->type_detail & BIT(7))
dimm->mtype = MEM_SDR;
else if (entry->type_detail & BIT(9))
dimm->mtype = MEM_EDO;
else
dimm->mtype = MEM_UNKNOWN;
}
/*
* Actually, we can only detect if the memory has bits for
* checksum or not
*/
if (entry->total_width == entry->data_width)
dimm->edac_mode = EDAC_NONE;
else
dimm->edac_mode = EDAC_SECDED;
dimm->dtype = DEV_UNKNOWN;
dimm->grain = 128; /* Likely, worse case */
dimm_setup_label(dimm, entry->handle);
if (dimm->nr_pages) {
edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
dimm->idx, edac_mem_types[dimm->mtype],
PAGES_TO_MiB(dimm->nr_pages),
(dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
entry->memory_type, entry->type_detail,
entry->total_width, entry->data_width);
}
dimm->smbios_handle = entry->handle;
}
static void enumerate_dimms(const struct dmi_header *dh, void *arg)
{
struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
struct ghes_hw_desc *hw = (struct ghes_hw_desc *)arg;
struct dimm_info *d;
if (dh->type != DMI_ENTRY_MEM_DEVICE)
return;
/* Grow the array in chunks of 16 entries */
if (!hw->num_dimms || !(hw->num_dimms % 16)) {
struct dimm_info *new;
new = krealloc_array(hw->dimms, hw->num_dimms + 16,
sizeof(struct dimm_info), GFP_KERNEL);
if (!new) {
WARN_ON_ONCE(1);
return;
}
hw->dimms = new;
}
d = &hw->dimms[hw->num_dimms];
d->idx = hw->num_dimms;
assign_dmi_dimm_info(d, entry);
hw->num_dimms++;
}
static void ghes_scan_system(void)
{
if (system_scanned)
return;
dmi_walk(enumerate_dimms, &ghes_hw);
system_scanned = true;
}
static int print_mem_error_other_detail(const struct cper_sec_mem_err *mem, char *msg,
const char *location, unsigned int len)
{
u32 n;
if (!msg)
return 0;
n = 0;
len -= 1;
n += scnprintf(msg + n, len - n, "APEI location: %s ", location);
if (!(mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS))
goto out;
n += scnprintf(msg + n, len - n, "status(0x%016llx): ", mem->error_status);
n += scnprintf(msg + n, len - n, "%s ", cper_mem_err_status_str(mem->error_status));
out:
msg[n] = '\0';
return n;
}
static int ghes_edac_report_mem_error(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cper_sec_mem_err *mem_err = (struct cper_sec_mem_err *)data;
struct cper_mem_err_compact cmem;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
unsigned long sev = val;
struct ghes_pvt *pvt;
unsigned long flags;
char *p;
/*
* We can do the locking below because GHES defers error processing
* from NMI to IRQ context. Whenever that changes, we'd at least
* know.
*/
if (WARN_ON_ONCE(in_nmi()))
return NOTIFY_OK;
spin_lock_irqsave(&ghes_lock, flags);
pvt = ghes_pvt;
if (!pvt)
goto unlock;
mci = pvt->mci;
e = &mci->error_desc;
/* Cleans the error report buffer */
memset(e, 0, sizeof (*e));
e->error_count = 1;
e->grain = 1;
e->msg = pvt->msg;
e->other_detail = pvt->other_detail;
e->top_layer = -1;
e->mid_layer = -1;
e->low_layer = -1;
*pvt->other_detail = '\0';
*pvt->msg = '\0';
switch (sev) {
case GHES_SEV_CORRECTED:
e->type = HW_EVENT_ERR_CORRECTED;
break;
case GHES_SEV_RECOVERABLE:
e->type = HW_EVENT_ERR_UNCORRECTED;
break;
case GHES_SEV_PANIC:
e->type = HW_EVENT_ERR_FATAL;
break;
default:
case GHES_SEV_NO:
e->type = HW_EVENT_ERR_INFO;
}
edac_dbg(1, "error validation_bits: 0x%08llx\n",
(long long)mem_err->validation_bits);
/* Error type, mapped on e->msg */
if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
u8 etype = mem_err->error_type;
p = pvt->msg;
p += snprintf(p, sizeof(pvt->msg), "%s", cper_mem_err_type_str(etype));
} else {
strcpy(pvt->msg, "unknown error");
}
/* Error address */
if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
e->page_frame_number = PHYS_PFN(mem_err->physical_addr);
e->offset_in_page = offset_in_page(mem_err->physical_addr);
}
/* Error grain */
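/*
 * physical_addr_mask has 1s in the valid address bits, so ~mask + 1 is the
 * size of the smallest region the reported address resolves to.
 */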
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
e->grain = ~mem_err->physical_addr_mask + 1;
/* Memory error location, mapped on e->location */
p = e->location;
cper_mem_err_pack(mem_err, &cmem);
p += cper_mem_err_location(&cmem, p);
if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
struct dimm_info *dimm;
p += cper_dimm_err_location(&cmem, p);
dimm = find_dimm_by_handle(mci, mem_err->mem_dev_handle);
if (dimm) {
e->top_layer = dimm->idx;
strcpy(e->label, dimm->label);
}
}
if (p > e->location)
*(p - 1) = '\0';
if (!*e->label)
strcpy(e->label, "unknown memory");
/* All other fields are mapped on e->other_detail */
p = pvt->other_detail;
p += print_mem_error_other_detail(mem_err, p, e->location, OTHER_DETAIL_LEN);
if (p > pvt->other_detail)
*(p - 1) = '\0';
edac_raw_mc_handle_error(e);
unlock:
spin_unlock_irqrestore(&ghes_lock, flags);
return NOTIFY_OK;
}
static struct notifier_block ghes_edac_mem_err_nb = {
.notifier_call = ghes_edac_report_mem_error,
.priority = 0,
};
static int ghes_edac_register(struct device *dev)
{
bool fake = false;
struct mem_ctl_info *mci;
struct ghes_pvt *pvt;
struct edac_mc_layer layers[1];
unsigned long flags;
int rc = 0;
/* finish another registration/unregistration instance first */
mutex_lock(&ghes_reg_mutex);
/*
* We have only one logical memory controller to which all DIMMs belong.
*/
if (refcount_inc_not_zero(&ghes_refcount))
goto unlock;
ghes_scan_system();
/* Check if we've got a bogus BIOS */
if (!ghes_hw.num_dimms) {
fake = true;
ghes_hw.num_dimms = 1;
}
layers[0].type = EDAC_MC_LAYER_ALL_MEM;
layers[0].size = ghes_hw.num_dimms;
layers[0].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
rc = -ENOMEM;
goto unlock;
}
pvt = mci->pvt_info;
pvt->mci = mci;
mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "ghes_edac.c";
mci->ctl_name = "ghes_edac";
mci->dev_name = "ghes";
if (fake) {
pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
pr_info("work on such system. Use this driver with caution\n");
}
pr_info("This system has %d DIMM sockets.\n", ghes_hw.num_dimms);
if (!fake) {
struct dimm_info *src, *dst;
int i = 0;
mci_for_each_dimm(mci, dst) {
src = &ghes_hw.dimms[i];
dst->idx = src->idx;
dst->smbios_handle = src->smbios_handle;
dst->nr_pages = src->nr_pages;
dst->mtype = src->mtype;
dst->edac_mode = src->edac_mode;
dst->dtype = src->dtype;
dst->grain = src->grain;
/*
* If no src->label, preserve default label assigned
* from EDAC core.
*/
if (strlen(src->label))
memcpy(dst->label, src->label, sizeof(src->label));
i++;
}
} else {
struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0);
dimm->nr_pages = 1;
dimm->grain = 128;
dimm->mtype = MEM_UNKNOWN;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_SECDED;
}
rc = edac_mc_add_mc(mci);
if (rc < 0) {
pr_info("Can't register with the EDAC core\n");
edac_mc_free(mci);
rc = -ENODEV;
goto unlock;
}
spin_lock_irqsave(&ghes_lock, flags);
ghes_pvt = pvt;
spin_unlock_irqrestore(&ghes_lock, flags);
ghes_register_report_chain(&ghes_edac_mem_err_nb);
/* only set on success */
refcount_set(&ghes_refcount, 1);
unlock:
/* Not needed anymore */
kfree(ghes_hw.dimms);
ghes_hw.dimms = NULL;
mutex_unlock(&ghes_reg_mutex);
return rc;
}
static void ghes_edac_unregister(struct ghes *ghes)
{
struct mem_ctl_info *mci;
unsigned long flags;
mutex_lock(&ghes_reg_mutex);
system_scanned = false;
memset(&ghes_hw, 0, sizeof(struct ghes_hw_desc));
if (!refcount_dec_and_test(&ghes_refcount))
goto unlock;
/*
* Wait for the irq handler to finish.
*/
spin_lock_irqsave(&ghes_lock, flags);
mci = ghes_pvt ? ghes_pvt->mci : NULL;
ghes_pvt = NULL;
spin_unlock_irqrestore(&ghes_lock, flags);
if (!mci)
goto unlock;
mci = edac_mc_del_mc(mci->pdev);
if (mci)
edac_mc_free(mci);
ghes_unregister_report_chain(&ghes_edac_mem_err_nb);
unlock:
mutex_unlock(&ghes_reg_mutex);
}
static int __init ghes_edac_init(void)
{
struct ghes *g, *g_tmp;
ghes_devs = ghes_get_devices();
if (!ghes_devs)
return -ENODEV;
if (list_empty(ghes_devs)) {
pr_info("GHES probing device list is empty");
return -ENODEV;
}
list_for_each_entry_safe(g, g_tmp, ghes_devs, elist) {
ghes_edac_register(g->dev);
}
return 0;
}
module_init(ghes_edac_init);
static void __exit ghes_edac_exit(void)
{
struct ghes *g, *g_tmp;
list_for_each_entry_safe(g, g_tmp, ghes_devs, elist) {
ghes_edac_unregister(g);
}
}
module_exit(ghes_edac_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Output ACPI APEI/GHES BIOS detected errors via EDAC");
| linux-master | drivers/edac/ghes_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Pondicherry2 memory controller.
*
* Copyright (c) 2016, Intel Corporation.
*
* [Derived from sb_edac.c]
*
* Translation of system physical addresses to DIMM addresses
* is a two stage process:
*
* First the Pondicherry 2 memory controller handles slice and channel interleaving
* in "sys2pmi()". This is (almost) completley common between platforms.
*
* Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
* rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_data/x86/p2sb.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"
#define EDAC_MOD_STR "pnd2_edac"
#define APL_NUM_CHANNELS 4
#define DNV_NUM_CHANNELS 2
#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
enum type {
APL,
DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};
struct dram_addr {
int chan;
int dimm;
int rank;
int bank;
int row;
int col;
};
struct pnd2_pvt {
int dimm_geom[APL_NUM_CHANNELS];
u64 tolm, tohm;
};
/*
* System address space is divided into multiple regions with
* different interleave rules in each. The as0/as1 regions
* have no interleaving at all. The as2 region is interleaved
* between two channels. The mot region is magic and may overlap
* other regions, with its interleave rules taking precedence.
* Addresses not in any of these regions are interleaved across
* all four channels.
*/
static struct region {
u64 base;
u64 limit;
u8 enabled;
} mot, as0, as1, as2;
static struct dunit_ops {
char *name;
enum type type;
int pmiaddr_shift;
int pmiidx_shift;
int channels;
int dimms_per_channel;
int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
int (*get_registers)(void);
int (*check_ecc)(void);
void (*mk_region)(char *name, struct region *rp, void *asym);
void (*get_dimm_config)(struct mem_ctl_info *mci);
int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
struct dram_addr *daddr, char *msg);
} *ops;
static struct mem_ctl_info *pnd2_mci;
#define PND2_MSG_SIZE 256
/* Debug macros */
#define pnd2_printk(level, fmt, arg...) \
edac_printk(level, "pnd2", fmt, ##arg)
#define pnd2_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)
#define PMI_ADDRESS_WIDTH 31
#define PND_MAX_PHYS_BIT 39
#define APL_ASYMSHIFT 28
#define DNV_ASYMSHIFT 31
#define CH_HASH_MASK_LSB 6
#define SLICE_HASH_MASK_LSB 6
#define MOT_SLC_INTLV_BIT 12
#define LOG2_PMI_ADDR_GRANULARITY 5
#define MOT_SHIFT 24
#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
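/* Example: GET_BITFIELD(m->status, 16, 31) extracts the 16-bit MSCOD field. */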
#define U64_LSHIFT(val, s) ((u64)(val) << (s))
/*
* On Apollo Lake we access memory controller registers via a
* side-band mailbox style interface in a hidden PCI device
* configuration space.
*/
static struct pci_bus *p2sb_bus;
#define P2SB_DEVFN PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF 0xd0
#define P2SB_DATA_OFF 0xd4
#define P2SB_STAT_OFF 0xd8
#define P2SB_ROUT_OFF 0xda
#define P2SB_EADD_OFF 0xdc
#define P2SB_HIDE_OFF 0xe1
#define P2SB_BUSY 1
#define P2SB_READ(size, off, ptr) \
pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
static bool p2sb_is_busy(u16 *status)
{
P2SB_READ(word, P2SB_STAT_OFF, status);
return !!(*status & P2SB_BUSY);
}
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
int retries = 0xff, ret;
u16 status;
u8 hidden;
/* Unhide the P2SB device, if it's hidden */
P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
if (hidden)
P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);
if (p2sb_is_busy(&status)) {
ret = -EAGAIN;
goto out;
}
P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);
while (p2sb_is_busy(&status)) {
if (retries-- == 0) {
ret = -EBUSY;
goto out;
}
}
P2SB_READ(dword, P2SB_DATA_OFF, data);
ret = (status >> 1) & 0x3;
out:
/* Hide the P2SB device, if it was hidden before */
if (hidden)
P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);
return ret;
}
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
int ret = 0;
edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
switch (sz) {
case 8:
ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
fallthrough;
case 4:
ret |= _apl_rd_reg(port, off, op, (u32 *)data);
pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
break;
}
return ret;
}
static u64 get_mem_ctrl_hub_base_addr(void)
{
struct b_cr_mchbar_lo_pci lo;
struct b_cr_mchbar_hi_pci hi;
struct pci_dev *pdev;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
if (pdev) {
pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
pci_dev_put(pdev);
} else {
return 0;
}
if (!lo.enable) {
edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
return 0;
}
return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
#define DNV_MCHBAR_SIZE 0x8000
#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
struct pci_dev *pdev;
void __iomem *base;
struct resource r;
int ret;
if (op == 4) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
if (!pdev)
return -ENODEV;
pci_read_config_dword(pdev, off, data);
pci_dev_put(pdev);
} else {
/* MMIO via memory controller hub base address */
if (op == 0 && port == 0x4c) {
memset(&r, 0, sizeof(r));
r.start = get_mem_ctrl_hub_base_addr();
if (!r.start)
return -ENODEV;
r.end = r.start + DNV_MCHBAR_SIZE - 1;
} else {
/* MMIO via sideband register base address */
ret = p2sb_bar(NULL, 0, &r);
if (ret)
return ret;
r.start += (port << 16);
r.end = r.start + DNV_SB_PORT_SIZE - 1;
}
base = ioremap(r.start, resource_size(&r));
if (!base)
return -ENODEV;
if (sz == 8)
*(u64 *)data = readq(base + off);
else
*(u32 *)data = readl(base + off);
iounmap(base);
}
edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
return 0;
}
#define RD_REGP(regp, regname, port) \
ops->rd_reg(port, \
regname##_offset, \
regname##_r_opcode, \
regp, sizeof(struct regname), \
#regname)
#define RD_REG(regp, regname) \
ops->rd_reg(regname ## _port, \
regname##_offset, \
regname##_r_opcode, \
regp, sizeof(struct regname), \
#regname)
static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */
static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;
static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;
static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
rp->enabled = 1;
rp->base = base;
rp->limit = limit;
edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
if (mask == 0) {
pr_info(FW_BUG "MOT mask cannot be zero\n");
return;
}
if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
pr_info(FW_BUG "MOT mask not power of two\n");
return;
}
if (base & ~mask) {
pr_info(FW_BUG "MOT region base/mask alignment error\n");
return;
}
rp->base = base;
rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
rp->enabled = 1;
edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
static bool in_region(struct region *rp, u64 addr)
{
if (!rp->enabled)
return false;
return rp->base <= addr && addr <= rp->limit;
}
static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
int mask = 0;
if (!p->slice_0_mem_disabled)
mask |= p->sym_slice0_channel_enabled;
if (!p->slice_1_disabled)
mask |= p->sym_slice1_channel_enabled << 2;
if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
mask &= 0x5;
return mask;
}
static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
struct b_cr_asym_mem_region0_mchbar *as0,
struct b_cr_asym_mem_region1_mchbar *as1,
struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
int mask = 0;
if (as2way->asym_2way_interleave_enable)
mask = intlv[as2way->asym_2way_intlv_mode];
if (as0->slice0_asym_enable)
mask |= (1 << as0->slice0_asym_channel_select);
if (as1->slice1_asym_enable)
mask |= (4 << as1->slice1_asym_channel_select);
if (p->slice_0_mem_disabled)
mask &= 0xc;
if (p->slice_1_disabled)
mask &= 0x3;
if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
mask &= 0x5;
return mask;
}
static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;
/* Apollo Lake dunit */
/*
* Validated on board with just two DIMMs in the [0] and [2] positions
* in this array. The other port numbers match the documentation, but
* caution is advised.
*/
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
static void apl_mk_region(char *name, struct region *rp, void *asym)
{
struct b_cr_asym_mem_region0_mchbar *a = asym;
mk_region(name, rp,
U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}
static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
struct b_cr_asym_mem_region_denverton *a = asym;
mk_region(name, rp,
U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}
static int apl_get_registers(void)
{
int ret = -ENODEV;
int i;
if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
return -ENODEV;
/*
* RD_REGP() will fail for unpopulated or non-existent
* DIMM slots. Return success if we find at least one DIMM.
*/
for (i = 0; i < APL_NUM_CHANNELS; i++)
if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
ret = 0;
return ret;
}
static int dnv_get_registers(void)
{
int i;
if (RD_REG(&dsch, d_cr_dsch))
return -ENODEV;
for (i = 0; i < DNV_NUM_CHANNELS; i++)
if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
return -ENODEV;
return 0;
}
/*
* Read all the h/w config registers once here (they don't
* change at run time). Figure out which address ranges have
* which interleave characteristics.
*/
static int get_registers(void)
{
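/* Address bit used for channel/slice interleaving, indexed by chash.interleave_mode. */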
const int intlv[] = { 10, 11, 12, 12 };
if (RD_REG(&tolud, b_cr_tolud_pci) ||
RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
RD_REG(&chash, b_cr_slice_channel_hash))
return -ENODEV;
if (ops->get_registers())
return -ENODEV;
if (ops->type == DNV) {
/* PMI channel idx (always 0) for asymmetric region */
asym0.slice0_asym_channel_select = 0;
asym1.slice1_asym_channel_select = 0;
/* PMI channel bitmap (always 1) for symmetric region */
chash.sym_slice0_channel_enabled = 0x1;
chash.sym_slice1_channel_enabled = 0x1;
}
if (asym0.slice0_asym_enable)
ops->mk_region("as0", &as0, &asym0);
if (asym1.slice1_asym_enable)
ops->mk_region("as1", &as1, &asym1);
if (asym_2way.asym_2way_interleave_enable) {
mk_region("as2way", &as2,
U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}
if (mot_base.imr_en) {
mk_region_mask("mot", &mot,
U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
}
top_lm = U64_LSHIFT(tolud.tolud, 20);
top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
two_slices = !chash.slice_1_disabled &&
!chash.slice_0_mem_disabled &&
(chash.sym_slice0_channel_enabled != 0) &&
(chash.sym_slice1_channel_enabled != 0);
two_channels = !chash.ch_1_disabled &&
!chash.enable_pmi_dual_data_mode &&
((chash.sym_slice0_channel_enabled == 3) ||
(chash.sym_slice1_channel_enabled == 3));
sym_chan_mask = gen_sym_mask(&chash);
asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
chan_mask = sym_chan_mask | asym_chan_mask;
if (two_slices && !two_channels) {
if (chash.hvm_mode)
slice_selector = 29;
else
slice_selector = intlv[chash.interleave_mode];
} else if (!two_slices && two_channels) {
if (chash.hvm_mode)
chan_selector = 29;
else
chan_selector = intlv[chash.interleave_mode];
} else if (two_slices && two_channels) {
if (chash.hvm_mode) {
slice_selector = 29;
chan_selector = 30;
} else {
slice_selector = intlv[chash.interleave_mode];
chan_selector = intlv[chash.interleave_mode] + 1;
}
}
if (two_slices) {
if (!chash.hvm_mode)
slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
if (!two_channels)
slice_hash_mask |= BIT_ULL(slice_selector);
}
if (two_channels) {
if (!chash.hvm_mode)
chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
if (!two_slices)
chan_hash_mask |= BIT_ULL(chan_selector);
}
return 0;
}
/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
/* Squeeze out one address bit, shift upper part down to fill gap */
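/* e.g. with bitidx == 2, 0b110110 (0x36) becomes 0b11010 (0x1a). */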
static void remove_addr_bit(u64 *addr, int bitidx)
{
u64 mask;
if (bitidx == -1)
return;
mask = (1ull << bitidx) - 1;
*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
/* XOR all the bits from addr specified in mask */
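/* Returns the parity (0 or 1) of the selected bits; the shift cascade folds the masked 64-bit value down to a single bit. */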
static int hash_by_mask(u64 addr, u64 mask)
{
u64 result = addr & mask;
result = (result >> 32) ^ result;
result = (result >> 16) ^ result;
result = (result >> 8) ^ result;
result = (result >> 4) ^ result;
result = (result >> 2) ^ result;
result = (result >> 1) ^ result;
return (int)result & 1;
}
/*
* First stage decode. Take the system address and figure out which
* second stage will deal with it based on interleave modes.
*/
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
u64 contig_addr, contig_base, contig_offset, contig_base_adj;
int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
MOT_CHAN_INTLV_BIT_1SLC_2CH;
int slice_intlv_bit_rm = SELECTOR_DISABLED;
int chan_intlv_bit_rm = SELECTOR_DISABLED;
/* Determine if address is in the MOT region. */
bool mot_hit = in_region(&mot, addr);
/* Calculate the number of symmetric regions enabled. */
int sym_channels = hweight8(sym_chan_mask);
/*
* The amount we need to shift the asym base can be determined by the
* number of enabled symmetric channels.
* NOTE: This can only work because symmetric memory is not supposed
* to do a 3-way interleave.
*/
int sym_chan_shift = sym_channels >> 1;
/* Give up if address is out of range, or in MMIO gap */
if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
(addr >= top_lm && addr < _4GB) || addr >= top_hm) {
snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
return -EINVAL;
}
/* Get a contiguous memory address (remove the MMIO gap) */
contig_addr = remove_mmio_gap(addr);
if (in_region(&as0, addr)) {
*pmiidx = asym0.slice0_asym_channel_select;
contig_base = remove_mmio_gap(as0.base);
contig_offset = contig_addr - contig_base;
contig_base_adj = (contig_base >> sym_chan_shift) *
((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
} else if (in_region(&as1, addr)) {
*pmiidx = 2u + asym1.slice1_asym_channel_select;
contig_base = remove_mmio_gap(as1.base);
contig_offset = contig_addr - contig_base;
contig_base_adj = (contig_base >> sym_chan_shift) *
((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
bool channel1;
mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
hash_by_mask(contig_addr, chan_hash_mask);
*pmiidx |= (u32)channel1;
contig_base = remove_mmio_gap(as2.base);
chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
contig_offset = contig_addr - contig_base;
remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
} else {
/* Otherwise we're in normal, boring symmetric mode. */
*pmiidx = 0u;
if (two_slices) {
bool slice1;
if (mot_hit) {
slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
} else {
slice_intlv_bit_rm = slice_selector;
slice1 = hash_by_mask(addr, slice_hash_mask);
}
*pmiidx = (u32)slice1 << 1;
}
if (two_channels) {
bool channel1;
mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
MOT_CHAN_INTLV_BIT_1SLC_2CH;
if (mot_hit) {
chan_intlv_bit_rm = mot_intlv_bit;
channel1 = (addr >> mot_intlv_bit) & 1;
} else {
chan_intlv_bit_rm = chan_selector;
channel1 = hash_by_mask(contig_addr, chan_hash_mask);
}
*pmiidx |= (u32)channel1;
}
}
/* Remove the chan_selector bit first */
remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
/* Remove the slice bit (we remove it second because it must be lower) */
remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
*pmiaddr = contig_addr;
return 0;
}
/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n)) /* column */
#define B(n) (0x20 | (n)) /* bank */
#define R(n) (0x40 | (n)) /* row */
#define RS (0x80) /* rank */
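/*
 * Each dimm_geometry.bits[] entry maps one PMI address bit to a DRAM address
 * bit: the high nibble selects column/bank/row/rank and the low nibble gives
 * the bit index within that field. A zero entry marks a PMI bit that must
 * not be set for this geometry.
 */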
/* addrdec values */
#define AMAP_1KB 0
#define AMAP_2KB 1
#define AMAP_4KB 2
#define AMAP_RSVD 3
/* dden values */
#define DEN_4Gb 0
#define DEN_8Gb 2
/* dwid values */
#define X8 0
#define X16 1
static struct dimm_geometry {
u8 addrdec;
u8 dden;
u8 dwid;
u8 rowbits, colbits;
u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
{
.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
.rowbits = 15, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
0, 0, 0, 0
}
},
{
.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
.rowbits = 16, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
R(15), 0, 0, 0
}
},
{
.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
.rowbits = 16, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
R(15), 0, 0, 0
}
},
{
.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
.rowbits = 16, .colbits = 11,
.bits = {
C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
R(14), R(15), 0, 0
}
},
{
.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
.rowbits = 15, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
0, 0, 0, 0
}
},
{
.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
.rowbits = 16, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
R(15), 0, 0, 0
}
},
{
.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
.rowbits = 16, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
R(15), 0, 0, 0
}
},
{
.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
.rowbits = 16, .colbits = 11,
.bits = {
C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
R(14), R(15), 0, 0
}
},
{
.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
.rowbits = 15, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
0, 0, 0, 0
}
},
{
.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
.rowbits = 16, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
R(15), 0, 0, 0
}
},
{
.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
.rowbits = 16, .colbits = 10,
.bits = {
C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
R(15), 0, 0, 0
}
},
{
.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
.rowbits = 16, .colbits = 11,
.bits = {
C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
R(14), R(15), 0, 0
}
}
};
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
int bhash = 0;
switch (idx) {
case 0:
bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
break;
case 1:
bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
bhash ^= ((pmiaddr >> 22) & 1) << 1;
break;
case 2:
bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
break;
}
return bhash;
}
static int rank_hash(u64 pmiaddr)
{
return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}
/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
struct dram_addr *daddr, char *msg)
{
struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
struct pnd2_pvt *pvt = mci->pvt_info;
int g = pvt->dimm_geom[pmiidx];
struct dimm_geometry *d = &dimms[g];
int column = 0, bank = 0, row = 0, rank = 0;
int i, idx, type, skiprs = 0;
for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
int bit = (pmiaddr >> i) & 1;
if (i + skiprs >= PMI_ADDRESS_WIDTH) {
snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
return -EINVAL;
}
type = d->bits[i + skiprs] & ~0xf;
idx = d->bits[i + skiprs] & 0xf;
/*
* On single rank DIMMs ignore the rank select bit
* and shift remainder of "bits[]" down one place.
*/
if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
skiprs = 1;
type = d->bits[i + skiprs] & ~0xf;
idx = d->bits[i + skiprs] & 0xf;
}
switch (type) {
case C(0):
column |= (bit << idx);
break;
case B(0):
bank |= (bit << idx);
if (cr_drp0->bahen)
bank ^= bank_hash(pmiaddr, idx, d->addrdec);
break;
case R(0):
row |= (bit << idx);
break;
case RS:
rank = bit;
if (cr_drp0->rsien)
rank ^= rank_hash(pmiaddr);
break;
default:
if (bit) {
snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
return -EINVAL;
}
goto done;
}
}
done:
daddr->col = column;
daddr->bank = bank;
daddr->row = row;
daddr->rank = rank;
daddr->dimm = 0;
return 0;
}
/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
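/* e.g. dnv_get_bit(0x28, 3, 0) == 1: bit 3 of 0x28 is set, moved down to bit 0. */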
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
struct dram_addr *daddr, char *msg)
{
/* Rank 0 or 1 */
daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
/* Rank 2 or 3 */
daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
/*
* Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
* flip them if DIMM1 is larger than DIMM0.
*/
daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
if (dsch.ddr4en)
daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
if (dmap1[pmiidx].bxor) {
if (dsch.ddr4en) {
daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
if (dsch.chan_width == 0)
/* 64/72 bit dram channel width */
daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
else
/* 32/40 bit dram channel width */
daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
} else {
daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
if (dsch.chan_width == 0)
daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
else
daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
}
}
daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
if (dmap4[pmiidx].row14 != 31)
daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
if (dmap4[pmiidx].row15 != 31)
daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
if (dmap4[pmiidx].row16 != 31)
daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
if (dmap4[pmiidx].row17 != 31)
daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
return 0;
}
static int check_channel(int ch)
{
if (drp0[ch].dramtype != 0) {
pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
return 1;
} else if (drp0[ch].eccen == 0) {
pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
return 1;
}
return 0;
}
static int apl_check_ecc_active(void)
{
int i, ret = 0;
/* Check dramtype and ECC mode for each present DIMM */
for (i = 0; i < APL_NUM_CHANNELS; i++)
if (chan_mask & BIT(i))
ret += check_channel(i);
return ret ? -EINVAL : 0;
}
#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
static int check_unit(int ch)
{
struct d_cr_drp *d = &drp[ch];
if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
return 1;
}
return 0;
}
static int dnv_check_ecc_active(void)
{
int i, ret = 0;
for (i = 0; i < DNV_NUM_CHANNELS; i++)
ret += check_unit(i);
return ret ? -EINVAL : 0;
}
static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
struct dram_addr *daddr, char *msg)
{
u64 pmiaddr;
u32 pmiidx;
int ret;
ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
if (ret)
return ret;
pmiaddr >>= ops->pmiaddr_shift;
/* pmi channel idx to dimm channel idx */
pmiidx >>= ops->pmiidx_shift;
daddr->chan = pmiidx;
ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
if (ret)
return ret;
edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
return 0;
}
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
struct dram_addr *daddr)
{
enum hw_event_mc_err_type tp_event;
char *optype, msg[PND2_MSG_SIZE];
bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
bool overflow = m->status & MCI_STATUS_OVER;
bool uc_err = m->status & MCI_STATUS_UC;
bool recov = m->status & MCI_STATUS_S;
u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
u32 mscod = GET_BITFIELD(m->status, 16, 31);
u32 errcode = GET_BITFIELD(m->status, 0, 15);
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
int rc;
tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
HW_EVENT_ERR_CORRECTED;
/*
* According to Table 15-9 of the Intel Architecture spec vol 3A,
* memory errors should fit in this mask:
* 000f 0000 1mmm cccc (binary)
* where:
* f = Correction Report Filtering Bit. If 1, subsequent errors
* won't be shown
* mmm = error type
* cccc = channel
* If the mask doesn't match, report an error to the parsing logic
*/
if (!((errcode & 0xef80) == 0x80)) {
optype = "Can't parse: it is not a mem";
} else {
switch (optypenum) {
case 0:
optype = "generic undef request error";
break;
case 1:
optype = "memory read error";
break;
case 2:
optype = "memory write error";
break;
case 3:
optype = "addr/cmd error";
break;
case 4:
optype = "memory scrubbing error";
break;
default:
optype = "reserved";
break;
}
}
/* Only decode errors with a valid address (ADDRV) */
if (!(m->status & MCI_STATUS_ADDRV))
return;
rc = get_memory_error_data(mci, m->addr, daddr, msg);
if (rc)
goto address_error;
snprintf(msg, sizeof(msg),
"%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
edac_dbg(0, "%s\n", msg);
/* Call the helper to output message */
edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
return;
address_error:
edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
struct pnd2_pvt *pvt = mci->pvt_info;
struct dimm_info *dimm;
struct d_cr_drp0 *d;
u64 capacity;
int i, g;
for (i = 0; i < APL_NUM_CHANNELS; i++) {
if (!(chan_mask & BIT(i)))
continue;
dimm = edac_get_dimm(mci, i, 0, 0);
if (!dimm) {
edac_dbg(0, "No allocated DIMM for channel %d\n", i);
continue;
}
d = &drp0[i];
for (g = 0; g < ARRAY_SIZE(dimms); g++)
if (dimms[g].addrdec == d->addrdec &&
dimms[g].dden == d->dden &&
dimms[g].dwid == d->dwid)
break;
if (g == ARRAY_SIZE(dimms)) {
edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
continue;
}
pvt->dimm_geom[i] = g;
capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
(1ul << dimms[g].colbits);
edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
dimm->grain = 32;
dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
dimm->mtype = MEM_DDR3;
dimm->edac_mode = EDAC_SECDED;
snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
}
}
static const int dnv_dtypes[] = {
DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};
static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
struct dimm_info *dimm;
struct d_cr_drp *d;
u64 capacity;
if (dsch.ddr4en) {
memtype = MEM_DDR4;
banks = 16;
colbits = 10;
} else {
memtype = MEM_DDR3;
banks = 8;
}
for (i = 0; i < DNV_NUM_CHANNELS; i++) {
if (dmap4[i].row14 == 31)
rowbits = 14;
else if (dmap4[i].row15 == 31)
rowbits = 15;
else if (dmap4[i].row16 == 31)
rowbits = 16;
else if (dmap4[i].row17 == 31)
rowbits = 17;
else
rowbits = 18;
if (memtype == MEM_DDR3) {
if (dmap1[i].ca11 != 0x3f)
colbits = 12;
else
colbits = 10;
}
d = &drp[i];
/* DIMM0 is present if rank0 and/or rank1 is enabled */
ranks_of_dimm[0] = d->rken0 + d->rken1;
/* DIMM1 is present if rank2 and/or rank3 is enabled */
ranks_of_dimm[1] = d->rken2 + d->rken3;
for (j = 0; j < DNV_MAX_DIMMS; j++) {
if (!ranks_of_dimm[j])
continue;
dimm = edac_get_dimm(mci, i, j, 0);
if (!dimm) {
edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
continue;
}
capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
dimm->grain = 32;
dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
dimm->mtype = memtype;
dimm->edac_mode = EDAC_SECDED;
snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
}
}
}
static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct pnd2_pvt *pvt;
int rc;
rc = ops->check_ecc();
if (rc < 0)
return rc;
/* Allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = ops->channels;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
layers[1].size = ops->dimms_per_channel;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci)
return -ENOMEM;
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
mci->mod_name = EDAC_MOD_STR;
mci->dev_name = ops->name;
mci->ctl_name = "Pondicherry2";
/* Get dimm basic config and the memory layout */
ops->get_dimm_config(mci);
if (edac_mc_add_mc(mci)) {
edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
edac_mc_free(mci);
return -EINVAL;
}
*ppmci = mci;
return 0;
}
static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
if (unlikely(!mci || !mci->pvt_info)) {
pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
return;
}
/* Remove MC sysfs nodes */
edac_mc_del_mc(NULL);
edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
edac_mc_free(mci);
}
/*
* Callback function registered with core kernel mce code.
* Called once for each logged error.
*/
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
struct mce *mce = (struct mce *)data;
struct mem_ctl_info *mci;
struct dram_addr daddr;
char *type;
mci = pnd2_mci;
if (!mci || (mce->kflags & MCE_HANDLED_CEC))
return NOTIFY_DONE;
/*
* Just let mcelog handle it if the error is
* outside the memory controller. A memory error
* is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
* Bit 12 has a special meaning.
*/
if ((mce->status & 0xefff) >> 7 != 1)
return NOTIFY_DONE;
if (mce->mcgstatus & MCG_STATUS_MCIP)
type = "Exception";
else
type = "Event";
pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
pnd2_mce_output_error(mci, mce, &daddr);
/* Advise mcelog that the error was handled */
mce->kflags |= MCE_HANDLED_EDAC;
return NOTIFY_OK;
}
static struct notifier_block pnd2_mce_dec = {
.notifier_call = pnd2_mce_check_error,
.priority = MCE_PRIO_EDAC,
};
#ifdef CONFIG_EDAC_DEBUG
/*
* Write an address to this file to exercise the address decode
* logic in this driver.
*/
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
.data = pnd2_result,
.size = 0
};
static int debugfs_u64_set(void *data, u64 val)
{
struct dram_addr daddr;
struct mce m;
*(u64 *)data = val;
m.mcgstatus = 0;
/* ADDRV + MemRd + Unknown channel */
m.status = MCI_STATUS_ADDRV + 0x9f;
m.addr = val;
pnd2_mce_output_error(pnd2_mci, &m, &daddr);
snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
"SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
pnd2_blob.size = strlen(pnd2_blob.data);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
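/*
 * Usage sketch (paths assume the default debugfs mount and EDAC debugfs root):
 *   echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *   cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 */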
static void setup_pnd2_debug(void)
{
pnd2_test = edac_debugfs_create_dir("pnd2_test");
edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
&pnd2_fake_addr, &fops_u64_wo);
debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}
static void teardown_pnd2_debug(void)
{
debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */
static int pnd2_probe(void)
{
int rc;
edac_dbg(2, "\n");
rc = get_registers();
if (rc)
return rc;
return pnd2_register_mci(&pnd2_mci);
}
static void pnd2_remove(void)
{
edac_dbg(0, "\n");
pnd2_unregister_mci(pnd2_mci);
}
static struct dunit_ops apl_ops = {
.name = "pnd2/apl",
.type = APL,
.pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
.pmiidx_shift = 0,
.channels = APL_NUM_CHANNELS,
.dimms_per_channel = 1,
.rd_reg = apl_rd_reg,
.get_registers = apl_get_registers,
.check_ecc = apl_check_ecc_active,
.mk_region = apl_mk_region,
.get_dimm_config = apl_get_dimm_config,
.pmi2mem = apl_pmi2mem,
};
static struct dunit_ops dnv_ops = {
.name = "pnd2/dnv",
.type = DNV,
.pmiaddr_shift = 0,
.pmiidx_shift = 1,
.channels = DNV_NUM_CHANNELS,
.dimms_per_channel = 2,
.rd_reg = dnv_rd_reg,
.get_registers = dnv_get_registers,
.check_ecc = dnv_check_ecc_active,
.mk_region = dnv_mk_region,
.get_dimm_config = dnv_get_dimm_config,
.pmi2mem = dnv_pmi2mem,
};
static const struct x86_cpu_id pnd2_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
static int __init pnd2_init(void)
{
const struct x86_cpu_id *id;
const char *owner;
int rc;
edac_dbg(2, "\n");
if (ghes_get_devices())
return -EBUSY;
owner = edac_get_owner();
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
id = x86_match_cpu(pnd2_cpuids);
if (!id)
return -ENODEV;
ops = (struct dunit_ops *)id->driver_data;
if (ops->type == APL) {
p2sb_bus = pci_find_bus(0, 0);
if (!p2sb_bus)
return -ENODEV;
}
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
rc = pnd2_probe();
if (rc < 0) {
pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
return rc;
}
if (!pnd2_mci)
return -ENODEV;
mce_register_decode_chain(&pnd2_mce_dec);
setup_pnd2_debug();
return 0;
}
static void __exit pnd2_exit(void)
{
edac_dbg(2, "\n");
teardown_pnd2_debug();
mce_unregister_decode_chain(&pnd2_mce_dec);
pnd2_remove();
}
module_init(pnd2_init);
module_exit(pnd2_exit);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
| linux-master | drivers/edac/pnd2_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*
* Texas Instruments DDR3 ECC error correction and detection driver
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
#include <linux/edac.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include "edac_module.h"
/* EMIF controller registers */
#define EMIF_SDRAM_CONFIG 0x008
#define EMIF_IRQ_STATUS 0x0ac
#define EMIF_IRQ_ENABLE_SET 0x0b4
#define EMIF_ECC_CTRL 0x110
#define EMIF_1B_ECC_ERR_CNT 0x130
#define EMIF_1B_ECC_ERR_THRSH 0x134
#define EMIF_1B_ECC_ERR_ADDR_LOG 0x13c
#define EMIF_2B_ECC_ERR_ADDR_LOG 0x140
/* Bit definitions for EMIF_SDRAM_CONFIG */
#define SDRAM_TYPE_SHIFT 29
#define SDRAM_TYPE_MASK GENMASK(31, 29)
#define SDRAM_TYPE_DDR3 (3 << SDRAM_TYPE_SHIFT)
#define SDRAM_TYPE_DDR2 (2 << SDRAM_TYPE_SHIFT)
#define SDRAM_NARROW_MODE_MASK GENMASK(15, 14)
#define SDRAM_K2_NARROW_MODE_SHIFT 12
#define SDRAM_K2_NARROW_MODE_MASK GENMASK(13, 12)
#define SDRAM_ROWSIZE_SHIFT 7
#define SDRAM_ROWSIZE_MASK GENMASK(9, 7)
#define SDRAM_IBANK_SHIFT 4
#define SDRAM_IBANK_MASK GENMASK(6, 4)
#define SDRAM_K2_IBANK_SHIFT 5
#define SDRAM_K2_IBANK_MASK GENMASK(6, 5)
#define SDRAM_K2_EBANK_SHIFT 3
#define SDRAM_K2_EBANK_MASK BIT(SDRAM_K2_EBANK_SHIFT)
#define SDRAM_PAGESIZE_SHIFT 0
#define SDRAM_PAGESIZE_MASK GENMASK(2, 0)
#define SDRAM_K2_PAGESIZE_SHIFT 0
#define SDRAM_K2_PAGESIZE_MASK GENMASK(1, 0)
#define EMIF_1B_ECC_ERR_THRSH_SHIFT 24
/* IRQ bit definitions */
#define EMIF_1B_ECC_ERR BIT(5)
#define EMIF_2B_ECC_ERR BIT(4)
#define EMIF_WR_ECC_ERR BIT(3)
#define EMIF_SYS_ERR BIT(0)
/* Bit 31 enables ECC and 28 enables RMW */
#define ECC_ENABLED (BIT(31) | BIT(28))
#define EDAC_MOD_NAME "ti-emif-edac"
enum {
EMIF_TYPE_DRA7,
EMIF_TYPE_K2
};
struct ti_edac {
void __iomem *reg;
};
static u32 ti_edac_readl(struct ti_edac *edac, u16 offset)
{
return readl_relaxed(edac->reg + offset);
}
static void ti_edac_writel(struct ti_edac *edac, u32 val, u16 offset)
{
writel_relaxed(val, edac->reg + offset);
}
static irqreturn_t ti_edac_isr(int irq, void *data)
{
struct mem_ctl_info *mci = data;
struct ti_edac *edac = mci->pvt_info;
u32 irq_status;
u32 err_addr;
int err_count;
irq_status = ti_edac_readl(edac, EMIF_IRQ_STATUS);
if (irq_status & EMIF_1B_ECC_ERR) {
err_addr = ti_edac_readl(edac, EMIF_1B_ECC_ERR_ADDR_LOG);
err_count = ti_edac_readl(edac, EMIF_1B_ECC_ERR_CNT);
ti_edac_writel(edac, err_count, EMIF_1B_ECC_ERR_CNT);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, -1, 0, 0, 0,
mci->ctl_name, "1B");
}
if (irq_status & EMIF_2B_ECC_ERR) {
err_addr = ti_edac_readl(edac, EMIF_2B_ECC_ERR_ADDR_LOG);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, -1, 0, 0, 0,
mci->ctl_name, "2B");
}
if (irq_status & EMIF_WR_ECC_ERR)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, -1, 0, 0, 0,
mci->ctl_name, "WR");
ti_edac_writel(edac, irq_status, EMIF_IRQ_STATUS);
return IRQ_HANDLED;
}
static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
{
struct dimm_info *dimm;
struct ti_edac *edac = mci->pvt_info;
int bits;
u32 val;
u32 memsize;
dimm = edac_get_dimm(mci, 0, 0, 0);
val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);
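/*
 * The DIMM size is computed as 2^bits bytes: column (page size), row and
 * bank address bits from SDRAM_CONFIG plus the bus-width bits are summed
 * as a log2 below.
 */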
if (type == EMIF_TYPE_DRA7) {
bits = ((val & SDRAM_PAGESIZE_MASK) >> SDRAM_PAGESIZE_SHIFT) + 8;
bits += ((val & SDRAM_ROWSIZE_MASK) >> SDRAM_ROWSIZE_SHIFT) + 9;
bits += (val & SDRAM_IBANK_MASK) >> SDRAM_IBANK_SHIFT;
if (val & SDRAM_NARROW_MODE_MASK) {
bits++;
dimm->dtype = DEV_X16;
} else {
bits += 2;
dimm->dtype = DEV_X32;
}
} else {
bits = 16;
bits += ((val & SDRAM_K2_PAGESIZE_MASK) >>
SDRAM_K2_PAGESIZE_SHIFT) + 8;
bits += (val & SDRAM_K2_IBANK_MASK) >> SDRAM_K2_IBANK_SHIFT;
bits += (val & SDRAM_K2_EBANK_MASK) >> SDRAM_K2_EBANK_SHIFT;
val = (val & SDRAM_K2_NARROW_MODE_MASK) >>
SDRAM_K2_NARROW_MODE_SHIFT;
switch (val) {
case 0:
bits += 3;
dimm->dtype = DEV_X64;
break;
case 1:
bits += 2;
dimm->dtype = DEV_X32;
break;
case 2:
bits++;
dimm->dtype = DEV_X16;
break;
}
}
memsize = 1 << bits;
dimm->nr_pages = memsize >> PAGE_SHIFT;
dimm->grain = 4;
if ((val & SDRAM_TYPE_MASK) == SDRAM_TYPE_DDR2)
dimm->mtype = MEM_DDR2;
else
dimm->mtype = MEM_DDR3;
val = ti_edac_readl(edac, EMIF_ECC_CTRL);
if (val & ECC_ENABLED)
dimm->edac_mode = EDAC_SECDED;
else
dimm->edac_mode = EDAC_NONE;
}
static const struct of_device_id ti_edac_of_match[] = {
{ .compatible = "ti,emif-keystone", .data = (void *)EMIF_TYPE_K2 },
{ .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
{},
};
MODULE_DEVICE_TABLE(of, ti_edac_of_match);
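/*
 * Derive a stable controller index by counting how many other matching EMIF
 * nodes sit at lower bus addresses than this one.
 */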
static int _emif_get_id(struct device_node *node)
{
struct device_node *np;
const __be32 *addrp;
u32 addr, my_addr;
int my_id = 0;
addrp = of_get_address(node, 0, NULL, NULL);
my_addr = (u32)of_translate_address(node, addrp);
for_each_matching_node(np, ti_edac_of_match) {
if (np == node)
continue;
addrp = of_get_address(np, 0, NULL, NULL);
addr = (u32)of_translate_address(np, addrp);
edac_printk(KERN_INFO, EDAC_MOD_NAME,
"addr=%x, my_addr=%x\n",
addr, my_addr);
if (addr < my_addr)
my_id++;
}
return my_id;
}
static int ti_edac_probe(struct platform_device *pdev)
{
int error_irq = 0, ret = -ENODEV;
struct device *dev = &pdev->dev;
struct resource *res;
void __iomem *reg;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[1];
const struct of_device_id *id;
struct ti_edac *edac;
int emif_id;
id = of_match_device(ti_edac_of_match, &pdev->dev);
if (!id)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
if (IS_ERR(reg))
return PTR_ERR(reg);
layers[0].type = EDAC_MC_LAYER_ALL_MEM;
layers[0].size = 1;
/* Allocate ID number for our EMIF controller */
emif_id = _emif_get_id(pdev->dev.of_node);
if (emif_id < 0)
return -EINVAL;
mci = edac_mc_alloc(emif_id, 1, layers, sizeof(*edac));
if (!mci)
return -ENOMEM;
mci->pdev = &pdev->dev;
edac = mci->pvt_info;
edac->reg = reg;
platform_set_drvdata(pdev, mci);
mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED | EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_NAME;
mci->ctl_name = id->compatible;
mci->dev_name = dev_name(&pdev->dev);
/* Setup memory layout */
ti_edac_setup_dimm(mci, (u32)(id->data));
/* add EMIF ECC error handler */
error_irq = platform_get_irq(pdev, 0);
if (error_irq < 0) {
ret = error_irq;
goto err;
}
ret = devm_request_irq(dev, error_irq, ti_edac_isr, 0,
"emif-edac-irq", mci);
if (ret) {
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"request_irq fail for EMIF EDAC irq\n");
goto err;
}
ret = edac_mc_add_mc(mci);
if (ret) {
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"Failed to register mci: %d.\n", ret);
goto err;
}
/* Generate an interrupt with each 1b error */
ti_edac_writel(edac, 1 << EMIF_1B_ECC_ERR_THRSH_SHIFT,
EMIF_1B_ECC_ERR_THRSH);
/* Enable interrupts */
ti_edac_writel(edac,
EMIF_1B_ECC_ERR | EMIF_2B_ECC_ERR | EMIF_WR_ECC_ERR,
EMIF_IRQ_ENABLE_SET);
return 0;
err:
edac_mc_free(mci);
return ret;
}
static int ti_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
static struct platform_driver ti_edac_driver = {
.probe = ti_edac_probe,
.remove = ti_edac_remove,
.driver = {
.name = EDAC_MOD_NAME,
.of_match_table = ti_edac_of_match,
},
};
module_platform_driver(ti_edac_driver);
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("EDAC Driver for Texas Instruments DDR3 MC");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/edac/ti_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Pengutronix, Jan Luebbe <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/edac.h>
#include <linux/of_platform.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>
#include "edac_mc.h"
#include "edac_device.h"
#include "edac_module.h"
/************************ EDAC MC (DDR RAM) ********************************/
#define SDRAM_NUM_CS 4
#define SDRAM_CONFIG_REG 0x0
#define SDRAM_CONFIG_ECC_MASK BIT(18)
#define SDRAM_CONFIG_REGISTERED_MASK BIT(17)
#define SDRAM_CONFIG_BUS_WIDTH_MASK BIT(15)
#define SDRAM_ADDR_CTRL_REG 0x10
#define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs)
#define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs) (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs))
#define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs) BIT(16+cs)
#define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs) (cs*4+2)
#define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs))
#define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs) (cs*4)
#define SDRAM_ADDR_CTRL_STRUCT_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs))
#define SDRAM_ERR_DATA_H_REG 0x40
#define SDRAM_ERR_DATA_L_REG 0x44
#define SDRAM_ERR_RECV_ECC_REG 0x48
#define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff
#define SDRAM_ERR_CALC_ECC_REG 0x4c
#define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8
#define SDRAM_ERR_CALC_ECC_ROW_MASK (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET)
#define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff
#define SDRAM_ERR_ADDR_REG 0x50
#define SDRAM_ERR_ADDR_BANK_OFFSET 23
#define SDRAM_ERR_ADDR_BANK_MASK (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET)
#define SDRAM_ERR_ADDR_COL_OFFSET 8
#define SDRAM_ERR_ADDR_COL_MASK (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET)
#define SDRAM_ERR_ADDR_CS_OFFSET 1
#define SDRAM_ERR_ADDR_CS_MASK (0x3 << SDRAM_ERR_ADDR_CS_OFFSET)
#define SDRAM_ERR_ADDR_TYPE_MASK BIT(0)
#define SDRAM_ERR_CTRL_REG 0x54
#define SDRAM_ERR_CTRL_THR_OFFSET 16
#define SDRAM_ERR_CTRL_THR_MASK (0xff << SDRAM_ERR_CTRL_THR_OFFSET)
#define SDRAM_ERR_CTRL_PROP_MASK BIT(9)
#define SDRAM_ERR_SBE_COUNT_REG 0x58
#define SDRAM_ERR_DBE_COUNT_REG 0x5c
#define SDRAM_ERR_CAUSE_ERR_REG 0xd0
#define SDRAM_ERR_CAUSE_MSG_REG 0xd8
#define SDRAM_ERR_CAUSE_DBE_MASK BIT(1)
#define SDRAM_ERR_CAUSE_SBE_MASK BIT(0)
#define SDRAM_RANK_CTRL_REG 0x1e0
#define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs)
struct axp_mc_drvdata {
void __iomem *base;
/* width in bytes */
unsigned int width;
/* bank interleaving */
bool cs_addr_sel[SDRAM_NUM_CS];
char msg[128];
};
/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
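/*
 * Roughly, the captured error address is reassembled as follows: the
 * column selects bytes within the bus width (so it is shifted by 3, 2 or
 * 1 for 64/32/16-bit buses), the bank sits directly above the column, and
 * the row forms the top bits; with bank interleaving enabled the bank and
 * the lowest row bits swap places.  For example, a 64-bit bus without
 * interleaving yields (row << 16) | (bank << 13) | (col << 3).
 */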
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
uint8_t cs, uint8_t bank, uint16_t row,
uint16_t col)
{
if (drvdata->width == 8) {
/* 64 bit */
if (drvdata->cs_addr_sel[cs])
/* bank interleaved */
return (((row & 0xfff8) << 16) |
((bank & 0x7) << 16) |
((row & 0x7) << 13) |
((col & 0x3ff) << 3));
else
return (((row & 0xffff) << 16) |
((bank & 0x7) << 13) |
((col & 0x3ff) << 3));
} else if (drvdata->width == 4) {
/* 32 bit */
if (drvdata->cs_addr_sel[cs])
/* bank interleaved */
return (((row & 0xfff0) << 15) |
((bank & 0x7) << 16) |
((row & 0xf) << 12) |
((col & 0x3ff) << 2));
else
return (((row & 0xffff) << 15) |
((bank & 0x7) << 12) |
((col & 0x3ff) << 2));
} else {
/* 16 bit */
if (drvdata->cs_addr_sel[cs])
/* bank interleaved */
return (((row & 0xffe0) << 14) |
((bank & 0x7) << 16) |
((row & 0x1f) << 11) |
((col & 0x3ff) << 1));
else
return (((row & 0xffff) << 14) |
((bank & 0x7) << 11) |
((col & 0x3ff) << 1));
}
}
static void axp_mc_check(struct mem_ctl_info *mci)
{
struct axp_mc_drvdata *drvdata = mci->pvt_info;
uint32_t data_h, data_l, recv_ecc, calc_ecc, addr;
uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg;
uint32_t row_val, col_val, bank_val, addr_val;
uint8_t syndrome_val, cs_val;
char *msg = drvdata->msg;
data_h = readl(drvdata->base + SDRAM_ERR_DATA_H_REG);
data_l = readl(drvdata->base + SDRAM_ERR_DATA_L_REG);
recv_ecc = readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG);
calc_ecc = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG);
addr = readl(drvdata->base + SDRAM_ERR_ADDR_REG);
cnt_sbe = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
cnt_dbe = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);
/* clear cause registers */
writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);
/* clear error counter registers */
if (cnt_sbe)
writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
if (cnt_dbe)
writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
if (!cnt_sbe && !cnt_dbe)
return;
if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
if (cnt_sbe)
cnt_sbe--;
else
dev_warn(mci->pdev, "inconsistent SBE count detected\n");
} else {
if (cnt_dbe)
cnt_dbe--;
else
dev_warn(mci->pdev, "inconsistent DBE count detected\n");
}
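/*
 * One error of the type captured in the address register is reported
 * separately below with full cs/bank/row/column details, so drop it from
 * the bulk count here; the remaining counts are only reported as
 * "details unavailable (multiple errors)".
 */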
/* report earlier errors */
if (cnt_sbe)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
cnt_sbe, /* error count */
0, 0, 0, /* pfn, offset, syndrome */
-1, -1, -1, /* top, mid, low layer */
mci->ctl_name,
"details unavailable (multiple errors)");
if (cnt_dbe)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
cnt_dbe, /* error count */
0, 0, 0, /* pfn, offset, syndrome */
-1, -1, -1, /* top, mid, low layer */
mci->ctl_name,
"details unavailable (multiple errors)");
/* report details for most recent error */
cs_val = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET;
bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET;
row_val = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET;
col_val = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET;
syndrome_val = (recv_ecc ^ calc_ecc) & 0xff;
addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val,
col_val);
msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */
msg += sprintf(msg, "bank=0x%x ", bank_val); /* 9 chars */
msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */
msg += sprintf(msg, "cs=%d", cs_val); /* 4 chars */
if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1, /* error count */
addr_val >> PAGE_SHIFT,
addr_val & ~PAGE_MASK,
syndrome_val,
cs_val, -1, -1, /* top, mid, low layer */
mci->ctl_name, drvdata->msg);
} else {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1, /* error count */
addr_val >> PAGE_SHIFT,
addr_val & ~PAGE_MASK,
syndrome_val,
cs_val, -1, -1, /* top, mid, low layer */
mci->ctl_name, drvdata->msg);
}
}
static void axp_mc_read_config(struct mem_ctl_info *mci)
{
struct axp_mc_drvdata *drvdata = mci->pvt_info;
uint32_t config, addr_ctrl, rank_ctrl;
unsigned int i, cs_struct, cs_size;
struct dimm_info *dimm;
config = readl(drvdata->base + SDRAM_CONFIG_REG);
if (config & SDRAM_CONFIG_BUS_WIDTH_MASK)
/* 64 bit */
drvdata->width = 8;
else
/* 32 bit */
drvdata->width = 4;
addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG);
rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG);
for (i = 0; i < SDRAM_NUM_CS; i++) {
dimm = mci->dimms[i];
if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i)))
continue;
drvdata->cs_addr_sel[i] =
!!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i));
cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i);
cs_size = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) |
((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i)));
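/*
 * cs_size is a 3-bit density code: bit 2 comes from the per-CS "size
 * high" bit (bit 20+cs) and bits 1:0 from the "size low" field at bits
 * (cs*4+3):(cs*4+2).  The switch below maps the code to the chip-select
 * size in PAGE_SIZE pages, e.g. the "1GBit" code gives 262144 pages,
 * i.e. 1 GiB per chip select assuming 4 KiB pages.
 */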
switch (cs_size) {
case 0: /* 2GBit */
dimm->nr_pages = 524288;
break;
case 1: /* 256MBit */
dimm->nr_pages = 65536;
break;
case 2: /* 512MBit */
dimm->nr_pages = 131072;
break;
case 3: /* 1GBit */
dimm->nr_pages = 262144;
break;
case 4: /* 4GBit */
dimm->nr_pages = 1048576;
break;
case 5: /* 8GBit */
dimm->nr_pages = 2097152;
break;
}
dimm->grain = 8;
dimm->dtype = cs_struct ? DEV_X16 : DEV_X8;
dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ?
MEM_RDDR3 : MEM_DDR3;
dimm->edac_mode = EDAC_SECDED;
}
}
static const struct of_device_id axp_mc_of_match[] = {
{.compatible = "marvell,armada-xp-sdram-controller",},
{},
};
MODULE_DEVICE_TABLE(of, axp_mc_of_match);
static int axp_mc_probe(struct platform_device *pdev)
{
struct axp_mc_drvdata *drvdata;
struct edac_mc_layer layers[1];
const struct of_device_id *id;
struct mem_ctl_info *mci;
void __iomem *base;
uint32_t config;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map regs\n");
return PTR_ERR(base);
}
config = readl(base + SDRAM_CONFIG_REG);
if (!(config & SDRAM_CONFIG_ECC_MASK)) {
dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n");
return -EINVAL;
}
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = SDRAM_NUM_CS;
layers[0].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata));
if (!mci)
return -ENOMEM;
drvdata = mci->pvt_info;
drvdata->base = base;
mci->pdev = &pdev->dev;
platform_set_drvdata(pdev, mci);
id = of_match_device(axp_mc_of_match, &pdev->dev);
mci->edac_check = axp_mc_check;
mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = pdev->dev.driver->name;
mci->ctl_name = id ? id->compatible : "unknown";
mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_NONE;
axp_mc_read_config(mci);
/* These SoCs have a reduced width bus */
if (of_machine_is_compatible("marvell,armada380") ||
of_machine_is_compatible("marvell,armadaxp-98dx3236"))
drvdata->width /= 2;
/* configure SBE threshold */
/* it seems that SBEs are not captured otherwise */
writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG);
/* clear cause registers */
writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);
/* clear counter registers */
writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
if (edac_mc_add_mc(mci)) {
edac_mc_free(mci);
return -EINVAL;
}
edac_op_state = EDAC_OPSTATE_POLL;
return 0;
}
static int axp_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver axp_mc_driver = {
.probe = axp_mc_probe,
.remove = axp_mc_remove,
.driver = {
.name = "armada_xp_mc_edac",
.of_match_table = of_match_ptr(axp_mc_of_match),
},
};
/************************ EDAC Device (L2 Cache) ***************************/
struct aurora_l2_drvdata {
void __iomem *base;
char msg[128];
/* error injection via debugfs */
uint32_t inject_addr;
uint32_t inject_mask;
uint8_t inject_ctl;
struct dentry *debugfs;
};
#ifdef CONFIG_EDAC_DEBUG
static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata)
{
drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK;
drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK;
writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG);
writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
}
#endif
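/*
 * When CONFIG_EDAC_DEBUG is set, errors can be injected from user space
 * via the debugfs files created in aurora_l2_probe() (inject_addr,
 * inject_mask, inject_ctl).  The injection registers are rewritten on
 * every poll cycle, so a single write keeps injecting until inject_ctl
 * is cleared again.
 */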
static void aurora_l2_check(struct edac_device_ctl_info *dci)
{
struct aurora_l2_drvdata *drvdata = dci->pvt_info;
uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap;
unsigned int cnt_ce, cnt_ue;
char *msg = drvdata->msg;
size_t size = sizeof(drvdata->msg);
size_t len = 0;
cnt = readl(drvdata->base + AURORA_ERR_CNT_REG);
attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG);
addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG);
way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG);
cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET;
cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET;
/* clear error counter registers */
if (cnt_ce || cnt_ue)
writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID))
goto clear_remaining;
src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF;
if (src <= 3)
len += scnprintf(msg+len, size-len, "src=CPU%d ", src);
else
len += scnprintf(msg+len, size-len, "src=IO ");
txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF;
switch (txn) {
case 0:
len += scnprintf(msg+len, size-len, "txn=Data-Read ");
break;
case 1:
len += scnprintf(msg+len, size-len, "txn=Isn-Read ");
break;
case 2:
len += scnprintf(msg+len, size-len, "txn=Clean-Flush ");
break;
case 3:
len += scnprintf(msg+len, size-len, "txn=Eviction ");
break;
case 4:
len += scnprintf(msg+len, size-len,
"txn=Read-Modify-Write ");
break;
}
err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF;
switch (err) {
case 0:
len += scnprintf(msg+len, size-len, "err=CorrECC ");
break;
case 1:
len += scnprintf(msg+len, size-len, "err=UnCorrECC ");
break;
case 2:
len += scnprintf(msg+len, size-len, "err=TagParity ");
break;
}
len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);
/* clear error capture registers */
writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);
if (err) {
/* UnCorrECC or TagParity */
if (cnt_ue)
cnt_ue--;
edac_device_handle_ue(dci, 0, 0, drvdata->msg);
} else {
if (cnt_ce)
cnt_ce--;
edac_device_handle_ce(dci, 0, 0, drvdata->msg);
}
clear_remaining:
/* report remaining errors */
while (cnt_ue--)
edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)");
while (cnt_ce--)
edac_device_handle_ce(dci, 0, 0, "details unavailable (multiple errors)");
}
static void aurora_l2_poll(struct edac_device_ctl_info *dci)
{
#ifdef CONFIG_EDAC_DEBUG
struct aurora_l2_drvdata *drvdata = dci->pvt_info;
#endif
aurora_l2_check(dci);
#ifdef CONFIG_EDAC_DEBUG
aurora_l2_inject(drvdata);
#endif
}
static const struct of_device_id aurora_l2_of_match[] = {
{.compatible = "marvell,aurora-system-cache",},
{},
};
MODULE_DEVICE_TABLE(of, aurora_l2_of_match);
static int aurora_l2_probe(struct platform_device *pdev)
{
struct aurora_l2_drvdata *drvdata;
struct edac_device_ctl_info *dci;
const struct of_device_id *id;
uint32_t l2x0_aux_ctrl;
void __iomem *base;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map regs\n");
return PTR_ERR(base);
}
l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
dev_warn(&pdev->dev, "tag parity is not enabled\n");
if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
dev_warn(&pdev->dev, "data ECC is not enabled\n");
dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
"cpu", 1, "L", 1, 2, NULL, 0, 0);
if (!dci)
return -ENOMEM;
drvdata = dci->pvt_info;
drvdata->base = base;
dci->dev = &pdev->dev;
platform_set_drvdata(pdev, dci);
id = of_match_device(aurora_l2_of_match, &pdev->dev);
dci->edac_check = aurora_l2_poll;
dci->mod_name = pdev->dev.driver->name;
dci->ctl_name = id ? id->compatible : "unknown";
dci->dev_name = dev_name(&pdev->dev);
/* clear registers */
writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);
if (edac_device_add_device(dci)) {
edac_device_free_ctl_info(dci);
return -EINVAL;
}
#ifdef CONFIG_EDAC_DEBUG
drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev));
if (drvdata->debugfs) {
edac_debugfs_create_x32("inject_addr", 0644,
drvdata->debugfs,
&drvdata->inject_addr);
edac_debugfs_create_x32("inject_mask", 0644,
drvdata->debugfs,
&drvdata->inject_mask);
edac_debugfs_create_x8("inject_ctl", 0644,
drvdata->debugfs, &drvdata->inject_ctl);
}
#endif
return 0;
}
static int aurora_l2_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
struct aurora_l2_drvdata *drvdata = dci->pvt_info;
edac_debugfs_remove_recursive(drvdata->debugfs);
#endif
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver aurora_l2_driver = {
.probe = aurora_l2_probe,
.remove = aurora_l2_remove,
.driver = {
.name = "aurora_l2_edac",
.of_match_table = of_match_ptr(aurora_l2_of_match),
},
};
/************************ Driver registration ******************************/
static struct platform_driver * const drivers[] = {
&axp_mc_driver,
&aurora_l2_driver,
};
static int __init armada_xp_edac_init(void)
{
int res;
if (ghes_get_devices())
return -EBUSY;
/* only polling is supported */
edac_op_state = EDAC_OPSTATE_POLL;
res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (res)
pr_warn("Armada XP EDAC drivers fail to register\n");
return 0;
}
module_init(armada_xp_edac_init);
static void __exit armada_xp_edac_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(armada_xp_edac_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pengutronix");
MODULE_DESCRIPTION("EDAC Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");
| linux-master | drivers/edac/armada_xp_edac.c |
/*
* Cell MIC driver for ECC counting
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <[email protected]>
*
* This file may be distributed under the terms of the
* GNU General Public License.
*/
#undef DEBUG
#include <linux/edac.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>
#include "edac_module.h"
struct cell_edac_priv
{
struct cbe_mic_tm_regs __iomem *regs;
int node;
int chanmask;
#ifdef DEBUG
u64 prev_fir;
#endif
};
static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
{
struct cell_edac_priv *priv = mci->pvt_info;
struct csrow_info *csrow = mci->csrows[0];
unsigned long address, pfn, offset, syndrome;
dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
priv->node, chan, ar);
/* Address decoding is likely a bit bogus, to dbl check */
address = (ar & 0xffffffffe0000000ul) >> 29;
if (priv->chanmask == 0x3)
address = (address << 1) | chan;
pfn = address >> PAGE_SHIFT;
offset = address & ~PAGE_MASK;
syndrome = (ar & 0x000000001fe00000ul) >> 21;
/* TODO: Decoding of the error address */
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
csrow->first_page + pfn, offset, syndrome,
0, chan, -1, "", "");
}
static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
{
struct cell_edac_priv *priv = mci->pvt_info;
struct csrow_info *csrow = mci->csrows[0];
unsigned long address, pfn, offset;
dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
priv->node, chan, ar);
/* Address decoding is likely a bit bogus, to dbl check */
address = (ar & 0xffffffffe0000000ul) >> 29;
if (priv->chanmask == 0x3)
address = (address << 1) | chan;
pfn = address >> PAGE_SHIFT;
offset = address & ~PAGE_MASK;
/* TODO: Decoding of the error address */
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
csrow->first_page + pfn, offset, 0,
0, chan, -1, "", "");
}
static void cell_edac_check(struct mem_ctl_info *mci)
{
struct cell_edac_priv *priv = mci->pvt_info;
u64 fir, addreg, clear = 0;
fir = in_be64(&priv->regs->mic_fir);
#ifdef DEBUG
if (fir != priv->prev_fir) {
dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
priv->prev_fir = fir;
}
#endif
if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
cell_edac_count_ce(mci, 0, addreg);
}
if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
cell_edac_count_ce(mci, 1, addreg);
}
if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
cell_edac_count_ue(mci, 0, addreg);
}
if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
cell_edac_count_ue(mci, 1, addreg);
}
/* The procedure for clearing FIR bits is a bit ... weird */
if (clear) {
fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
fir |= CBE_MIC_FIR_ECC_RESET_MASK;
fir &= ~clear;
out_be64(&priv->regs->mic_fir, fir);
(void)in_be64(&priv->regs->mic_fir);
mb(); /* sync up */
#ifdef DEBUG
fir = in_be64(&priv->regs->mic_fir);
dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
#endif
}
}
static void cell_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = mci->csrows[0];
struct dimm_info *dimm;
struct cell_edac_priv *priv = mci->pvt_info;
struct device_node *np;
int j;
u32 nr_pages;
for_each_node_by_name(np, "memory") {
struct resource r;
/* We "know" that the Cell firmware only creates one entry
* in the "memory" nodes. If that changes, this code will
* need to be adapted.
*/
if (of_address_to_resource(np, 0, &r))
continue;
if (of_node_to_nid(np) != priv->node)
continue;
csrow->first_page = r.start >> PAGE_SHIFT;
nr_pages = resource_size(&r) >> PAGE_SHIFT;
csrow->last_page = csrow->first_page + nr_pages - 1;
for (j = 0; j < csrow->nr_channels; j++) {
dimm = csrow->channels[j]->dimm;
dimm->mtype = MEM_XDR;
dimm->edac_mode = EDAC_SECDED;
dimm->nr_pages = nr_pages / csrow->nr_channels;
}
dev_dbg(mci->pdev,
"Initialized on node %d, chanmask=0x%x,"
" first_page=0x%lx, nr_pages=0x%x\n",
priv->node, priv->chanmask,
csrow->first_page, nr_pages);
break;
}
of_node_put(np);
}
static int cell_edac_probe(struct platform_device *pdev)
{
struct cbe_mic_tm_regs __iomem *regs;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct cell_edac_priv *priv;
u64 reg;
int rc, chanmask, num_chans;
regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
if (regs == NULL)
return -ENODEV;
edac_op_state = EDAC_OPSTATE_POLL;
/* Get channel population */
reg = in_be64(®s->mic_mnt_cfg);
dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
chanmask = 0;
if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
chanmask |= 0x1;
if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
chanmask |= 0x2;
if (chanmask == 0) {
dev_warn(&pdev->dev,
"Yuck ! No channel populated ? Aborting !\n");
return -ENODEV;
}
dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
in_be64(®s->mic_fir));
/* Allocate & init EDAC MC data structure */
num_chans = chanmask == 3 ? 2 : 1;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 1;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = num_chans;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
sizeof(struct cell_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
priv->regs = regs;
priv->node = pdev->id;
priv->chanmask = chanmask;
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_XDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->mod_name = "cell_edac";
mci->ctl_name = "MIC";
mci->dev_name = dev_name(&pdev->dev);
mci->edac_check = cell_edac_check;
cell_edac_init_csrows(mci);
/* Register with EDAC core */
rc = edac_mc_add_mc(mci);
if (rc) {
dev_err(&pdev->dev, "failed to register with EDAC core\n");
edac_mc_free(mci);
return rc;
}
return 0;
}
static int cell_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
if (mci)
edac_mc_free(mci);
return 0;
}
static struct platform_driver cell_edac_driver = {
.driver = {
.name = "cbe-mic",
},
.probe = cell_edac_probe,
.remove = cell_edac_remove,
};
static int __init cell_edac_init(void)
{
/* Sanity check registers data structure */
BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
mic_df_ecc_address_0) != 0xf8);
BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
mic_df_ecc_address_1) != 0x1b8);
BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
mic_df_config) != 0x218);
BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
mic_fir) != 0x230);
BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
mic_mnt_cfg) != 0x210);
BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
mic_exc) != 0x208);
return platform_driver_register(&cell_edac_driver);
}
static void __exit cell_edac_exit(void)
{
platform_driver_unregister(&cell_edac_driver);
}
module_init(cell_edac_init);
module_exit(cell_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin Herrenschmidt <[email protected]>");
MODULE_DESCRIPTION("ECC counting for Cell MIC");
| linux-master | drivers/edac/cell_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cpu.h>
#include "mce_amd.h"
static struct amd_decoder_ops fam_ops;
static u8 xec_mask = 0xf;
static void (*decode_dram_ecc)(int node_id, struct mce *m);
void amd_register_ecc_decoder(void (*f)(int, struct mce *))
{
decode_dram_ecc = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
{
if (decode_dram_ecc) {
WARN_ON(decode_dram_ecc != f);
decode_dram_ecc = NULL;
}
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
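/*
 * decode_dram_ecc is a hook that a platform EDAC driver (typically
 * amd64_edac) installs so DRAM ECC machine-check records can be mapped
 * back to a system address and DIMM.  It is invoked below for MC4 DRAM
 * ECC errors (extended error codes 0x0/0x8) on legacy families and for
 * UMC bank errors with extended error code 0 on Scalable MCA systems.
 */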
/*
* string representation for the different MCA reported error types, see F3x48
* or MSR0000_0411.
*/
/* transaction type */
static const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
/* cache level */
static const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
/* memory transaction type */
static const char * const rrrr_msgs[] = {
"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
/* participating processor */
const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);
/* request timeout */
static const char * const to_msgs[] = { "no timeout", "timed out" };
/* memory or i/o */
static const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
/* internal error type */
static const char * const uu_msgs[] = { "RESV", "RESV", "HWA", "RESV" };
static const char * const f15h_mc1_mce_desc[] = {
"UC during a demand linefill from L2",
"Parity error during data load from IC",
"Parity error for IC valid bit",
"Main tag parity error",
"Parity error in prediction queue",
"PFB data/address parity error",
"Parity error in the branch status reg",
"PFB promotion address error",
"Tag error during probe/victimization",
"Parity error for IC probe tag valid bit",
"PFB non-cacheable bit parity error",
"PFB valid bit parity error", /* xec = 0xd */
"Microcode Patch Buffer", /* xec = 010 */
"uop queue",
"insn buffer",
"predecode buffer",
"fetch address FIFO",
"dispatch uop queue"
};
static const char * const f15h_mc2_mce_desc[] = {
"Fill ECC error on data fills", /* xec = 0x4 */
"Fill parity error on insn fills",
"Prefetcher request FIFO parity error",
"PRQ address parity error",
"PRQ data parity error",
"WCC Tag ECC error",
"WCC Data ECC error",
"WCB Data parity error",
"VB Data ECC or parity error",
"L2 Tag ECC error", /* xec = 0x10 */
"Hard L2 Tag ECC error",
"Multiple hits on L2 tag",
"XAB parity error",
"PRB address parity error"
};
static const char * const mc4_mce_desc[] = {
"DRAM ECC error detected on the NB",
"CRC error detected on HT link",
"Link-defined sync error packets detected on HT link",
"HT Master abort",
"HT Target abort",
"Invalid GART PTE entry during GART table walk",
"Unsupported atomic RMW received from an IO link",
"Watchdog timeout due to lack of progress",
"DRAM ECC error detected on the NB",
"SVM DMA Exclusion Vector error",
"HT data error detected on link",
"Protocol error (link, L3, probe filter)",
"NB internal arrays parity error",
"DRAM addr/ctl signals parity error",
"IO link transmission error",
"L3 data cache ECC error", /* xec = 0x1c */
"L3 cache tag error",
"L3 LRU parity bits error",
"ECC Error in the Probe Filter directory"
};
static const char * const mc5_mce_desc[] = {
"CPU Watchdog timer expire",
"Wakeup array dest tag",
"AG payload array",
"EX payload array",
"IDRF array",
"Retire dispatch queue",
"Mapper checkpoint array",
"Physical register file EX0 port",
"Physical register file EX1 port",
"Physical register file AG0 port",
"Physical register file AG1 port",
"Flag register file",
"DE error occurred",
"Retire status queue"
};
static const char * const mc6_mce_desc[] = {
"Hardware Assertion",
"Free List",
"Physical Register File",
"Retire Queue",
"Scheduler table",
"Status Register File",
};
/* Scalable MCA error strings */
static const char * const smca_ls_mce_desc[] = {
"Load queue parity error",
"Store queue parity error",
"Miss address buffer payload parity error",
"Level 1 TLB parity error",
"DC Tag error type 5",
"DC Tag error type 6",
"DC Tag error type 1",
"Internal error type 1",
"Internal error type 2",
"System Read Data Error Thread 0",
"System Read Data Error Thread 1",
"DC Tag error type 2",
"DC Data error type 1 and poison consumption",
"DC Data error type 2",
"DC Data error type 3",
"DC Tag error type 4",
"Level 2 TLB parity error",
"PDC parity error",
"DC Tag error type 3",
"DC Tag error type 5",
"L2 Fill Data error",
};
static const char * const smca_ls2_mce_desc[] = {
"An ECC error was detected on a data cache read by a probe or victimization",
"An ECC error or L2 poison was detected on a data cache read by a load",
"An ECC error was detected on a data cache read-modify-write by a store",
"An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
"An ECC error or poison bit mismatch was detected on a tag read by a load",
"An ECC error or poison bit mismatch was detected on a tag read by a store",
"An ECC error was detected on an EMEM read by a load",
"An ECC error was detected on an EMEM read-modify-write by a store",
"A parity error was detected in an L1 TLB entry by any access",
"A parity error was detected in an L2 TLB entry by any access",
"A parity error was detected in a PWC entry by any access",
"A parity error was detected in an STQ entry by any access",
"A parity error was detected in an LDQ entry by any access",
"A parity error was detected in a MAB entry by any access",
"A parity error was detected in an SCB entry state field by any access",
"A parity error was detected in an SCB entry address field by any access",
"A parity error was detected in an SCB entry data field by any access",
"A parity error was detected in a WCB entry by any access",
"A poisoned line was detected in an SCB entry by any access",
"A SystemReadDataError error was reported on read data returned from L2 for a load",
"A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
"A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
"A hardware assertion error was reported",
"A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
};
static const char * const smca_if_mce_desc[] = {
"Op Cache Microtag Probe Port Parity Error",
"IC Microtag or Full Tag Multi-hit Error",
"IC Full Tag Parity Error",
"IC Data Array Parity Error",
"Decoupling Queue PhysAddr Parity Error",
"L0 ITLB Parity Error",
"L1 ITLB Parity Error",
"L2 ITLB Parity Error",
"BPQ Thread 0 Snoop Parity Error",
"BPQ Thread 1 Snoop Parity Error",
"L1 BTB Multi-Match Error",
"L2 BTB Multi-Match Error",
"L2 Cache Response Poison Error",
"System Read Data Error",
"Hardware Assertion Error",
"L1-TLB Multi-Hit",
"L2-TLB Multi-Hit",
"BSR Parity Error",
"CT MCE",
};
static const char * const smca_l2_mce_desc[] = {
"L2M Tag Multiple-Way-Hit error",
"L2M Tag or State Array ECC Error",
"L2M Data Array ECC Error",
"Hardware Assert Error",
};
static const char * const smca_de_mce_desc[] = {
"Micro-op cache tag parity error",
"Micro-op cache data parity error",
"Instruction buffer parity error",
"Micro-op queue parity error",
"Instruction dispatch queue parity error",
"Fetch address FIFO parity error",
"Patch RAM data parity error",
"Patch RAM sequencer parity error",
"Micro-op buffer parity error",
"Hardware Assertion MCA Error",
};
static const char * const smca_ex_mce_desc[] = {
"Watchdog Timeout error",
"Physical register file parity error",
"Flag register file parity error",
"Immediate displacement register file parity error",
"Address generator payload parity error",
"EX payload parity error",
"Checkpoint queue parity error",
"Retire dispatch queue parity error",
"Retire status queue parity error",
"Scheduling queue parity error",
"Branch buffer queue parity error",
"Hardware Assertion error",
"Spec Map parity error",
"Retire Map parity error",
};
static const char * const smca_fp_mce_desc[] = {
"Physical register file (PRF) parity error",
"Freelist (FL) parity error",
"Schedule queue parity error",
"NSQ parity error",
"Retire queue (RQ) parity error",
"Status register file (SRF) parity error",
"Hardware assertion",
};
static const char * const smca_l3_mce_desc[] = {
"Shadow Tag Macro ECC Error",
"Shadow Tag Macro Multi-way-hit Error",
"L3M Tag ECC Error",
"L3M Tag Multi-way-hit Error",
"L3M Data ECC Error",
"SDP Parity Error or SystemReadDataError from XI",
"L3 Victim Queue Parity Error",
"L3 Hardware Assertion",
};
static const char * const smca_cs_mce_desc[] = {
"Illegal Request",
"Address Violation",
"Security Violation",
"Illegal Response",
"Unexpected Response",
"Request or Probe Parity Error",
"Read Response Parity Error",
"Atomic Request Parity Error",
"Probe Filter ECC Error",
};
static const char * const smca_cs2_mce_desc[] = {
"Illegal Request",
"Address Violation",
"Security Violation",
"Illegal Response",
"Unexpected Response",
"Request or Probe Parity Error",
"Read Response Parity Error",
"Atomic Request Parity Error",
"SDP read response had no match in the CS queue",
"Probe Filter Protocol Error",
"Probe Filter ECC Error",
"SDP read response had an unexpected RETRY error",
"Counter overflow error",
"Counter underflow error",
};
static const char * const smca_pie_mce_desc[] = {
"Hardware Assert",
"Register security violation",
"Link Error",
"Poison data consumption",
"A deferred error was detected in the DF"
};
static const char * const smca_umc_mce_desc[] = {
"DRAM ECC error",
"Data poison error",
"SDP parity error",
"Advanced peripheral bus error",
"Address/Command parity error",
"Write data CRC error",
"DCQ SRAM ECC error",
"AES SRAM ECC error",
};
static const char * const smca_umc2_mce_desc[] = {
"DRAM ECC error",
"Data poison error",
"SDP parity error",
"Reserved",
"Address/Command parity error",
"Write data parity error",
"DCQ SRAM ECC error",
"Reserved",
"Read data parity error",
"Rdb SRAM ECC error",
"RdRsp SRAM ECC error",
"LM32 MP errors",
};
static const char * const smca_pb_mce_desc[] = {
"An ECC error in the Parameter Block RAM array",
};
static const char * const smca_psp_mce_desc[] = {
"An ECC or parity error in a PSP RAM instance",
};
static const char * const smca_psp2_mce_desc[] = {
"High SRAM ECC or parity error",
"Low SRAM ECC or parity error",
"Instruction Cache Bank 0 ECC or parity error",
"Instruction Cache Bank 1 ECC or parity error",
"Instruction Tag Ram 0 parity error",
"Instruction Tag Ram 1 parity error",
"Data Cache Bank 0 ECC or parity error",
"Data Cache Bank 1 ECC or parity error",
"Data Cache Bank 2 ECC or parity error",
"Data Cache Bank 3 ECC or parity error",
"Data Tag Bank 0 parity error",
"Data Tag Bank 1 parity error",
"Data Tag Bank 2 parity error",
"Data Tag Bank 3 parity error",
"Dirty Data Ram parity error",
"TLB Bank 0 parity error",
"TLB Bank 1 parity error",
"System Hub Read Buffer ECC or parity error",
};
static const char * const smca_smu_mce_desc[] = {
"An ECC or parity error in an SMU RAM instance",
};
static const char * const smca_smu2_mce_desc[] = {
"High SRAM ECC or parity error",
"Low SRAM ECC or parity error",
"Data Cache Bank A ECC or parity error",
"Data Cache Bank B ECC or parity error",
"Data Tag Cache Bank A ECC or parity error",
"Data Tag Cache Bank B ECC or parity error",
"Instruction Cache Bank A ECC or parity error",
"Instruction Cache Bank B ECC or parity error",
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
"System Hub Read Buffer ECC or parity error",
"PHY RAM ECC error",
};
static const char * const smca_mp5_mce_desc[] = {
"High SRAM ECC or parity error",
"Low SRAM ECC or parity error",
"Data Cache Bank A ECC or parity error",
"Data Cache Bank B ECC or parity error",
"Data Tag Cache Bank A ECC or parity error",
"Data Tag Cache Bank B ECC or parity error",
"Instruction Cache Bank A ECC or parity error",
"Instruction Cache Bank B ECC or parity error",
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
};
static const char * const smca_mpdma_mce_desc[] = {
"Main SRAM [31:0] bank ECC or parity error",
"Main SRAM [63:32] bank ECC or parity error",
"Main SRAM [95:64] bank ECC or parity error",
"Main SRAM [127:96] bank ECC or parity error",
"Data Cache Bank A ECC or parity error",
"Data Cache Bank B ECC or parity error",
"Data Tag Cache Bank A ECC or parity error",
"Data Tag Cache Bank B ECC or parity error",
"Instruction Cache Bank A ECC or parity error",
"Instruction Cache Bank B ECC or parity error",
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
"Data Cache Bank A ECC or parity error",
"Data Cache Bank B ECC or parity error",
"Data Tag Cache Bank A ECC or parity error",
"Data Tag Cache Bank B ECC or parity error",
"Instruction Cache Bank A ECC or parity error",
"Instruction Cache Bank B ECC or parity error",
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
"Data Cache Bank A ECC or parity error",
"Data Cache Bank B ECC or parity error",
"Data Tag Cache Bank A ECC or parity error",
"Data Tag Cache Bank B ECC or parity error",
"Instruction Cache Bank A ECC or parity error",
"Instruction Cache Bank B ECC or parity error",
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
"System Hub Read Buffer ECC or parity error",
"MPDMA TVF DVSEC Memory ECC or parity error",
"MPDMA TVF MMIO Mailbox0 ECC or parity error",
"MPDMA TVF MMIO Mailbox1 ECC or parity error",
"MPDMA TVF Doorbell Memory ECC or parity error",
"MPDMA TVF SDP Slave Memory 0 ECC or parity error",
"MPDMA TVF SDP Slave Memory 1 ECC or parity error",
"MPDMA TVF SDP Slave Memory 2 ECC or parity error",
"MPDMA TVF SDP Master Memory 0 ECC or parity error",
"MPDMA TVF SDP Master Memory 1 ECC or parity error",
"MPDMA TVF SDP Master Memory 2 ECC or parity error",
"MPDMA TVF SDP Master Memory 3 ECC or parity error",
"MPDMA TVF SDP Master Memory 4 ECC or parity error",
"MPDMA TVF SDP Master Memory 5 ECC or parity error",
"MPDMA TVF SDP Master Memory 6 ECC or parity error",
"MPDMA PTE Command FIFO ECC or parity error",
"MPDMA PTE Hub Data FIFO ECC or parity error",
"MPDMA PTE Internal Data FIFO ECC or parity error",
"MPDMA PTE Command Memory DMA ECC or parity error",
"MPDMA PTE Command Memory Internal ECC or parity error",
"MPDMA PTE DMA Completion FIFO ECC or parity error",
"MPDMA PTE Tablewalk Completion FIFO ECC or parity error",
"MPDMA PTE Descriptor Completion FIFO ECC or parity error",
"MPDMA PTE ReadOnly Completion FIFO ECC or parity error",
"MPDMA PTE DirectWrite Completion FIFO ECC or parity error",
"SDP Watchdog Timer expired",
};
static const char * const smca_nbio_mce_desc[] = {
"ECC or Parity error",
"PCIE error",
"SDP ErrEvent error",
"SDP Egress Poison Error",
"IOHC Internal Poison Error",
};
static const char * const smca_pcie_mce_desc[] = {
"CCIX PER Message logging",
"CCIX Read Response with Status: Non-Data Error",
"CCIX Write Response with Status: Non-Data Error",
"CCIX Read Response with Status: Data Error",
"CCIX Non-okay write response with data error",
};
static const char * const smca_pcie2_mce_desc[] = {
"SDP Parity Error logging",
};
static const char * const smca_xgmipcs_mce_desc[] = {
"Data Loss Error",
"Training Error",
"Flow Control Acknowledge Error",
"Rx Fifo Underflow Error",
"Rx Fifo Overflow Error",
"CRC Error",
"BER Exceeded Error",
"Tx Vcid Data Error",
"Replay Buffer Parity Error",
"Data Parity Error",
"Replay Fifo Overflow Error",
"Replay Fifo Underflow Error",
"Elastic Fifo Overflow Error",
"Deskew Error",
"Flow Control CRC Error",
"Data Startup Limit Error",
"FC Init Timeout Error",
"Recovery Timeout Error",
"Ready Serial Timeout Error",
"Ready Serial Attempt Error",
"Recovery Attempt Error",
"Recovery Relock Attempt Error",
"Replay Attempt Error",
"Sync Header Error",
"Tx Replay Timeout Error",
"Rx Replay Timeout Error",
"LinkSub Tx Timeout Error",
"LinkSub Rx Timeout Error",
"Rx CMD Packet Error",
};
static const char * const smca_xgmiphy_mce_desc[] = {
"RAM ECC Error",
"ARC instruction buffer parity error",
"ARC data buffer parity error",
"PHY APB error",
};
static const char * const smca_nbif_mce_desc[] = {
"Timeout error from GMI",
"SRAM ECC error",
"NTB Error Event",
"SDP Parity error",
};
static const char * const smca_sata_mce_desc[] = {
"Parity error for port 0",
"Parity error for port 1",
"Parity error for port 2",
"Parity error for port 3",
"Parity error for port 4",
"Parity error for port 5",
"Parity error for port 6",
"Parity error for port 7",
};
static const char * const smca_usb_mce_desc[] = {
"Parity error or ECC error for S0 RAM0",
"Parity error or ECC error for S0 RAM1",
"Parity error or ECC error for S0 RAM2",
"Parity error for PHY RAM0",
"Parity error for PHY RAM1",
"AXI Slave Response error",
};
static const char * const smca_gmipcs_mce_desc[] = {
"Data Loss Error",
"Training Error",
"Replay Parity Error",
"Rx Fifo Underflow Error",
"Rx Fifo Overflow Error",
"CRC Error",
"BER Exceeded Error",
"Tx Fifo Underflow Error",
"Replay Buffer Parity Error",
"Tx Overflow Error",
"Replay Fifo Overflow Error",
"Replay Fifo Underflow Error",
"Elastic Fifo Overflow Error",
"Deskew Error",
"Offline Error",
"Data Startup Limit Error",
"FC Init Timeout Error",
"Recovery Timeout Error",
"Ready Serial Timeout Error",
"Ready Serial Attempt Error",
"Recovery Attempt Error",
"Recovery Relock Attempt Error",
"Deskew Abort Error",
"Rx Buffer Error",
"Rx LFDS Fifo Overflow Error",
"Rx LFDS Fifo Underflow Error",
"LinkSub Tx Timeout Error",
"LinkSub Rx Timeout Error",
"Rx CMD Packet Error",
"LFDS Training Timeout Error",
"LFDS FC Init Timeout Error",
"Data Loss Error",
};
struct smca_mce_desc {
const char * const *descs;
unsigned int num_descs;
};
static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) },
[SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) },
[SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) },
[SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) },
[SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) },
[SMCA_EX] = { smca_ex_mce_desc, ARRAY_SIZE(smca_ex_mce_desc) },
[SMCA_FP] = { smca_fp_mce_desc, ARRAY_SIZE(smca_fp_mce_desc) },
[SMCA_L3_CACHE] = { smca_l3_mce_desc, ARRAY_SIZE(smca_l3_mce_desc) },
[SMCA_CS] = { smca_cs_mce_desc, ARRAY_SIZE(smca_cs_mce_desc) },
[SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) },
[SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) },
[SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) },
[SMCA_UMC_V2] = { smca_umc2_mce_desc, ARRAY_SIZE(smca_umc2_mce_desc) },
[SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) },
[SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) },
[SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) },
[SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) },
[SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) },
[SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) },
[SMCA_MPDMA] = { smca_mpdma_mce_desc, ARRAY_SIZE(smca_mpdma_mce_desc) },
[SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) },
[SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) },
[SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) },
[SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) },
/* NBIF and SHUB have the same error descriptions, for now. */
[SMCA_NBIF] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
[SMCA_SHUB] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
[SMCA_SATA] = { smca_sata_mce_desc, ARRAY_SIZE(smca_sata_mce_desc) },
[SMCA_USB] = { smca_usb_mce_desc, ARRAY_SIZE(smca_usb_mce_desc) },
[SMCA_GMI_PCS] = { smca_gmipcs_mce_desc, ARRAY_SIZE(smca_gmipcs_mce_desc) },
/* All the PHY bank types have the same error descriptions, for now. */
[SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
[SMCA_WAFL_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
[SMCA_GMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
};
static bool f12h_mc0_mce(u16 ec, u8 xec)
{
bool ret = false;
if (MEM_ERROR(ec)) {
u8 ll = LL(ec);
ret = true;
if (ll == LL_L2)
pr_cont("during L1 linefill from L2.\n");
else if (ll == LL_L1)
pr_cont("Data/Tag %s error.\n", R4_MSG(ec));
else
ret = false;
}
return ret;
}
static bool f10h_mc0_mce(u16 ec, u8 xec)
{
if (R4(ec) == R4_GEN && LL(ec) == LL_L1) {
pr_cont("during data scrub.\n");
return true;
}
return f12h_mc0_mce(ec, xec);
}
static bool k8_mc0_mce(u16 ec, u8 xec)
{
if (BUS_ERROR(ec)) {
pr_cont("during system linefill.\n");
return true;
}
return f10h_mc0_mce(ec, xec);
}
static bool cat_mc0_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
if (MEM_ERROR(ec)) {
if (TT(ec) != TT_DATA || LL(ec) != LL_L1)
return false;
switch (r4) {
case R4_DRD:
case R4_DWR:
pr_cont("Data/Tag parity error due to %s.\n",
(r4 == R4_DRD ? "load/hw prf" : "store"));
break;
case R4_EVICT:
pr_cont("Copyback parity error on a tag miss.\n");
break;
case R4_SNOOP:
pr_cont("Tag parity error during snoop.\n");
break;
default:
ret = false;
}
} else if (BUS_ERROR(ec)) {
if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG)
return false;
pr_cont("System read data error on a ");
switch (r4) {
case R4_RD:
pr_cont("TLB reload.\n");
break;
case R4_DWR:
pr_cont("store.\n");
break;
case R4_DRD:
pr_cont("load.\n");
break;
default:
ret = false;
}
} else {
ret = false;
}
return ret;
}
static bool f15h_mc0_mce(u16 ec, u8 xec)
{
bool ret = true;
if (MEM_ERROR(ec)) {
switch (xec) {
case 0x0:
pr_cont("Data Array access error.\n");
break;
case 0x1:
pr_cont("UC error during a linefill from L2/NB.\n");
break;
case 0x2:
case 0x11:
pr_cont("STQ access error.\n");
break;
case 0x3:
pr_cont("SCB access error.\n");
break;
case 0x10:
pr_cont("Tag error.\n");
break;
case 0x12:
pr_cont("LDQ access error.\n");
break;
default:
ret = false;
}
} else if (BUS_ERROR(ec)) {
if (!xec)
pr_cont("System Read Data Error.\n");
else
pr_cont(" Internal error condition type %d.\n", xec);
} else if (INT_ERROR(ec)) {
if (xec <= 0x1f)
pr_cont("Hardware Assert.\n");
else
ret = false;
} else
ret = false;
return ret;
}
static void decode_mc0_mce(struct mce *m)
{
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
pr_emerg(HW_ERR "MC0 Error: ");
/* TLB error signatures are the same across families */
if (TLB_ERROR(ec)) {
if (TT(ec) == TT_DATA) {
pr_cont("%s TLB %s.\n", LL_MSG(ec),
((xec == 2) ? "locked miss"
: (xec ? "multimatch" : "parity")));
return;
}
} else if (fam_ops.mc0_mce(ec, xec))
;
else
pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n");
}
static bool k8_mc1_mce(u16 ec, u8 xec)
{
u8 ll = LL(ec);
bool ret = true;
if (!MEM_ERROR(ec))
return false;
if (ll == 0x2)
pr_cont("during a linefill from L2.\n");
else if (ll == 0x1) {
switch (R4(ec)) {
case R4_IRD:
pr_cont("Parity error during data load.\n");
break;
case R4_EVICT:
pr_cont("Copyback Parity/Victim error.\n");
break;
case R4_SNOOP:
pr_cont("Tag Snoop error.\n");
break;
default:
ret = false;
break;
}
} else
ret = false;
return ret;
}
static bool cat_mc1_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
if (!MEM_ERROR(ec))
return false;
if (TT(ec) != TT_INSTR)
return false;
if (r4 == R4_IRD)
pr_cont("Data/tag array parity error for a tag hit.\n");
else if (r4 == R4_SNOOP)
pr_cont("Tag error during snoop/victimization.\n");
else if (xec == 0x0)
pr_cont("Tag parity error from victim castout.\n");
else if (xec == 0x2)
pr_cont("Microcode patch RAM parity error.\n");
else
ret = false;
return ret;
}
static bool f15h_mc1_mce(u16 ec, u8 xec)
{
bool ret = true;
if (!MEM_ERROR(ec))
return false;
switch (xec) {
case 0x0 ... 0xa:
pr_cont("%s.\n", f15h_mc1_mce_desc[xec]);
break;
case 0xd:
pr_cont("%s.\n", f15h_mc1_mce_desc[xec-2]);
break;
case 0x10:
pr_cont("%s.\n", f15h_mc1_mce_desc[xec-4]);
break;
case 0x11 ... 0x15:
pr_cont("Decoder %s parity error.\n", f15h_mc1_mce_desc[xec-4]);
break;
default:
ret = false;
}
return ret;
}
static void decode_mc1_mce(struct mce *m)
{
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
pr_emerg(HW_ERR "MC1 Error: ");
if (TLB_ERROR(ec))
pr_cont("%s TLB %s.\n", LL_MSG(ec),
(xec ? "multimatch" : "parity error"));
else if (BUS_ERROR(ec)) {
bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));
pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
} else if (INT_ERROR(ec)) {
if (xec <= 0x3f)
pr_cont("Hardware Assert.\n");
else
goto wrong_mc1_mce;
} else if (fam_ops.mc1_mce(ec, xec))
;
else
goto wrong_mc1_mce;
return;
wrong_mc1_mce:
pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
}
static bool k8_mc2_mce(u16 ec, u8 xec)
{
bool ret = true;
if (xec == 0x1)
pr_cont(" in the write data buffers.\n");
else if (xec == 0x3)
pr_cont(" in the victim data buffers.\n");
else if (xec == 0x2 && MEM_ERROR(ec))
pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
else if (xec == 0x0) {
if (TLB_ERROR(ec))
pr_cont("%s error in a Page Descriptor Cache or Guest TLB.\n",
TT_MSG(ec));
else if (BUS_ERROR(ec))
pr_cont(": %s/ECC error in data read from NB: %s.\n",
R4_MSG(ec), PP_MSG(ec));
else if (MEM_ERROR(ec)) {
u8 r4 = R4(ec);
if (r4 >= 0x7)
pr_cont(": %s error during data copyback.\n",
R4_MSG(ec));
else if (r4 <= 0x1)
pr_cont(": %s parity/ECC error during data "
"access from L2.\n", R4_MSG(ec));
else
ret = false;
} else
ret = false;
} else
ret = false;
return ret;
}
static bool f15h_mc2_mce(u16 ec, u8 xec)
{
bool ret = true;
if (TLB_ERROR(ec)) {
if (xec == 0x0)
pr_cont("Data parity TLB read error.\n");
else if (xec == 0x1)
pr_cont("Poison data provided for TLB fill.\n");
else
ret = false;
} else if (BUS_ERROR(ec)) {
if (xec > 2)
ret = false;
pr_cont("Error during attempted NB data read.\n");
} else if (MEM_ERROR(ec)) {
switch (xec) {
case 0x4 ... 0xc:
pr_cont("%s.\n", f15h_mc2_mce_desc[xec - 0x4]);
break;
case 0x10 ... 0x14:
pr_cont("%s.\n", f15h_mc2_mce_desc[xec - 0x7]);
break;
default:
ret = false;
}
} else if (INT_ERROR(ec)) {
if (xec <= 0x3f)
pr_cont("Hardware Assert.\n");
else
ret = false;
}
return ret;
}
static bool f16h_mc2_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
if (!MEM_ERROR(ec))
return false;
switch (xec) {
case 0x04 ... 0x05:
pr_cont("%cBUFF parity error.\n", (r4 == R4_RD) ? 'I' : 'O');
break;
case 0x09 ... 0x0b:
case 0x0d ... 0x0f:
pr_cont("ECC error in L2 tag (%s).\n",
((r4 == R4_GEN) ? "BankReq" :
((r4 == R4_SNOOP) ? "Prb" : "Fill")));
break;
case 0x10 ... 0x19:
case 0x1b:
pr_cont("ECC error in L2 data array (%s).\n",
(((r4 == R4_RD) && !(xec & 0x3)) ? "Hit" :
((r4 == R4_GEN) ? "Attr" :
((r4 == R4_EVICT) ? "Vict" : "Fill"))));
break;
case 0x1c ... 0x1d:
case 0x1f:
pr_cont("Parity error in L2 attribute bits (%s).\n",
((r4 == R4_RD) ? "Hit" :
((r4 == R4_GEN) ? "Attr" : "Fill")));
break;
default:
return false;
}
return true;
}
static void decode_mc2_mce(struct mce *m)
{
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
pr_emerg(HW_ERR "MC2 Error: ");
if (!fam_ops.mc2_mce(ec, xec))
pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
static void decode_mc3_mce(struct mce *m)
{
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
if (boot_cpu_data.x86 >= 0x14) {
pr_emerg("You shouldn't be seeing MC3 MCE on this cpu family,"
" please report on LKML.\n");
return;
}
pr_emerg(HW_ERR "MC3 Error");
if (xec == 0x0) {
u8 r4 = R4(ec);
if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
goto wrong_mc3_mce;
pr_cont(" during %s.\n", R4_MSG(ec));
} else
goto wrong_mc3_mce;
return;
wrong_mc3_mce:
pr_emerg(HW_ERR "Corrupted MC3 MCE info?\n");
}
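/*
 * MC4 is the legacy northbridge bank.  Extended error codes 0x0 and 0x8
 * are DRAM ECC errors; besides printing the description they are handed
 * to the registered DRAM ECC decoder (if any) so the failing DIMM can be
 * identified.  Codes 0x1c-0x1f share the tail of mc4_mce_desc[], hence
 * the offset correction below.
 */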
static void decode_mc4_mce(struct mce *m)
{
unsigned int fam = x86_family(m->cpuid);
int node_id = topology_die_id(m->extcpu);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u8 offset = 0;
pr_emerg(HW_ERR "MC4 Error (node %d): ", node_id);
switch (xec) {
case 0x0 ... 0xe:
/* special handling for DRAM ECCs */
if (xec == 0x0 || xec == 0x8) {
/* no ECCs on F11h */
if (fam == 0x11)
goto wrong_mc4_mce;
pr_cont("%s.\n", mc4_mce_desc[xec]);
if (decode_dram_ecc)
decode_dram_ecc(node_id, m);
return;
}
break;
case 0xf:
if (TLB_ERROR(ec))
pr_cont("GART Table Walk data error.\n");
else if (BUS_ERROR(ec))
pr_cont("DMA Exclusion Vector Table Walk error.\n");
else
goto wrong_mc4_mce;
return;
case 0x19:
if (fam == 0x15 || fam == 0x16)
pr_cont("Compute Unit Data Error.\n");
else
goto wrong_mc4_mce;
return;
case 0x1c ... 0x1f:
offset = 13;
break;
default:
goto wrong_mc4_mce;
}
pr_cont("%s.\n", mc4_mce_desc[xec - offset]);
return;
wrong_mc4_mce:
pr_emerg(HW_ERR "Corrupted MC4 MCE info?\n");
}
static void decode_mc5_mce(struct mce *m)
{
unsigned int fam = x86_family(m->cpuid);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
if (fam == 0xf || fam == 0x11)
goto wrong_mc5_mce;
pr_emerg(HW_ERR "MC5 Error: ");
if (INT_ERROR(ec)) {
if (xec <= 0x1f) {
pr_cont("Hardware Assert.\n");
return;
} else
goto wrong_mc5_mce;
}
if (xec == 0x0 || xec == 0xc)
pr_cont("%s.\n", mc5_mce_desc[xec]);
else if (xec <= 0xd)
pr_cont("%s parity error.\n", mc5_mce_desc[xec]);
else
goto wrong_mc5_mce;
return;
wrong_mc5_mce:
pr_emerg(HW_ERR "Corrupted MC5 MCE info?\n");
}
static void decode_mc6_mce(struct mce *m)
{
u8 xec = XEC(m->status, xec_mask);
pr_emerg(HW_ERR "MC6 Error: ");
if (xec > 0x5)
goto wrong_mc6_mce;
pr_cont("%s parity error.\n", mc6_mce_desc[xec]);
return;
wrong_mc6_mce:
pr_emerg(HW_ERR "Corrupted MC6 MCE info?\n");
}
/* Decode errors according to Scalable MCA specification */
static void decode_smca_error(struct mce *m)
{
enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank);
const char *ip_name;
u8 xec = XEC(m->status, xec_mask);
if (bank_type >= N_SMCA_BANK_TYPES)
return;
if (bank_type == SMCA_RESERVED) {
pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank);
return;
}
ip_name = smca_get_long_name(bank_type);
pr_emerg(HW_ERR "%s Ext. Error Code: %d", ip_name, xec);
/* Only print the decode of valid error codes */
if (xec < smca_mce_descs[bank_type].num_descs)
pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]);
if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
xec == 0 && decode_dram_ecc)
decode_dram_ecc(topology_die_id(m->extcpu), m);
}
static inline void amd_decode_err_code(u16 ec)
{
if (INT_ERROR(ec)) {
pr_emerg(HW_ERR "internal: %s\n", UU_MSG(ec));
return;
}
pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
if (BUS_ERROR(ec))
pr_cont(", mem/io: %s", II_MSG(ec));
else
pr_cont(", tx: %s", TT_MSG(ec));
if (MEM_ERROR(ec) || BUS_ERROR(ec)) {
pr_cont(", mem-tx: %s", R4_MSG(ec));
if (BUS_ERROR(ec))
pr_cont(", part-proc: %s (%s)", PP_MSG(ec), TO_MSG(ec));
}
pr_cont("\n");
}
static const char *decode_error_status(struct mce *m)
{
if (m->status & MCI_STATUS_UC) {
if (m->status & MCI_STATUS_PCC)
return "System Fatal error.";
if (m->mcgstatus & MCG_STATUS_RIPV)
return "Uncorrected, software restartable error.";
return "Uncorrected, software containable error.";
}
if (m->status & MCI_STATUS_DEFERRED)
return "Deferred error, no action required.";
return "Corrected error, no action required.";
}
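/*
 * Notifier callback on the MCE decode chain: pretty-print the raw MCA
 * status bits, then dispatch to the SMCA or per-bank decoder and finally
 * to the generic error-code decoder.
 */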
static int
amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
struct mce *m = (struct mce *)data;
unsigned int fam = x86_family(m->cpuid);
int ecc;
if (m->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
pr_emerg(HW_ERR "%s\n", decode_error_status(m));
pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
m->extcpu,
fam, x86_model(m->cpuid), x86_stepping(m->cpuid),
m->bank,
((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
((m->status & MCI_STATUS_UC) ? "UE" :
(m->status & MCI_STATUS_DEFERRED) ? "-" : "CE"),
((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"),
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"));
if (boot_cpu_has(X86_FEATURE_SMCA)) {
u32 low, high;
u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
if (!rdmsr_safe(addr, &low, &high) &&
(low & MCI_CONFIG_MCAX))
pr_cont("|%s", ((m->status & MCI_STATUS_TCC) ? "TCC" : "-"));
pr_cont("|%s", ((m->status & MCI_STATUS_SYNDV) ? "SyndV" : "-"));
}
/* do the two bits[14:13] together */
ecc = (m->status >> 45) & 0x3;
if (ecc)
pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));
if (fam >= 0x15) {
pr_cont("|%s", (m->status & MCI_STATUS_DEFERRED ? "Deferred" : "-"));
/* F15h, bank4, bit 43 is part of McaStatSubCache. */
if (fam != 0x15 || m->bank != 4)
pr_cont("|%s", (m->status & MCI_STATUS_POISON ? "Poison" : "-"));
}
if (fam >= 0x17)
pr_cont("|%s", (m->status & MCI_STATUS_SCRUB ? "Scrub" : "-"));
pr_cont("]: 0x%016llx\n", m->status);
if (m->status & MCI_STATUS_ADDRV)
pr_emerg(HW_ERR "Error Addr: 0x%016llx\n", m->addr);
if (m->ppin)
pr_emerg(HW_ERR "PPIN: 0x%016llx\n", m->ppin);
if (boot_cpu_has(X86_FEATURE_SMCA)) {
pr_emerg(HW_ERR "IPID: 0x%016llx", m->ipid);
if (m->status & MCI_STATUS_SYNDV)
pr_cont(", Syndrome: 0x%016llx", m->synd);
pr_cont("\n");
decode_smca_error(m);
goto err_code;
}
if (m->tsc)
pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
/* Doesn't matter which member to test. */
if (!fam_ops.mc0_mce)
goto err_code;
switch (m->bank) {
case 0:
decode_mc0_mce(m);
break;
case 1:
decode_mc1_mce(m);
break;
case 2:
decode_mc2_mce(m);
break;
case 3:
decode_mc3_mce(m);
break;
case 4:
decode_mc4_mce(m);
break;
case 5:
decode_mc5_mce(m);
break;
case 6:
decode_mc6_mce(m);
break;
default:
break;
}
err_code:
amd_decode_err_code(m->status & 0xffff);
m->kflags |= MCE_HANDLED_EDAC;
return NOTIFY_OK;
}
static struct notifier_block amd_mce_dec_nb = {
.notifier_call = amd_decode_mce,
.priority = MCE_PRIO_EDAC,
};
static int __init mce_amd_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_AMD &&
c->x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
if (boot_cpu_has(X86_FEATURE_SMCA)) {
xec_mask = 0x3f;
goto out;
}
switch (c->x86) {
case 0xf:
fam_ops.mc0_mce = k8_mc0_mce;
fam_ops.mc1_mce = k8_mc1_mce;
fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x10:
fam_ops.mc0_mce = f10h_mc0_mce;
fam_ops.mc1_mce = k8_mc1_mce;
fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x11:
fam_ops.mc0_mce = k8_mc0_mce;
fam_ops.mc1_mce = k8_mc1_mce;
fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x12:
fam_ops.mc0_mce = f12h_mc0_mce;
fam_ops.mc1_mce = k8_mc1_mce;
fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x14:
fam_ops.mc0_mce = cat_mc0_mce;
fam_ops.mc1_mce = cat_mc1_mce;
fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
fam_ops.mc0_mce = f15h_mc0_mce;
fam_ops.mc1_mce = f15h_mc1_mce;
fam_ops.mc2_mce = f15h_mc2_mce;
break;
case 0x16:
xec_mask = 0x1f;
fam_ops.mc0_mce = cat_mc0_mce;
fam_ops.mc1_mce = cat_mc1_mce;
fam_ops.mc2_mce = f16h_mc2_mce;
break;
case 0x17:
case 0x18:
pr_warn_once("Decoding supported only on Scalable MCA processors.\n");
return -EINVAL;
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
return -EINVAL;
}
out:
pr_info("MCE: In-kernel MCE decoding enabled.\n");
mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
}
early_initcall(mce_amd_init);
#ifdef MODULE
static void __exit mce_amd_exit(void)
{
mce_unregister_decode_chain(&amd_mce_dec_nb);
}
MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif
| linux-master | drivers/edac/mce_amd.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2022 Nuvoton Technology Corporation
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "edac_module.h"
#define EDAC_MOD_NAME "npcm-edac"
#define EDAC_MSG_SIZE 256
/* chip serials */
#define NPCM7XX_CHIP BIT(0)
#define NPCM8XX_CHIP BIT(1)
/* syndrome values */
#define UE_SYNDROME 0x03
/* error injection */
#define ERROR_TYPE_CORRECTABLE 0
#define ERROR_TYPE_UNCORRECTABLE 1
#define ERROR_LOCATION_DATA 0
#define ERROR_LOCATION_CHECKCODE 1
#define ERROR_BIT_DATA_MAX 63
#define ERROR_BIT_CHECKCODE_MAX 7
static char data_synd[] = {
0xf4, 0xf1, 0xec, 0xea, 0xe9, 0xe6, 0xe5, 0xe3,
0xdc, 0xda, 0xd9, 0xd6, 0xd5, 0xd3, 0xce, 0xcb,
0xb5, 0xb0, 0xad, 0xab, 0xa8, 0xa7, 0xa4, 0xa2,
0x9d, 0x9b, 0x98, 0x97, 0x94, 0x92, 0x8f, 0x8a,
0x75, 0x70, 0x6d, 0x6b, 0x68, 0x67, 0x64, 0x62,
0x5e, 0x5b, 0x58, 0x57, 0x54, 0x52, 0x4f, 0x4a,
0x34, 0x31, 0x2c, 0x2a, 0x29, 0x26, 0x25, 0x23,
0x1c, 0x1a, 0x19, 0x16, 0x15, 0x13, 0x0e, 0x0b
};
static struct regmap *npcm_regmap;
struct npcm_platform_data {
/* chip serials */
int chip;
/* memory controller registers */
u32 ctl_ecc_en;
u32 ctl_int_status;
u32 ctl_int_ack;
u32 ctl_int_mask_master;
u32 ctl_int_mask_ecc;
u32 ctl_ce_addr_l;
u32 ctl_ce_addr_h;
u32 ctl_ce_data_l;
u32 ctl_ce_data_h;
u32 ctl_ce_synd;
u32 ctl_ue_addr_l;
u32 ctl_ue_addr_h;
u32 ctl_ue_data_l;
u32 ctl_ue_data_h;
u32 ctl_ue_synd;
u32 ctl_source_id;
u32 ctl_controller_busy;
u32 ctl_xor_check_bits;
/* masks and shifts */
u32 ecc_en_mask;
u32 int_status_ce_mask;
u32 int_status_ue_mask;
u32 int_ack_ce_mask;
u32 int_ack_ue_mask;
u32 int_mask_master_non_ecc_mask;
u32 int_mask_master_global_mask;
u32 int_mask_ecc_non_event_mask;
u32 ce_addr_h_mask;
u32 ce_synd_mask;
u32 ce_synd_shift;
u32 ue_addr_h_mask;
u32 ue_synd_mask;
u32 ue_synd_shift;
u32 source_id_ce_mask;
u32 source_id_ce_shift;
u32 source_id_ue_mask;
u32 source_id_ue_shift;
u32 controller_busy_mask;
u32 xor_check_bits_mask;
u32 xor_check_bits_shift;
u32 writeback_en_mask;
u32 fwc_mask;
};
struct priv_data {
void __iomem *reg;
char message[EDAC_MSG_SIZE];
const struct npcm_platform_data *pdata;
/* error injection */
struct dentry *debugfs;
u8 error_type;
u8 location;
u8 bit;
};
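/*
 * Read the correctable-error address, data, source ID and syndrome from
 * the memory controller registers and report the event to the EDAC core.
 */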
static void handle_ce(struct mem_ctl_info *mci)
{
struct priv_data *priv = mci->pvt_info;
const struct npcm_platform_data *pdata;
u32 val_h = 0, val_l, id, synd;
u64 addr = 0, data = 0;
pdata = priv->pdata;
regmap_read(npcm_regmap, pdata->ctl_ce_addr_l, &val_l);
if (pdata->chip == NPCM8XX_CHIP) {
regmap_read(npcm_regmap, pdata->ctl_ce_addr_h, &val_h);
val_h &= pdata->ce_addr_h_mask;
}
addr = ((addr | val_h) << 32) | val_l;
regmap_read(npcm_regmap, pdata->ctl_ce_data_l, &val_l);
if (pdata->chip == NPCM8XX_CHIP)
regmap_read(npcm_regmap, pdata->ctl_ce_data_h, &val_h);
data = ((data | val_h) << 32) | val_l;
regmap_read(npcm_regmap, pdata->ctl_source_id, &id);
id = (id & pdata->source_id_ce_mask) >> pdata->source_id_ce_shift;
regmap_read(npcm_regmap, pdata->ctl_ce_synd, &synd);
synd = (synd & pdata->ce_synd_mask) >> pdata->ce_synd_shift;
snprintf(priv->message, EDAC_MSG_SIZE,
"addr = 0x%llx, data = 0x%llx, id = 0x%x", addr, data, id);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, addr >> PAGE_SHIFT,
addr & ~PAGE_MASK, synd, 0, 0, -1, priv->message, "");
}
static void handle_ue(struct mem_ctl_info *mci)
{
struct priv_data *priv = mci->pvt_info;
const struct npcm_platform_data *pdata;
u32 val_h = 0, val_l, id, synd;
u64 addr = 0, data = 0;
pdata = priv->pdata;
regmap_read(npcm_regmap, pdata->ctl_ue_addr_l, &val_l);
if (pdata->chip == NPCM8XX_CHIP) {
regmap_read(npcm_regmap, pdata->ctl_ue_addr_h, &val_h);
val_h &= pdata->ue_addr_h_mask;
}
addr = ((addr | val_h) << 32) | val_l;
regmap_read(npcm_regmap, pdata->ctl_ue_data_l, &val_l);
if (pdata->chip == NPCM8XX_CHIP)
regmap_read(npcm_regmap, pdata->ctl_ue_data_h, &val_h);
data = ((data | val_h) << 32) | val_l;
regmap_read(npcm_regmap, pdata->ctl_source_id, &id);
id = (id & pdata->source_id_ue_mask) >> pdata->source_id_ue_shift;
regmap_read(npcm_regmap, pdata->ctl_ue_synd, &synd);
synd = (synd & pdata->ue_synd_mask) >> pdata->ue_synd_shift;
snprintf(priv->message, EDAC_MSG_SIZE,
"addr = 0x%llx, data = 0x%llx, id = 0x%x", addr, data, id);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, addr >> PAGE_SHIFT,
addr & ~PAGE_MASK, synd, 0, 0, -1, priv->message, "");
}
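/*
 * ECC interrupt handler: report the pending CE or UE event and acknowledge
 * the corresponding interrupt bit in the controller.
 */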
static irqreturn_t edac_ecc_isr(int irq, void *dev_id)
{
const struct npcm_platform_data *pdata;
struct mem_ctl_info *mci = dev_id;
u32 status;
pdata = ((struct priv_data *)mci->pvt_info)->pdata;
regmap_read(npcm_regmap, pdata->ctl_int_status, &status);
if (status & pdata->int_status_ce_mask) {
handle_ce(mci);
/* acknowledge the CE interrupt */
regmap_write(npcm_regmap, pdata->ctl_int_ack,
pdata->int_ack_ce_mask);
return IRQ_HANDLED;
} else if (status & pdata->int_status_ue_mask) {
handle_ue(mci);
/* acknowledge the UE interrupt */
regmap_write(npcm_regmap, pdata->ctl_int_ack,
pdata->int_ack_ue_mask);
return IRQ_HANDLED;
}
WARN_ON_ONCE(1);
return IRQ_NONE;
}
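/*
 * debugfs write handler for error injection: wait for pending writes to
 * drain, program XOR_CHECK_BITS with the requested syndrome and set the
 * force-write-check bit to trigger the error.
 */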
static ssize_t force_ecc_error(struct file *file, const char __user *data,
size_t count, loff_t *ppos)
{
struct device *dev = file->private_data;
struct mem_ctl_info *mci = to_mci(dev);
struct priv_data *priv = mci->pvt_info;
const struct npcm_platform_data *pdata;
u32 val, syndrome;
int ret;
pdata = priv->pdata;
edac_printk(KERN_INFO, EDAC_MOD_NAME,
"force an ECC error, type = %d, location = %d, bit = %d\n",
priv->error_type, priv->location, priv->bit);
/* ensure no pending writes */
ret = regmap_read_poll_timeout(npcm_regmap, pdata->ctl_controller_busy,
val, !(val & pdata->controller_busy_mask),
1000, 10000);
if (ret) {
edac_printk(KERN_INFO, EDAC_MOD_NAME,
"wait pending writes timeout\n");
return count;
}
regmap_read(npcm_regmap, pdata->ctl_xor_check_bits, &val);
val &= ~pdata->xor_check_bits_mask;
/* write syndrome to XOR_CHECK_BITS */
if (priv->error_type == ERROR_TYPE_CORRECTABLE) {
if (priv->location == ERROR_LOCATION_DATA &&
priv->bit > ERROR_BIT_DATA_MAX) {
edac_printk(KERN_INFO, EDAC_MOD_NAME,
"data bit should not exceed %d (%d)\n",
ERROR_BIT_DATA_MAX, priv->bit);
return count;
}
if (priv->location == ERROR_LOCATION_CHECKCODE &&
priv->bit > ERROR_BIT_CHECKCODE_MAX) {
edac_printk(KERN_INFO, EDAC_MOD_NAME,
"checkcode bit should not exceed %d (%d)\n",
ERROR_BIT_CHECKCODE_MAX, priv->bit);
return count;
}
syndrome = priv->location ? 1 << priv->bit
: data_synd[priv->bit];
regmap_write(npcm_regmap, pdata->ctl_xor_check_bits,
val | (syndrome << pdata->xor_check_bits_shift) |
pdata->writeback_en_mask);
} else if (priv->error_type == ERROR_TYPE_UNCORRECTABLE) {
regmap_write(npcm_regmap, pdata->ctl_xor_check_bits,
val | (UE_SYNDROME << pdata->xor_check_bits_shift));
}
/* force write check */
regmap_update_bits(npcm_regmap, pdata->ctl_xor_check_bits,
pdata->fwc_mask, pdata->fwc_mask);
return count;
}
static const struct file_operations force_ecc_error_fops = {
.open = simple_open,
.write = force_ecc_error,
.llseek = generic_file_llseek,
};
/*
* Setup debugfs for error injection.
*
* Nodes:
* error_type - 0: CE, 1: UE
* location - 0: data, 1: checkcode
* bit - 0 ~ 63 for data and 0 ~ 7 for checkcode
* force_ecc_error - trigger
*
* Examples:
* 1. Inject a correctable error (CE) at checkcode bit 7.
* ~# echo 0 > /sys/kernel/debug/edac/npcm-edac/error_type
* ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/location
* ~# echo 7 > /sys/kernel/debug/edac/npcm-edac/bit
* ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/force_ecc_error
*
* 2. Inject an uncorrectable error (UE).
* ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/error_type
* ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/force_ecc_error
*/
static void setup_debugfs(struct mem_ctl_info *mci)
{
struct priv_data *priv = mci->pvt_info;
priv->debugfs = edac_debugfs_create_dir(mci->mod_name);
if (!priv->debugfs)
return;
edac_debugfs_create_x8("error_type", 0644, priv->debugfs, &priv->error_type);
edac_debugfs_create_x8("location", 0644, priv->debugfs, &priv->location);
edac_debugfs_create_x8("bit", 0644, priv->debugfs, &priv->bit);
edac_debugfs_create_file("force_ecc_error", 0200, priv->debugfs,
&mci->dev, &force_ecc_error_fops);
}
static int setup_irq(struct mem_ctl_info *mci, struct platform_device *pdev)
{
const struct npcm_platform_data *pdata;
int ret, irq;
pdata = ((struct priv_data *)mci->pvt_info)->pdata;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
edac_printk(KERN_ERR, EDAC_MOD_NAME, "IRQ not defined in DTS\n");
return irq;
}
ret = devm_request_irq(&pdev->dev, irq, edac_ecc_isr, 0,
dev_name(&pdev->dev), mci);
if (ret < 0) {
edac_printk(KERN_ERR, EDAC_MOD_NAME, "failed to request IRQ\n");
return ret;
}
/* enable the functional group of ECC and mask the others */
regmap_write(npcm_regmap, pdata->ctl_int_mask_master,
pdata->int_mask_master_non_ecc_mask);
if (pdata->chip == NPCM8XX_CHIP)
regmap_write(npcm_regmap, pdata->ctl_int_mask_ecc,
pdata->int_mask_ecc_non_event_mask);
return 0;
}
static const struct regmap_config npcm_regmap_cfg = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
};
static int edac_probe(struct platform_device *pdev)
{
const struct npcm_platform_data *pdata;
struct device *dev = &pdev->dev;
struct edac_mc_layer layers[1];
struct mem_ctl_info *mci;
struct priv_data *priv;
void __iomem *reg;
u32 val;
int rc;
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
npcm_regmap = devm_regmap_init_mmio(dev, reg, &npcm_regmap_cfg);
if (IS_ERR(npcm_regmap))
return PTR_ERR(npcm_regmap);
pdata = of_device_get_match_data(dev);
if (!pdata)
return -EINVAL;
/* bail out if ECC is not enabled */
regmap_read(npcm_regmap, pdata->ctl_ecc_en, &val);
if (!(val & pdata->ecc_en_mask)) {
edac_printk(KERN_ERR, EDAC_MOD_NAME, "ECC is not enabled\n");
return -EPERM;
}
edac_op_state = EDAC_OPSTATE_INT;
layers[0].type = EDAC_MC_LAYER_ALL_MEM;
layers[0].size = 1;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(struct priv_data));
if (!mci)
return -ENOMEM;
mci->pdev = &pdev->dev;
priv = mci->pvt_info;
priv->reg = reg;
priv->pdata = pdata;
platform_set_drvdata(pdev, mci);
mci->mtype_cap = MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->scrub_cap = SCRUB_FLAG_HW_SRC;
mci->scrub_mode = SCRUB_HW_SRC;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->ctl_name = "npcm_ddr_controller";
mci->dev_name = dev_name(&pdev->dev);
mci->mod_name = EDAC_MOD_NAME;
mci->ctl_page_to_phys = NULL;
rc = setup_irq(mci, pdev);
if (rc)
goto free_edac_mc;
rc = edac_mc_add_mc(mci);
if (rc)
goto free_edac_mc;
if (IS_ENABLED(CONFIG_EDAC_DEBUG) && pdata->chip == NPCM8XX_CHIP)
setup_debugfs(mci);
return rc;
free_edac_mc:
edac_mc_free(mci);
return rc;
}
static int edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct priv_data *priv = mci->pvt_info;
const struct npcm_platform_data *pdata;
pdata = priv->pdata;
if (IS_ENABLED(CONFIG_EDAC_DEBUG) && pdata->chip == NPCM8XX_CHIP)
edac_debugfs_remove_recursive(priv->debugfs);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
regmap_write(npcm_regmap, pdata->ctl_int_mask_master,
pdata->int_mask_master_global_mask);
regmap_update_bits(npcm_regmap, pdata->ctl_ecc_en, pdata->ecc_en_mask, 0);
return 0;
}
static const struct npcm_platform_data npcm750_edac = {
.chip = NPCM7XX_CHIP,
/* memory controller registers */
.ctl_ecc_en = 0x174,
.ctl_int_status = 0x1d0,
.ctl_int_ack = 0x1d4,
.ctl_int_mask_master = 0x1d8,
.ctl_ce_addr_l = 0x188,
.ctl_ce_data_l = 0x190,
.ctl_ce_synd = 0x18c,
.ctl_ue_addr_l = 0x17c,
.ctl_ue_data_l = 0x184,
.ctl_ue_synd = 0x180,
.ctl_source_id = 0x194,
/* masks and shifts */
.ecc_en_mask = BIT(24),
.int_status_ce_mask = GENMASK(4, 3),
.int_status_ue_mask = GENMASK(6, 5),
.int_ack_ce_mask = GENMASK(4, 3),
.int_ack_ue_mask = GENMASK(6, 5),
.int_mask_master_non_ecc_mask = GENMASK(30, 7) | GENMASK(2, 0),
.int_mask_master_global_mask = BIT(31),
.ce_synd_mask = GENMASK(6, 0),
.ce_synd_shift = 0,
.ue_synd_mask = GENMASK(6, 0),
.ue_synd_shift = 0,
.source_id_ce_mask = GENMASK(29, 16),
.source_id_ce_shift = 16,
.source_id_ue_mask = GENMASK(13, 0),
.source_id_ue_shift = 0,
};
static const struct npcm_platform_data npcm845_edac = {
.chip = NPCM8XX_CHIP,
/* memory controller registers */
.ctl_ecc_en = 0x16c,
.ctl_int_status = 0x228,
.ctl_int_ack = 0x244,
.ctl_int_mask_master = 0x220,
.ctl_int_mask_ecc = 0x260,
.ctl_ce_addr_l = 0x18c,
.ctl_ce_addr_h = 0x190,
.ctl_ce_data_l = 0x194,
.ctl_ce_data_h = 0x198,
.ctl_ce_synd = 0x190,
.ctl_ue_addr_l = 0x17c,
.ctl_ue_addr_h = 0x180,
.ctl_ue_data_l = 0x184,
.ctl_ue_data_h = 0x188,
.ctl_ue_synd = 0x180,
.ctl_source_id = 0x19c,
.ctl_controller_busy = 0x20c,
.ctl_xor_check_bits = 0x174,
/* masks and shifts */
.ecc_en_mask = GENMASK(17, 16),
.int_status_ce_mask = GENMASK(1, 0),
.int_status_ue_mask = GENMASK(3, 2),
.int_ack_ce_mask = GENMASK(1, 0),
.int_ack_ue_mask = GENMASK(3, 2),
.int_mask_master_non_ecc_mask = GENMASK(30, 3) | GENMASK(1, 0),
.int_mask_master_global_mask = BIT(31),
.int_mask_ecc_non_event_mask = GENMASK(8, 4),
.ce_addr_h_mask = GENMASK(1, 0),
.ce_synd_mask = GENMASK(15, 8),
.ce_synd_shift = 8,
.ue_addr_h_mask = GENMASK(1, 0),
.ue_synd_mask = GENMASK(15, 8),
.ue_synd_shift = 8,
.source_id_ce_mask = GENMASK(29, 16),
.source_id_ce_shift = 16,
.source_id_ue_mask = GENMASK(13, 0),
.source_id_ue_shift = 0,
.controller_busy_mask = BIT(0),
.xor_check_bits_mask = GENMASK(23, 16),
.xor_check_bits_shift = 16,
.writeback_en_mask = BIT(24),
.fwc_mask = BIT(8),
};
static const struct of_device_id npcm_edac_of_match[] = {
{
.compatible = "nuvoton,npcm750-memory-controller",
.data = &npcm750_edac
},
{
.compatible = "nuvoton,npcm845-memory-controller",
.data = &npcm845_edac
},
{},
};
MODULE_DEVICE_TABLE(of, npcm_edac_of_match);
static struct platform_driver npcm_edac_driver = {
.driver = {
.name = "npcm-edac",
.of_match_table = npcm_edac_of_match,
},
.probe = edac_probe,
.remove = edac_remove,
};
module_platform_driver(npcm_edac_driver);
MODULE_AUTHOR("Medad CChien <[email protected]>");
MODULE_AUTHOR("Marvin Lin <[email protected]>");
MODULE_DESCRIPTION("Nuvoton NPCM EDAC Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/edac/npcm_edac.c |
/*
* edac_module.c
*
* (C) 2007 www.softwarebitmaker.com
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
* Author: Doug Thompson <[email protected]>
*
*/
#include <linux/edac.h>
#include "edac_mc.h"
#include "edac_module.h"
#define EDAC_VERSION "Ver: 3.0.0"
#ifdef CONFIG_EDAC_DEBUG
static int edac_set_debug_level(const char *buf,
const struct kernel_param *kp)
{
unsigned long val;
int ret;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
if (val > 4)
return -EINVAL;
return param_set_int(buf, kp);
}
/* Values of 0 to 4 will generate output */
int edac_debug_level = 2;
EXPORT_SYMBOL_GPL(edac_debug_level);
module_param_call(edac_debug_level, edac_set_debug_level, param_get_int,
&edac_debug_level, 0644);
MODULE_PARM_DESC(edac_debug_level, "EDAC debug level: [0-4], default: 2");
#endif
/*
* edac_op_state_to_string()
*/
char *edac_op_state_to_string(int opstate)
{
if (opstate == OP_RUNNING_POLL)
return "POLLED";
else if (opstate == OP_RUNNING_INTERRUPT)
return "INTERRUPT";
else if (opstate == OP_RUNNING_POLL_INTR)
return "POLL-INTR";
else if (opstate == OP_ALLOC)
return "ALLOC";
else if (opstate == OP_OFFLINE)
return "OFFLINE";
return "UNKNOWN";
}
/*
* sysfs object: /sys/devices/system/edac
* need to export to other files
*/
static struct bus_type edac_subsys = {
.name = "edac",
.dev_name = "edac",
};
static int edac_subsys_init(void)
{
int err;
/* create the /sys/devices/system/edac directory */
err = subsys_system_register(&edac_subsys, NULL);
if (err)
printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n");
return err;
}
static void edac_subsys_exit(void)
{
bus_unregister(&edac_subsys);
}
/* return pointer to the 'edac' node in sysfs */
struct bus_type *edac_get_sysfs_subsys(void)
{
return &edac_subsys;
}
EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys);
/*
* edac_init
* module initialization entry point
*/
static int __init edac_init(void)
{
int err = 0;
edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n");
err = edac_subsys_init();
if (err)
return err;
/*
* Harvest and clear any boot/initialization PCI parity errors
*
* FIXME: This only clears errors logged by devices present at time of
* module initialization. We should also do an initial clear
* of each newly hotplugged device.
*/
edac_pci_clear_parity_errors();
err = edac_mc_sysfs_init();
if (err)
goto err_sysfs;
edac_debugfs_init();
err = edac_workqueue_setup();
if (err) {
edac_printk(KERN_ERR, EDAC_MC, "Failure initializing workqueue\n");
goto err_wq;
}
return 0;
err_wq:
edac_debugfs_exit();
edac_mc_sysfs_exit();
err_sysfs:
edac_subsys_exit();
return err;
}
/*
* edac_exit()
* module exit/termination function
*/
static void __exit edac_exit(void)
{
edac_dbg(0, "\n");
/* tear down the various subsystems */
edac_workqueue_teardown();
edac_mc_sysfs_exit();
edac_debugfs_exit();
edac_subsys_exit();
}
/*
* Inform the kernel of our entry and exit points
*/
subsys_initcall(edac_init);
module_exit(edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al");
MODULE_DESCRIPTION("Core library routines for EDAC reporting");
| linux-master | drivers/edac/edac_module.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <[email protected]>
*
* Copyright (c) 2013 by Cisco Systems, Inc.
* All rights reserved.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/ctype.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-lmcx-defs.h>
#include "edac_module.h"
#define OCTEON_MAX_MC 4
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
struct octeon_lmc_pvt {
unsigned long inject;
unsigned long error_type;
unsigned long dimm;
unsigned long rank;
unsigned long bank;
unsigned long row;
unsigned long col;
};
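/*
 * Poll handler used on OCTEON I / Octeon Plus LMCs: check MEM_CFG0 for
 * single- and double-bit ECC errors, report them and write the status
 * bits back to re-arm the logging.
 */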
static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
{
union cvmx_lmcx_mem_cfg0 cfg0;
bool do_clear = false;
char msg[64];
cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx));
if (cfg0.s.sec_err || cfg0.s.ded_err) {
union cvmx_lmcx_fadr fadr;
fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
snprintf(msg, sizeof(msg),
"DIMM %d rank %d bank %d row %d col %d",
fadr.cn30xx.fdimm, fadr.cn30xx.fbunk,
fadr.cn30xx.fbank, fadr.cn30xx.frow, fadr.cn30xx.fcol);
}
if (cfg0.s.sec_err) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1, msg, "");
cfg0.s.sec_err = -1; /* Done, re-arm */
do_clear = true;
}
if (cfg0.s.ded_err) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1, msg, "");
cfg0.s.ded_err = -1; /* Done, re-arm */
do_clear = true;
}
if (do_clear)
cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx), cfg0.u64);
}
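/*
 * Poll handler used on OCTEON II LMCs. When error injection is armed via
 * sysfs, synthesize the requested error instead of reading the hardware
 * status registers.
 */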
static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
{
struct octeon_lmc_pvt *pvt = mci->pvt_info;
union cvmx_lmcx_int int_reg;
bool do_clear = false;
char msg[64];
if (!pvt->inject)
int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
else {
int_reg.u64 = 0;
if (pvt->error_type == 1)
int_reg.s.sec_err = 1;
if (pvt->error_type == 2)
int_reg.s.ded_err = 1;
}
if (int_reg.s.sec_err || int_reg.s.ded_err) {
union cvmx_lmcx_fadr fadr;
if (likely(!pvt->inject))
fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
else {
fadr.cn61xx.fdimm = pvt->dimm;
fadr.cn61xx.fbunk = pvt->rank;
fadr.cn61xx.fbank = pvt->bank;
fadr.cn61xx.frow = pvt->row;
fadr.cn61xx.fcol = pvt->col;
}
snprintf(msg, sizeof(msg),
"DIMM %d rank %d bank %d row %d col %d",
fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
fadr.cn61xx.fbank, fadr.cn61xx.frow, fadr.cn61xx.fcol);
}
if (int_reg.s.sec_err) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1, msg, "");
int_reg.s.sec_err = -1; /* Done, re-arm */
do_clear = true;
}
if (int_reg.s.ded_err) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1, msg, "");
int_reg.s.ded_err = -1; /* Done, re-arm */
do_clear = true;
}
if (do_clear) {
if (likely(!pvt->inject))
cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
else
pvt->inject = 0;
}
}
/************************ MC SYSFS parts ***********************************/
/* Only a couple naming differences per template, so very similar */
#define TEMPLATE_SHOW(reg) \
static ssize_t octeon_mc_inject_##reg##_show(struct device *dev, \
struct device_attribute *attr, \
char *data) \
{ \
struct mem_ctl_info *mci = to_mci(dev); \
struct octeon_lmc_pvt *pvt = mci->pvt_info; \
return sprintf(data, "%016llu\n", (u64)pvt->reg); \
}
#define TEMPLATE_STORE(reg) \
static ssize_t octeon_mc_inject_##reg##_store(struct device *dev, \
struct device_attribute *attr, \
const char *data, size_t count) \
{ \
struct mem_ctl_info *mci = to_mci(dev); \
struct octeon_lmc_pvt *pvt = mci->pvt_info; \
if (isdigit(*data)) { \
if (!kstrtoul(data, 0, &pvt->reg)) \
return count; \
} \
return 0; \
}
TEMPLATE_SHOW(inject);
TEMPLATE_STORE(inject);
TEMPLATE_SHOW(dimm);
TEMPLATE_STORE(dimm);
TEMPLATE_SHOW(bank);
TEMPLATE_STORE(bank);
TEMPLATE_SHOW(rank);
TEMPLATE_STORE(rank);
TEMPLATE_SHOW(row);
TEMPLATE_STORE(row);
TEMPLATE_SHOW(col);
TEMPLATE_STORE(col);
static ssize_t octeon_mc_inject_error_type_store(struct device *dev,
struct device_attribute *attr,
const char *data,
size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct octeon_lmc_pvt *pvt = mci->pvt_info;
if (!strncmp(data, "single", 6))
pvt->error_type = 1;
else if (!strncmp(data, "double", 6))
pvt->error_type = 2;
return count;
}
static ssize_t octeon_mc_inject_error_type_show(struct device *dev,
struct device_attribute *attr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
struct octeon_lmc_pvt *pvt = mci->pvt_info;
if (pvt->error_type == 1)
return sprintf(data, "single");
else if (pvt->error_type == 2)
return sprintf(data, "double");
return 0;
}
static DEVICE_ATTR(inject, S_IRUGO | S_IWUSR,
octeon_mc_inject_inject_show, octeon_mc_inject_inject_store);
static DEVICE_ATTR(error_type, S_IRUGO | S_IWUSR,
octeon_mc_inject_error_type_show, octeon_mc_inject_error_type_store);
static DEVICE_ATTR(dimm, S_IRUGO | S_IWUSR,
octeon_mc_inject_dimm_show, octeon_mc_inject_dimm_store);
static DEVICE_ATTR(rank, S_IRUGO | S_IWUSR,
octeon_mc_inject_rank_show, octeon_mc_inject_rank_store);
static DEVICE_ATTR(bank, S_IRUGO | S_IWUSR,
octeon_mc_inject_bank_show, octeon_mc_inject_bank_store);
static DEVICE_ATTR(row, S_IRUGO | S_IWUSR,
octeon_mc_inject_row_show, octeon_mc_inject_row_store);
static DEVICE_ATTR(col, S_IRUGO | S_IWUSR,
octeon_mc_inject_col_show, octeon_mc_inject_col_store);
static struct attribute *octeon_dev_attrs[] = {
&dev_attr_inject.attr,
&dev_attr_error_type.attr,
&dev_attr_dimm.attr,
&dev_attr_rank.attr,
&dev_attr_bank.attr,
&dev_attr_row.attr,
&dev_attr_col.attr,
NULL
};
ATTRIBUTE_GROUPS(octeon_dev);
static int octeon_lmc_edac_probe(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[1];
int mc = pdev->id;
opstate_init();
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = 1;
layers[0].is_virt_csrow = false;
if (OCTEON_IS_OCTEON1PLUS()) {
union cvmx_lmcx_mem_cfg0 cfg0;
cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
if (!cfg0.s.ecc_ena) {
dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
return 0;
}
mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
if (!mci)
return -ENXIO;
mci->pdev = &pdev->dev;
mci->dev_name = dev_name(&pdev->dev);
mci->mod_name = "octeon-lmc";
mci->ctl_name = "octeon-lmc-err";
mci->edac_check = octeon_lmc_edac_poll;
if (edac_mc_add_mc_with_groups(mci, octeon_dev_groups)) {
dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
edac_mc_free(mci);
return -ENXIO;
}
cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
cfg0.s.intr_ded_ena = 0; /* We poll */
cfg0.s.intr_sec_ena = 0;
cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64);
} else {
/* OCTEON II */
union cvmx_lmcx_int_en en;
union cvmx_lmcx_config config;
config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0));
if (!config.s.ecc_ena) {
dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
return 0;
}
mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
if (!mci)
return -ENXIO;
mci->pdev = &pdev->dev;
mci->dev_name = dev_name(&pdev->dev);
mci->mod_name = "octeon-lmc";
mci->ctl_name = "co_lmc_err";
mci->edac_check = octeon_lmc_edac_poll_o2;
if (edac_mc_add_mc_with_groups(mci, octeon_dev_groups)) {
dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
edac_mc_free(mci);
return -ENXIO;
}
en.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
en.s.intr_ded_ena = 0; /* We poll */
en.s.intr_sec_ena = 0;
cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), en.u64);
}
platform_set_drvdata(pdev, mci);
return 0;
}
static int octeon_lmc_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
static struct platform_driver octeon_lmc_edac_driver = {
.probe = octeon_lmc_edac_probe,
.remove = octeon_lmc_edac_remove,
.driver = {
.name = "octeon_lmc_edac",
}
};
module_platform_driver(octeon_lmc_edac_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <[email protected]>");
| linux-master | drivers/edac/octeon_edac-lmc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx ZynqMP OCM ECC Driver
*
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*/
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "edac_module.h"
#define ZYNQMP_OCM_EDAC_MSG_SIZE 256
#define ZYNQMP_OCM_EDAC_STRING "zynqmp_ocm"
/* Error/Interrupt registers */
#define ERR_CTRL_OFST 0x0
#define OCM_ISR_OFST 0x04
#define OCM_IMR_OFST 0x08
#define OCM_IEN_OFST 0x0C
#define OCM_IDS_OFST 0x10
/* ECC control register */
#define ECC_CTRL_OFST 0x14
/* Correctable error info registers */
#define CE_FFA_OFST 0x1C
#define CE_FFD0_OFST 0x20
#define CE_FFD1_OFST 0x24
#define CE_FFD2_OFST 0x28
#define CE_FFD3_OFST 0x2C
#define CE_FFE_OFST 0x30
/* Uncorrectable error info registers */
#define UE_FFA_OFST 0x34
#define UE_FFD0_OFST 0x38
#define UE_FFD1_OFST 0x3C
#define UE_FFD2_OFST 0x40
#define UE_FFD3_OFST 0x44
#define UE_FFE_OFST 0x48
/* ECC control register bit field definitions */
#define ECC_CTRL_CLR_CE_ERR 0x40
#define ECC_CTRL_CLR_UE_ERR 0x80
/* Fault injection data and count registers */
#define OCM_FID0_OFST 0x4C
#define OCM_FID1_OFST 0x50
#define OCM_FID2_OFST 0x54
#define OCM_FID3_OFST 0x58
#define OCM_FIC_OFST 0x74
#define UE_MAX_BITPOS_LOWER 31
#define UE_MIN_BITPOS_UPPER 32
#define UE_MAX_BITPOS_UPPER 63
/* Interrupt masks */
#define OCM_CEINTR_MASK BIT(6)
#define OCM_UEINTR_MASK BIT(7)
#define OCM_ECC_ENABLE_MASK BIT(0)
#define OCM_FICOUNT_MASK GENMASK(23, 0)
#define OCM_NUM_UE_BITPOS 2
#define OCM_BASEVAL 0xFFFC0000
#define EDAC_DEVICE "ZynqMP-OCM"
/**
* struct ecc_error_info - ECC error log information
* @addr: Fault generated at this address
* @fault_lo: Generated fault data (lower 32-bit)
* @fault_hi: Generated fault data (upper 32-bit)
*/
struct ecc_error_info {
u32 addr;
u32 fault_lo;
u32 fault_hi;
};
/**
* struct ecc_status - ECC status information to report
* @ce_cnt: Correctable error count
* @ue_cnt: Uncorrectable error count
* @ceinfo: Correctable error log information
* @ueinfo: Uncorrectable error log information
*/
struct ecc_status {
u32 ce_cnt;
u32 ue_cnt;
struct ecc_error_info ceinfo;
struct ecc_error_info ueinfo;
};
/**
* struct edac_priv - OCM private instance data
* @baseaddr: Base address of the OCM
* @message: Buffer for framing the event specific info
* @stat: ECC status information
* @ce_cnt: Correctable Error count
* @ue_cnt: Uncorrectable Error count
* @debugfs_dir: Directory entry for debugfs
* @ce_bitpos: Bit position for Correctable Error
* @ue_bitpos: Array to store UnCorrectable Error bit positions
* @fault_injection_cnt: Fault Injection Counter value
*/
struct edac_priv {
void __iomem *baseaddr;
char message[ZYNQMP_OCM_EDAC_MSG_SIZE];
struct ecc_status stat;
u32 ce_cnt;
u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
struct dentry *debugfs_dir;
u8 ce_bitpos;
u8 ue_bitpos[OCM_NUM_UE_BITPOS];
u32 fault_injection_cnt;
#endif
};
/**
* get_error_info - Get the current ECC error info
* @base: Pointer to the base address of the OCM
* @p: Pointer to the OCM ECC status structure
* @mask: Status register mask value
*
 * Determines whether a correctable or uncorrectable ECC error has occurred
 * and, if so, records its address and fault data
*
*/
static void get_error_info(void __iomem *base, struct ecc_status *p, int mask)
{
if (mask & OCM_CEINTR_MASK) {
p->ce_cnt++;
p->ceinfo.fault_lo = readl(base + CE_FFD0_OFST);
p->ceinfo.fault_hi = readl(base + CE_FFD1_OFST);
p->ceinfo.addr = (OCM_BASEVAL | readl(base + CE_FFA_OFST));
writel(ECC_CTRL_CLR_CE_ERR, base + OCM_ISR_OFST);
} else if (mask & OCM_UEINTR_MASK) {
p->ue_cnt++;
p->ueinfo.fault_lo = readl(base + UE_FFD0_OFST);
p->ueinfo.fault_hi = readl(base + UE_FFD1_OFST);
p->ueinfo.addr = (OCM_BASEVAL | readl(base + UE_FFA_OFST));
writel(ECC_CTRL_CLR_UE_ERR, base + OCM_ISR_OFST);
}
}
/**
* handle_error - Handle error types CE and UE
* @dci: Pointer to the EDAC device instance
* @p: Pointer to the OCM ECC status structure
*
* Handles correctable and uncorrectable errors.
*/
static void handle_error(struct edac_device_ctl_info *dci, struct ecc_status *p)
{
struct edac_priv *priv = dci->pvt_info;
struct ecc_error_info *pinf;
if (p->ce_cnt) {
pinf = &p->ceinfo;
snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
"\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]",
"CE", pinf->addr, pinf->fault_hi, pinf->fault_lo);
edac_device_handle_ce(dci, 0, 0, priv->message);
}
if (p->ue_cnt) {
pinf = &p->ueinfo;
snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
"\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]",
"UE", pinf->addr, pinf->fault_hi, pinf->fault_lo);
edac_device_handle_ue(dci, 0, 0, priv->message);
}
memset(p, 0, sizeof(*p));
}
/**
* intr_handler - ISR routine
* @irq: irq number
* @dev_id: device id pointer
*
 * Return: IRQ_NONE if neither a CE nor a UE interrupt is pending, IRQ_HANDLED otherwise
*/
static irqreturn_t intr_handler(int irq, void *dev_id)
{
struct edac_device_ctl_info *dci = dev_id;
struct edac_priv *priv = dci->pvt_info;
int regval;
regval = readl(priv->baseaddr + OCM_ISR_OFST);
if (!(regval & (OCM_CEINTR_MASK | OCM_UEINTR_MASK))) {
WARN_ONCE(1, "Unhandled IRQ%d, ISR: 0x%x", irq, regval);
return IRQ_NONE;
}
get_error_info(priv->baseaddr, &priv->stat, regval);
priv->ce_cnt += priv->stat.ce_cnt;
priv->ue_cnt += priv->stat.ue_cnt;
handle_error(dci, &priv->stat);
return IRQ_HANDLED;
}
/**
* get_eccstate - Return the ECC status
* @base: Pointer to the OCM base address
*
* Get the ECC enable/disable status
*
 * Return: true if ECC is enabled, false otherwise.
*/
static bool get_eccstate(void __iomem *base)
{
return readl(base + ECC_CTRL_OFST) & OCM_ECC_ENABLE_MASK;
}
#ifdef CONFIG_EDAC_DEBUG
/**
* write_fault_count - write fault injection count
* @priv: Pointer to the EDAC private struct
*
 * Update the fault injection count register; once the counter reaches
 * zero, the configured error is injected
*/
static void write_fault_count(struct edac_priv *priv)
{
u32 ficount = priv->fault_injection_cnt;
if (ficount & ~OCM_FICOUNT_MASK) {
ficount &= OCM_FICOUNT_MASK;
edac_printk(KERN_INFO, EDAC_DEVICE,
"Fault injection count value truncated to %d\n", ficount);
}
writel(ficount, priv->baseaddr + OCM_FIC_OFST);
}
/*
* To get the Correctable Error injected, the following steps are needed:
* - Setup the optional Fault Injection Count:
* echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count
* - Write the Correctable Error bit position value:
* echo <bit_pos val> > /sys/kernel/debug/edac/ocm/inject_ce_bitpos
*/
static ssize_t inject_ce_write(struct file *file, const char __user *data,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dev = file->private_data;
struct edac_priv *priv = edac_dev->pvt_info;
int ret;
if (!data)
return -EFAULT;
ret = kstrtou8_from_user(data, count, 0, &priv->ce_bitpos);
if (ret)
return ret;
if (priv->ce_bitpos > UE_MAX_BITPOS_UPPER)
return -EINVAL;
if (priv->ce_bitpos <= UE_MAX_BITPOS_LOWER) {
writel(BIT(priv->ce_bitpos), priv->baseaddr + OCM_FID0_OFST);
writel(0, priv->baseaddr + OCM_FID1_OFST);
} else {
writel(BIT(priv->ce_bitpos - UE_MIN_BITPOS_UPPER),
priv->baseaddr + OCM_FID1_OFST);
writel(0, priv->baseaddr + OCM_FID0_OFST);
}
write_fault_count(priv);
return count;
}
static const struct file_operations inject_ce_fops = {
.open = simple_open,
.write = inject_ce_write,
.llseek = generic_file_llseek,
};
/*
* To get the Uncorrectable Error injected, the following steps are needed:
* - Setup the optional Fault Injection Count:
* echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count
* - Write the Uncorrectable Error bit position values:
* echo <bit_pos0 val>,<bit_pos1 val> > /sys/kernel/debug/edac/ocm/inject_ue_bitpos
*/
static ssize_t inject_ue_write(struct file *file, const char __user *data,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dev = file->private_data;
struct edac_priv *priv = edac_dev->pvt_info;
char buf[6], *pbuf, *token[2];
u64 ue_bitpos;
int i, ret;
u8 len;
if (!data)
return -EFAULT;
	len = min_t(size_t, count, sizeof(buf) - 1); /* leave room for the NUL terminator */
if (copy_from_user(buf, data, len))
return -EFAULT;
buf[len] = '\0';
pbuf = &buf[0];
for (i = 0; i < OCM_NUM_UE_BITPOS; i++)
token[i] = strsep(&pbuf, ",");
ret = kstrtou8(token[0], 0, &priv->ue_bitpos[0]);
if (ret)
return ret;
ret = kstrtou8(token[1], 0, &priv->ue_bitpos[1]);
if (ret)
return ret;
if (priv->ue_bitpos[0] > UE_MAX_BITPOS_UPPER ||
priv->ue_bitpos[1] > UE_MAX_BITPOS_UPPER)
return -EINVAL;
if (priv->ue_bitpos[0] == priv->ue_bitpos[1]) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Bit positions should not be equal\n");
return -EINVAL;
}
ue_bitpos = BIT(priv->ue_bitpos[0]) | BIT(priv->ue_bitpos[1]);
writel((u32)ue_bitpos, priv->baseaddr + OCM_FID0_OFST);
writel((u32)(ue_bitpos >> 32), priv->baseaddr + OCM_FID1_OFST);
write_fault_count(priv);
return count;
}
static const struct file_operations inject_ue_fops = {
.open = simple_open,
.write = inject_ue_write,
.llseek = generic_file_llseek,
};
static void setup_debugfs(struct edac_device_ctl_info *edac_dev)
{
struct edac_priv *priv = edac_dev->pvt_info;
priv->debugfs_dir = edac_debugfs_create_dir("ocm");
if (!priv->debugfs_dir)
return;
edac_debugfs_create_x32("inject_fault_count", 0644, priv->debugfs_dir,
&priv->fault_injection_cnt);
edac_debugfs_create_file("inject_ue_bitpos", 0644, priv->debugfs_dir,
edac_dev, &inject_ue_fops);
edac_debugfs_create_file("inject_ce_bitpos", 0644, priv->debugfs_dir,
edac_dev, &inject_ce_fops);
}
#endif
static int edac_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci;
struct edac_priv *priv;
void __iomem *baseaddr;
struct resource *res;
int irq, ret;
baseaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(baseaddr))
return PTR_ERR(baseaddr);
if (!get_eccstate(baseaddr)) {
edac_printk(KERN_INFO, EDAC_DEVICE, "ECC not enabled\n");
return -ENXIO;
}
dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
edac_device_alloc_index());
if (!dci)
return -ENOMEM;
priv = dci->pvt_info;
platform_set_drvdata(pdev, dci);
dci->dev = &pdev->dev;
priv->baseaddr = baseaddr;
dci->mod_name = pdev->dev.driver->name;
dci->ctl_name = ZYNQMP_OCM_EDAC_STRING;
dci->dev_name = dev_name(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto free_dev_ctl;
}
ret = devm_request_irq(&pdev->dev, irq, intr_handler, 0,
dev_name(&pdev->dev), dci);
if (ret) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Failed to request Irq\n");
goto free_dev_ctl;
}
/* Enable UE, CE interrupts */
writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IEN_OFST);
#ifdef CONFIG_EDAC_DEBUG
setup_debugfs(dci);
#endif
ret = edac_device_add_device(dci);
if (ret)
goto free_dev_ctl;
return 0;
free_dev_ctl:
edac_device_free_ctl_info(dci);
return ret;
}
static int edac_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
struct edac_priv *priv = dci->pvt_info;
/* Disable UE, CE interrupts */
writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IDS_OFST);
#ifdef CONFIG_EDAC_DEBUG
debugfs_remove_recursive(priv->debugfs_dir);
#endif
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
return 0;
}
static const struct of_device_id zynqmp_ocm_edac_match[] = {
{ .compatible = "xlnx,zynqmp-ocmc-1.0"},
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, zynqmp_ocm_edac_match);
static struct platform_driver zynqmp_ocm_edac_driver = {
.driver = {
.name = "zynqmp-ocm-edac",
.of_match_table = zynqmp_ocm_edac_match,
},
.probe = edac_probe,
.remove = edac_remove,
};
module_platform_driver(zynqmp_ocm_edac_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("Xilinx ZynqMP OCM ECC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/edac/zynqmp_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Intel(R) 10nm server memory controller.
* Copyright (c) 2019, Intel Corporation.
*
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"
#define I10NM_REVISION "v0.0.6"
#define EDAC_MOD_STR "i10nm_edac"
/* Debug macros */
#define i10nm_printk(level, fmt, arg...) \
edac_printk(level, "i10nm", fmt, ##arg)
#define I10NM_GET_SCK_BAR(d, reg) \
pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg) \
pci_read_config_dword((d)->uracu, \
(res_cfg->type == GNR ? 0xd4 : 0xd8) + (i) * 4, &(reg))
#define I10NM_GET_SAD(d, offset, i, reg)\
pci_read_config_dword((d)->sad_all, (offset) + (i) * \
(res_cfg->type == GNR ? 12 : 8), &(reg))
#define I10NM_GET_HBM_IMC_BAR(d, reg) \
pci_read_config_dword((d)->uracu, 0xd4, &(reg))
#define I10NM_GET_CAPID3_CFG(d, reg) \
pci_read_config_dword((d)->pcu_cr3, \
res_cfg->type == GNR ? 0x290 : 0x90, &(reg))
#define I10NM_GET_CAPID5_CFG(d, reg) \
pci_read_config_dword((d)->pcu_cr3, \
res_cfg->type == GNR ? 0x298 : 0x98, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j) \
readl((m)->mbase + ((m)->hbm_mc ? 0x80c : \
(res_cfg->type == GNR ? 0xc0c : 0x2080c)) + \
(i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i) \
readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_MCMTR(m, i) \
readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : \
(res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_AMAP(m, i) \
readl((m)->mbase + ((m)->hbm_mc ? 0x814 : \
(res_cfg->type == GNR ? 0xc14 : 0x20814)) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset) \
readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_REG64(m, i, offset) \
readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_SET_REG32(m, i, offset, v) \
writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_SCK_MMIO_BASE(reg) (GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg) ((GET_BITFIELD(reg, 13, 23) - \
GET_BITFIELD(reg, 0, 10) + 1) << 12)
#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg) \
((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)
#define I10NM_GNR_IMC_MMIO_OFFSET 0x24c000
#define I10NM_GNR_IMC_MMIO_SIZE 0x4000
#define I10NM_HBM_IMC_MMIO_SIZE 0x9000
#define I10NM_DDR_IMC_CH_CNT(reg) GET_BITFIELD(reg, 21, 24)
#define I10NM_IS_HBM_PRESENT(reg) GET_BITFIELD(reg, 27, 30)
#define I10NM_IS_HBM_IMC(reg) GET_BITFIELD(reg, 29, 29)
#define I10NM_MAX_SAD 16
#define I10NM_SAD_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg) GET_BITFIELD(reg, 5, 5)
#define RETRY_RD_ERR_LOG_UC BIT(1)
#define RETRY_RD_ERR_LOG_NOOVER BIT(14)
#define RETRY_RD_ERR_LOG_EN BIT(15)
#define RETRY_RD_ERR_LOG_NOOVER_UC (BIT(14) | BIT(1))
#define RETRY_RD_ERR_LOG_OVER_UC_V (BIT(2) | BIT(1) | BIT(0))
static struct list_head *i10nm_edac_list;
static struct res_config *res_cfg;
static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;
static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
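/*
 * Enable extended retry_rd_err_log capture for one channel (saving the
 * original register settings first), or restore those saved settings when
 * disabling, for both scrub and demand read logs.
 */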
static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
u32 *offsets_scrub, u32 *offsets_demand,
u32 *offsets_demand2)
{
u32 s, d, d2;
s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
if (offsets_demand2)
d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
if (enable) {
/* Save default configurations */
imc->chan[chan].retry_rd_err_log_s = s;
imc->chan[chan].retry_rd_err_log_d = d;
if (offsets_demand2)
imc->chan[chan].retry_rd_err_log_d2 = d2;
s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
s |= RETRY_RD_ERR_LOG_EN;
d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
d |= RETRY_RD_ERR_LOG_EN;
if (offsets_demand2) {
d2 &= ~RETRY_RD_ERR_LOG_UC;
d2 |= RETRY_RD_ERR_LOG_NOOVER;
d2 |= RETRY_RD_ERR_LOG_EN;
}
} else {
/* Restore default configurations */
if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
s |= RETRY_RD_ERR_LOG_UC;
if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
s |= RETRY_RD_ERR_LOG_NOOVER;
if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
s &= ~RETRY_RD_ERR_LOG_EN;
if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
d |= RETRY_RD_ERR_LOG_UC;
if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
d |= RETRY_RD_ERR_LOG_NOOVER;
if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
d &= ~RETRY_RD_ERR_LOG_EN;
if (offsets_demand2) {
if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
d2 |= RETRY_RD_ERR_LOG_UC;
if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
d2 &= ~RETRY_RD_ERR_LOG_EN;
}
}
I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
if (offsets_demand2)
I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
}
static void enable_retry_rd_err_log(bool enable)
{
int i, j, imc_num, chan_num;
struct skx_imc *imc;
struct skx_dev *d;
edac_dbg(2, "\n");
list_for_each_entry(d, i10nm_edac_list, list) {
imc_num = res_cfg->ddr_imc_num;
chan_num = res_cfg->ddr_chan_num;
for (i = 0; i < imc_num; i++) {
imc = &d->imc[i];
if (!imc->mbase)
continue;
for (j = 0; j < chan_num; j++)
__enable_retry_rd_err_log(imc, j, enable,
res_cfg->offsets_scrub,
res_cfg->offsets_demand,
res_cfg->offsets_demand2);
}
imc_num += res_cfg->hbm_imc_num;
chan_num = res_cfg->hbm_chan_num;
for (; i < imc_num; i++) {
imc = &d->imc[i];
if (!imc->mbase || !imc->hbm_mc)
continue;
for (j = 0; j < chan_num; j++) {
__enable_retry_rd_err_log(imc, j, enable,
res_cfg->offsets_scrub_hbm0,
res_cfg->offsets_demand_hbm0,
NULL);
__enable_retry_rd_err_log(imc, j, enable,
res_cfg->offsets_scrub_hbm1,
res_cfg->offsets_demand_hbm1,
NULL);
}
}
}
}
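/*
 * Append the retry_rd_err_log register contents and the per-rank corrected
 * error counters of the decoded channel to the error message and, in
 * retry_rd_err_log=2 mode, clear the logged status bits afterwards.
 */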
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
int len, bool scrub_err)
{
struct skx_imc *imc = &res->dev->imc[res->imc];
u32 log0, log1, log2, log3, log4;
u32 corr0, corr1, corr2, corr3;
u32 lxg0, lxg1, lxg3, lxg4;
u32 *xffsets = NULL;
u64 log2a, log5;
u64 lxg2a, lxg5;
u32 *offsets;
int n, pch;
if (!imc->mbase)
return;
if (imc->hbm_mc) {
pch = res->cs & 1;
if (pch)
offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
res_cfg->offsets_demand_hbm1;
else
offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
res_cfg->offsets_demand_hbm0;
} else {
if (scrub_err) {
offsets = res_cfg->offsets_scrub;
} else {
offsets = res_cfg->offsets_demand;
xffsets = res_cfg->offsets_demand2;
}
}
log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
if (xffsets) {
lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
}
if (res_cfg->type == SPR) {
log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
log0, log1, log2a, log3, log4, log5);
if (len - n > 0) {
if (xffsets) {
lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
} else {
n += snprintf(msg + n, len - n, "]");
}
}
} else {
log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
log0, log1, log2, log3, log4, log5);
}
if (imc->hbm_mc) {
if (pch) {
corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
} else {
corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
}
} else {
corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
}
if (len - n > 0)
snprintf(msg + n, len - n,
" correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
corr0 & 0xffff, corr0 >> 16,
corr1 & 0xffff, corr1 >> 16,
corr2 & 0xffff, corr2 >> 16,
corr3 & 0xffff, corr3 >> 16);
/* Clear status bits */
if (retry_rd_err_log == 2) {
if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
}
if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
}
}
}
static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
unsigned int dev, unsigned int fun)
{
struct pci_dev *pdev;
pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
if (!pdev) {
edac_dbg(2, "No device %02x:%02x.%x\n",
bus, dev, fun);
return NULL;
}
if (unlikely(pci_enable_device(pdev) < 0)) {
edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
bus, dev, fun);
pci_dev_put(pdev);
return NULL;
}
return pdev;
}
/**
* i10nm_get_imc_num() - Get the number of present DDR memory controllers.
*
* @cfg : The pointer to the structure of EDAC resource configurations.
*
* For Granite Rapids CPUs, the number of present DDR memory controllers read
* at runtime overwrites the value statically configured in @cfg->ddr_imc_num.
* For other CPUs, the number of present DDR memory controllers is statically
* configured in @cfg->ddr_imc_num.
*
* RETURNS : 0 on success, < 0 on failure.
*/
static int i10nm_get_imc_num(struct res_config *cfg)
{
int n, imc_num, chan_num = 0;
struct skx_dev *d;
u32 reg;
list_for_each_entry(d, i10nm_edac_list, list) {
d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->pcu_cr3_bdf.bus],
res_cfg->pcu_cr3_bdf.dev,
res_cfg->pcu_cr3_bdf.fun);
if (!d->pcu_cr3)
continue;
if (I10NM_GET_CAPID5_CFG(d, reg))
continue;
n = I10NM_DDR_IMC_CH_CNT(reg);
if (!chan_num) {
chan_num = n;
edac_dbg(2, "Get DDR CH number: %d\n", chan_num);
} else if (chan_num != n) {
i10nm_printk(KERN_NOTICE, "Get DDR CH numbers: %d, %d\n", chan_num, n);
}
}
switch (cfg->type) {
case GNR:
/*
* One channel per DDR memory controller for Granite Rapids CPUs.
*/
imc_num = chan_num;
if (!imc_num) {
i10nm_printk(KERN_ERR, "Invalid DDR MC number\n");
return -ENODEV;
}
if (imc_num > I10NM_NUM_DDR_IMC) {
i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
return -EINVAL;
}
if (cfg->ddr_imc_num != imc_num) {
/*
* Store the number of present DDR memory controllers.
*/
cfg->ddr_imc_num = imc_num;
edac_dbg(2, "Set DDR MC number: %d", imc_num);
}
return 0;
default:
/*
* For other CPUs, the number of present DDR memory controllers
* is statically pre-configured in cfg->ddr_imc_num.
*/
return 0;
}
}
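/*
 * Walk the SAD rules on each socket and return true if any enabled rule is
 * marked near-memory cacheable, i.e. the system runs in a 2-level memory
 * configuration.
 */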
static bool i10nm_check_2lm(struct res_config *cfg)
{
struct skx_dev *d;
u32 reg;
int i;
list_for_each_entry(d, i10nm_edac_list, list) {
d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->sad_all_bdf.bus],
res_cfg->sad_all_bdf.dev,
res_cfg->sad_all_bdf.fun);
if (!d->sad_all)
continue;
for (i = 0; i < I10NM_MAX_SAD; i++) {
I10NM_GET_SAD(d, cfg->sad_all_offset, i, reg);
if (I10NM_SAD_ENABLE(reg) && I10NM_SAD_NM_CACHEABLE(reg)) {
edac_dbg(2, "2-level memory configuration.\n");
return true;
}
}
}
return false;
}
/*
* Check whether the error comes from DDRT by ICX/Tremont/SPR model specific error code.
* Refer to SDM vol3B 17.11.3/17.13.2 Intel IMC MC error codes for IA32_MCi_STATUS.
*/
static bool i10nm_mscod_is_ddrt(u32 mscod)
{
switch (res_cfg->type) {
case I10NM:
switch (mscod) {
case 0x0106: case 0x0107:
case 0x0800: case 0x0804:
case 0x0806 ... 0x0808:
case 0x080a ... 0x080e:
case 0x0810: case 0x0811:
case 0x0816: case 0x081e:
case 0x081f:
return true;
}
break;
case SPR:
switch (mscod) {
case 0x0800: case 0x0804:
case 0x0806 ... 0x0808:
case 0x080a ... 0x080e:
case 0x0810: case 0x0811:
case 0x0816: case 0x081e:
case 0x081f:
return true;
}
break;
default:
return false;
}
return false;
}
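/*
 * Check whether this machine-check record can be decoded from the MCA bank
 * registers: decoding-via-MCA must be enabled, the system must not be in
 * 2LM mode, and the error must come from a DDR (not DDRT) memory bank with
 * valid MISC/ADDR information.
 */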
static bool i10nm_mc_decode_available(struct mce *mce)
{
#define ICX_IMCx_CHy 0x06666000
u8 bank;
if (!decoding_via_mca || mem_cfg_2lm)
return false;
if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
!= (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
return false;
bank = mce->bank;
switch (res_cfg->type) {
case I10NM:
/* Check whether the bank is one of {13,14,17,18,21,22,25,26} */
if (!(ICX_IMCx_CHy & (1 << bank)))
return false;
break;
case SPR:
if (bank < 13 || bank > 20)
return false;
break;
default:
return false;
}
/* DDRT errors can't be decoded from MCA bank registers */
if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
return false;
if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
return false;
return true;
}
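/*
 * Decode the socket/imc/channel/dimm/rank/bank/row/column location of the
 * error directly from the MCA bank number and the MCi_MISC register layout.
 */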
static bool i10nm_mc_decode(struct decoded_addr *res)
{
struct mce *m = res->mce;
struct skx_dev *d;
u8 bank;
if (!i10nm_mc_decode_available(m))
return false;
list_for_each_entry(d, i10nm_edac_list, list) {
if (d->imc[0].src_id == m->socketid) {
res->socket = m->socketid;
res->dev = d;
break;
}
}
switch (res_cfg->type) {
case I10NM:
bank = m->bank - 13;
res->imc = bank / 4;
res->channel = bank % 2;
res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
res->row = GET_BITFIELD(m->misc, 19, 39);
res->bank_group = GET_BITFIELD(m->misc, 40, 41);
res->bank_address = GET_BITFIELD(m->misc, 42, 43);
res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2;
res->rank = GET_BITFIELD(m->misc, 56, 58);
res->dimm = res->rank >> 2;
res->rank = res->rank % 4;
break;
case SPR:
bank = m->bank - 13;
res->imc = bank / 2;
res->channel = bank % 2;
res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
res->row = GET_BITFIELD(m->misc, 19, 36);
res->bank_group = GET_BITFIELD(m->misc, 37, 38);
res->bank_address = GET_BITFIELD(m->misc, 39, 40);
res->bank_group |= GET_BITFIELD(m->misc, 41, 41) << 2;
res->rank = GET_BITFIELD(m->misc, 57, 57);
res->dimm = GET_BITFIELD(m->misc, 58, 58);
break;
default:
return false;
}
if (!res->dev) {
skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
m->socketid, res->imc);
return false;
}
return true;
}
/**
* get_gnr_mdev() - Get the PCI device of the @logical_idx-th DDR memory controller.
*
* @d : The pointer to the structure of CPU socket EDAC device.
* @logical_idx : The logical index of the present memory controller (0 ~ max present MC# - 1).
* @physical_idx : To store the corresponding physical index of @logical_idx.
*
* RETURNS : The PCI device of the @logical_idx-th DDR memory controller, NULL on failure.
*/
static struct pci_dev *get_gnr_mdev(struct skx_dev *d, int logical_idx, int *physical_idx)
{
#define GNR_MAX_IMC_PCI_CNT 28
struct pci_dev *mdev;
int i, logical = 0;
/*
* Detect present memory controllers from { PCI device: 8-5, function 7-1 }
*/
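/*
 * 28 candidates = 4 consecutive PCI devices x 7 functions;
 * i / 7 selects the device offset and i % 7 the function offset.
 */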
for (i = 0; i < GNR_MAX_IMC_PCI_CNT; i++) {
mdev = pci_get_dev_wrapper(d->seg,
d->bus[res_cfg->ddr_mdev_bdf.bus],
res_cfg->ddr_mdev_bdf.dev + i / 7,
res_cfg->ddr_mdev_bdf.fun + i % 7);
if (mdev) {
if (logical == logical_idx) {
*physical_idx = i;
return mdev;
}
pci_dev_put(mdev);
logical++;
}
}
return NULL;
}
/**
* get_ddr_munit() - Get the resource of the i-th DDR memory controller.
*
* @d : The pointer to the structure of CPU socket EDAC device.
* @i : The index of the CPU socket relative DDR memory controller.
* @offset : To store the MMIO offset of the i-th DDR memory controller.
* @size : To store the MMIO size of the i-th DDR memory controller.
*
* RETURNS : The PCI device of the i-th DDR memory controller, NULL on failure.
*/
static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsigned long *size)
{
struct pci_dev *mdev;
int physical_idx;
u32 reg;
switch (res_cfg->type) {
case GNR:
if (I10NM_GET_IMC_BAR(d, 0, reg)) {
i10nm_printk(KERN_ERR, "Failed to get mc0 bar\n");
return NULL;
}
mdev = get_gnr_mdev(d, i, &physical_idx);
if (!mdev)
return NULL;
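/*
 * On GNR all DDR memory controllers are decoded relative to the mc0 BAR;
 * each physical controller's registers sit at a fixed per-controller
 * stride past the common GNR offset.
 */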
*offset = I10NM_GET_IMC_MMIO_OFFSET(reg) +
I10NM_GNR_IMC_MMIO_OFFSET +
physical_idx * I10NM_GNR_IMC_MMIO_SIZE;
*size = I10NM_GNR_IMC_MMIO_SIZE;
break;
default:
if (I10NM_GET_IMC_BAR(d, i, reg)) {
i10nm_printk(KERN_ERR, "Failed to get mc%d bar\n", i);
return NULL;
}
mdev = pci_get_dev_wrapper(d->seg,
d->bus[res_cfg->ddr_mdev_bdf.bus],
res_cfg->ddr_mdev_bdf.dev + i,
res_cfg->ddr_mdev_bdf.fun);
if (!mdev)
return NULL;
*offset = I10NM_GET_IMC_MMIO_OFFSET(reg);
*size = I10NM_GET_IMC_MMIO_SIZE(reg);
}
return mdev;
}
/**
* i10nm_imc_absent() - Check whether the memory controller @imc is absent
*
* @imc : The pointer to the structure of memory controller EDAC device.
*
* RETURNS : true if the memory controller EDAC device is absent, false otherwise.
*/
static bool i10nm_imc_absent(struct skx_imc *imc)
{
u32 mcmtr;
int i;
switch (res_cfg->type) {
case SPR:
for (i = 0; i < res_cfg->ddr_chan_num; i++) {
mcmtr = I10NM_GET_MCMTR(imc, i);
edac_dbg(1, "ch%d mcmtr reg %x\n", i, mcmtr);
if (mcmtr != ~0)
return false;
}
/*
 * Some workstations' absent memory controllers still appear as
 * PCIe devices, misleading the EDAC driver. The MMIO registers of
 * these absent memory controllers consistently read as ~0, so a
 * memory controller is treated as absent when its MMIO register
 * "mcmtr" reads ~0 in all of its channels.
 */
return true;
default:
return false;
}
}
static int i10nm_get_ddr_munits(void)
{
struct pci_dev *mdev;
void __iomem *mbase;
unsigned long size;
struct skx_dev *d;
int i, lmc, j = 0;
u32 reg, off;
u64 base;
list_for_each_entry(d, i10nm_edac_list, list) {
d->util_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->util_all_bdf.bus],
res_cfg->util_all_bdf.dev,
res_cfg->util_all_bdf.fun);
if (!d->util_all)
return -ENODEV;
d->uracu = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->uracu_bdf.bus],
res_cfg->uracu_bdf.dev,
res_cfg->uracu_bdf.fun);
if (!d->uracu)
return -ENODEV;
if (I10NM_GET_SCK_BAR(d, reg)) {
i10nm_printk(KERN_ERR, "Failed to socket bar\n");
return -ENODEV;
}
base = I10NM_GET_SCK_MMIO_BASE(reg);
edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
j++, base, reg);
for (lmc = 0, i = 0; i < res_cfg->ddr_imc_num; i++) {
mdev = get_ddr_munit(d, i, &off, &size);
if (i == 0 && !mdev) {
i10nm_printk(KERN_ERR, "No IMC found\n");
return -ENODEV;
}
if (!mdev)
continue;
edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
i, base + off, size, reg);
mbase = ioremap(base + off, size);
if (!mbase) {
i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",
base + off);
return -ENODEV;
}
d->imc[lmc].mbase = mbase;
if (i10nm_imc_absent(&d->imc[lmc])) {
pci_dev_put(mdev);
iounmap(mbase);
d->imc[lmc].mbase = NULL;
edac_dbg(2, "Skip absent mc%d\n", i);
continue;
}
d->imc[lmc].mdev = mdev;
lmc++;
}
}
return 0;
}
static bool i10nm_check_hbm_imc(struct skx_dev *d)
{
u32 reg;
if (I10NM_GET_CAPID3_CFG(d, reg)) {
i10nm_printk(KERN_ERR, "Failed to get capid3_cfg\n");
return false;
}
return I10NM_IS_HBM_PRESENT(reg) != 0;
}
static int i10nm_get_hbm_munits(void)
{
struct pci_dev *mdev;
void __iomem *mbase;
u32 reg, off, mcmtr;
struct skx_dev *d;
int i, lmc;
u64 base;
list_for_each_entry(d, i10nm_edac_list, list) {
if (!d->pcu_cr3)
return -ENODEV;
if (!i10nm_check_hbm_imc(d)) {
i10nm_printk(KERN_DEBUG, "No hbm memory\n");
return -ENODEV;
}
if (I10NM_GET_SCK_BAR(d, reg)) {
i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
return -ENODEV;
}
base = I10NM_GET_SCK_MMIO_BASE(reg);
if (I10NM_GET_HBM_IMC_BAR(d, reg)) {
i10nm_printk(KERN_ERR, "Failed to get hbm mc bar\n");
return -ENODEV;
}
base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);
lmc = res_cfg->ddr_imc_num;
for (i = 0; i < res_cfg->hbm_imc_num; i++) {
mdev = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->hbm_mdev_bdf.bus],
res_cfg->hbm_mdev_bdf.dev + i / 4,
res_cfg->hbm_mdev_bdf.fun + i % 4);
if (i == 0 && !mdev) {
i10nm_printk(KERN_ERR, "No hbm mc found\n");
return -ENODEV;
}
if (!mdev)
continue;
d->imc[lmc].mdev = mdev;
off = i * I10NM_HBM_IMC_MMIO_SIZE;
edac_dbg(2, "hbm mc%d mmio base 0x%llx size 0x%x\n",
lmc, base + off, I10NM_HBM_IMC_MMIO_SIZE);
mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
if (!mbase) {
pci_dev_put(d->imc[lmc].mdev);
d->imc[lmc].mdev = NULL;
i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
base + off);
return -ENOMEM;
}
d->imc[lmc].mbase = mbase;
d->imc[lmc].hbm_mc = true;
mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
if (!I10NM_IS_HBM_IMC(mcmtr)) {
iounmap(d->imc[lmc].mbase);
d->imc[lmc].mbase = NULL;
d->imc[lmc].hbm_mc = false;
pci_dev_put(d->imc[lmc].mdev);
d->imc[lmc].mdev = NULL;
i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
return -ENODEV;
}
lmc++;
}
}
return 0;
}
static struct res_config i10nm_cfg0 = {
.type = I10NM,
.decs_did = 0x3452,
.busno_cfg_offset = 0xcc,
.ddr_imc_num = 4,
.ddr_chan_num = 2,
.ddr_dimm_num = 2,
.ddr_chan_mmio_sz = 0x4000,
.sad_all_bdf = {1, 29, 0},
.pcu_cr3_bdf = {1, 30, 3},
.util_all_bdf = {1, 29, 1},
.uracu_bdf = {0, 0, 1},
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
.offsets_scrub = offsets_scrub_icx,
.offsets_demand = offsets_demand_icx,
};
static struct res_config i10nm_cfg1 = {
.type = I10NM,
.decs_did = 0x3452,
.busno_cfg_offset = 0xd0,
.ddr_imc_num = 4,
.ddr_chan_num = 2,
.ddr_dimm_num = 2,
.ddr_chan_mmio_sz = 0x4000,
.sad_all_bdf = {1, 29, 0},
.pcu_cr3_bdf = {1, 30, 3},
.util_all_bdf = {1, 29, 1},
.uracu_bdf = {0, 0, 1},
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
.offsets_scrub = offsets_scrub_icx,
.offsets_demand = offsets_demand_icx,
};
static struct res_config spr_cfg = {
.type = SPR,
.decs_did = 0x3252,
.busno_cfg_offset = 0xd0,
.ddr_imc_num = 4,
.ddr_chan_num = 2,
.ddr_dimm_num = 2,
.hbm_imc_num = 16,
.hbm_chan_num = 2,
.hbm_dimm_num = 1,
.ddr_chan_mmio_sz = 0x8000,
.hbm_chan_mmio_sz = 0x4000,
.support_ddr5 = true,
.sad_all_bdf = {1, 10, 0},
.pcu_cr3_bdf = {1, 30, 3},
.util_all_bdf = {1, 29, 1},
.uracu_bdf = {0, 0, 1},
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x300,
.offsets_scrub = offsets_scrub_spr,
.offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
.offsets_scrub_hbm1 = offsets_scrub_spr_hbm1,
.offsets_demand = offsets_demand_spr,
.offsets_demand2 = offsets_demand2_spr,
.offsets_demand_hbm0 = offsets_demand_spr_hbm0,
.offsets_demand_hbm1 = offsets_demand_spr_hbm1,
};
static struct res_config gnr_cfg = {
.type = GNR,
.decs_did = 0x3252,
.busno_cfg_offset = 0xd0,
.ddr_imc_num = 12,
.ddr_chan_num = 1,
.ddr_dimm_num = 2,
.ddr_chan_mmio_sz = 0x4000,
.support_ddr5 = true,
.sad_all_bdf = {0, 13, 0},
.pcu_cr3_bdf = {0, 5, 0},
.util_all_bdf = {0, 13, 1},
.uracu_bdf = {0, 0, 1},
.ddr_mdev_bdf = {0, 5, 1},
.sad_all_offset = 0x300,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
{
u32 mcmtr;
mcmtr = I10NM_GET_MCMTR(imc, chan);
edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);
return !!GET_BITFIELD(mcmtr, 2, 2);
}
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
struct res_config *cfg)
{
struct skx_pvt *pvt = mci->pvt_info;
struct skx_imc *imc = pvt->imc;
u32 mtr, amap, mcddrtcfg = 0;
struct dimm_info *dimm;
int i, j, ndimms;
for (i = 0; i < imc->num_channels; i++) {
if (!imc->mbase)
continue;
ndimms = 0;
amap = I10NM_GET_AMAP(imc, i);
if (res_cfg->type != GNR)
mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
for (j = 0; j < imc->num_dimms; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
mtr = I10NM_GET_DIMMMTR(imc, i, j);
edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
mtr, mcddrtcfg, imc->mc, i, j);
if (IS_DIMM_PRESENT(mtr))
ndimms += skx_get_dimm_info(mtr, 0, amap, dimm,
imc, i, j, cfg);
else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
EDAC_MOD_STR);
}
if (ndimms && !i10nm_check_ecc(imc, i)) {
i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
imc->mc, i);
return -ENODEV;
}
}
return 0;
}
static struct notifier_block i10nm_mce_dec = {
.notifier_call = skx_mce_check_error,
.priority = MCE_PRIO_EDAC,
};
#ifdef CONFIG_EDAC_DEBUG
/*
* Debug feature.
* Exercise the address decode logic by writing an address to
* /sys/kernel/debug/edac/i10nm_test/addr.
*/
static struct dentry *i10nm_test;
static int debugfs_u64_set(void *data, u64 val)
{
struct mce m;
pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
memset(&m, 0, sizeof(m));
/* ADDRV + MemRd + Unknown channel */
m.status = MCI_STATUS_ADDRV + 0x90;
/* One corrected error */
m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
m.addr = val;
skx_mce_check_error(NULL, 0, &m);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
static void setup_i10nm_debug(void)
{
i10nm_test = edac_debugfs_create_dir("i10nm_test");
if (!i10nm_test)
return;
if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
NULL, &fops_u64_wo)) {
debugfs_remove(i10nm_test);
i10nm_test = NULL;
}
}
static void teardown_i10nm_debug(void)
{
debugfs_remove_recursive(i10nm_test);
}
#else
static inline void setup_i10nm_debug(void) {}
static inline void teardown_i10nm_debug(void) {}
#endif /*CONFIG_EDAC_DEBUG*/
static int __init i10nm_init(void)
{
u8 mc = 0, src_id = 0, node_id = 0;
const struct x86_cpu_id *id;
struct res_config *cfg;
const char *owner;
struct skx_dev *d;
int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
u64 tolm, tohm;
int imc_num;
edac_dbg(2, "\n");
if (ghes_get_devices())
return -EBUSY;
owner = edac_get_owner();
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
id = x86_match_cpu(i10nm_cpuids);
if (!id)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
res_cfg = cfg;
rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
if (rc)
return rc;
rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
if (rc < 0)
goto fail;
if (rc == 0) {
i10nm_printk(KERN_ERR, "No memory controllers found\n");
return -ENODEV;
}
rc = i10nm_get_imc_num(cfg);
if (rc < 0)
goto fail;
mem_cfg_2lm = i10nm_check_2lm(cfg);
skx_set_mem_cfg(mem_cfg_2lm);
rc = i10nm_get_ddr_munits();
if (i10nm_get_hbm_munits() && rc)
goto fail;
imc_num = res_cfg->ddr_imc_num + res_cfg->hbm_imc_num;
list_for_each_entry(d, i10nm_edac_list, list) {
rc = skx_get_src_id(d, 0xf8, &src_id);
if (rc < 0)
goto fail;
rc = skx_get_node_id(d, &node_id);
if (rc < 0)
goto fail;
edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
for (i = 0; i < imc_num; i++) {
if (!d->imc[i].mdev)
continue;
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
d->imc[i].node_id = node_id;
if (d->imc[i].hbm_mc) {
d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
d->imc[i].num_channels = cfg->hbm_chan_num;
d->imc[i].num_dimms = cfg->hbm_dimm_num;
} else {
d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
d->imc[i].num_channels = cfg->ddr_chan_num;
d->imc[i].num_dimms = cfg->ddr_dimm_num;
}
rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
"Intel_10nm Socket", EDAC_MOD_STR,
i10nm_get_dimm_config, cfg);
if (rc < 0)
goto fail;
}
}
rc = skx_adxl_get();
if (rc)
goto fail;
opstate_init();
mce_register_decode_chain(&i10nm_mce_dec);
setup_i10nm_debug();
if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(true);
} else {
skx_set_decode(i10nm_mc_decode, NULL);
}
i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
return 0;
fail:
skx_remove();
return rc;
}
static void __exit i10nm_exit(void)
{
edac_dbg(2, "\n");
if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
skx_set_decode(NULL, NULL);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(false);
}
teardown_i10nm_debug();
mce_unregister_decode_chain(&i10nm_mce_dec);
skx_adxl_put();
skx_remove();
}
module_init(i10nm_init);
module_exit(i10nm_exit);
static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp)
{
unsigned long val;
int ret;
ret = kstrtoul(buf, 0, &val);
if (ret || val > 1)
return -EINVAL;
if (val && mem_cfg_2lm) {
i10nm_printk(KERN_NOTICE, "Decoding errors via MCA banks for 2LM isn't supported yet\n");
return -EIO;
}
ret = param_set_int(buf, kp);
return ret;
}
static const struct kernel_param_ops decoding_via_mca_param_ops = {
.set = set_decoding_via_mca,
.get = param_get_int,
};
module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644);
MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable");
module_param(retry_rd_err_log, int, 0444);
MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control and resets mode bits, clear valid/UC bits after reading.)");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");
| linux-master | drivers/edac/i10nm_base.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Author: Egor Martovetsky <[email protected]>
* Maintained by: Olof Johansson <[email protected]>
*
* Driver for the PWRficient onchip memory controllers
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define MODULE_NAME "pasemi_edac"
#define MCCFG_MCEN 0x300
#define MCCFG_MCEN_MMC_EN 0x00000001
#define MCCFG_ERRCOR 0x388
#define MCCFG_ERRCOR_RNK_FAIL_DET_EN 0x00000100
#define MCCFG_ERRCOR_ECC_GEN_EN 0x00000010
#define MCCFG_ERRCOR_ECC_CRR_EN 0x00000001
#define MCCFG_SCRUB 0x384
#define MCCFG_SCRUB_RGLR_SCRB_EN 0x00000001
#define MCDEBUG_ERRCTL1 0x728
#define MCDEBUG_ERRCTL1_RFL_LOG_EN 0x00080000
#define MCDEBUG_ERRCTL1_MBE_LOG_EN 0x00040000
#define MCDEBUG_ERRCTL1_SBE_LOG_EN 0x00020000
#define MCDEBUG_ERRSTA 0x730
#define MCDEBUG_ERRSTA_RFL_STATUS 0x00000004
#define MCDEBUG_ERRSTA_MBE_STATUS 0x00000002
#define MCDEBUG_ERRSTA_SBE_STATUS 0x00000001
#define MCDEBUG_ERRCNT1 0x734
#define MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO 0x00000080
#define MCDEBUG_ERRLOG1A 0x738
#define MCDEBUG_ERRLOG1A_MERR_TYPE_M 0x30000000
#define MCDEBUG_ERRLOG1A_MERR_TYPE_NONE 0x00000000
#define MCDEBUG_ERRLOG1A_MERR_TYPE_SBE 0x10000000
#define MCDEBUG_ERRLOG1A_MERR_TYPE_MBE 0x20000000
#define MCDEBUG_ERRLOG1A_MERR_TYPE_RFL 0x30000000
#define MCDEBUG_ERRLOG1A_MERR_BA_M 0x00700000
#define MCDEBUG_ERRLOG1A_MERR_BA_S 20
#define MCDEBUG_ERRLOG1A_MERR_CS_M 0x00070000
#define MCDEBUG_ERRLOG1A_MERR_CS_S 16
#define MCDEBUG_ERRLOG1A_SYNDROME_M 0x0000ffff
#define MCDRAM_RANKCFG 0x114
#define MCDRAM_RANKCFG_EN 0x00000001
#define MCDRAM_RANKCFG_TYPE_SIZE_M 0x000001c0
#define MCDRAM_RANKCFG_TYPE_SIZE_S 6
#define PASEMI_EDAC_NR_CSROWS 8
#define PASEMI_EDAC_NR_CHANS 1
#define PASEMI_EDAC_ERROR_GRAIN 64
static int last_page_in_mmc;
static int system_mmc_id;
static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
{
struct pci_dev *pdev = to_pci_dev(mci->pdev);
u32 tmp;
pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
&tmp);
tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS
| MCDEBUG_ERRSTA_SBE_STATUS);
if (tmp) {
if (tmp & MCDEBUG_ERRSTA_SBE_STATUS)
pci_write_config_dword(pdev, MCDEBUG_ERRCNT1,
MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO);
pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp);
}
return tmp;
}
static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
{
struct pci_dev *pdev = to_pci_dev(mci->pdev);
u32 errlog1a;
u32 cs;
if (!errsta)
return;
pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a);
cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >>
MCDEBUG_ERRLOG1A_MERR_CS_S;
/* uncorrectable/multi-bit errors */
if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
MCDEBUG_ERRSTA_RFL_STATUS)) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
mci->csrows[cs]->first_page, 0, 0,
cs, 0, -1, mci->ctl_name, "");
}
/* correctable/single-bit errors */
if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
mci->csrows[cs]->first_page, 0, 0,
cs, 0, -1, mci->ctl_name, "");
}
static void pasemi_edac_check(struct mem_ctl_info *mci)
{
u32 errsta;
errsta = pasemi_edac_get_error_info(mci);
if (errsta)
pasemi_edac_process_error_info(mci, errsta);
}
static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
struct dimm_info *dimm;
u32 rankcfg;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = mci->csrows[index];
dimm = csrow->channels[0]->dimm;
pci_read_config_dword(pdev,
MCDRAM_RANKCFG + (index * 12),
&rankcfg);
if (!(rankcfg & MCDRAM_RANKCFG_EN))
continue;
switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
MCDRAM_RANKCFG_TYPE_SIZE_S) {
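/*
 * Rank type/size encodings 0-5 correspond to 128 MB, 256 MB,
 * 512 MB (x2), 1 GB and 2 GB ranks; nr_pages stores the size in
 * PAGE_SIZE units.
 */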
case 0:
dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
break;
case 1:
dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
break;
case 2:
case 3:
dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
break;
case 4:
dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
break;
case 5:
dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
break;
default:
edac_mc_printk(mci, KERN_ERR,
"Unrecognized Rank Config. rankcfg=%u\n",
rankcfg);
return -EINVAL;
}
csrow->first_page = last_page_in_mmc;
csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
last_page_in_mmc += dimm->nr_pages;
csrow->page_mask = 0;
dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
dimm->mtype = MEM_DDR;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = edac_mode;
}
return 0;
}
static int pasemi_edac_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
u32 errctl1, errcor, scrub, mcen;
pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
if (!(mcen & MCCFG_MCEN_MMC_EN))
return -ENODEV;
/*
* We should think about enabling other error detection later on
*/
pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1);
errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN |
MCDEBUG_ERRCTL1_MBE_LOG_EN |
MCDEBUG_ERRCTL1_RFL_LOG_EN;
pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = PASEMI_EDAC_NR_CSROWS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = PASEMI_EDAC_NR_CHANS;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
0);
if (mci == NULL)
return -ENOMEM;
pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor);
errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN |
MCCFG_ERRCOR_ECC_GEN_EN |
MCCFG_ERRCOR_ECC_CRR_EN;
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ?
(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) :
EDAC_FLAG_NONE;
mci->mod_name = MODULE_NAME;
mci->dev_name = pci_name(pdev);
mci->ctl_name = "pasemi,pwrficient-mc";
mci->edac_check = pasemi_edac_check;
mci->ctl_page_to_phys = NULL;
pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub);
mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC;
mci->scrub_mode =
((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) |
((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0);
if (pasemi_edac_init_csrows(mci, pdev,
(mci->edac_cap & EDAC_FLAG_SECDED) ?
EDAC_SECDED :
((mci->edac_cap & EDAC_FLAG_EC) ?
EDAC_EC : EDAC_NONE)))
goto fail;
/*
* Clear status
*/
pasemi_edac_get_error_info(mci);
if (edac_mc_add_mc(mci))
goto fail;
/* get this far and it's successful */
return 0;
fail:
edac_mc_free(mci);
return -ENODEV;
}
static void pasemi_edac_remove(struct pci_dev *pdev)
{
struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
edac_mc_free(mci);
}
static const struct pci_device_id pasemi_edac_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) },
{ }
};
MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
static struct pci_driver pasemi_edac_driver = {
.name = MODULE_NAME,
.probe = pasemi_edac_probe,
.remove = pasemi_edac_remove,
.id_table = pasemi_edac_pci_tbl,
};
static int __init pasemi_edac_init(void)
{
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
return pci_register_driver(&pasemi_edac_driver);
}
static void __exit pasemi_edac_exit(void)
{
pci_unregister_driver(&pasemi_edac_driver);
}
module_init(pasemi_edac_init);
module_exit(pasemi_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <[email protected]>");
MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/pasemi_edac.c |
/*
* edac_device.c
* (C) 2007 www.douglaskthompson.com
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Doug Thompson <[email protected]>
*
* edac_device API implementation
* 19 Jan 2007
*/
#include <asm/page.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/timer.h>
#include "edac_device.h"
#include "edac_module.h"
/* lock for the list: 'edac_device_list', manipulation of this list
* is protected by the 'device_ctls_mutex' lock
*/
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
/* Default workqueue processing interval on this instance, in msecs */
#define DEFAULT_POLL_INTERVAL 1000
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
edac_dev, edac_dev->dev_idx);
edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
edac_dev->mod_name, edac_dev->ctl_name);
edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif /* CONFIG_EDAC_DEBUG */
/*
 * @off_val: zero, 1, or other base offset added to the block number when naming blocks
*/
struct edac_device_ctl_info *
edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instances,
char *blk_name, unsigned nr_blocks, unsigned off_val,
struct edac_dev_sysfs_block_attribute *attrib_spec,
unsigned nr_attrib, int device_index)
{
struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
struct edac_device_block *dev_blk, *blk_p, *blk;
struct edac_device_instance *dev_inst, *inst;
struct edac_device_ctl_info *dev_ctl;
unsigned instance, block, attr;
void *pvt;
int err;
edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);
dev_ctl = kzalloc(sizeof(struct edac_device_ctl_info), GFP_KERNEL);
if (!dev_ctl)
return NULL;
dev_inst = kcalloc(nr_instances, sizeof(struct edac_device_instance), GFP_KERNEL);
if (!dev_inst)
goto free;
dev_ctl->instances = dev_inst;
dev_blk = kcalloc(nr_instances * nr_blocks, sizeof(struct edac_device_block), GFP_KERNEL);
if (!dev_blk)
goto free;
dev_ctl->blocks = dev_blk;
if (nr_attrib) {
dev_attrib = kcalloc(nr_attrib, sizeof(struct edac_dev_sysfs_block_attribute),
GFP_KERNEL);
if (!dev_attrib)
goto free;
dev_ctl->attribs = dev_attrib;
}
if (pvt_sz) {
pvt = kzalloc(pvt_sz, GFP_KERNEL);
if (!pvt)
goto free;
dev_ctl->pvt_info = pvt;
}
dev_ctl->dev_idx = device_index;
dev_ctl->nr_instances = nr_instances;
/* Default logging of CEs and UEs */
dev_ctl->log_ce = 1;
dev_ctl->log_ue = 1;
/* Name of this edac device */
snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", dev_name);
/* Initialize every Instance */
for (instance = 0; instance < nr_instances; instance++) {
inst = &dev_inst[instance];
inst->ctl = dev_ctl;
inst->nr_blocks = nr_blocks;
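/* Each instance owns a contiguous slice of nr_blocks entries in the flat block array. */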
blk_p = &dev_blk[instance * nr_blocks];
inst->blocks = blk_p;
/* name of this instance */
snprintf(inst->name, sizeof(inst->name), "%s%u", dev_name, instance);
/* Initialize every block in each instance */
for (block = 0; block < nr_blocks; block++) {
blk = &blk_p[block];
blk->instance = inst;
snprintf(blk->name, sizeof(blk->name),
"%s%d", blk_name, block + off_val);
edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
instance, inst, block, blk, blk->name);
/* if there are NO attributes OR no attribute pointer
* then continue on to next block iteration
*/
if ((nr_attrib == 0) || (attrib_spec == NULL))
continue;
/* setup the attribute array for this block */
blk->nr_attribs = nr_attrib;
attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
blk->block_attributes = attrib_p;
edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
blk->block_attributes);
/* Initialize every user specified attribute in this
* block with the data the caller passed in
* Each block gets its own copy of pointers,
* and its unique 'value'
*/
for (attr = 0; attr < nr_attrib; attr++) {
attrib = &attrib_p[attr];
/* populate the unique per attrib
* with the code pointers and info
*/
attrib->attr = attrib_spec[attr].attr;
attrib->show = attrib_spec[attr].show;
attrib->store = attrib_spec[attr].store;
attrib->block = blk; /* up link */
edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
attrib, attrib->attr.name,
&attrib_spec[attr],
attrib_spec[attr].attr.name
);
}
}
}
/* Mark this instance as merely ALLOCATED */
dev_ctl->op_state = OP_ALLOC;
/*
* Initialize the 'root' kobj for the edac_device controller
*/
err = edac_device_register_sysfs_main_kobj(dev_ctl);
if (err)
goto free;
/* at this point, the root kobj is valid, and in order to
* 'free' the object, then the function:
* edac_device_unregister_sysfs_main_kobj() must be called
* which will perform kobj unregistration and the actual free
* will occur during the kobject callback operation
*/
return dev_ctl;
free:
__edac_device_free_ctl_info(dev_ctl);
return NULL;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
/*
* find_edac_device_by_dev
* scans the edac_device list for a specific 'struct device *'
*
* lock to be held prior to call: device_ctls_mutex
*
* Return:
* pointer to control structure managing 'dev'
* NULL if not found on list
*/
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
struct edac_device_ctl_info *edac_dev;
struct list_head *item;
edac_dbg(0, "\n");
list_for_each(item, &edac_device_list) {
edac_dev = list_entry(item, struct edac_device_ctl_info, link);
if (edac_dev->dev == dev)
return edac_dev;
}
return NULL;
}
/*
* add_edac_dev_to_global_list
* Before calling this function, caller must
* assign a unique value to edac_dev->dev_idx.
*
* lock to be held prior to call: device_ctls_mutex
*
* Return:
* 0 on success
* 1 on failure.
*/
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
struct list_head *item, *insert_before;
struct edac_device_ctl_info *rover;
insert_before = &edac_device_list;
/* Determine if already on the list */
rover = find_edac_device_by_dev(edac_dev->dev);
if (unlikely(rover != NULL))
goto fail0;
/* Insert in ascending order by 'dev_idx', so find position */
list_for_each(item, &edac_device_list) {
rover = list_entry(item, struct edac_device_ctl_info, link);
if (rover->dev_idx >= edac_dev->dev_idx) {
if (unlikely(rover->dev_idx == edac_dev->dev_idx))
goto fail1;
insert_before = item;
break;
}
}
list_add_tail_rcu(&edac_dev->link, insert_before);
return 0;
fail0:
edac_printk(KERN_WARNING, EDAC_MC,
"%s (%s) %s %s already assigned %d\n",
dev_name(rover->dev), edac_dev_name(rover),
rover->mod_name, rover->ctl_name, rover->dev_idx);
return 1;
fail1:
edac_printk(KERN_WARNING, EDAC_MC,
"bug in low-level driver: attempt to assign\n"
" duplicate dev_idx %d in %s()\n", rover->dev_idx,
__func__);
return 1;
}
/*
* del_edac_device_from_global_list
*/
static void del_edac_device_from_global_list(struct edac_device_ctl_info
*edac_device)
{
list_del_rcu(&edac_device->link);
/* these are for safe removal of devices from global list while
* NMI handlers may be traversing list
*/
synchronize_rcu();
INIT_LIST_HEAD(&edac_device->link);
}
/*
* edac_device_workq_function
* performs the operation scheduled by a workq request
*
* this workq is embedded within an edac_device_ctl_info
* structure, that needs to be polled for possible error events.
*
* This operation is to acquire the list mutex lock
 * (thus preventing insertion or deletion)
* and then call the device's poll function IFF this device is
* running polled and there is a poll function defined.
*/
static void edac_device_workq_function(struct work_struct *work_req)
{
struct delayed_work *d_work = to_delayed_work(work_req);
struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
mutex_lock(&device_ctls_mutex);
/* If we are being removed, bail out immediately */
if (edac_dev->op_state == OP_OFFLINE) {
mutex_unlock(&device_ctls_mutex);
return;
}
/* Only poll controllers that are running polled and have a check */
if ((edac_dev->op_state == OP_RUNNING_POLL) &&
(edac_dev->edac_check != NULL)) {
edac_dev->edac_check(edac_dev);
}
mutex_unlock(&device_ctls_mutex);
/* Reschedule the workq for the next period. If the poll interval
 * is exactly one second, round to the next whole second so that
 * timers don't fire scattered across the period between integral
 * seconds.
 */
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
}
/*
* edac_device_workq_setup
* initialize a workq item for this edac_device instance
* passing in the new delay period in msec
*/
static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
unsigned msec)
{
edac_dbg(0, "\n");
/* Store the 'msec' argument in the control structure for use in
 * the time period calculation, then convert it to the equivalent
 * number of jiffies.
 */
edac_dev->poll_msec = msec;
edac_dev->delay = msecs_to_jiffies(msec);
INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
/* optimize here for the 1 second case, which will be normal value, to
* fire ON the 1 second time event. This helps reduce all sorts of
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
}
/*
* edac_device_workq_teardown
* stop the workq processing on this edac_dev
*/
static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
if (!edac_dev->edac_check)
return;
edac_dev->op_state = OP_OFFLINE;
edac_stop_work(&edac_dev->work);
}
/*
* edac_device_reset_delay_period
*
* need to stop any outstanding workq queued up at this time
* because we will be resetting the sleep time.
* Then restart the workq on the new delay
*/
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
unsigned long msec)
{
edac_dev->poll_msec = msec;
edac_dev->delay = msecs_to_jiffies(msec);
/* See comment in edac_device_workq_setup() above */
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_mod_work(&edac_dev->work, edac_dev->delay);
}
int edac_device_alloc_index(void)
{
static atomic_t device_indexes = ATOMIC_INIT(0);
return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
edac_device_dump_device(edac_dev);
#endif
mutex_lock(&device_ctls_mutex);
if (add_edac_dev_to_global_list(edac_dev))
goto fail0;
/* set load time so that error rate can be tracked */
edac_dev->start_time = jiffies;
/* create this instance's sysfs entries */
if (edac_device_create_sysfs(edac_dev)) {
edac_device_printk(edac_dev, KERN_WARNING,
"failed to create sysfs device\n");
goto fail1;
}
/* If there IS a check routine, then we are running POLLED */
if (edac_dev->edac_check != NULL) {
/* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
} else {
edac_dev->op_state = OP_RUNNING_INTERRUPT;
}
/* Report action taken */
edac_device_printk(edac_dev, KERN_INFO,
"Giving out device to module %s controller %s: DEV %s (%s)\n",
edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name,
edac_op_state_to_string(edac_dev->op_state));
mutex_unlock(&device_ctls_mutex);
return 0;
fail1:
/* Some error, so remove the entry from the list */
del_edac_device_from_global_list(edac_dev);
fail0:
mutex_unlock(&device_ctls_mutex);
return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
struct edac_device_ctl_info *edac_dev;
edac_dbg(0, "\n");
mutex_lock(&device_ctls_mutex);
/* Find the structure on the list, if not there, then leave */
edac_dev = find_edac_device_by_dev(dev);
if (edac_dev == NULL) {
mutex_unlock(&device_ctls_mutex);
return NULL;
}
/* mark this instance as OFFLINE */
edac_dev->op_state = OP_OFFLINE;
/* deregister from global list */
del_edac_device_from_global_list(edac_dev);
mutex_unlock(&device_ctls_mutex);
/* clear workq processing on this instance */
edac_device_workq_teardown(edac_dev);
/* Tear down the sysfs entries for this instance */
edac_device_remove_sysfs(edac_dev);
edac_printk(KERN_INFO, EDAC_MC,
"Removed device %d for %s %s: DEV %s\n",
edac_dev->dev_idx,
edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));
return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);
static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
return edac_dev->log_ce;
}
static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
return edac_dev->log_ue;
}
static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
*edac_dev)
{
return edac_dev->panic_on_ue;
}
void edac_device_handle_ce_count(struct edac_device_ctl_info *edac_dev,
unsigned int count, int inst_nr, int block_nr,
const char *msg)
{
struct edac_device_instance *instance;
struct edac_device_block *block = NULL;
if (!count)
return;
if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
"INTERNAL ERROR: 'instance' out of range "
"(%d >= %d)\n", inst_nr,
edac_dev->nr_instances);
return;
}
instance = edac_dev->instances + inst_nr;
if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
"INTERNAL ERROR: instance %d 'block' "
"out of range (%d >= %d)\n",
inst_nr, block_nr,
instance->nr_blocks);
return;
}
if (instance->nr_blocks > 0) {
block = instance->blocks + block_nr;
block->counters.ce_count += count;
}
/* Propagate the count up the 'totals' tree */
instance->counters.ce_count += count;
edac_dev->counters.ce_count += count;
if (edac_device_get_log_ce(edac_dev))
edac_device_printk(edac_dev, KERN_WARNING,
"CE: %s instance: %s block: %s count: %d '%s'\n",
edac_dev->ctl_name, instance->name,
block ? block->name : "N/A", count, msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce_count);
void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
unsigned int count, int inst_nr, int block_nr,
const char *msg)
{
struct edac_device_instance *instance;
struct edac_device_block *block = NULL;
if (!count)
return;
if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
"INTERNAL ERROR: 'instance' out of range "
"(%d >= %d)\n", inst_nr,
edac_dev->nr_instances);
return;
}
instance = edac_dev->instances + inst_nr;
if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
"INTERNAL ERROR: instance %d 'block' "
"out of range (%d >= %d)\n",
inst_nr, block_nr,
instance->nr_blocks);
return;
}
if (instance->nr_blocks > 0) {
block = instance->blocks + block_nr;
block->counters.ue_count += count;
}
/* Propagate the count up the 'totals' tree */
instance->counters.ue_count += count;
edac_dev->counters.ue_count += count;
if (edac_device_get_log_ue(edac_dev))
edac_device_printk(edac_dev, KERN_EMERG,
"UE: %s instance: %s block: %s count: %d '%s'\n",
edac_dev->ctl_name, instance->name,
block ? block->name : "N/A", count, msg);
if (edac_device_get_panic_on_ue(edac_dev))
panic("EDAC %s: UE instance: %s block %s count: %d '%s'\n",
edac_dev->ctl_name, instance->name,
block ? block->name : "N/A", count, msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
| linux-master | drivers/edac/edac_device.c |
/*
* Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel
* module (C) 2006 Tim Small
*
* This file may be distributed under the terms of the GNU General
* Public License.
*
* Written by Tim Small <[email protected]>, based on work by Linux
* Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and
* others.
*
* 440GX fix by Jason Uhlenkott <[email protected]>.
*
* Written with reference to 82443BX Host Bridge Datasheet:
* http://download.intel.com/design/chipsets/datashts/29063301.pdf
* references to this document given in [].
*
* This module doesn't support the 440LX, but it may be possible to
* make it do so (the 440LX's register definitions are different, but
* not completely so - I haven't studied them in enough detail to know
* how easy this would be).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "i82443bxgx_edac"
/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
* Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory
* rows" "The 82443BX supports multiple-bit error detection and
* single-bit error correction when ECC mode is enabled and
* single/multi-bit error detection when correction is disabled.
* During writes to the DRAM, the 82443BX generates ECC for the data
* on a QWord basis. Partial QWord writes require a read-modify-write
* cycle when ECC is enabled."
*/
/* "Additionally, the 82443BX ensures that the data is corrected in
* main memory so that accumulation of errors is prevented. Another
* error within the same QWord would result in a double-bit error
* which is unrecoverable. This is known as hardware scrubbing since
* it requires no software intervention to correct the data in memory."
*/
/* [Also see page 100 (section 4.3), "DRAM Interface"]
* [Also see page 112 (section 4.6.1.4), ECC]
*/
#define I82443BXGX_NR_CSROWS 8
#define I82443BXGX_NR_CHANS 1
#define I82443BXGX_NR_DIMMS 4
/* 82443 PCI Device 0 */
#define I82443BXGX_NBXCFG 0x50 /* 32bit register starting at this PCI
* config space offset */
#define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24 /* Array of bits, zero if
* row is non-ECC */
#define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12 /* 2 bits,00=100MHz,10=66 MHz */
#define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7 /* 2 bits: */
#define I82443BXGX_NBXCFG_INTEGRITY_NONE 0x0 /* 00 = Non-ECC */
#define I82443BXGX_NBXCFG_INTEGRITY_EC 0x1 /* 01 = EC (only) */
#define I82443BXGX_NBXCFG_INTEGRITY_ECC 0x2 /* 10 = ECC */
#define I82443BXGX_NBXCFG_INTEGRITY_SCRUB 0x3 /* 11 = ECC + HW Scrub */
#define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE 6
/* 82443 PCI Device 0 */
#define I82443BXGX_EAP 0x80 /* 32bit register starting at this PCI
* config space offset, Error Address
* Pointer Register */
#define I82443BXGX_EAP_OFFSET_EAP 12 /* High 20 bits of error address */
#define I82443BXGX_EAP_OFFSET_MBE BIT(1) /* Err at EAP was multi-bit (W1TC) */
#define I82443BXGX_EAP_OFFSET_SBE BIT(0) /* Err at EAP was single-bit (W1TC) */
#define I82443BXGX_ERRCMD 0x90 /* 8bit register starting at this PCI
* config space offset. */
#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1) /* 1 = enable */
#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0) /* 1 = enable */
#define I82443BXGX_ERRSTS 0x91 /* 16bit register starting at this PCI
* config space offset. */
#define I82443BXGX_ERRSTS_OFFSET_MBFRE 5 /* 3 bits - first err row multibit */
#define I82443BXGX_ERRSTS_OFFSET_MEF BIT(4) /* 1 = MBE occurred */
#define I82443BXGX_ERRSTS_OFFSET_SBFRE 1 /* 3 bits - first err row singlebit */
#define I82443BXGX_ERRSTS_OFFSET_SEF BIT(0) /* 1 = SBE occurred */
#define I82443BXGX_DRAMC 0x57 /* 8bit register starting at this PCI
* config space offset. */
#define I82443BXGX_DRAMC_OFFSET_DT 3 /* 2 bits, DRAM Type */
#define I82443BXGX_DRAMC_DRAM_IS_EDO 0 /* 00 = EDO */
#define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1 /* 01 = SDRAM */
#define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2 /* 10 = Registered SDRAM */
#define I82443BXGX_DRB 0x60 /* 8x 8bit registers starting at this PCI
* config space offset. */
/* FIXME - don't poll when ECC disabled? */
struct i82443bxgx_edacmc_error_info {
u32 eap;
};
static struct edac_pci_ctl_info *i82443bxgx_pci;
static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
* already registered driver
*/
static int i82443bxgx_registered = 1;
static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
struct i82443bxgx_edacmc_error_info
*info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
/* Clear error to allow next error to be reported [p.61] */
pci_write_bits32(pdev, I82443BXGX_EAP,
I82443BXGX_EAP_OFFSET_SBE,
I82443BXGX_EAP_OFFSET_SBE);
if (info->eap & I82443BXGX_EAP_OFFSET_MBE)
/* Clear error to allow next error to be reported [p.61] */
pci_write_bits32(pdev, I82443BXGX_EAP,
I82443BXGX_EAP_OFFSET_MBE,
I82443BXGX_EAP_OFFSET_MBE);
}
static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
struct
i82443bxgx_edacmc_error_info
*info, int handle_errors)
{
int error_found = 0;
u32 eapaddr, page, pageoffset;
/* bits 30:12 hold the 4kb block in which the error occurred
* [p.61] */
eapaddr = (info->eap & 0xfffff000);
page = eapaddr >> PAGE_SHIFT;
pageoffset = eapaddr - (page << PAGE_SHIFT);
if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
error_found = 1;
if (handle_errors)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, pageoffset, 0,
edac_mc_find_csrow_by_page(mci, page),
0, -1, mci->ctl_name, "");
}
if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
error_found = 1;
if (handle_errors)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, pageoffset, 0,
edac_mc_find_csrow_by_page(mci, page),
0, -1, mci->ctl_name, "");
}
return error_found;
}
static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
{
struct i82443bxgx_edacmc_error_info info;
i82443bxgx_edacmc_get_error_info(mci, &info);
i82443bxgx_edacmc_process_error_info(mci, &info, 1);
}
static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev,
enum edac_type edac_mode,
enum mem_type mtype)
{
struct csrow_info *csrow;
struct dimm_info *dimm;
int index;
u8 drbar, dramc;
u32 row_base, row_high_limit, row_high_limit_last;
pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = mci->csrows[index];
dimm = csrow->channels[0]->dimm;
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
edac_dbg(1, "MC%d: Row=%d DRB = %#0x\n",
mci->mc_idx, index, drbar);
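/* Each DRB byte gives the cumulative row boundary in 8 MB units, hence the shift by 23. */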
row_high_limit = ((u32) drbar << 23);
/* find the DRAM Chip Select Base address and mask */
edac_dbg(1, "MC%d: Row=%d, Boundary Address=%#0x, Last = %#0x\n",
mci->mc_idx, index, row_high_limit,
row_high_limit_last);
/* 440GX goes to 2GB, represented with a DRB of 0. */
if (row_high_limit_last && !row_high_limit)
row_high_limit = 1UL << 31;
/* This row is empty [p.49] */
if (row_high_limit == row_high_limit_last)
continue;
row_base = row_high_limit_last;
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* EAP reports in 4-kilobyte granularity [p.61] */
dimm->grain = 1 << 12;
dimm->mtype = mtype;
/* I don't think 440BX can tell you device type? FIXME? */
dimm->dtype = DEV_UNKNOWN;
/* Mode is global to all rows on 440BX */
dimm->edac_mode = edac_mode;
row_high_limit_last = row_high_limit;
}
}
static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
u8 dramc;
u32 nbxcfg, ecc_mode;
enum mem_type mtype;
enum edac_type edac_mode;
edac_dbg(0, "MC:\n");
/* Something is really hosed if PCI config space reads from
* the MC aren't working.
*/
if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
return -EIO;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = I82443BXGX_NR_CSROWS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = I82443BXGX_NR_CHANS;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
edac_dbg(0, "MC: mci = %p\n", mci);
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) {
case I82443BXGX_DRAMC_DRAM_IS_EDO:
mtype = MEM_EDO;
break;
case I82443BXGX_DRAMC_DRAM_IS_SDRAM:
mtype = MEM_SDR;
break;
case I82443BXGX_DRAMC_DRAM_IS_RSDRAM:
mtype = MEM_RDR;
break;
default:
edac_dbg(0, "Unknown/reserved DRAM type value in DRAMC register!\n");
mtype = MEM_UNKNOWN;
}
if ((mtype == MEM_SDR) || (mtype == MEM_RDR))
mci->edac_cap = mci->edac_ctl_cap;
else
mci->edac_cap = EDAC_FLAG_NONE;
mci->scrub_cap = SCRUB_FLAG_HW_SRC;
pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg);
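/*
 * NBXCFG[8:7] selects the DRAM integrity mode:
 * 00 = none, 01 = EC, 10 = ECC, 11 = ECC + hardware scrub.
 */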
ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) &
(BIT(0) | BIT(1)));
mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB)
? SCRUB_HW_SRC : SCRUB_NONE;
switch (ecc_mode) {
case I82443BXGX_NBXCFG_INTEGRITY_NONE:
edac_mode = EDAC_NONE;
break;
case I82443BXGX_NBXCFG_INTEGRITY_EC:
edac_mode = EDAC_EC;
break;
case I82443BXGX_NBXCFG_INTEGRITY_ECC:
case I82443BXGX_NBXCFG_INTEGRITY_SCRUB:
edac_mode = EDAC_SECDED;
break;
default:
edac_dbg(0, "Unknown/reserved ECC state in NBXCFG register!\n");
edac_mode = EDAC_UNKNOWN;
break;
}
i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype);
/* Many BIOSes don't clear error flags on boot, so do this
* here, or we get "phantom" errors occurring at module-load
* time. */
pci_write_bits32(pdev, I82443BXGX_EAP,
(I82443BXGX_EAP_OFFSET_SBE |
I82443BXGX_EAP_OFFSET_MBE),
(I82443BXGX_EAP_OFFSET_SBE |
I82443BXGX_EAP_OFFSET_MBE));
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = "I82443BXGX";
mci->dev_name = pci_name(pdev);
mci->edac_check = i82443bxgx_edacmc_check;
mci->ctl_page_to_phys = NULL;
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
/* allocating generic PCI control info */
i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i82443bxgx_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
edac_dbg(3, "MC: success\n");
return 0;
fail:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc;
edac_dbg(0, "MC:\n");
/* don't need to call pci_enable_device() */
rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
if (mci_pdev == NULL)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
if (i82443bxgx_pci)
edac_pci_release_generic_ctl(i82443bxgx_pci);
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
edac_mc_free(mci);
}
static const struct pci_device_id i82443bxgx_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)},
{0,} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
static struct pci_driver i82443bxgx_edacmc_driver = {
.name = EDAC_MOD_STR,
.probe = i82443bxgx_edacmc_init_one,
.remove = i82443bxgx_edacmc_remove_one,
.id_table = i82443bxgx_pci_tbl,
};
static int __init i82443bxgx_edacmc_init(void)
{
int pci_rc;
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i82443bxgx_edacmc_driver);
if (pci_rc < 0)
goto fail0;
if (mci_pdev == NULL) {
const struct pci_device_id *id = &i82443bxgx_pci_tbl[0];
int i = 0;
i82443bxgx_registered = 0;
while (mci_pdev == NULL && id->vendor != 0) {
mci_pdev = pci_get_device(id->vendor,
id->device, NULL);
i++;
id = &i82443bxgx_pci_tbl[i];
}
if (!mci_pdev) {
edac_dbg(0, "i82443bxgx pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
if (pci_rc < 0) {
edac_dbg(0, "i82443bxgx init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&i82443bxgx_edacmc_driver);
fail0:
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit i82443bxgx_edacmc_exit(void)
{
pci_unregister_driver(&i82443bxgx_edacmc_driver);
if (!i82443bxgx_registered)
i82443bxgx_edacmc_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
}
module_init(i82443bxgx_edacmc_init);
module_exit(i82443bxgx_edacmc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Small <[email protected]> - WPAD");
MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i82443bxgx_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2011-2012 Calxeda, Inc.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include "edac_module.h"
/* DDR Ctrlr Error Registers */
#define HB_DDR_ECC_ERR_BASE 0x128
#define MW_DDR_ECC_ERR_BASE 0x1b4
#define HB_DDR_ECC_OPT 0x00
#define HB_DDR_ECC_U_ERR_ADDR 0x08
#define HB_DDR_ECC_U_ERR_STAT 0x0c
#define HB_DDR_ECC_U_ERR_DATAL 0x10
#define HB_DDR_ECC_U_ERR_DATAH 0x14
#define HB_DDR_ECC_C_ERR_ADDR 0x18
#define HB_DDR_ECC_C_ERR_STAT 0x1c
#define HB_DDR_ECC_C_ERR_DATAL 0x20
#define HB_DDR_ECC_C_ERR_DATAH 0x24
#define HB_DDR_ECC_OPT_MODE_MASK 0x3
#define HB_DDR_ECC_OPT_FWC 0x100
#define HB_DDR_ECC_OPT_XOR_SHIFT 16
/* DDR Ctrlr Interrupt Registers */
#define HB_DDR_ECC_INT_BASE 0x180
#define MW_DDR_ECC_INT_BASE 0x218
#define HB_DDR_ECC_INT_STATUS 0x00
#define HB_DDR_ECC_INT_ACK 0x04
#define HB_DDR_ECC_INT_STAT_CE 0x8
#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
#define HB_DDR_ECC_INT_STAT_UE 0x20
#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
struct hb_mc_drvdata {
void __iomem *mc_err_base;
void __iomem *mc_int_base;
};
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
{
struct mem_ctl_info *mci = dev_id;
struct hb_mc_drvdata *drvdata = mci->pvt_info;
u32 status, err_addr;
/* Read the interrupt status register */
status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);
if (status & HB_DDR_ECC_INT_STAT_UE) {
err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, 0,
0, 0, -1,
mci->ctl_name, "");
}
if (status & HB_DDR_ECC_INT_STAT_CE) {
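/* The ECC syndrome is reported in bits [15:8] of the correctable-error status register. */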
u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
syndrome = (syndrome >> 8) & 0xff;
err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, syndrome,
0, 0, -1,
mci->ctl_name, "");
}
/* clear the error, clears the interrupt */
writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
return IRQ_HANDLED;
}
static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
struct hb_mc_drvdata *pdata = mci->pvt_info;
u32 reg;
reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
reg &= HB_DDR_ECC_OPT_MODE_MASK;
reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
}
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
static ssize_t highbank_mc_inject_ctrl(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
u8 synd;
if (kstrtou8(buf, 16, &synd))
return -EINVAL;
highbank_mc_err_inject(mci, synd);
return count;
}
static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);
static struct attribute *highbank_dev_attrs[] = {
&dev_attr_inject_ctrl.attr,
NULL
};
ATTRIBUTE_GROUPS(highbank_dev);
struct hb_mc_settings {
int err_offset;
int int_offset;
};
static struct hb_mc_settings hb_settings = {
.err_offset = HB_DDR_ECC_ERR_BASE,
.int_offset = HB_DDR_ECC_INT_BASE,
};
static struct hb_mc_settings mw_settings = {
.err_offset = MW_DDR_ECC_ERR_BASE,
.int_offset = MW_DDR_ECC_INT_BASE,
};
static const struct of_device_id hb_ddr_ctrl_of_match[] = {
{ .compatible = "calxeda,hb-ddr-ctrl", .data = &hb_settings },
{ .compatible = "calxeda,ecx-2000-ddr-ctrl", .data = &mw_settings },
{},
};
MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
static int highbank_mc_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
const struct hb_mc_settings *settings;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct hb_mc_drvdata *drvdata;
struct dimm_info *dimm;
struct resource *r;
void __iomem *base;
u32 control;
int irq;
int res = 0;
id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
if (!id)
return -ENODEV;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 1;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = 1;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(struct hb_mc_drvdata));
if (!mci)
return -ENOMEM;
mci->pdev = &pdev->dev;
drvdata = mci->pvt_info;
platform_set_drvdata(pdev, mci);
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
res = -ENOMEM;
goto free;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "Unable to get mem resource\n");
res = -ENODEV;
goto err;
}
if (!devm_request_mem_region(&pdev->dev, r->start,
resource_size(r), dev_name(&pdev->dev))) {
dev_err(&pdev->dev, "Error while requesting mem region\n");
res = -EBUSY;
goto err;
}
base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!base) {
dev_err(&pdev->dev, "Unable to map regs\n");
res = -ENOMEM;
goto err;
}
settings = id->data;
drvdata->mc_err_base = base + settings->err_offset;
drvdata->mc_int_base = base + settings->int_offset;
control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
if (!control || (control == 0x2)) {
dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
res = -ENODEV;
goto err;
}
mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = pdev->dev.driver->name;
mci->ctl_name = id->compatible;
mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_SW_SRC;
/* Only a single 4GB DIMM is supported */
dimm = *mci->dimms;
dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
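/*
 * Illustrative note: with a 32-bit unsigned long and 4 KiB pages,
 * (~0UL >> PAGE_SHIFT) + 1 is 0x100000 pages, i.e. the 4GB DIMM size
 * mentioned above.
 */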
dimm->grain = 8;
dimm->dtype = DEV_X8;
dimm->mtype = MEM_DDR3;
dimm->edac_mode = EDAC_SECDED;
res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
if (res < 0)
goto err;
irq = platform_get_irq(pdev, 0);
res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
0, dev_name(&pdev->dev), mci);
if (res < 0) {
dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
goto err2;
}
devres_close_group(&pdev->dev, NULL);
return 0;
err2:
edac_mc_del_mc(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
free:
edac_mc_free(mci);
return res;
}
static int highbank_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
static struct platform_driver highbank_mc_edac_driver = {
.probe = highbank_mc_probe,
.remove = highbank_mc_remove,
.driver = {
.name = "hb_mc_edac",
.of_match_table = hb_ddr_ctrl_of_match,
},
};
module_platform_driver(highbank_mc_edac_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");
| linux-master | drivers/edac/highbank_mc_edac.c |
/*
* Intel 5000(P/V/X) class Memory Controllers kernel module
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Douglas Thompson Linux Networx (http://lnxi.com)
* [email protected]
*
* This module is based on the following document:
*
* Intel 5000X Chipset Memory Controller Hub (MCH) - Datasheet
* http://developer.intel.com/design/chipsets/datashts/313070.htm
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <asm/mmzone.h>
#include "edac_module.h"
/*
* Alter this version for the I5000 module when modifications are made
*/
#define I5000_REVISION " Ver: 2.0.12"
#define EDAC_MOD_STR "i5000_edac"
#define i5000_printk(level, fmt, arg...) \
edac_printk(level, "i5000", fmt, ##arg)
#define i5000_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "i5000", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_FBD_0
#define PCI_DEVICE_ID_INTEL_FBD_0 0x25F5
#endif
#ifndef PCI_DEVICE_ID_INTEL_FBD_1
#define PCI_DEVICE_ID_INTEL_FBD_1 0x25F6
#endif
/* Device 16,
* Function 0: System Address
* Function 1: Memory Branch Map, Control, Errors Register
* Function 2: FSB Error Registers
*
* All 3 functions of Device 16 (0,1,2) share the SAME DID
*/
#define PCI_DEVICE_ID_INTEL_I5000_DEV16 0x25F0
/* OFFSETS for Function 0 */
/* OFFSETS for Function 1 */
#define AMBASE 0x48
#define MAXCH 0x56
#define MAXDIMMPERCH 0x57
#define TOLM 0x6C
#define REDMEMB 0x7C
#define RED_ECC_LOCATOR(x) ((x) & 0x3FFFF)
#define REC_ECC_LOCATOR_EVEN(x) ((x) & 0x001FF)
#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3FE00)
#define MIR0 0x80
#define MIR1 0x84
#define MIR2 0x88
#define AMIR0 0x8C
#define AMIR1 0x90
#define AMIR2 0x94
#define FERR_FAT_FBD 0x98
#define NERR_FAT_FBD 0x9C
#define EXTRACT_FBDCHAN_INDX(x) (((x)>>28) & 0x3)
#define FERR_FAT_FBDCHAN 0x30000000
#define FERR_FAT_M3ERR 0x00000004
#define FERR_FAT_M2ERR 0x00000002
#define FERR_FAT_M1ERR 0x00000001
#define FERR_FAT_MASK (FERR_FAT_M1ERR | \
FERR_FAT_M2ERR | \
FERR_FAT_M3ERR)
#define FERR_NF_FBD 0xA0
/* Thermal and SPD or FBD errors */
#define FERR_NF_M28ERR 0x01000000
#define FERR_NF_M27ERR 0x00800000
#define FERR_NF_M26ERR 0x00400000
#define FERR_NF_M25ERR 0x00200000
#define FERR_NF_M24ERR 0x00100000
#define FERR_NF_M23ERR 0x00080000
#define FERR_NF_M22ERR 0x00040000
#define FERR_NF_M21ERR 0x00020000
/* Correctable errors */
#define FERR_NF_M20ERR 0x00010000
#define FERR_NF_M19ERR 0x00008000
#define FERR_NF_M18ERR 0x00004000
#define FERR_NF_M17ERR 0x00002000
/* Non-Retry or redundant Retry errors */
#define FERR_NF_M16ERR 0x00001000
#define FERR_NF_M15ERR 0x00000800
#define FERR_NF_M14ERR 0x00000400
#define FERR_NF_M13ERR 0x00000200
/* Uncorrectable errors */
#define FERR_NF_M12ERR 0x00000100
#define FERR_NF_M11ERR 0x00000080
#define FERR_NF_M10ERR 0x00000040
#define FERR_NF_M9ERR 0x00000020
#define FERR_NF_M8ERR 0x00000010
#define FERR_NF_M7ERR 0x00000008
#define FERR_NF_M6ERR 0x00000004
#define FERR_NF_M5ERR 0x00000002
#define FERR_NF_M4ERR 0x00000001
#define FERR_NF_UNCORRECTABLE (FERR_NF_M12ERR | \
FERR_NF_M11ERR | \
FERR_NF_M10ERR | \
FERR_NF_M9ERR | \
FERR_NF_M8ERR | \
FERR_NF_M7ERR | \
FERR_NF_M6ERR | \
FERR_NF_M5ERR | \
FERR_NF_M4ERR)
#define FERR_NF_CORRECTABLE (FERR_NF_M20ERR | \
FERR_NF_M19ERR | \
FERR_NF_M18ERR | \
FERR_NF_M17ERR)
#define FERR_NF_DIMM_SPARE (FERR_NF_M27ERR | \
FERR_NF_M28ERR)
#define FERR_NF_THERMAL (FERR_NF_M26ERR | \
FERR_NF_M25ERR | \
FERR_NF_M24ERR | \
FERR_NF_M23ERR)
#define FERR_NF_SPD_PROTOCOL (FERR_NF_M22ERR)
#define FERR_NF_NORTH_CRC (FERR_NF_M21ERR)
#define FERR_NF_NON_RETRY (FERR_NF_M13ERR | \
FERR_NF_M14ERR | \
FERR_NF_M15ERR)
#define NERR_NF_FBD 0xA4
#define FERR_NF_MASK (FERR_NF_UNCORRECTABLE | \
FERR_NF_CORRECTABLE | \
FERR_NF_DIMM_SPARE | \
FERR_NF_THERMAL | \
FERR_NF_SPD_PROTOCOL | \
FERR_NF_NORTH_CRC | \
FERR_NF_NON_RETRY)
#define EMASK_FBD 0xA8
#define EMASK_FBD_M28ERR 0x08000000
#define EMASK_FBD_M27ERR 0x04000000
#define EMASK_FBD_M26ERR 0x02000000
#define EMASK_FBD_M25ERR 0x01000000
#define EMASK_FBD_M24ERR 0x00800000
#define EMASK_FBD_M23ERR 0x00400000
#define EMASK_FBD_M22ERR 0x00200000
#define EMASK_FBD_M21ERR 0x00100000
#define EMASK_FBD_M20ERR 0x00080000
#define EMASK_FBD_M19ERR 0x00040000
#define EMASK_FBD_M18ERR 0x00020000
#define EMASK_FBD_M17ERR 0x00010000
#define EMASK_FBD_M15ERR 0x00004000
#define EMASK_FBD_M14ERR 0x00002000
#define EMASK_FBD_M13ERR 0x00001000
#define EMASK_FBD_M12ERR 0x00000800
#define EMASK_FBD_M11ERR 0x00000400
#define EMASK_FBD_M10ERR 0x00000200
#define EMASK_FBD_M9ERR 0x00000100
#define EMASK_FBD_M8ERR 0x00000080
#define EMASK_FBD_M7ERR 0x00000040
#define EMASK_FBD_M6ERR 0x00000020
#define EMASK_FBD_M5ERR 0x00000010
#define EMASK_FBD_M4ERR 0x00000008
#define EMASK_FBD_M3ERR 0x00000004
#define EMASK_FBD_M2ERR 0x00000002
#define EMASK_FBD_M1ERR 0x00000001
#define ENABLE_EMASK_FBD_FATAL_ERRORS (EMASK_FBD_M1ERR | \
EMASK_FBD_M2ERR | \
EMASK_FBD_M3ERR)
#define ENABLE_EMASK_FBD_UNCORRECTABLE (EMASK_FBD_M4ERR | \
EMASK_FBD_M5ERR | \
EMASK_FBD_M6ERR | \
EMASK_FBD_M7ERR | \
EMASK_FBD_M8ERR | \
EMASK_FBD_M9ERR | \
EMASK_FBD_M10ERR | \
EMASK_FBD_M11ERR | \
EMASK_FBD_M12ERR)
#define ENABLE_EMASK_FBD_CORRECTABLE (EMASK_FBD_M17ERR | \
EMASK_FBD_M18ERR | \
EMASK_FBD_M19ERR | \
EMASK_FBD_M20ERR)
#define ENABLE_EMASK_FBD_DIMM_SPARE (EMASK_FBD_M27ERR | \
EMASK_FBD_M28ERR)
#define ENABLE_EMASK_FBD_THERMALS (EMASK_FBD_M26ERR | \
EMASK_FBD_M25ERR | \
EMASK_FBD_M24ERR | \
EMASK_FBD_M23ERR)
#define ENABLE_EMASK_FBD_SPD_PROTOCOL (EMASK_FBD_M22ERR)
#define ENABLE_EMASK_FBD_NORTH_CRC (EMASK_FBD_M21ERR)
#define ENABLE_EMASK_FBD_NON_RETRY (EMASK_FBD_M15ERR | \
EMASK_FBD_M14ERR | \
EMASK_FBD_M13ERR)
#define ENABLE_EMASK_ALL (ENABLE_EMASK_FBD_NON_RETRY | \
ENABLE_EMASK_FBD_NORTH_CRC | \
ENABLE_EMASK_FBD_SPD_PROTOCOL | \
ENABLE_EMASK_FBD_THERMALS | \
ENABLE_EMASK_FBD_DIMM_SPARE | \
ENABLE_EMASK_FBD_FATAL_ERRORS | \
ENABLE_EMASK_FBD_CORRECTABLE | \
ENABLE_EMASK_FBD_UNCORRECTABLE)
#define ERR0_FBD 0xAC
#define ERR1_FBD 0xB0
#define ERR2_FBD 0xB4
#define MCERR_FBD 0xB8
#define NRECMEMA 0xBE
#define NREC_BANK(x) (((x)>>12) & 0x7)
#define NREC_RDWR(x) (((x)>>11) & 1)
#define NREC_RANK(x) (((x)>>8) & 0x7)
#define NRECMEMB 0xC0
#define NREC_CAS(x) (((x)>>16) & 0xFFF)
#define NREC_RAS(x) ((x) & 0x7FFF)
#define NRECFGLOG 0xC4
#define NREEECFBDA 0xC8
#define NREEECFBDB 0xCC
#define NREEECFBDC 0xD0
#define NREEECFBDD 0xD4
#define NREEECFBDE 0xD8
#define REDMEMA 0xDC
#define RECMEMA 0xE2
#define REC_BANK(x) (((x)>>12) & 0x7)
#define REC_RDWR(x) (((x)>>11) & 1)
#define REC_RANK(x) (((x)>>8) & 0x7)
#define RECMEMB 0xE4
#define REC_CAS(x) (((x)>>16) & 0xFFFFFF)
#define REC_RAS(x) ((x) & 0x7FFF)
#define RECFGLOG 0xE8
#define RECFBDA 0xEC
#define RECFBDB 0xF0
#define RECFBDC 0xF4
#define RECFBDD 0xF8
#define RECFBDE 0xFC
/* OFFSETS for Function 2 */
/*
* Device 21,
* Function 0: Memory Map Branch 0
*
* Device 22,
* Function 0: Memory Map Branch 1
*/
#define PCI_DEVICE_ID_I5000_BRANCH_0 0x25F5
#define PCI_DEVICE_ID_I5000_BRANCH_1 0x25F6
#define AMB_PRESENT_0 0x64
#define AMB_PRESENT_1 0x66
#define MTR0 0x80
#define MTR1 0x84
#define MTR2 0x88
#define MTR3 0x8C
#define NUM_MTRS 4
#define CHANNELS_PER_BRANCH 2
#define MAX_BRANCHES 2
/* Defines to extract the various fields from the
* MTRx - Memory Technology Registers
*/
#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8))
#define MTR_DRAM_WIDTH(mtr) ((((mtr) >> 6) & 0x1) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr) ((((mtr) >> 5) & 0x1) ? 8 : 4)
#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
#define MTR_DIMM_RANK(mtr) (((mtr) >> 4) & 0x1)
#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
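/*
 * Worked example (illustrative only): an MTR value of 0x116 decodes via
 * the extractors above as DIMM present (bit 8), x4 width, 4 banks,
 * dual rank, 14 row address bits and 12 column address bits.
 */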
/* enables the report of miscellaneous messages as CE errors - default off */
static int misc_messages;
/* Enumeration of supported devices */
enum i5000_chips {
I5000P = 0,
I5000V = 1, /* future */
I5000X = 2 /* future */
};
/* Device name and register DID (Device ID) */
struct i5000_dev_info {
const char *ctl_name; /* name for this device */
u16 fsb_mapping_errors; /* DID for the branchmap,control */
};
/* Table of device attributes supported by this driver */
static const struct i5000_dev_info i5000_devs[] = {
[I5000P] = {
.ctl_name = "I5000",
.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
},
};
struct i5000_dimm_info {
int megabytes; /* size, 0 means not present */
int dual_rank;
};
#define MAX_CHANNELS 6 /* max possible channels */
#define MAX_CSROWS (8*2) /* max possible csrows per channel */
/* driver private data structure */
struct i5000_pvt {
struct pci_dev *system_address; /* 16.0 */
struct pci_dev *branchmap_werrors; /* 16.1 */
struct pci_dev *fsb_error_regs; /* 16.2 */
struct pci_dev *branch_0; /* 21.0 */
struct pci_dev *branch_1; /* 22.0 */
u16 tolm; /* top of low memory */
union {
u64 ambase; /* AMB BAR */
struct {
u32 ambase_bottom;
u32 ambase_top;
} u __packed;
};
u16 mir0, mir1, mir2;
u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
u16 b0_ambpresent1; /* Branch 0, Channel 1 */
u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 0 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
/* DIMM information matrix, allocating architecture maximums */
struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
/* Actual values for this controller */
int maxch; /* Max channels */
int maxdimmperch; /* Max DIMMs per channel */
};
/* I5000 MCH error information retrieved from Hardware */
struct i5000_error_info {
/* These registers are always read from the MC */
u32 ferr_fat_fbd; /* First Errors Fatal */
u32 nerr_fat_fbd; /* Next Errors Fatal */
u32 ferr_nf_fbd; /* First Errors Non-Fatal */
u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
/* These registers are input ONLY if there was a Recoverable Error */
u32 redmemb; /* Recoverable Mem Data Error log B */
u16 recmema; /* Recoverable Mem Error log A */
u32 recmemb; /* Recoverable Mem Error log B */
/* These registers are input ONLY if there was a
* Non-Recoverable Error */
u16 nrecmema; /* Non-Recoverable Mem log A */
u32 nrecmemb; /* Non-Recoverable Mem log B */
};
static struct edac_pci_ctl_info *i5000_pci;
/*
* i5000_get_error_info Retrieve the hardware error information from
* the hardware and cache it in the 'info'
* structure
*/
static void i5000_get_error_info(struct mem_ctl_info *mci,
struct i5000_error_info *info)
{
struct i5000_pvt *pvt;
u32 value;
pvt = mci->pvt_info;
/* read in the 1st FATAL error register */
pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
/* Mask only the bits that the doc says are valid
*/
value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
/* If there is an error, then read in the */
/* NEXT FATAL error register and the Memory Error Log Register A */
if (value & FERR_FAT_MASK) {
info->ferr_fat_fbd = value;
/* harvest the various error data we need */
pci_read_config_dword(pvt->branchmap_werrors,
NERR_FAT_FBD, &info->nerr_fat_fbd);
pci_read_config_word(pvt->branchmap_werrors,
NRECMEMA, &info->nrecmema);
pci_read_config_dword(pvt->branchmap_werrors,
NRECMEMB, &info->nrecmemb);
/* Clear the error bits, by writing them back */
pci_write_config_dword(pvt->branchmap_werrors,
FERR_FAT_FBD, value);
} else {
info->ferr_fat_fbd = 0;
info->nerr_fat_fbd = 0;
info->nrecmema = 0;
info->nrecmemb = 0;
}
/* read in the 1st NON-FATAL error register */
pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
/* If there is an error, then read in the 1st NON-FATAL error
* register as well */
if (value & FERR_NF_MASK) {
info->ferr_nf_fbd = value;
/* harvest the various error data we need */
pci_read_config_dword(pvt->branchmap_werrors,
NERR_NF_FBD, &info->nerr_nf_fbd);
pci_read_config_word(pvt->branchmap_werrors,
RECMEMA, &info->recmema);
pci_read_config_dword(pvt->branchmap_werrors,
RECMEMB, &info->recmemb);
pci_read_config_dword(pvt->branchmap_werrors,
REDMEMB, &info->redmemb);
/* Clear the error bits, by writing them back */
pci_write_config_dword(pvt->branchmap_werrors,
FERR_NF_FBD, value);
} else {
info->ferr_nf_fbd = 0;
info->nerr_nf_fbd = 0;
info->recmema = 0;
info->recmemb = 0;
info->redmemb = 0;
}
}
/*
* i5000_process_fatal_error_info(struct mem_ctl_info *mci,
* struct i5000_error_info *info,
* int handle_errors);
*
* handle the Intel FATAL errors, if any
*/
static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
struct i5000_error_info *info,
int handle_errors)
{
char msg[EDAC_MC_LABEL_LEN + 1 + 160];
char *specific = NULL;
u32 allErrors;
int channel;
int bank;
int rank;
int rdwr;
int ras, cas;
/* mask off the Error bits that are possible */
allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
if (!allErrors)
return; /* if no error, return now */
channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
/* Use the NON-Recoverable macros to extract data */
bank = NREC_BANK(info->nrecmema);
rank = NREC_RANK(info->nrecmema);
rdwr = NREC_RDWR(info->nrecmema);
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
edac_dbg(0, "\t\tCSROW= %d Channel= %d (DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, bank,
rdwr ? "Write" : "Read", ras, cas);
/* Only 1 bit will be on */
switch (allErrors) {
case FERR_FAT_M1ERR:
specific = "Alert on non-redundant retry or fast "
"reset timeout";
break;
case FERR_FAT_M2ERR:
specific = "Northbound CRC error on non-redundant "
"retry";
break;
case FERR_FAT_M3ERR:
{
static int done;
/*
* This error is generated to inform that the intelligent
* throttling is disabled and the temperature passed the
* specified middle point. Since this is something the BIOS
* should take care of, we'll warn only once to avoid
* worthlessly flooding the log.
*/
if (done)
return;
done++;
specific = ">Tmid Thermal event with intelligent "
"throttling disabled";
}
break;
}
/* Form out message */
snprintf(msg, sizeof(msg),
"Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
bank, ras, cas, allErrors, specific);
/* Call the helper to output message */
edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
channel >> 1, channel & 1, rank,
rdwr ? "Write error" : "Read error",
msg);
}
/*
* i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
* struct i5000_error_info *info,
* int handle_errors);
*
* handle the Intel NON-FATAL errors, if any
*/
static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
struct i5000_error_info *info,
int handle_errors)
{
char msg[EDAC_MC_LABEL_LEN + 1 + 170];
char *specific = NULL;
u32 allErrors;
u32 ue_errors;
u32 ce_errors;
u32 misc_errors;
int branch;
int channel;
int bank;
int rank;
int rdwr;
int ras, cas;
/* mask off the Error bits that are possible */
allErrors = (info->ferr_nf_fbd & FERR_NF_MASK);
if (!allErrors)
return; /* if no error, return now */
/* ONLY ONE of the possible error bits will be set, as per the docs */
ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
if (ue_errors) {
edac_dbg(0, "\tUncorrected bits= 0x%x\n", ue_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
/*
* According to the i5000 datasheet, bit 28 has no significance
* for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD
*/
channel = branch & 2;
bank = NREC_BANK(info->nrecmema);
rank = NREC_RANK(info->nrecmema);
rdwr = NREC_RDWR(info->nrecmema);
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
edac_dbg(0, "\t\tCSROW= %d Channels= %d,%d (Branch= %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, channel + 1, branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas);
switch (ue_errors) {
case FERR_NF_M12ERR:
specific = "Non-Aliased Uncorrectable Patrol Data ECC";
break;
case FERR_NF_M11ERR:
specific = "Non-Aliased Uncorrectable Spare-Copy "
"Data ECC";
break;
case FERR_NF_M10ERR:
specific = "Non-Aliased Uncorrectable Mirrored Demand "
"Data ECC";
break;
case FERR_NF_M9ERR:
specific = "Non-Aliased Uncorrectable Non-Mirrored "
"Demand Data ECC";
break;
case FERR_NF_M8ERR:
specific = "Aliased Uncorrectable Patrol Data ECC";
break;
case FERR_NF_M7ERR:
specific = "Aliased Uncorrectable Spare-Copy Data ECC";
break;
case FERR_NF_M6ERR:
specific = "Aliased Uncorrectable Mirrored Demand "
"Data ECC";
break;
case FERR_NF_M5ERR:
specific = "Aliased Uncorrectable Non-Mirrored Demand "
"Data ECC";
break;
case FERR_NF_M4ERR:
specific = "Uncorrectable Data ECC on Replay";
break;
}
/* Form out message */
snprintf(msg, sizeof(msg),
"Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
rank, bank, ras, cas, ue_errors, specific);
/* Call the helper to output message */
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
channel >> 1, -1, rank,
rdwr ? "Write error" : "Read error",
msg);
}
/* Check correctable errors */
ce_errors = allErrors & FERR_NF_CORRECTABLE;
if (ce_errors) {
edac_dbg(0, "\tCorrected bits= 0x%x\n", ce_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
channel = 0;
if (REC_ECC_LOCATOR_ODD(info->redmemb))
channel = 1;
/* Convert the channel from branch-relative numbering to an
 * absolute channel number */
channel += branch;
bank = REC_BANK(info->recmema);
rank = REC_RANK(info->recmema);
rdwr = REC_RDWR(info->recmema);
ras = REC_RAS(info->recmemb);
cas = REC_CAS(info->recmemb);
edac_dbg(0, "\t\tCSROW= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas);
switch (ce_errors) {
case FERR_NF_M17ERR:
specific = "Correctable Non-Mirrored Demand Data ECC";
break;
case FERR_NF_M18ERR:
specific = "Correctable Mirrored Demand Data ECC";
break;
case FERR_NF_M19ERR:
specific = "Correctable Spare-Copy Data ECC";
break;
case FERR_NF_M20ERR:
specific = "Correctable Patrol Data ECC";
break;
}
/* Form out message */
snprintf(msg, sizeof(msg),
"Rank=%d Bank=%d RDWR=%s RAS=%d "
"CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas, ce_errors,
specific);
/* Call the helper to output message */
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
channel >> 1, channel % 2, rank,
rdwr ? "Write error" : "Read error",
msg);
}
if (!misc_messages)
return;
misc_errors = allErrors & (FERR_NF_NON_RETRY | FERR_NF_NORTH_CRC |
FERR_NF_SPD_PROTOCOL | FERR_NF_DIMM_SPARE);
if (misc_errors) {
switch (misc_errors) {
case FERR_NF_M13ERR:
specific = "Non-Retry or Redundant Retry FBD Memory "
"Alert or Redundant Fast Reset Timeout";
break;
case FERR_NF_M14ERR:
specific = "Non-Retry or Redundant Retry FBD "
"Configuration Alert";
break;
case FERR_NF_M15ERR:
specific = "Non-Retry or Redundant Retry FBD "
"Northbound CRC error on read data";
break;
case FERR_NF_M21ERR:
specific = "FBD Northbound CRC error on "
"FBD Sync Status";
break;
case FERR_NF_M22ERR:
specific = "SPD protocol error";
break;
case FERR_NF_M27ERR:
specific = "DIMM-spare copy started";
break;
case FERR_NF_M28ERR:
specific = "DIMM-spare copy completed";
break;
}
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
/* Form out message */
snprintf(msg, sizeof(msg),
"Err=%#x (%s)", misc_errors, specific);
/* Call the helper to output message */
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
branch >> 1, -1, -1,
"Misc error", msg);
}
}
/*
* i5000_process_error_info Process the error info that is
* in the 'info' structure, previously retrieved from hardware
*/
static void i5000_process_error_info(struct mem_ctl_info *mci,
struct i5000_error_info *info,
int handle_errors)
{
/* First handle any fatal errors that occurred */
i5000_process_fatal_error_info(mci, info, handle_errors);
/* now handle any non-fatal errors that occurred */
i5000_process_nonfatal_error_info(mci, info, handle_errors);
}
/*
* i5000_clear_error Retrieve any error from the hardware
* but do NOT process that error.
* Used for 'clearing' out of previous errors
* Called by the Core module.
*/
static void i5000_clear_error(struct mem_ctl_info *mci)
{
struct i5000_error_info info;
i5000_get_error_info(mci, &info);
}
/*
* i5000_check_error Retrieve and process errors reported by the
* hardware. Called by the Core module.
*/
static void i5000_check_error(struct mem_ctl_info *mci)
{
struct i5000_error_info info;
i5000_get_error_info(mci, &info);
i5000_process_error_info(mci, &info, 1);
}
/*
* i5000_get_devices Find and perform 'get' operation on the MCH's
* device/functions we want to reference for this driver
*
* Need to 'get' device 16 func 1 and func 2
*/
static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
{
//const struct i5000_dev_info *i5000_dev = &i5000_devs[dev_idx];
struct i5000_pvt *pvt;
struct pci_dev *pdev;
pvt = mci->pvt_info;
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
while (1) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
/* End of list, leave */
if (pdev == NULL) {
i5000_printk(KERN_ERR,
"'system address,Process Bus' "
"device not found:"
"vendor 0x%x device 0x%x FUNC 1 "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I5000_DEV16);
return 1;
}
/* Scan for device 16 func 1 */
if (PCI_FUNC(pdev->devfn) == 1)
break;
}
pvt->branchmap_werrors = pdev;
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
while (1) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
if (pdev == NULL) {
i5000_printk(KERN_ERR,
"MC: 'branchmap,control,errors' "
"device not found:"
"vendor 0x%x device 0x%x Func 2 "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I5000_DEV16);
pci_dev_put(pvt->branchmap_werrors);
return 1;
}
/* Scan for device 16 func 2 */
if (PCI_FUNC(pdev->devfn) == 2)
break;
}
pvt->fsb_error_regs = pdev;
edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->system_address),
pvt->system_address->vendor, pvt->system_address->device);
edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
pci_name(pvt->branchmap_werrors),
pvt->branchmap_werrors->vendor,
pvt->branchmap_werrors->device);
edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
pci_name(pvt->fsb_error_regs),
pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
pdev = NULL;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_I5000_BRANCH_0, pdev);
if (pdev == NULL) {
i5000_printk(KERN_ERR,
"MC: 'BRANCH 0' device not found:"
"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);
pci_dev_put(pvt->branchmap_werrors);
pci_dev_put(pvt->fsb_error_regs);
return 1;
}
pvt->branch_0 = pdev;
/* If this device claims to have more than 2 channels then
* fetch Branch 1's information
*/
if (pvt->maxch >= CHANNELS_PER_BRANCH) {
pdev = NULL;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_I5000_BRANCH_1, pdev);
if (pdev == NULL) {
i5000_printk(KERN_ERR,
"MC: 'BRANCH 1' device not found:"
"vendor 0x%x device 0x%x Func 0 "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_I5000_BRANCH_1);
pci_dev_put(pvt->branchmap_werrors);
pci_dev_put(pvt->fsb_error_regs);
pci_dev_put(pvt->branch_0);
return 1;
}
pvt->branch_1 = pdev;
}
return 0;
}
/*
* i5000_put_devices 'put' all the devices that we have
* reserved via 'get'
*/
static void i5000_put_devices(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
pvt = mci->pvt_info;
pci_dev_put(pvt->branchmap_werrors); /* FUNC 1 */
pci_dev_put(pvt->fsb_error_regs); /* FUNC 2 */
pci_dev_put(pvt->branch_0); /* DEV 21 */
/* Only if more than 2 channels do we release the second branch */
if (pvt->maxch >= CHANNELS_PER_BRANCH)
pci_dev_put(pvt->branch_1); /* DEV 22 */
}
/*
* determine_amb_present_reg(pvt, channel)
*
* the AMB-present information is contained in four different registers
* (one per channel); determining which one to use requires knowing
* which channel is in question
*
* 2 branches, each with 2 channels
* b0_ambpresent0 for channel '0'
* b0_ambpresent1 for channel '1'
* b1_ambpresent0 for channel '2'
* b1_ambpresent1 for channel '3'
*/
static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
{
int amb_present;
if (channel < CHANNELS_PER_BRANCH) {
if (channel & 0x1)
amb_present = pvt->b0_ambpresent1;
else
amb_present = pvt->b0_ambpresent0;
} else {
if (channel & 0x1)
amb_present = pvt->b1_ambpresent1;
else
amb_present = pvt->b1_ambpresent0;
}
return amb_present;
}
/*
* determine_mtr(pvt, slot, channel)
*
* return the proper MTR register as determined by the slot and channel desired
*/
static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
{
int mtr;
if (channel < CHANNELS_PER_BRANCH)
mtr = pvt->b0_mtr[slot];
else
mtr = pvt->b1_mtr[slot];
return mtr;
}
/*
*/
static void decode_mtr(int slot_row, u16 mtr)
{
int ans;
ans = MTR_DIMMS_PRESENT(mtr);
edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
slot_row, mtr, ans ? "" : "NOT ");
if (!ans)
return;
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
MTR_DIMM_RANK(mtr) ? "double" : "single");
edac_dbg(2, "\t\tNUMROW: %s\n",
MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
"reserved");
edac_dbg(2, "\t\tNUMCOL: %s\n",
MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
"reserved");
}
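/*
 * handle_channel() below turns the MTR geometry into a DIMM size in
 * megabytes: bank + row + column address bits (+1 for dual rank), plus
 * 6 for the 64-bit data width, minus 3 (bits to bytes) and 20 (bytes
 * to megabytes). Illustrative example: 3 bank + 14 row + 11 column + 1
 * rank bits = 29, giving 2^(29 + 6 - 3 - 20) = 4096 MB.
 */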
static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
struct i5000_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
mtr = determine_mtr(pvt, slot, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
/* Determine if there is a DIMM present in this DIMM slot */
if (amb_present_reg) {
dinfo->dual_rank = MTR_DIMM_RANK(mtr);
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
/* Add the number of ROW bits */
addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
/* add the number of COLUMN bits */
addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
/* Dual-rank memories have twice the size */
if (dinfo->dual_rank)
addrBits++;
addrBits += 6; /* add 64 bits per DIMM */
addrBits -= 20; /* divide by 2^^20 */
addrBits -= 3; /* 8 bits per bytes */
dinfo->megabytes = 1 << addrBits;
}
}
}
/*
* calculate_dimm_size
*
* also will output a DIMM matrix map, if debug is enabled, for viewing
* how the DIMMs are populated
*/
static void calculate_dimm_size(struct i5000_pvt *pvt)
{
struct i5000_dimm_info *dinfo;
int slot, channel, branch;
char *p, *mem_buffer;
int space, n;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
mem_buffer = p = kmalloc(space, GFP_KERNEL);
if (p == NULL) {
i5000_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
__FILE__, __func__);
return;
}
/* Scan all the actual slots
* and calculate the information for each DIMM
* Start with the highest slot first, to display it first
* and work toward the 0th slot
*/
for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
/* on an odd slot, first output a 'boundary' marker,
* then reset the message buffer */
if (slot & 0x1) {
n = snprintf(p, space, "--------------------------"
"--------------------------------");
p += n;
space -= n;
edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
n = snprintf(p, space, "slot %2d ", slot);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
dinfo = &pvt->dimm_info[slot][channel];
handle_channel(pvt, slot, channel, dinfo);
if (dinfo->megabytes)
n = snprintf(p, space, "%4d MB %dR| ",
dinfo->megabytes, dinfo->dual_rank + 1);
else
n = snprintf(p, space, "%4d MB | ", 0);
p += n;
space -= n;
}
p += n;
space -= n;
edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
/* Output the last bottom 'boundary' marker */
n = snprintf(p, space, "--------------------------"
"--------------------------------");
p += n;
space -= n;
edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
/* now output the 'channel' labels */
n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
n = snprintf(p, space, "channel %d | ", channel);
p += n;
space -= n;
}
edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
n = snprintf(p, space, " ");
p += n;
for (branch = 0; branch < MAX_BRANCHES; branch++) {
n = snprintf(p, space, " branch %d | ", branch);
p += n;
space -= n;
}
/* output the last message and free buffer */
edac_dbg(2, "%s\n", mem_buffer);
kfree(mem_buffer);
}
/*
* i5000_get_mc_regs read in the necessary registers and
* cache locally
*
* Fills in the private data members
*/
static void i5000_get_mc_regs(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
u32 actual_tolm;
u16 limit;
int slot_row;
int way0, way1;
pvt = mci->pvt_info;
pci_read_config_dword(pvt->system_address, AMBASE,
&pvt->u.ambase_bottom);
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
&pvt->u.ambase_top);
edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
/* Get the Branch Map regs */
pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
pvt->tolm, pvt->tolm);
actual_tolm = pvt->tolm << 28;
edac_dbg(2, "Actual TOLM byte addr=%u (0x%x)\n",
actual_tolm, actual_tolm);
pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
pci_read_config_word(pvt->branchmap_werrors, MIR2, &pvt->mir2);
/* Get the MIR[0-2] regs */
limit = (pvt->mir0 >> 4) & 0x0FFF;
way0 = pvt->mir0 & 0x1;
way1 = pvt->mir0 & 0x2;
edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
limit, way1, way0);
limit = (pvt->mir1 >> 4) & 0x0FFF;
way0 = pvt->mir1 & 0x1;
way1 = pvt->mir1 & 0x2;
edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
limit, way1, way0);
limit = (pvt->mir2 >> 4) & 0x0FFF;
way0 = pvt->mir2 & 0x1;
way1 = pvt->mir2 & 0x2;
edac_dbg(2, "MIR2: limit= 0x%x WAY1= %u WAY0= %x\n",
limit, way1, way0);
/* Get the MTR[0-3] regs */
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
int where = MTR0 + (slot_row * sizeof(u32));
pci_read_config_word(pvt->branch_0, where,
&pvt->b0_mtr[slot_row]);
edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
slot_row, where, pvt->b0_mtr[slot_row]);
if (pvt->maxch >= CHANNELS_PER_BRANCH) {
pci_read_config_word(pvt->branch_1, where,
&pvt->b1_mtr[slot_row]);
edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
slot_row, where, pvt->b1_mtr[slot_row]);
} else {
pvt->b1_mtr[slot_row] = 0;
}
}
/* Read and dump branch 0's MTRs */
edac_dbg(2, "Memory Technology Registers:\n");
edac_dbg(2, " Branch 0:\n");
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
}
pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
&pvt->b0_ambpresent0);
edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
&pvt->b0_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
/* Only if we have 2 branches (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent0 = 0;
pvt->b1_ambpresent1 = 0;
} else {
/* Read and dump branch 1's MTRs */
edac_dbg(2, " Branch 1:\n");
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
}
pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
&pvt->b1_ambpresent0);
edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
pvt->b1_ambpresent0);
pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
&pvt->b1_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
pvt->b1_ambpresent1);
}
/* Go and determine the size of each DIMM and place in an
* orderly matrix */
calculate_dimm_size(pvt);
}
/*
* i5000_init_csrows Initialize the 'csrows' table within
* the mci control structure with the
* addressing of memory.
*
* return:
* 0 success
* 1 no actual memory found on this MC
*/
static int i5000_init_csrows(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
struct dimm_info *dimm;
int empty;
int max_csrows;
int mtr;
int csrow_megs;
int channel;
int slot;
pvt = mci->pvt_info;
max_csrows = pvt->maxdimmperch * 2;
empty = 1; /* Assume NO memory */
/*
* FIXME: The memory layout used to map slot/channel into the
* real memory architecture is weird: branch+slot are "csrows"
* and channel is channel. That required an extra array (dimm_info)
* to map the dimms. A good cleanup would be to remove this array,
* and do a loop here with branch, channel, slot
*/
for (slot = 0; slot < max_csrows; slot++) {
for (channel = 0; channel < pvt->maxch; channel++) {
mtr = determine_mtr(pvt, slot, channel);
if (!MTR_DIMMS_PRESENT(mtr))
continue;
dimm = edac_get_dimm(mci, channel / MAX_BRANCHES,
channel % MAX_BRANCHES, slot);
csrow_megs = pvt->dimm_info[slot][channel].megabytes;
dimm->grain = 8;
/* Assume DDR2 for now */
dimm->mtype = MEM_FB_DDR2;
/* ask what device type on this row */
if (MTR_DRAM_WIDTH(mtr) == 8)
dimm->dtype = DEV_X8;
else
dimm->dtype = DEV_X4;
dimm->edac_mode = EDAC_S8ECD8ED;
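/* megabytes to pages: 1 MB is 256 pages, hence the << 8
 * (assumes 4 KiB pages) */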
dimm->nr_pages = csrow_megs << 8;
}
empty = 0;
}
return empty;
}
/*
* i5000_enable_error_reporting
* Turn on the memory reporting features of the hardware
*/
static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
u32 fbd_error_mask;
pvt = mci->pvt_info;
/* Read the FBD Error Mask Register */
pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
&fbd_error_mask);
/* Enable with a '0' */
fbd_error_mask &= ~(ENABLE_EMASK_ALL);
pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
fbd_error_mask);
}
/*
* i5000_get_dimm_and_channel_counts(pdev, &nr_csrows, &num_channels)
*
* ask the device how many channels are present and how many CSROWS
* as well
*/
static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
int *num_dimms_per_channel,
int *num_channels)
{
u8 value;
/* Need to retrieve just how many channels and dimms per channel are
* supported on this memory controller
*/
pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
*num_dimms_per_channel = (int)value;
pci_read_config_byte(pdev, MAXCH, &value);
*num_channels = (int)value;
}
/*
* i5000_probe1 Probe for ONE instance of device to see if it is
* present.
* return:
* 0 for FOUND a device
* < 0 for error code
*/
static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[3];
struct i5000_pvt *pvt;
int num_channels;
int num_dimms_per_channel;
edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
/* Ask the devices for the number of CSROWS and CHANNELS so
* that we can calculate the memory resources, etc
*
* The chipset will report what it can handle, which will be greater
* than or equal to what the motherboard manufacturer has implemented.
*
* As we don't have a motherboard identification routine to determine
* the actual number of slots/dimms per channel, we thus utilize the
* resource as specified by the chipset. Thus, we might have
* more DIMMs per channel than actually on the mobo, but this
* allows the driver to support up to the chipset max, without
* some fancy mobo determination.
*/
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
&num_channels);
edac_dbg(0, "MC: Number of Branches=2 Channels= %d DIMMS= %d\n",
num_channels, num_dimms_per_channel);
/* allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_BRANCH;
layers[0].size = MAX_BRANCHES;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = num_channels / MAX_BRANCHES;
layers[1].is_virt_csrow = false;
layers[2].type = EDAC_MC_LAYER_SLOT;
layers[2].size = num_dimms_per_channel;
layers[2].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
edac_dbg(0, "MC: mci = %p\n", mci);
mci->pdev = &pdev->dev; /* record ptr to the generic device */
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
pvt->maxch = num_channels;
pvt->maxdimmperch = num_dimms_per_channel;
/* 'get' the pci devices we want to reserve for our use */
if (i5000_get_devices(mci, dev_idx))
goto fail0;
/* Time to get serious */
i5000_get_mc_regs(mci); /* retrieve the hardware registers */
mci->mc_idx = 0;
mci->mtype_cap = MEM_FLAG_FB_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i5000_edac.c";
mci->ctl_name = i5000_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
/* Set the function pointer to an actual operation function */
mci->edac_check = i5000_check_error;
/* initialize the MC control structure 'csrows' table
* with the mapping and control information */
if (i5000_init_csrows(mci)) {
edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5000_init_csrows() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
} else {
edac_dbg(1, "MC: Enable error reporting now\n");
i5000_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
goto fail1;
}
i5000_clear_error(mci);
/* allocating generic PCI control info */
i5000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i5000_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
return 0;
/* Error exit unwinding stack */
fail1:
i5000_put_devices(mci);
fail0:
edac_mc_free(mci);
return -ENODEV;
}
/*
* i5000_init_one constructor for one instance of device
*
* returns:
* negative on error
* count (>= 0)
*/
static int i5000_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
edac_dbg(0, "MC:\n");
/* wake up device */
rc = pci_enable_device(pdev);
if (rc)
return rc;
/* now probe and enable the device */
return i5000_probe1(pdev, id->driver_data);
}
/*
* i5000_remove_one destructor for one instance of device
*
*/
static void i5000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
if (i5000_pci)
edac_pci_release_generic_ctl(i5000_pci);
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
/* retrieve references to resources, and free those resources */
i5000_put_devices(mci);
edac_mc_free(mci);
}
/*
* pci_device_id table for which devices we are looking for
*
* The "E500P" device is the first device supported.
*/
static const struct pci_device_id i5000_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
.driver_data = I5000P},
{0,} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
/*
* i5000_driver pci_driver structure for this module
*
*/
static struct pci_driver i5000_driver = {
.name = KBUILD_BASENAME,
.probe = i5000_init_one,
.remove = i5000_remove_one,
.id_table = i5000_pci_tbl,
};
/*
* i5000_init Module entry function
* Try to initialize this module for its devices
*/
static int __init i5000_init(void)
{
int pci_rc;
edac_dbg(2, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i5000_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
/*
* i5000_exit() Module exit function
* Unregister the driver
*/
static void __exit i5000_exit(void)
{
edac_dbg(2, "MC:\n");
pci_unregister_driver(&i5000_driver);
}
module_init(i5000_init);
module_exit(i5000_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Doug Thompson <[email protected]>");
MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " I5000_REVISION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
module_param(misc_messages, int, 0444);
MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");
| linux-master | drivers/edac/i5000_edac.c |
/*
* Intel 5100 Memory Controllers kernel module
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* This module is based on the following document:
*
* Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
* http://download.intel.com/design/chipsets/datashts/318378.pdf
*
* The intel 5100 has two independent channels. EDAC core currently
* can not reflect this configuration so instead the chip-select
* rows for each respective channel are laid out one after another,
* the first half belonging to channel 0, the second half belonging
* to channel 1.
*
* This driver is for DDR2 DIMMs, and it uses chip select to select among the
* several ranks. However, instead of reporting memory as individual ranks,
* it reports them as DIMMs. An internal table creates the association
* between ranks and DIMMs.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/delay.h>
#include <linux/mmzone.h>
#include <linux/debugfs.h>
#include "edac_module.h"
/* register addresses */
/* device 16, func 1 */
#define I5100_MC 0x40 /* Memory Control Register */
#define I5100_MC_SCRBEN_MASK (1 << 7)
#define I5100_MC_SCRBDONE_MASK (1 << 4)
#define I5100_MS 0x44 /* Memory Status Register */
#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
#define I5100_TOLM 0x6c /* Top of Low Memory */
#define I5100_MIR0 0x80 /* Memory Interleave Range 0 */
#define I5100_MIR1 0x84 /* Memory Interleave Range 1 */
#define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */
#define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */
#define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */
#define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16)
#define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15)
#define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14)
#define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12)
#define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11)
#define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10)
#define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6)
#define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5)
#define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4)
#define I5100_FERR_NF_MEM_M1ERR_MASK (1 << 1)
#define I5100_FERR_NF_MEM_ANY_MASK \
(I5100_FERR_NF_MEM_M16ERR_MASK | \
I5100_FERR_NF_MEM_M15ERR_MASK | \
I5100_FERR_NF_MEM_M14ERR_MASK | \
I5100_FERR_NF_MEM_M12ERR_MASK | \
I5100_FERR_NF_MEM_M11ERR_MASK | \
I5100_FERR_NF_MEM_M10ERR_MASK | \
I5100_FERR_NF_MEM_M6ERR_MASK | \
I5100_FERR_NF_MEM_M5ERR_MASK | \
I5100_FERR_NF_MEM_M4ERR_MASK | \
I5100_FERR_NF_MEM_M1ERR_MASK)
#define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */
#define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */
#define I5100_MEM0EINJMSK0 0x200 /* Injection Mask0 Register Channel 0 */
#define I5100_MEM1EINJMSK0 0x208 /* Injection Mask0 Register Channel 1 */
#define I5100_MEMXEINJMSK0_EINJEN (1 << 27)
#define I5100_MEM0EINJMSK1 0x204 /* Injection Mask1 Register Channel 0 */
#define I5100_MEM1EINJMSK1 0x206 /* Injection Mask1 Register Channel 1 */
/* Device 19, Function 0 */
#define I5100_DINJ0 0x9a
/* device 21 and 22, func 0 */
#define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */
#define I5100_DMIR 0x15c /* DIMM Interleave Range */
#define I5100_VALIDLOG 0x18c /* Valid Log Markers */
#define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */
#define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */
#define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */
#define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */
#define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */
#define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */
#define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */
/* bit field accessors */
static inline u32 i5100_mc_scrben(u32 mc)
{
return mc >> 7 & 1;
}
static inline u32 i5100_mc_errdeten(u32 mc)
{
return mc >> 5 & 1;
}
static inline u32 i5100_mc_scrbdone(u32 mc)
{
return mc >> 4 & 1;
}
static inline u16 i5100_spddata_rdo(u16 a)
{
return a >> 15 & 1;
}
static inline u16 i5100_spddata_sbe(u16 a)
{
return a >> 13 & 1;
}
static inline u16 i5100_spddata_busy(u16 a)
{
return a >> 12 & 1;
}
static inline u16 i5100_spddata_data(u16 a)
{
return a & ((1 << 8) - 1);
}
static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
u32 data, u32 cmd)
{
return ((dti & ((1 << 4) - 1)) << 28) |
((ckovrd & 1) << 27) |
((sa & ((1 << 3) - 1)) << 24) |
((ba & ((1 << 8) - 1)) << 16) |
((data & ((1 << 8) - 1)) << 8) |
(cmd & 1);
}
static inline u16 i5100_tolm_tolm(u16 a)
{
return a >> 12 & ((1 << 4) - 1);
}
static inline u16 i5100_mir_limit(u16 a)
{
return a >> 4 & ((1 << 12) - 1);
}
static inline u16 i5100_mir_way1(u16 a)
{
return a >> 1 & 1;
}
static inline u16 i5100_mir_way0(u16 a)
{
return a & 1;
}
static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
{
return a >> 28 & 1;
}
static inline u32 i5100_ferr_nf_mem_any(u32 a)
{
return a & I5100_FERR_NF_MEM_ANY_MASK;
}
static inline u32 i5100_nerr_nf_mem_any(u32 a)
{
return i5100_ferr_nf_mem_any(a);
}
static inline u32 i5100_dmir_limit(u32 a)
{
return a >> 16 & ((1 << 11) - 1);
}
static inline u32 i5100_dmir_rank(u32 a, u32 i)
{
return a >> (4 * i) & ((1 << 2) - 1);
}
static inline u16 i5100_mtr_present(u16 a)
{
return a >> 10 & 1;
}
static inline u16 i5100_mtr_ethrottle(u16 a)
{
return a >> 9 & 1;
}
static inline u16 i5100_mtr_width(u16 a)
{
return a >> 8 & 1;
}
static inline u16 i5100_mtr_numbank(u16 a)
{
return a >> 6 & 1;
}
static inline u16 i5100_mtr_numrow(u16 a)
{
return a >> 2 & ((1 << 2) - 1);
}
static inline u16 i5100_mtr_numcol(u16 a)
{
return a & ((1 << 2) - 1);
}
static inline u32 i5100_validlog_redmemvalid(u32 a)
{
return a >> 2 & 1;
}
static inline u32 i5100_validlog_recmemvalid(u32 a)
{
return a >> 1 & 1;
}
static inline u32 i5100_validlog_nrecmemvalid(u32 a)
{
return a & 1;
}
static inline u32 i5100_nrecmema_merr(u32 a)
{
return a >> 15 & ((1 << 5) - 1);
}
static inline u32 i5100_nrecmema_bank(u32 a)
{
return a >> 12 & ((1 << 3) - 1);
}
static inline u32 i5100_nrecmema_rank(u32 a)
{
return a >> 8 & ((1 << 3) - 1);
}
static inline u32 i5100_nrecmemb_cas(u32 a)
{
return a >> 16 & ((1 << 13) - 1);
}
static inline u32 i5100_nrecmemb_ras(u32 a)
{
return a & ((1 << 16) - 1);
}
static inline u32 i5100_recmema_merr(u32 a)
{
return i5100_nrecmema_merr(a);
}
static inline u32 i5100_recmema_bank(u32 a)
{
return i5100_nrecmema_bank(a);
}
static inline u32 i5100_recmema_rank(u32 a)
{
return i5100_nrecmema_rank(a);
}
static inline u32 i5100_recmemb_cas(u32 a)
{
return i5100_nrecmemb_cas(a);
}
static inline u32 i5100_recmemb_ras(u32 a)
{
return i5100_nrecmemb_ras(a);
}
/* some generic limits */
#define I5100_MAX_RANKS_PER_CHAN 6
#define I5100_CHANNELS 2
#define I5100_MAX_RANKS_PER_DIMM 4
#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
#define I5100_MAX_RANK_INTERLEAVE 4
#define I5100_MAX_DMIRS 5
#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
struct i5100_priv {
/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
/*
* mainboard chip select map -- maps i5100 chip selects to
* DIMM slot chip selects. In the case of only 4 ranks per
* channel, the mapping is fairly obvious but not unique.
* we map -1 -> NC and assume both channels use the same
* map...
*
*/
int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
/* memory interleave range */
struct {
u64 limit;
unsigned way[2];
} mir[I5100_CHANNELS];
/* adjusted memory interleave range register */
unsigned amir[I5100_CHANNELS];
/* dimm interleave range */
struct {
unsigned rank[I5100_MAX_RANK_INTERLEAVE];
u64 limit;
} dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
/* memory technology registers... */
struct {
unsigned present; /* 0 or 1 */
unsigned ethrottle; /* 0 or 1 */
unsigned width; /* 4 or 8 bits */
unsigned numbank; /* 2 or 3 lines */
unsigned numrow; /* 13 .. 16 lines */
unsigned numcol; /* 11 .. 12 lines */
} mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
u64 tolm; /* top of low memory in bytes */
unsigned ranksperchan; /* number of ranks per channel */
struct pci_dev *mc; /* device 16 func 1 */
struct pci_dev *einj; /* device 19 func 0 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
struct delayed_work i5100_scrubbing;
int scrub_enable;
/* Error injection */
u8 inject_channel;
u8 inject_hlinesel;
u8 inject_deviceptr1;
u8 inject_deviceptr2;
u16 inject_eccmask1;
u16 inject_eccmask2;
struct dentry *debugfs;
};
static struct dentry *i5100_debugfs;
/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
int i;
for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
const int numrank = priv->dimm_numrank[chan][i];
for (j = 0; j < numrank; j++)
if (priv->dimm_csmap[i][j] == rank)
return i * 2 + chan;
}
return -1;
}
static const char *i5100_err_msg(unsigned err)
{
static const char *merrs[] = {
"unknown", /* 0 */
"uncorrectable data ECC on replay", /* 1 */
"unknown", /* 2 */
"unknown", /* 3 */
"aliased uncorrectable demand data ECC", /* 4 */
"aliased uncorrectable spare-copy data ECC", /* 5 */
"aliased uncorrectable patrol data ECC", /* 6 */
"unknown", /* 7 */
"unknown", /* 8 */
"unknown", /* 9 */
"non-aliased uncorrectable demand data ECC", /* 10 */
"non-aliased uncorrectable spare-copy data ECC", /* 11 */
"non-aliased uncorrectable patrol data ECC", /* 12 */
"unknown", /* 13 */
"correctable demand data ECC", /* 14 */
"correctable spare-copy data ECC", /* 15 */
"correctable patrol data ECC", /* 16 */
"unknown", /* 17 */
"SPD protocol error", /* 18 */
"unknown", /* 19 */
"spare copy initiated", /* 20 */
"spare copy completed", /* 21 */
};
unsigned i;
for (i = 0; i < ARRAY_SIZE(merrs); i++)
if (1 << i & err)
return merrs[i];
return "none";
}
/* convert csrow index into a rank (per channel -- 0..5) */
static unsigned int i5100_csrow_to_rank(const struct mem_ctl_info *mci,
unsigned int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
return csrow % priv->ranksperchan;
}
/* convert csrow index into a channel (0..1) */
static unsigned int i5100_csrow_to_chan(const struct mem_ctl_info *mci,
unsigned int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
return csrow / priv->ranksperchan;
}
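/*
 * Illustrative example: with ranksperchan == 6, csrows 0-5 map to
 * channel 0 ranks 0-5 and csrows 6-11 map to channel 1 ranks 0-5,
 * matching the "one channel after another" layout described at the top
 * of this file.
 */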
static void i5100_handle_ce(struct mem_ctl_info *mci,
int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
unsigned cas,
unsigned ras,
const char *msg)
{
char detail[80];
/* Form out message */
snprintf(detail, sizeof(detail),
"bank %u, cas %u, ras %u\n",
bank, cas, ras);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, syndrome,
chan, rank, -1,
msg, detail);
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
unsigned cas,
unsigned ras,
const char *msg)
{
char detail[80];
/* Form out message */
snprintf(detail, sizeof(detail),
"bank %u, cas %u, ras %u\n",
bank, cas, ras);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, syndrome,
chan, rank, -1,
msg, detail);
}
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 ferr, u32 nerr)
{
struct i5100_priv *priv = mci->pvt_info;
struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
u32 dw;
u32 dw2;
unsigned syndrome = 0;
unsigned merr;
unsigned bank;
unsigned rank;
unsigned cas;
unsigned ras;
pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);
if (i5100_validlog_redmemvalid(dw)) {
pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
syndrome = dw2;
pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
}
if (i5100_validlog_recmemvalid(dw)) {
const char *msg;
pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
merr = i5100_recmema_merr(dw2);
bank = i5100_recmema_bank(dw2);
rank = i5100_recmema_rank(dw2);
pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
cas = i5100_recmemb_cas(dw2);
ras = i5100_recmemb_ras(dw2);
/* FIXME: not really sure if this is what merr is...
*/
if (!merr)
msg = i5100_err_msg(ferr);
else
msg = i5100_err_msg(nerr);
i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
if (i5100_validlog_nrecmemvalid(dw)) {
const char *msg;
pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
merr = i5100_nrecmema_merr(dw2);
bank = i5100_nrecmema_bank(dw2);
rank = i5100_nrecmema_rank(dw2);
pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
cas = i5100_nrecmemb_cas(dw2);
ras = i5100_nrecmemb_ras(dw2);
/* FIXME: not really sure if this is what merr is...
*/
if (!merr)
msg = i5100_err_msg(ferr);
else
msg = i5100_err_msg(nerr);
i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}
static void i5100_check_error(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
u32 dw, dw2;
pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
if (i5100_ferr_nf_mem_any(dw)) {
pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
i5100_ferr_nf_mem_any(dw),
i5100_nerr_nf_mem_any(dw2));
pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2);
}
pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
}
/* The i5100 chipset will scrub the entire memory once, then
* set a done bit. Continuous scrubbing is achieved by enqueuing
* delayed work to a workqueue, checking every few minutes if
* the scrubbing has completed and if so reinitiating it.
*/
static void i5100_refresh_scrubbing(struct work_struct *work)
{
struct delayed_work *i5100_scrubbing = to_delayed_work(work);
struct i5100_priv *priv = container_of(i5100_scrubbing,
struct i5100_priv,
i5100_scrubbing);
u32 dw;
pci_read_config_dword(priv->mc, I5100_MC, &dw);
if (priv->scrub_enable) {
pci_read_config_dword(priv->mc, I5100_MC, &dw);
if (i5100_mc_scrbdone(dw)) {
dw |= I5100_MC_SCRBEN_MASK;
pci_write_config_dword(priv->mc, I5100_MC, dw);
pci_read_config_dword(priv->mc, I5100_MC, &dw);
}
schedule_delayed_work(&(priv->i5100_scrubbing),
I5100_SCRUB_REFRESH_RATE);
}
}
/*
* The bandwidth is based on experimentation, feel free to refine it.
*/
static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
{
struct i5100_priv *priv = mci->pvt_info;
u32 dw;
pci_read_config_dword(priv->mc, I5100_MC, &dw);
if (bandwidth) {
priv->scrub_enable = 1;
dw |= I5100_MC_SCRBEN_MASK;
schedule_delayed_work(&(priv->i5100_scrubbing),
I5100_SCRUB_REFRESH_RATE);
} else {
priv->scrub_enable = 0;
dw &= ~I5100_MC_SCRBEN_MASK;
cancel_delayed_work(&(priv->i5100_scrubbing));
}
pci_write_config_dword(priv->mc, I5100_MC, dw);
pci_read_config_dword(priv->mc, I5100_MC, &dw);
bandwidth = 5900000 * i5100_mc_scrben(dw);
return bandwidth;
}
static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
u32 dw;
pci_read_config_dword(priv->mc, I5100_MC, &dw);
return 5900000 * i5100_mc_scrben(dw);
}
static struct pci_dev *pci_get_device_func(unsigned vendor,
unsigned device,
unsigned func)
{
struct pci_dev *ret = NULL;
while (1) {
ret = pci_get_device(vendor, device, ret);
if (!ret)
break;
if (PCI_FUNC(ret->devfn) == func)
break;
}
return ret;
}
static unsigned long i5100_npages(struct mem_ctl_info *mci, unsigned int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
const unsigned int chan_rank = i5100_csrow_to_rank(mci, csrow);
const unsigned int chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
if (!priv->mtr[chan][chan_rank].present)
return 0ULL;
addr_lines =
I5100_DIMM_ADDR_LINES +
priv->mtr[chan][chan_rank].numcol +
priv->mtr[chan][chan_rank].numrow +
priv->mtr[chan][chan_rank].numbank;
return (unsigned long)
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
}
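/*
 * Worked example of the calculation above (illustrative values only;
 * it assumes I5100_DIMM_ADDR_LINES accounts for the 3 address lines
 * covered by a 64-bit/8-byte column access): a present rank with
 * numcol = 10, numrow = 13 and numbank = 2 gives
 * addr_lines = 3 + 10 + 13 + 2 = 28, i.e. 2^28 bytes = 256 MiB,
 * which is 65536 pages with a 4 KiB PAGE_SIZE.
 */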
static void i5100_init_mtr(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
int i;
for (i = 0; i < I5100_CHANNELS; i++) {
int j;
struct pci_dev *pdev = mms[i];
for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
const unsigned addr =
(j < 4) ? I5100_MTR_0 + j * 2 :
I5100_MTR_4 + (j - 4) * 2;
u16 w;
pci_read_config_word(pdev, addr, &w);
priv->mtr[i][j].present = i5100_mtr_present(w);
priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
}
}
}
/*
* FIXME: make this into a real i2c adapter (so that dimm-decode
* will work)?
*/
static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
u8 ch, u8 slot, u8 addr, u8 *byte)
{
struct i5100_priv *priv = mci->pvt_info;
u16 w;
pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
if (i5100_spddata_busy(w))
return -1;
pci_write_config_dword(priv->mc, I5100_SPDCMD,
i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
0, 0));
/* wait up to 100ms */
udelay(100);
while (1) {
pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
if (!i5100_spddata_busy(w))
break;
udelay(100);
}
if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
return -1;
*byte = i5100_spddata_data(w);
return 0;
}
/*
* fill dimm chip select map
*
* FIXME:
 * o this is not the only way to map chip selects to dimm slots
* o investigate if there is some way to obtain this map from the bios
*/
static void i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
int i;
for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
priv->dimm_csmap[i][j] = -1; /* default NC */
}
/* only 2 chip selects per slot... */
if (priv->ranksperchan == 4) {
priv->dimm_csmap[0][0] = 0;
priv->dimm_csmap[0][1] = 3;
priv->dimm_csmap[1][0] = 1;
priv->dimm_csmap[1][1] = 2;
priv->dimm_csmap[2][0] = 2;
priv->dimm_csmap[3][0] = 3;
} else {
priv->dimm_csmap[0][0] = 0;
priv->dimm_csmap[0][1] = 1;
priv->dimm_csmap[1][0] = 2;
priv->dimm_csmap[1][1] = 3;
priv->dimm_csmap[2][0] = 4;
priv->dimm_csmap[2][1] = 5;
}
}
static void i5100_init_dimm_layout(struct pci_dev *pdev,
struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
int i;
for (i = 0; i < I5100_CHANNELS; i++) {
int j;
for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
u8 rank;
if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
priv->dimm_numrank[i][j] = 0;
else
priv->dimm_numrank[i][j] = (rank & 3) + 1;
}
}
i5100_init_dimm_csmap(mci);
}
static void i5100_init_interleaving(struct pci_dev *pdev,
struct mem_ctl_info *mci)
{
u16 w;
u32 dw;
struct i5100_priv *priv = mci->pvt_info;
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
int i;
pci_read_config_word(pdev, I5100_TOLM, &w);
priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
pci_read_config_word(pdev, I5100_MIR0, &w);
priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
priv->mir[0].way[1] = i5100_mir_way1(w);
priv->mir[0].way[0] = i5100_mir_way0(w);
pci_read_config_word(pdev, I5100_MIR1, &w);
priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
priv->mir[1].way[1] = i5100_mir_way1(w);
priv->mir[1].way[0] = i5100_mir_way0(w);
pci_read_config_word(pdev, I5100_AMIR_0, &w);
priv->amir[0] = w;
pci_read_config_word(pdev, I5100_AMIR_1, &w);
priv->amir[1] = w;
for (i = 0; i < I5100_CHANNELS; i++) {
int j;
for (j = 0; j < 5; j++) {
int k;
pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
priv->dmir[i][j].limit =
(u64) i5100_dmir_limit(dw) << 28;
for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
priv->dmir[i][j].rank[k] =
i5100_dmir_rank(dw, k);
}
}
i5100_init_mtr(mci);
}
static void i5100_init_csrows(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
struct dimm_info *dimm;
mci_for_each_dimm(mci, dimm) {
const unsigned long npages = i5100_npages(mci, dimm->idx);
const unsigned int chan = i5100_csrow_to_chan(mci, dimm->idx);
const unsigned int rank = i5100_csrow_to_rank(mci, dimm->idx);
if (!npages)
continue;
dimm->nr_pages = npages;
dimm->grain = 32;
dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
DEV_X4 : DEV_X8;
dimm->mtype = MEM_RDDR2;
dimm->edac_mode = EDAC_SECDED;
snprintf(dimm->label, sizeof(dimm->label), "DIMM%u",
i5100_rank_to_slot(mci, chan, rank));
edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
chan, rank, (long)PAGES_TO_MiB(npages));
}
}
/****************************************************************************
* Error injection routines
****************************************************************************/
static void i5100_do_inject(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
u32 mask0;
u16 mask1;
/* MEM[1:0]EINJMSK0
* 31 - ADDRMATCHEN
* 29:28 - HLINESEL
* 00 Reserved
* 01 Lower half of cache line
* 10 Upper half of cache line
* 11 Both upper and lower parts of cache line
* 27 - EINJEN
* 25:19 - XORMASK1 for deviceptr1
* 9:5 - SEC2RAM for deviceptr2
* 4:0 - FIR2RAM for deviceptr1
*/
mask0 = ((priv->inject_hlinesel & 0x3) << 28) |
I5100_MEMXEINJMSK0_EINJEN |
((priv->inject_eccmask1 & 0xffff) << 10) |
((priv->inject_deviceptr2 & 0x1f) << 5) |
(priv->inject_deviceptr1 & 0x1f);
/* MEM[1:0]EINJMSK1
* 15:0 - XORMASK2 for deviceptr2
*/
mask1 = priv->inject_eccmask2;
if (priv->inject_channel == 0) {
pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0);
pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1);
} else {
pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0);
pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1);
}
/* Error Injection Response Function
 * The Intel 5100 Memory Controller Hub Chipset (318378) datasheet
 * hints at this register but carries no data about it. All
 * data regarding device 19 is based on experimentation and on the
 * Intel 7300 Chipset Memory Controller Hub (318082) datasheet,
 * which appears to be accurate for the i5100 in this area.
 *
 * The injection code doesn't work without setting this register.
 * The register needs to be flipped off then on, else the hardware
 * will only perform the first injection.
*
* Stop condition bits 7:4
* 1010 - Stop after one injection
* 1011 - Never stop injecting faults
*
* Start condition bits 3:0
* 1010 - Never start
* 1011 - Start immediately
*/
pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa);
pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab);
}
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
static ssize_t inject_enable_write(struct file *file, const char __user *data,
size_t count, loff_t *ppos)
{
struct device *dev = file->private_data;
struct mem_ctl_info *mci = to_mci(dev);
i5100_do_inject(mci);
return count;
}
static const struct file_operations i5100_inject_enable_fops = {
.open = simple_open,
.write = inject_enable_write,
.llseek = generic_file_llseek,
};
static int i5100_setup_debugfs(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
if (!i5100_debugfs)
return -ENODEV;
priv->debugfs = edac_debugfs_create_dir_at(mci->bus->name, i5100_debugfs);
if (!priv->debugfs)
return -ENOMEM;
edac_debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
&priv->inject_channel);
edac_debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
&priv->inject_hlinesel);
edac_debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
&priv->inject_deviceptr1);
edac_debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
&priv->inject_deviceptr2);
edac_debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
&priv->inject_eccmask1);
edac_debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
&priv->inject_eccmask2);
edac_debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
&mci->dev, &i5100_inject_enable_fops);
return 0;
}
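/*
 * Example use of the injection knobs created above (illustrative only;
 * the exact debugfs path depends on where debugfs is mounted and on the
 * mci bus name, assumed here to be "mc0"):
 *
 *   cd /sys/kernel/debug/i5100_edac/mc0
 *   echo 0 > inject_channel       # target channel 0
 *   echo 3 > inject_hlinesel      # corrupt both halves of the cache line
 *   echo 1 > inject_eccmask1      # one-bit ECC XOR mask
 *   echo 1 > inject_enable        # any write triggers i5100_do_inject()
 */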
static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct i5100_priv *priv;
struct pci_dev *ch0mm, *ch1mm, *einj;
int ret = 0;
u32 dw;
int ranksperch;
if (PCI_FUNC(pdev->devfn) != 1)
return -ENODEV;
rc = pci_enable_device(pdev);
if (rc < 0) {
ret = rc;
goto bail;
}
/* ECC enabled? */
pci_read_config_dword(pdev, I5100_MC, &dw);
if (!i5100_mc_errdeten(dw)) {
printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
ret = -ENODEV;
goto bail_pdev;
}
/* figure out how many ranks, from strapped state of 48GB_Mode input */
pci_read_config_dword(pdev, I5100_MS, &dw);
ranksperch = !!(dw & (1 << 8)) * 2 + 4;
/* enable error reporting... */
pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);
/* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5100_21, 0);
if (!ch0mm) {
ret = -ENODEV;
goto bail_pdev;
}
rc = pci_enable_device(ch0mm);
if (rc < 0) {
ret = rc;
goto bail_ch0;
}
/* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5100_22, 0);
if (!ch1mm) {
ret = -ENODEV;
goto bail_disable_ch0;
}
rc = pci_enable_device(ch1mm);
if (rc < 0) {
ret = rc;
goto bail_ch1;
}
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = 2;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
layers[1].size = ranksperch;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(*priv));
if (!mci) {
ret = -ENOMEM;
goto bail_disable_ch1;
}
/* device 19, func 0, Error injection */
einj = pci_get_device_func(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5100_19, 0);
if (!einj) {
ret = -ENODEV;
goto bail_mc_free;
}
rc = pci_enable_device(einj);
if (rc < 0) {
ret = rc;
goto bail_einj;
}
mci->pdev = &pdev->dev;
priv = mci->pvt_info;
priv->ranksperchan = ranksperch;
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
priv->einj = einj;
INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
/* If scrubbing was already enabled by the bios, start maintaining it */
pci_read_config_dword(pdev, I5100_MC, &dw);
if (i5100_mc_scrben(dw)) {
priv->scrub_enable = 1;
schedule_delayed_work(&(priv->i5100_scrubbing),
I5100_SCRUB_REFRESH_RATE);
}
i5100_init_dimm_layout(pdev, mci);
i5100_init_interleaving(pdev, mci);
mci->mtype_cap = MEM_FLAG_FB_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = "i5100_edac.c";
mci->ctl_name = "i5100";
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
mci->edac_check = i5100_check_error;
mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
priv->inject_channel = 0;
priv->inject_hlinesel = 0;
priv->inject_deviceptr1 = 0;
priv->inject_deviceptr2 = 0;
priv->inject_eccmask1 = 0;
priv->inject_eccmask2 = 0;
i5100_init_csrows(mci);
/* this strange construction seems to be in every driver, dunno why */
switch (edac_op_state) {
case EDAC_OPSTATE_POLL:
case EDAC_OPSTATE_NMI:
break;
default:
edac_op_state = EDAC_OPSTATE_POLL;
break;
}
if (edac_mc_add_mc(mci)) {
ret = -ENODEV;
goto bail_scrub;
}
i5100_setup_debugfs(mci);
return ret;
bail_scrub:
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
pci_disable_device(einj);
bail_einj:
pci_dev_put(einj);
bail_mc_free:
edac_mc_free(mci);
bail_disable_ch1:
pci_disable_device(ch1mm);
bail_ch1:
pci_dev_put(ch1mm);
bail_disable_ch0:
pci_disable_device(ch0mm);
bail_ch0:
pci_dev_put(ch0mm);
bail_pdev:
pci_disable_device(pdev);
bail:
return ret;
}
static void i5100_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i5100_priv *priv;
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
priv = mci->pvt_info;
edac_debugfs_remove_recursive(priv->debugfs);
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);
pci_disable_device(priv->einj);
pci_dev_put(priv->ch0mm);
pci_dev_put(priv->ch1mm);
pci_dev_put(priv->einj);
edac_mc_free(mci);
}
static const struct pci_device_id i5100_pci_tbl[] = {
/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
static struct pci_driver i5100_driver = {
.name = KBUILD_BASENAME,
.probe = i5100_init_one,
.remove = i5100_remove_one,
.id_table = i5100_pci_tbl,
};
static int __init i5100_init(void)
{
int pci_rc;
i5100_debugfs = edac_debugfs_create_dir_at("i5100_edac", NULL);
pci_rc = pci_register_driver(&i5100_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
static void __exit i5100_exit(void)
{
edac_debugfs_remove(i5100_debugfs);
pci_unregister_driver(&i5100_driver);
}
module_init(i5100_init);
module_exit(i5100_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arthur Jones <[email protected]>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
| linux-master | drivers/edac/i5100_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* EDAC driver for Intel(R) Xeon(R) Skylake processors
* Copyright (c) 2016, Intel Corporation.
*/
#include <linux/kernel.h>
#include <linux/processor.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"
#define EDAC_MOD_STR "skx_edac"
/*
* Debug macros
*/
#define skx_printk(level, fmt, arg...) \
edac_printk(level, "skx", fmt, ##arg)
#define skx_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
static struct list_head *skx_edac_list;
static u64 skx_tolm, skx_tohm;
static int skx_num_sockets;
static unsigned int nvdimm_count;
#define MASK26 0x3FFFFFF /* Mask for 2^26 */
#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
list_for_each_entry(d, skx_edac_list, list) {
if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number)
return d;
}
return NULL;
}
enum munittype {
CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD,
ERRCHAN0, ERRCHAN1, ERRCHAN2,
};
struct munit {
u16 did;
u16 devfn[SKX_NUM_IMC];
u8 busidx;
u8 per_socket;
enum munittype mtype;
};
/*
* List of PCI device ids that we need together with some device
* number and function numbers to tell which memory controller the
* device belongs to.
*/
static const struct munit skx_all_munits[] = {
{ 0x2054, { }, 1, 1, SAD_ALL },
{ 0x2055, { }, 1, 1, UTIL_ALL },
{ 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
{ 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
{ 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
{ 0x2043, { PCI_DEVFN(10, 3), PCI_DEVFN(12, 3) }, 2, 2, ERRCHAN0 },
{ 0x2047, { PCI_DEVFN(10, 7), PCI_DEVFN(12, 7) }, 2, 2, ERRCHAN1 },
{ 0x204b, { PCI_DEVFN(11, 3), PCI_DEVFN(13, 3) }, 2, 2, ERRCHAN2 },
{ 0x208e, { }, 1, 0, SAD },
{ }
};
static int get_all_munits(const struct munit *m)
{
struct pci_dev *pdev, *prev;
struct skx_dev *d;
u32 reg;
int i = 0, ndev = 0;
prev = NULL;
for (;;) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
if (!pdev)
break;
ndev++;
if (m->per_socket == SKX_NUM_IMC) {
for (i = 0; i < SKX_NUM_IMC; i++)
if (m->devfn[i] == pdev->devfn)
break;
if (i == SKX_NUM_IMC)
goto fail;
}
d = get_skx_dev(pdev->bus, m->busidx);
if (!d)
goto fail;
/* Be sure that the device is enabled */
if (unlikely(pci_enable_device(pdev) < 0)) {
skx_printk(KERN_ERR, "Couldn't enable device %04x:%04x\n",
PCI_VENDOR_ID_INTEL, m->did);
goto fail;
}
switch (m->mtype) {
case CHAN0:
case CHAN1:
case CHAN2:
pci_dev_get(pdev);
d->imc[i].chan[m->mtype].cdev = pdev;
break;
case ERRCHAN0:
case ERRCHAN1:
case ERRCHAN2:
pci_dev_get(pdev);
d->imc[i].chan[m->mtype - ERRCHAN0].edev = pdev;
break;
case SAD_ALL:
pci_dev_get(pdev);
d->sad_all = pdev;
break;
case UTIL_ALL:
pci_dev_get(pdev);
d->util_all = pdev;
break;
case SAD:
/*
* one of these devices per core, including cores
* that don't exist on this SKU. Ignore any that
* read a route table of zero, make sure all the
* non-zero values match.
*/
pci_read_config_dword(pdev, 0xB4, &reg);
if (reg != 0) {
if (d->mcroute == 0) {
d->mcroute = reg;
} else if (d->mcroute != reg) {
skx_printk(KERN_ERR, "mcroute mismatch\n");
goto fail;
}
}
ndev--;
break;
}
prev = pdev;
}
return ndev;
fail:
pci_dev_put(pdev);
return -ENODEV;
}
static struct res_config skx_cfg = {
.type = SKX,
.decs_did = 0x2016,
.busno_cfg_offset = 0xcc,
};
static const struct x86_cpu_id skx_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
static bool skx_check_ecc(u32 mcmtr)
{
return !!GET_BITFIELD(mcmtr, 2, 2);
}
static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
{
struct skx_pvt *pvt = mci->pvt_info;
u32 mtr, mcmtr, amap, mcddrtcfg;
struct skx_imc *imc = pvt->imc;
struct dimm_info *dimm;
int i, j;
int ndimms;
/* Only the mcmtr on the first channel is effective */
pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
for (i = 0; i < SKX_NUM_CHANNELS; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
for (j = 0; j < SKX_NUM_DIMMS; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
if (IS_DIMM_PRESENT(mtr)) {
ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j, cfg);
} else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) {
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
EDAC_MOD_STR);
nvdimm_count++;
}
}
if (ndimms && !skx_check_ecc(mcmtr)) {
skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
return -ENODEV;
}
}
return 0;
}
#define SKX_MAX_SAD 24
#define SKX_GET_SAD(d, i, reg) \
pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &(reg))
#define SKX_GET_ILV(d, i, reg) \
pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &(reg))
#define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31)
#define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27)
#define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
#define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6)
#define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4)
#define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2)
#define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0)
#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0)
#define SKX_ILV_TARGET(tgt) ((tgt) & 7)
static void skx_show_retry_rd_err_log(struct decoded_addr *res,
char *msg, int len,
bool scrub_err)
{
u32 log0, log1, log2, log3, log4;
u32 corr0, corr1, corr2, corr3;
struct pci_dev *edev;
int n;
edev = res->dev->imc[res->imc].chan[res->channel].edev;
pci_read_config_dword(edev, 0x154, &log0);
pci_read_config_dword(edev, 0x148, &log1);
pci_read_config_dword(edev, 0x150, &log2);
pci_read_config_dword(edev, 0x15c, &log3);
pci_read_config_dword(edev, 0x114, &log4);
n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x]",
log0, log1, log2, log3, log4);
pci_read_config_dword(edev, 0x104, &corr0);
pci_read_config_dword(edev, 0x108, &corr1);
pci_read_config_dword(edev, 0x10c, &corr2);
pci_read_config_dword(edev, 0x110, &corr3);
if (len - n > 0)
snprintf(msg + n, len - n,
" correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
corr0 & 0xffff, corr0 >> 16,
corr1 & 0xffff, corr1 >> 16,
corr2 & 0xffff, corr2 >> 16,
corr3 & 0xffff, corr3 >> 16);
}
static bool skx_sad_decode(struct decoded_addr *res)
{
struct skx_dev *d = list_first_entry(skx_edac_list, typeof(*d), list);
u64 addr = res->addr;
int i, idx, tgt, lchan, shift;
u32 sad, ilv;
u64 limit, prev_limit;
int remote = 0;
/* Simple sanity check for I/O space or out of range */
if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
edac_dbg(0, "Address 0x%llx out of range\n", addr);
return false;
}
restart:
prev_limit = 0;
for (i = 0; i < SKX_MAX_SAD; i++) {
SKX_GET_SAD(d, i, sad);
limit = SKX_SAD_LIMIT(sad);
if (SKX_SAD_ENABLE(sad)) {
if (addr >= prev_limit && addr <= limit)
goto sad_found;
}
prev_limit = limit + 1;
}
edac_dbg(0, "No SAD entry for 0x%llx\n", addr);
return false;
sad_found:
SKX_GET_ILV(d, i, ilv);
switch (SKX_SAD_INTERLEAVE(sad)) {
case 0:
idx = GET_BITFIELD(addr, 6, 8);
break;
case 1:
idx = GET_BITFIELD(addr, 8, 10);
break;
case 2:
idx = GET_BITFIELD(addr, 12, 14);
break;
case 3:
idx = GET_BITFIELD(addr, 30, 32);
break;
}
tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
/* If point to another node, find it and start over */
if (SKX_ILV_REMOTE(tgt)) {
if (remote) {
edac_dbg(0, "Double remote!\n");
return false;
}
remote = 1;
list_for_each_entry(d, skx_edac_list, list) {
if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
goto restart;
}
edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
return false;
}
if (SKX_SAD_MOD3(sad) == 0) {
lchan = SKX_ILV_TARGET(tgt);
} else {
switch (SKX_SAD_MOD3MODE(sad)) {
case 0:
shift = 6;
break;
case 1:
shift = 8;
break;
case 2:
shift = 12;
break;
default:
edac_dbg(0, "illegal mod3mode\n");
return false;
}
switch (SKX_SAD_MOD3ASMOD2(sad)) {
case 0:
lchan = (addr >> shift) % 3;
break;
case 1:
lchan = (addr >> shift) % 2;
break;
case 2:
lchan = (addr >> shift) % 2;
lchan = (lchan << 1) | !lchan;
break;
case 3:
lchan = ((addr >> shift) % 2) << 1;
break;
}
lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
}
res->dev = d;
res->socket = d->imc[0].src_id;
res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
edac_dbg(2, "0x%llx: socket=%d imc=%d channel=%d\n",
res->addr, res->socket, res->imc, res->channel);
return true;
}
#define SKX_MAX_TAD 8
#define SKX_GET_TADBASE(d, mc, i, reg) \
pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &(reg))
#define SKX_GET_TADWAYNESS(d, mc, i, reg) \
pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &(reg))
#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &(reg))
#define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26)
#define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5)
#define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7)
#define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
#define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26)
#define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11))
#define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1)
/* which bit used for both socket and channel interleave */
static int skx_granularity[] = { 6, 8, 12, 30 };
static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
{
addr >>= shift;
addr /= ways;
addr <<= shift;
return addr | (lowbits & ((1ull << shift) - 1));
}
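/*
 * Worked example (illustrative numbers): with shift = 8, ways = 3 and
 * addr = lowbits = 0x12345, the three-way interleave is removed by
 * (0x12345 >> 8) / 3 = 0x61, shifted back up to 0x6100, and the low
 * 8 bits (0x45) are re-attached, giving a channel address of 0x6145.
 */
#if 0	/* standalone sketch of the example above, not built with the driver */
static u64 skx_do_interleave_example(void)
{
	return skx_do_interleave(0x12345, 8, 3, 0x12345);	/* == 0x6145 */
}
#endif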
static bool skx_tad_decode(struct decoded_addr *res)
{
int i;
u32 base, wayness, chnilvoffset;
int skt_interleave_bit, chn_interleave_bit;
u64 channel_addr;
for (i = 0; i < SKX_MAX_TAD; i++) {
SKX_GET_TADBASE(res->dev, res->imc, i, base);
SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
goto tad_found;
}
edac_dbg(0, "No TAD entry for 0x%llx\n", res->addr);
return false;
tad_found:
res->sktways = SKX_TAD_SKTWAYS(wayness);
res->chanways = SKX_TAD_CHNWAYS(wayness);
skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
/* Must handle channel first, then socket */
channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
res->chanways, channel_addr);
channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
res->sktways, channel_addr);
} else {
/* Handle socket then channel. Preserve low bits from original address */
channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
res->sktways, res->addr);
channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
res->chanways, res->addr);
}
res->chan_addr = channel_addr;
edac_dbg(2, "0x%llx: chan_addr=0x%llx sktways=%d chanways=%d\n",
res->addr, res->chan_addr, res->sktways, res->chanways);
return true;
}
#define SKX_MAX_RIR 4
#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
0x108 + 4 * (i), &(reg))
#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
0x120 + 16 * (idx) + 4 * (i), &(reg))
#define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
#define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
#define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
#define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
#define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
static bool skx_rir_decode(struct decoded_addr *res)
{
int i, idx, chan_rank;
int shift;
u32 rirway, rirlv;
u64 rank_addr, prev_limit = 0, limit;
if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
shift = 6;
else
shift = 13;
for (i = 0; i < SKX_MAX_RIR; i++) {
SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
limit = SKX_RIR_LIMIT(rirway);
if (SKX_RIR_VALID(rirway)) {
if (prev_limit <= res->chan_addr &&
res->chan_addr <= limit)
goto rir_found;
}
prev_limit = limit;
}
edac_dbg(0, "No RIR entry for 0x%llx\n", res->addr);
return false;
rir_found:
rank_addr = res->chan_addr >> shift;
rank_addr /= SKX_RIR_WAYS(rirway);
rank_addr <<= shift;
rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
res->rank_address = rank_addr;
idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
chan_rank = SKX_RIR_CHAN_RANK(rirlv);
res->channel_rank = chan_rank;
res->dimm = chan_rank / 4;
res->rank = chan_rank % 4;
edac_dbg(2, "0x%llx: dimm=%d rank=%d chan_rank=%d rank_addr=0x%llx\n",
res->addr, res->dimm, res->rank,
res->channel_rank, res->rank_address);
return true;
}
static u8 skx_close_row[] = {
15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34
};
static u8 skx_close_column[] = {
3, 4, 5, 14, 19, 23, 24, 25, 26, 27
};
static u8 skx_open_row[] = {
14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34
};
static u8 skx_open_column[] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 12
};
static u8 skx_open_fine_column[] = {
3, 4, 5, 7, 8, 9, 10, 11, 12, 13
};
static int skx_bits(u64 addr, int nbits, u8 *bits)
{
int i, res = 0;
for (i = 0; i < nbits; i++)
res |= ((addr >> bits[i]) & 1) << i;
return res;
}
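/*
 * Worked example (illustrative): with nbits = 3, bits[] = {15, 16, 17}
 * and addr = 0x18000 (bits 15 and 16 set), the gathered result is
 * (1 << 0) | (1 << 1) = 0x3.
 */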
static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
{
int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
if (do_xor)
ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
return ret;
}
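/*
 * Worked example (illustrative): for the closed-page call in
 * skx_mad_decode() below, with b0 = 8, b1 = 9, x0 = 22, x1 = 28 and a
 * rank address that has only bits 8 and 22 set, the raw field is 0b01
 * and the XOR term is 0b01, so the bank address evaluates to
 * 0b01 ^ 0b01 = 0 when bank XOR is enabled.
 */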
static bool skx_mad_decode(struct decoded_addr *r)
{
struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
int bg0 = dimm->fine_grain_bank ? 6 : 13;
if (dimm->close_pg) {
r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
r->column |= 0x400; /* C10 is autoprecharge, always set */
r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
} else {
r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
if (dimm->fine_grain_bank)
r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
else
r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
}
r->row &= (1u << dimm->rowbits) - 1;
edac_dbg(2, "0x%llx: row=0x%x col=0x%x bank_addr=%d bank_group=%d\n",
r->addr, r->row, r->column, r->bank_address,
r->bank_group);
return true;
}
static bool skx_decode(struct decoded_addr *res)
{
return skx_sad_decode(res) && skx_tad_decode(res) &&
skx_rir_decode(res) && skx_mad_decode(res);
}
static struct notifier_block skx_mce_dec = {
.notifier_call = skx_mce_check_error,
.priority = MCE_PRIO_EDAC,
};
#ifdef CONFIG_EDAC_DEBUG
/*
* Debug feature.
* Exercise the address decode logic by writing an address to
* /sys/kernel/debug/edac/skx_test/addr.
*/
static struct dentry *skx_test;
static int debugfs_u64_set(void *data, u64 val)
{
struct mce m;
pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
memset(&m, 0, sizeof(m));
/* ADDRV + MemRd + Unknown channel */
m.status = MCI_STATUS_ADDRV + 0x90;
/* One corrected error */
m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
m.addr = val;
skx_mce_check_error(NULL, 0, &m);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
static void setup_skx_debug(void)
{
skx_test = edac_debugfs_create_dir("skx_test");
if (!skx_test)
return;
if (!edac_debugfs_create_file("addr", 0200, skx_test,
NULL, &fops_u64_wo)) {
debugfs_remove(skx_test);
skx_test = NULL;
}
}
static void teardown_skx_debug(void)
{
debugfs_remove_recursive(skx_test);
}
#else
static inline void setup_skx_debug(void) {}
static inline void teardown_skx_debug(void) {}
#endif /*CONFIG_EDAC_DEBUG*/
/*
* skx_init:
* make sure we are running on the correct cpu model
* search for all the devices we need
* check which DIMMs are present.
*/
static int __init skx_init(void)
{
const struct x86_cpu_id *id;
struct res_config *cfg;
const struct munit *m;
const char *owner;
int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
u8 mc = 0, src_id, node_id;
struct skx_dev *d;
edac_dbg(2, "\n");
if (ghes_get_devices())
return -EBUSY;
owner = edac_get_owner();
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
id = x86_match_cpu(skx_cpuids);
if (!id)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
if (rc)
return rc;
rc = skx_get_all_bus_mappings(cfg, &skx_edac_list);
if (rc < 0)
goto fail;
if (rc == 0) {
edac_dbg(2, "No memory controllers found\n");
return -ENODEV;
}
skx_num_sockets = rc;
for (m = skx_all_munits; m->did; m++) {
rc = get_all_munits(m);
if (rc < 0)
goto fail;
if (rc != m->per_socket * skx_num_sockets) {
edac_dbg(2, "Expected %d, got %d of 0x%x\n",
m->per_socket * skx_num_sockets, rc, m->did);
rc = -ENODEV;
goto fail;
}
}
list_for_each_entry(d, skx_edac_list, list) {
rc = skx_get_src_id(d, 0xf0, &src_id);
if (rc < 0)
goto fail;
rc = skx_get_node_id(d, &node_id);
if (rc < 0)
goto fail;
edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
for (i = 0; i < SKX_NUM_IMC; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
d->imc[i].node_id = node_id;
rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
if (rc < 0)
goto fail;
}
}
skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
if (nvdimm_count && skx_adxl_get() != -ENODEV) {
skx_set_decode(NULL, skx_show_retry_rd_err_log);
} else {
if (nvdimm_count)
skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");
skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
}
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
setup_skx_debug();
mce_register_decode_chain(&skx_mce_dec);
return 0;
fail:
skx_remove();
return rc;
}
static void __exit skx_exit(void)
{
edac_dbg(2, "\n");
mce_unregister_decode_chain(&skx_mce_dec);
teardown_skx_debug();
if (nvdimm_count)
skx_adxl_put();
skx_remove();
}
module_init(skx_init);
module_exit(skx_exit);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
| linux-master | drivers/edac/skx_base.c |
/*
* (C) 2005, 2006 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
 * Written by Doug Thompson <[email protected]>
*
*/
#include <linux/module.h>
#include <linux/edac.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include "edac_pci.h"
#include "edac_module.h"
#define EDAC_PCI_SYMLINK "device"
/* data variables exported via sysfs */
static int check_pci_errors; /* default NO check PCI parity */
static int edac_pci_panic_on_pe; /* default NO panic on PCI Parity */
static int edac_pci_log_pe = 1; /* log PCI parity errors */
static int edac_pci_log_npe = 1; /* log PCI non-parity errors */
static int edac_pci_poll_msec = 1000; /* one second workq period */
static atomic_t pci_parity_count = ATOMIC_INIT(0);
static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
static struct kobject *edac_pci_top_main_kobj;
static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
/* getter functions for the data variables */
int edac_pci_get_check_errors(void)
{
return check_pci_errors;
}
static int edac_pci_get_log_pe(void)
{
return edac_pci_log_pe;
}
static int edac_pci_get_log_npe(void)
{
return edac_pci_log_npe;
}
static int edac_pci_get_panic_on_pe(void)
{
return edac_pci_panic_on_pe;
}
int edac_pci_get_poll_msec(void)
{
return edac_pci_poll_msec;
}
/**************************** EDAC PCI sysfs instance *******************/
static ssize_t instance_pe_count_show(struct edac_pci_ctl_info *pci, char *data)
{
return sprintf(data, "%u\n", atomic_read(&pci->counters.pe_count));
}
static ssize_t instance_npe_count_show(struct edac_pci_ctl_info *pci,
char *data)
{
return sprintf(data, "%u\n", atomic_read(&pci->counters.npe_count));
}
#define to_instance(k) container_of(k, struct edac_pci_ctl_info, kobj)
#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
/* DEVICE instance kobject release() function */
static void edac_pci_instance_release(struct kobject *kobj)
{
struct edac_pci_ctl_info *pci;
edac_dbg(0, "\n");
/* Form pointer to containing struct, the pci control struct */
pci = to_instance(kobj);
/* decrement reference count on top main kobj */
kobject_put(edac_pci_top_main_kobj);
kfree(pci); /* Free the control struct */
}
/* instance specific attribute structure */
struct instance_attribute {
struct attribute attr;
ssize_t(*show) (struct edac_pci_ctl_info *, char *);
ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t);
};
/* Function to 'show' fields from the edac_pci 'instance' structure */
static ssize_t edac_pci_instance_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct edac_pci_ctl_info *pci = to_instance(kobj);
struct instance_attribute *instance_attr = to_instance_attr(attr);
if (instance_attr->show)
return instance_attr->show(pci, buffer);
return -EIO;
}
/* Function to 'store' fields into the edac_pci 'instance' structure */
static ssize_t edac_pci_instance_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer, size_t count)
{
struct edac_pci_ctl_info *pci = to_instance(kobj);
struct instance_attribute *instance_attr = to_instance_attr(attr);
if (instance_attr->store)
return instance_attr->store(pci, buffer, count);
return -EIO;
}
/* fs_ops table */
static const struct sysfs_ops pci_instance_ops = {
.show = edac_pci_instance_show,
.store = edac_pci_instance_store
};
#define INSTANCE_ATTR(_name, _mode, _show, _store) \
static struct instance_attribute attr_instance_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
};
INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL);
INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL);
/* pci instance attributes */
static struct attribute *pci_instance_attrs[] = {
&attr_instance_pe_count.attr,
&attr_instance_npe_count.attr,
NULL
};
ATTRIBUTE_GROUPS(pci_instance);
/* the ktype for a pci instance */
static struct kobj_type ktype_pci_instance = {
.release = edac_pci_instance_release,
.sysfs_ops = &pci_instance_ops,
.default_groups = pci_instance_groups,
};
/*
* edac_pci_create_instance_kobj
*
* construct one EDAC PCI instance's kobject for use
*/
static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
{
struct kobject *main_kobj;
int err;
edac_dbg(0, "\n");
/* First bump the ref count on the top main kobj, which tracks
 * the number of PCI instances we have and thus keeps the module
 * loaded for as long as any instance exists
 */
main_kobj = kobject_get(edac_pci_top_main_kobj);
if (!main_kobj) {
err = -ENODEV;
goto error_out;
}
/* And now register this new kobject under the main kobj */
err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
edac_pci_top_main_kobj, "pci%d", idx);
if (err != 0) {
edac_dbg(2, "failed to register instance pci%d\n", idx);
kobject_put(edac_pci_top_main_kobj);
goto error_out;
}
kobject_uevent(&pci->kobj, KOBJ_ADD);
edac_dbg(1, "Register instance 'pci%d' kobject\n", idx);
return 0;
/* Error unwind stack */
error_out:
return err;
}
/*
* edac_pci_unregister_sysfs_instance_kobj
*
* unregister the kobj for the EDAC PCI instance
*/
static void edac_pci_unregister_sysfs_instance_kobj(
struct edac_pci_ctl_info *pci)
{
edac_dbg(0, "\n");
/* Unregister the instance kobject and allow its release
 * function to release the main reference count and then
* kfree the memory
*/
kobject_put(&pci->kobj);
}
/***************************** EDAC PCI sysfs root **********************/
#define to_edacpci(k) container_of(k, struct edac_pci_ctl_info, kobj)
#define to_edacpci_attr(a) container_of(a, struct edac_pci_attr, attr)
/* simple show/store functions for attributes */
static ssize_t edac_pci_int_show(void *ptr, char *buffer)
{
int *value = ptr;
return sprintf(buffer, "%d\n", *value);
}
static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
{
int *value = ptr;
if (isdigit(*buffer))
*value = simple_strtoul(buffer, NULL, 0);
return count;
}
struct edac_pci_dev_attribute {
struct attribute attr;
void *value;
ssize_t(*show) (void *, char *);
ssize_t(*store) (void *, const char *, size_t);
};
/* Set of show/store abstract level functions for PCI Parity object */
static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
if (edac_pci_dev->show)
return edac_pci_dev->show(edac_pci_dev->value, buffer);
return -EIO;
}
static ssize_t edac_pci_dev_store(struct kobject *kobj,
struct attribute *attr, const char *buffer,
size_t count)
{
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
if (edac_pci_dev->store)
return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
return -EIO;
}
static const struct sysfs_ops edac_pci_sysfs_ops = {
.show = edac_pci_dev_show,
.store = edac_pci_dev_store
};
#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.value = &_name, \
.show = _show, \
.store = _store, \
};
#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.value = _data, \
.show = _show, \
.store = _store, \
};
/* PCI Parity control files */
EDAC_PCI_ATTR(check_pci_errors, S_IRUGO | S_IWUSR, edac_pci_int_show,
edac_pci_int_store);
EDAC_PCI_ATTR(edac_pci_log_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
edac_pci_int_store);
EDAC_PCI_ATTR(edac_pci_log_npe, S_IRUGO | S_IWUSR, edac_pci_int_show,
edac_pci_int_store);
EDAC_PCI_ATTR(edac_pci_panic_on_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
edac_pci_int_store);
EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
/* Base Attributes of the memory ECC object */
static struct attribute *edac_pci_attrs[] = {
&edac_pci_attr_check_pci_errors.attr,
&edac_pci_attr_edac_pci_log_pe.attr,
&edac_pci_attr_edac_pci_log_npe.attr,
&edac_pci_attr_edac_pci_panic_on_pe.attr,
&edac_pci_attr_pci_parity_count.attr,
&edac_pci_attr_pci_nonparity_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(edac_pci);
/*
* edac_pci_release_main_kobj
*
* This release function is called when the reference count to the
* passed kobj goes to zero.
*
* This kobj is the 'main' kobject that EDAC PCI instances
* link to, and thus provide for proper nesting counts
*/
static void edac_pci_release_main_kobj(struct kobject *kobj)
{
edac_dbg(0, "here to module_put(THIS_MODULE)\n");
kfree(kobj);
/* last reference to top EDAC PCI kobject has been removed,
* NOW release our ref count on the core module
*/
module_put(THIS_MODULE);
}
/* ktype struct for the EDAC PCI main kobj */
static struct kobj_type ktype_edac_pci_main_kobj = {
.release = edac_pci_release_main_kobj,
.sysfs_ops = &edac_pci_sysfs_ops,
.default_groups = edac_pci_groups,
};
/**
* edac_pci_main_kobj_setup: Setup the sysfs for EDAC PCI attributes.
*/
static int edac_pci_main_kobj_setup(void)
{
int err = -ENODEV;
struct bus_type *edac_subsys;
struct device *dev_root;
edac_dbg(0, "\n");
/* check and count if we have already created the main kobject */
if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
return 0;
/* First time, so create the main kobject and its
* controls and attributes
*/
edac_subsys = edac_get_sysfs_subsys();
/* Bump the reference count on this module to ensure the
 * module isn't unloaded until we deconstruct the top
* level main kobj for EDAC PCI
*/
if (!try_module_get(THIS_MODULE)) {
edac_dbg(1, "try_module_get() failed\n");
goto decrement_count_fail;
}
edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (!edac_pci_top_main_kobj) {
edac_dbg(1, "Failed to allocate\n");
err = -ENOMEM;
goto kzalloc_fail;
}
/* Instantiate the pci object */
dev_root = bus_get_dev_root(edac_subsys);
if (dev_root) {
err = kobject_init_and_add(edac_pci_top_main_kobj,
&ktype_edac_pci_main_kobj,
&dev_root->kobj, "pci");
put_device(dev_root);
}
if (err) {
edac_dbg(1, "Failed to register '.../edac/pci'\n");
goto kobject_init_and_add_fail;
}
/* At this point, to 'release' the top level kobject
* for EDAC PCI, then edac_pci_main_kobj_teardown()
* must be used, for resources to be cleaned up properly
*/
kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
edac_dbg(1, "Registered '.../edac/pci' kobject\n");
return 0;
/* Error unwind stack */
kobject_init_and_add_fail:
kobject_put(edac_pci_top_main_kobj);
kzalloc_fail:
module_put(THIS_MODULE);
decrement_count_fail:
/* if we are on this error exit path, there is nothing to tear down */
atomic_dec(&edac_pci_sysfs_refcount);
return err;
}
/*
* edac_pci_main_kobj_teardown()
*
* if no longer linked (needed) remove the top level EDAC PCI
* kobject with its controls and attributes
*/
static void edac_pci_main_kobj_teardown(void)
{
edac_dbg(0, "\n");
/* Decrement the count and only if no more controller instances
 * are connected, perform the unregistration of the top level
* main kobj
*/
if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
edac_dbg(0, "called kobject_put on main kobj\n");
kobject_put(edac_pci_top_main_kobj);
}
}
int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
{
int err;
struct kobject *edac_kobj = &pci->kobj;
edac_dbg(0, "idx=%d\n", pci->pci_idx);
/* create the top main EDAC PCI kobject, IF needed */
err = edac_pci_main_kobj_setup();
if (err)
return err;
/* Create this instance's kobject under the MAIN kobject */
err = edac_pci_create_instance_kobj(pci, pci->pci_idx);
if (err)
goto unregister_cleanup;
err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
if (err) {
edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
goto symlink_fail;
}
return 0;
/* Error unwind stack */
symlink_fail:
edac_pci_unregister_sysfs_instance_kobj(pci);
unregister_cleanup:
edac_pci_main_kobj_teardown();
return err;
}
void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
{
edac_dbg(0, "index=%d\n", pci->pci_idx);
/* Remove the symlink */
sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
/* remove this PCI instance's sysfs entries */
edac_pci_unregister_sysfs_instance_kobj(pci);
/* Call the main unregister function, which will determine
* if this 'pci' is the last instance.
* If it is, the main kobject will be unregistered as a result
*/
edac_dbg(0, "calling edac_pci_main_kobj_teardown()\n");
edac_pci_main_kobj_teardown();
}
/************************ PCI error handling *************************/
static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
{
int where;
u16 status;
where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
pci_read_config_word(dev, where, &status);
/* If we get back 0xFFFF then we must suspect that the card has been
* pulled but the Linux PCI layer has not yet finished cleaning up.
* We don't want to report on such devices
*/
if (status == 0xFFFF) {
u32 sanity;
pci_read_config_dword(dev, 0, &sanity);
if (sanity == 0xFFFFFFFF)
return 0;
}
status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
PCI_STATUS_PARITY;
if (status)
/* reset only the bits we are interested in */
pci_write_config_word(dev, where, status);
return status;
}
/* Clear any PCI parity errors logged by this device. */
static void edac_pci_dev_parity_clear(struct pci_dev *dev)
{
u8 header_type;
get_pci_parity_status(dev, 0);
/* read the device TYPE, looking for bridges */
pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
get_pci_parity_status(dev, 1);
}
/*
* PCI Parity polling
*
* Function to retrieve the current parity status
* and decode it
*
*/
static void edac_pci_dev_parity_test(struct pci_dev *dev)
{
unsigned long flags;
u16 status;
u8 header_type;
/* stop any interrupts until we can acquire the status */
local_irq_save(flags);
/* read the STATUS register on this device */
status = get_pci_parity_status(dev, 0);
/* read the device TYPE, looking for bridges */
pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
local_irq_restore(flags);
edac_dbg(4, "PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
/* check the status reg for errors on boards NOT marked as broken
* if broken, we cannot trust any of the status bits
*/
if (status && !dev->broken_parity_status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
edac_printk(KERN_CRIT, EDAC_PCI,
"Signaled System Error on %s\n",
pci_name(dev));
atomic_inc(&pci_nonparity_count);
}
if (status & (PCI_STATUS_PARITY)) {
edac_printk(KERN_CRIT, EDAC_PCI,
"Master Data Parity Error on %s\n",
pci_name(dev));
atomic_inc(&pci_parity_count);
}
if (status & (PCI_STATUS_DETECTED_PARITY)) {
edac_printk(KERN_CRIT, EDAC_PCI,
"Detected Parity Error on %s\n",
pci_name(dev));
atomic_inc(&pci_parity_count);
}
}
edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n",
header_type, dev_name(&dev->dev));
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* On bridges, need to examine secondary status register */
status = get_pci_parity_status(dev, 1);
edac_dbg(4, "PCI SEC_STATUS= 0x%04x %s\n",
status, dev_name(&dev->dev));
/* check the secondary status reg for errors,
* on NOT broken boards
*/
if (status && !dev->broken_parity_status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
"Signaled System Error on %s\n",
pci_name(dev));
atomic_inc(&pci_nonparity_count);
}
if (status & (PCI_STATUS_PARITY)) {
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
"Master Data Parity Error on "
"%s\n", pci_name(dev));
atomic_inc(&pci_parity_count);
}
if (status & (PCI_STATUS_DETECTED_PARITY)) {
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
"Detected Parity Error on %s\n",
pci_name(dev));
atomic_inc(&pci_parity_count);
}
}
}
}
/* reduce some complexity in definition of the iterator */
typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
/*
* pci_dev parity list iterator
*
* Scan the PCI device list looking for SERRORs, Master Parity ERRORS or
* Parity ERRORs on primary or secondary devices.
*/
static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
{
struct pci_dev *dev = NULL;
for_each_pci_dev(dev)
fn(dev);
}
/*
* edac_pci_do_parity_check
*
* performs the actual PCI parity check operation
*/
void edac_pci_do_parity_check(void)
{
int before_count;
edac_dbg(3, "\n");
/* if policy has PCI check off, leave now */
if (!check_pci_errors)
return;
before_count = atomic_read(&pci_parity_count);
/* scan all PCI devices looking for a Parity Error on devices and
* bridges.
* The iterator calls pci_get_device() which might sleep, thus
* we cannot disable interrupts in this scan.
*/
edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
/* Only if operator has selected panic on PCI Error */
if (edac_pci_get_panic_on_pe()) {
/* If the count is different 'after' from 'before' */
if (before_count != atomic_read(&pci_parity_count))
panic("EDAC: PCI Parity Error");
}
}
/*
* edac_pci_clear_parity_errors
*
* function to perform an iteration over the PCI devices
 * and clear their current status
*/
void edac_pci_clear_parity_errors(void)
{
/* Clear any PCI bus parity errors that devices initially have logged
* in their registers.
*/
edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
}
/*
* edac_pci_handle_pe
*
* Called to handle a PARITY ERROR event
*/
void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg)
{
/* global PE counter incremented by edac_pci_do_parity_check() */
atomic_inc(&pci->counters.pe_count);
if (edac_pci_get_log_pe())
edac_pci_printk(pci, KERN_WARNING,
"Parity Error ctl: %s %d: %s\n",
pci->ctl_name, pci->pci_idx, msg);
/*
* poke all PCI devices and see which one is the troublemaker
* panic() is called if set
*/
edac_pci_do_parity_check();
}
EXPORT_SYMBOL_GPL(edac_pci_handle_pe);
/*
* edac_pci_handle_npe
*
* Called to handle a NON-PARITY ERROR event
*/
void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg)
{
/* global NPE counter incremented by edac_pci_do_parity_check() */
atomic_inc(&pci->counters.npe_count);
if (edac_pci_get_log_npe())
edac_pci_printk(pci, KERN_WARNING,
"Non-Parity Error ctl: %s %d: %s\n",
pci->ctl_name, pci->pci_idx, msg);
/*
* poke all PCI devices and see which one is the troublemaker
* panic() is called if set
*/
edac_pci_do_parity_check();
}
EXPORT_SYMBOL_GPL(edac_pci_handle_npe);
/*
* Define the PCI parameter to the module
*/
module_param(check_pci_errors, int, 0644);
MODULE_PARM_DESC(check_pci_errors,
"Check for PCI bus parity errors: 0=off 1=on");
module_param(edac_pci_panic_on_pe, int, 0644);
MODULE_PARM_DESC(edac_pci_panic_on_pe,
"Panic on PCI Bus Parity error: 0=off 1=on");
| linux-master | drivers/edac/edac_pci_sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Intel client SoC with integrated memory controller using IBECC
*
* Copyright (C) 2020 Intel Corporation
*
* The In-Band ECC (IBECC) IP provides ECC protection to all or specific
* regions of the physical memory space. It's used for memory controllers
 * that don't support out-of-band ECC, which often needs an additional
 * storage device attached to each channel for storing ECC data.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/edac.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/mce.h>
#include "edac_mc.h"
#include "edac_module.h"
#define IGEN6_REVISION "v2.5.1"
#define EDAC_MOD_STR "igen6_edac"
#define IGEN6_NMI_NAME "igen6_ibecc"
/* Debug macros */
#define igen6_printk(level, fmt, arg...) \
edac_printk(level, "igen6", fmt, ##arg)
#define igen6_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "igen6", fmt, ##arg)
#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
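/* Worked example: GET_BITFIELD(0xabcd, 4, 7) masks bits 7..4 and shifts them down, returning 0xc. */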
#define NUM_IMC 2 /* Max memory controllers */
#define NUM_CHANNELS 2 /* Max channels */
#define NUM_DIMMS 2 /* Max DIMMs per channel */
#define _4GB BIT_ULL(32)
/* Size of physical memory */
#define TOM_OFFSET 0xa0
/* Top of low usable DRAM */
#define TOLUD_OFFSET 0xbc
/* Capability register C */
#define CAPID_C_OFFSET 0xec
#define CAPID_C_IBECC BIT(15)
/* Capability register E */
#define CAPID_E_OFFSET 0xf0
#define CAPID_E_IBECC BIT(12)
/* Error Status */
#define ERRSTS_OFFSET 0xc8
#define ERRSTS_CE BIT_ULL(6)
#define ERRSTS_UE BIT_ULL(7)
/* Error Command */
#define ERRCMD_OFFSET 0xca
#define ERRCMD_CE BIT_ULL(6)
#define ERRCMD_UE BIT_ULL(7)
/* IBECC MMIO base address */
#define IBECC_BASE (res_cfg->ibecc_base)
#define IBECC_ACTIVATE_OFFSET IBECC_BASE
#define IBECC_ACTIVATE_EN BIT(0)
/* IBECC error log */
#define ECC_ERROR_LOG_OFFSET (IBECC_BASE + res_cfg->ibecc_error_log_offset)
#define ECC_ERROR_LOG_CE BIT_ULL(62)
#define ECC_ERROR_LOG_UE BIT_ULL(63)
#define ECC_ERROR_LOG_ADDR_SHIFT 5
#define ECC_ERROR_LOG_ADDR(v) GET_BITFIELD(v, 5, 38)
#define ECC_ERROR_LOG_SYND(v) GET_BITFIELD(v, 46, 61)
/* Host MMIO base address */
#define MCHBAR_OFFSET 0x48
#define MCHBAR_EN BIT_ULL(0)
#define MCHBAR_BASE(v) (GET_BITFIELD(v, 16, 38) << 16)
#define MCHBAR_SIZE 0x10000
/* Parameters for the channel decode stage */
#define IMC_BASE (res_cfg->imc_base)
#define MAD_INTER_CHANNEL_OFFSET IMC_BASE
#define MAD_INTER_CHANNEL_DDR_TYPE(v) GET_BITFIELD(v, 0, 2)
#define MAD_INTER_CHANNEL_ECHM(v) GET_BITFIELD(v, 3, 3)
#define MAD_INTER_CHANNEL_CH_L_MAP(v) GET_BITFIELD(v, 4, 4)
#define MAD_INTER_CHANNEL_CH_S_SIZE(v) ((u64)GET_BITFIELD(v, 12, 19) << 29)
/* Parameters for DRAM decode stage */
#define MAD_INTRA_CH0_OFFSET (IMC_BASE + 4)
#define MAD_INTRA_CH_DIMM_L_MAP(v) GET_BITFIELD(v, 0, 0)
/* DIMM characteristics */
#define MAD_DIMM_CH0_OFFSET (IMC_BASE + 0xc)
#define MAD_DIMM_CH_DIMM_L_SIZE(v) ((u64)GET_BITFIELD(v, 0, 6) << 29)
#define MAD_DIMM_CH_DLW(v) GET_BITFIELD(v, 7, 8)
#define MAD_DIMM_CH_DIMM_S_SIZE(v) ((u64)GET_BITFIELD(v, 16, 22) << 29)
#define MAD_DIMM_CH_DSW(v) GET_BITFIELD(v, 24, 25)
/* Hash for memory controller selection */
#define MAD_MC_HASH_OFFSET (IMC_BASE + 0x1b8)
#define MAC_MC_HASH_LSB(v) GET_BITFIELD(v, 1, 3)
/* Hash for channel selection */
#define CHANNEL_HASH_OFFSET (IMC_BASE + 0x24)
/* Hash for enhanced channel selection */
#define CHANNEL_EHASH_OFFSET (IMC_BASE + 0x28)
#define CHANNEL_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define CHANNEL_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
#define CHANNEL_HASH_MODE(v) GET_BITFIELD(v, 28, 28)
/* Parameters for memory slice decode stage */
#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
static struct res_config {
bool machine_check;
int num_imc;
u32 imc_base;
u32 cmf_base;
u32 cmf_size;
u32 ms_hash_offset;
u32 ibecc_base;
u32 ibecc_error_log_offset;
bool (*ibecc_available)(struct pci_dev *pdev);
/* Convert error address logged in IBECC to system physical address */
u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc);
/* Convert error address logged in IBECC to integrated memory controller address */
u64 (*err_addr_to_imc_addr)(u64 eaddr, int mc);
} *res_cfg;
struct igen6_imc {
int mc;
struct mem_ctl_info *mci;
struct pci_dev *pdev;
struct device dev;
void __iomem *window;
u64 size;
u64 ch_s_size;
int ch_l_map;
u64 dimm_s_size[NUM_CHANNELS];
u64 dimm_l_size[NUM_CHANNELS];
int dimm_l_map[NUM_CHANNELS];
};
static struct igen6_pvt {
struct igen6_imc imc[NUM_IMC];
u64 ms_hash;
u64 ms_s_size;
int ms_l_map;
} *igen6_pvt;
/* The top of low usable DRAM */
static u32 igen6_tolud;
/* The size of physical memory */
static u64 igen6_tom;
struct decoded_addr {
int mc;
u64 imc_addr;
u64 sys_addr;
int channel_idx;
u64 channel_addr;
int sub_channel_idx;
u64 sub_channel_addr;
};
struct ecclog_node {
struct llist_node llnode;
int mc;
u64 ecclog;
};
/*
* In the NMI handler, the driver uses the lock-less memory allocator
* to allocate memory to store the IBECC error logs and links the logs
* to the lock-less list. Delay printk() and the work of error reporting
* to EDAC core in a worker.
*/
#define ECCLOG_POOL_SIZE PAGE_SIZE
static LLIST_HEAD(ecclog_llist);
static struct gen_pool *ecclog_pool;
static char ecclog_buf[ECCLOG_POOL_SIZE];
static struct irq_work ecclog_irq_work;
static struct work_struct ecclog_work;
/* Compute die IDs for Elkhart Lake with IBECC */
#define DID_EHL_SKU5 0x4514
#define DID_EHL_SKU6 0x4528
#define DID_EHL_SKU7 0x452a
#define DID_EHL_SKU8 0x4516
#define DID_EHL_SKU9 0x452c
#define DID_EHL_SKU10 0x452e
#define DID_EHL_SKU11 0x4532
#define DID_EHL_SKU12 0x4518
#define DID_EHL_SKU13 0x451a
#define DID_EHL_SKU14 0x4534
#define DID_EHL_SKU15 0x4536
/* Compute die IDs for ICL-NNPI with IBECC */
#define DID_ICL_SKU8 0x4581
#define DID_ICL_SKU10 0x4585
#define DID_ICL_SKU11 0x4589
#define DID_ICL_SKU12 0x458d
/* Compute die IDs for Tiger Lake with IBECC */
#define DID_TGL_SKU 0x9a14
/* Compute die IDs for Alder Lake with IBECC */
#define DID_ADL_SKU1 0x4601
#define DID_ADL_SKU2 0x4602
#define DID_ADL_SKU3 0x4621
#define DID_ADL_SKU4 0x4641
static bool ehl_ibecc_available(struct pci_dev *pdev)
{
u32 v;
if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
return false;
return !!(CAPID_C_IBECC & v);
}
static u64 ehl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
return eaddr;
}
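/*
 * On Elkhart Lake the logged error address is a system address; undo the
 * TOLUD/TOM remapping (the inverse of mem_addr_to_sys_addr() below) to
 * get the address as seen by the IMC.
 */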
static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
if (eaddr < igen6_tolud)
return eaddr;
if (igen6_tom <= _4GB)
return eaddr + igen6_tolud - _4GB;
if (eaddr < _4GB)
return eaddr + igen6_tolud - igen6_tom;
return eaddr;
}
static bool icl_ibecc_available(struct pci_dev *pdev)
{
u32 v;
if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
return false;
return !(CAPID_C_IBECC & v) &&
(boot_cpu_data.x86_stepping >= 1);
}
static bool tgl_ibecc_available(struct pci_dev *pdev)
{
u32 v;
if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v))
return false;
return !(CAPID_E_IBECC & v);
}
static u64 mem_addr_to_sys_addr(u64 maddr)
{
if (maddr < igen6_tolud)
return maddr;
if (igen6_tom <= _4GB)
return maddr - igen6_tolud + _4GB;
if (maddr < _4GB)
return maddr - igen6_tolud + igen6_tom;
return maddr;
}
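/*
 * Memory-slice selection hash: XOR-fold bits 6..19 of the masked address
 * and XOR the result with the selected interleave bit of the address.
 * hash_init seeds the fold (the caller passes the MC number).
 */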
static u64 mem_slice_hash(u64 addr, u64 mask, u64 hash_init, int intlv_bit)
{
u64 hash_addr = addr & mask, hash = hash_init;
u64 intlv = (addr >> intlv_bit) & 1;
int i;
for (i = 6; i < 20; i++)
hash ^= (hash_addr >> i) & 1;
return hash ^ intlv;
}
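/*
 * Reverse the memory-slice interleave for Tiger Lake. Error addresses at
 * or above the small slice size map past the interleaved region; below
 * that, the slice-select bit computed from the mem-slice hash is
 * re-inserted at the interleave position to rebuild the memory address.
 */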
static u64 tgl_err_addr_to_mem_addr(u64 eaddr, int mc)
{
u64 maddr, hash, mask, ms_s_size;
int intlv_bit;
u32 ms_hash;
ms_s_size = igen6_pvt->ms_s_size;
if (eaddr >= ms_s_size)
return eaddr + ms_s_size;
ms_hash = igen6_pvt->ms_hash;
mask = MEM_SLICE_HASH_MASK(ms_hash);
intlv_bit = MEM_SLICE_HASH_LSB_MASK_BIT(ms_hash) + 6;
maddr = GET_BITFIELD(eaddr, intlv_bit, 63) << (intlv_bit + 1) |
GET_BITFIELD(eaddr, 0, intlv_bit - 1);
hash = mem_slice_hash(maddr, mask, mc, intlv_bit);
return maddr | (hash << intlv_bit);
}
static u64 tgl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
u64 maddr = tgl_err_addr_to_mem_addr(eaddr, mc);
return mem_addr_to_sys_addr(maddr);
}
static u64 tgl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
return eaddr;
}
static u64 adl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
return mem_addr_to_sys_addr(eaddr);
}
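/*
 * Reverse the memory-controller interleave for Alder Lake. Error addresses
 * at or above twice the small size map past the interleaved region; below
 * that, drop the MC-select bit (its position comes from the MC hash
 * register) to get the address local to this IMC.
 */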
static u64 adl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
u64 imc_addr, ms_s_size = igen6_pvt->ms_s_size;
struct igen6_imc *imc = &igen6_pvt->imc[mc];
int intlv_bit;
u32 mc_hash;
if (eaddr >= 2 * ms_s_size)
return eaddr - ms_s_size;
mc_hash = readl(imc->window + MAD_MC_HASH_OFFSET);
intlv_bit = MAC_MC_HASH_LSB(mc_hash) + 6;
imc_addr = GET_BITFIELD(eaddr, intlv_bit + 1, 63) << intlv_bit |
GET_BITFIELD(eaddr, 0, intlv_bit - 1);
return imc_addr;
}
static struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xdc00,
.ibecc_available = ehl_ibecc_available,
.ibecc_error_log_offset = 0x170,
.err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
static struct res_config icl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xd800,
.ibecc_error_log_offset = 0x170,
.ibecc_available = icl_ibecc_available,
.err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
static struct res_config tgl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0x5000,
.cmf_base = 0x11000,
.cmf_size = 0x800,
.ms_hash_offset = 0xac,
.ibecc_base = 0xd400,
.ibecc_error_log_offset = 0x170,
.ibecc_available = tgl_ibecc_available,
.err_addr_to_sys_addr = tgl_err_addr_to_sys_addr,
.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};
static struct res_config adl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
.ibecc_base = 0xd400,
.ibecc_error_log_offset = 0x68,
.ibecc_available = tgl_ibecc_available,
.err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU7), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU8), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU9), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU10), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU11), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU12), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU13), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU14), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU15), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_ICL_SKU8), (kernel_ulong_t)&icl_cfg },
{ PCI_VDEVICE(INTEL, DID_ICL_SKU10), (kernel_ulong_t)&icl_cfg },
{ PCI_VDEVICE(INTEL, DID_ICL_SKU11), (kernel_ulong_t)&icl_cfg },
{ PCI_VDEVICE(INTEL, DID_ICL_SKU12), (kernel_ulong_t)&icl_cfg },
{ PCI_VDEVICE(INTEL, DID_TGL_SKU), (kernel_ulong_t)&tgl_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_SKU1), (kernel_ulong_t)&adl_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_SKU2), (kernel_ulong_t)&adl_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_SKU3), (kernel_ulong_t)&adl_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_SKU4), (kernel_ulong_t)&adl_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
static enum dev_type get_width(int dimm_l, u32 mad_dimm)
{
u32 w = dimm_l ? MAD_DIMM_CH_DLW(mad_dimm) :
MAD_DIMM_CH_DSW(mad_dimm);
switch (w) {
case 0:
return DEV_X8;
case 1:
return DEV_X16;
case 2:
return DEV_X32;
default:
return DEV_UNKNOWN;
}
}
static enum mem_type get_memory_type(u32 mad_inter)
{
u32 t = MAD_INTER_CHANNEL_DDR_TYPE(mad_inter);
switch (t) {
case 0:
return MEM_DDR4;
case 1:
return MEM_DDR3;
case 2:
return MEM_LPDDR3;
case 3:
return MEM_LPDDR4;
case 4:
return MEM_WIO2;
default:
return MEM_UNKNOWN;
}
}
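/*
 * Channel/sub-channel selection hash: XOR-fold bits 6..19 of the masked
 * address and XOR with the interleave bit, mirroring mem_slice_hash()
 * but with a zero seed.
 */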
static int decode_chan_idx(u64 addr, u64 mask, int intlv_bit)
{
u64 hash_addr = addr & mask, hash = 0;
u64 intlv = (addr >> intlv_bit) & 1;
int i;
for (i = 6; i < 20; i++)
hash ^= (hash_addr >> i) & 1;
return (int)hash ^ intlv;
}
static u64 decode_channel_addr(u64 addr, int intlv_bit)
{
u64 channel_addr;
/* Remove the interleave bit and shift upper part down to fill gap */
channel_addr = GET_BITFIELD(addr, intlv_bit + 1, 63) << intlv_bit;
channel_addr |= GET_BITFIELD(addr, 0, intlv_bit - 1);
return channel_addr;
}
static void decode_addr(u64 addr, u32 hash, u64 s_size, int l_map,
int *idx, u64 *sub_addr)
{
int intlv_bit = CHANNEL_HASH_LSB_MASK_BIT(hash) + 6;
if (addr > 2 * s_size) {
*sub_addr = addr - s_size;
*idx = l_map;
return;
}
if (CHANNEL_HASH_MODE(hash)) {
*sub_addr = decode_channel_addr(addr, intlv_bit);
*idx = decode_chan_idx(addr, CHANNEL_HASH_MASK(hash), intlv_bit);
} else {
*sub_addr = decode_channel_addr(addr, 6);
*idx = GET_BITFIELD(addr, 6, 6);
}
}
static int igen6_decode(struct decoded_addr *res)
{
struct igen6_imc *imc = &igen6_pvt->imc[res->mc];
u64 addr = res->imc_addr, sub_addr, s_size;
int idx, l_map;
u32 hash;
if (addr >= igen6_tom) {
edac_dbg(0, "Address 0x%llx out of range\n", addr);
return -EINVAL;
}
/* Decode channel */
hash = readl(imc->window + CHANNEL_HASH_OFFSET);
s_size = imc->ch_s_size;
l_map = imc->ch_l_map;
decode_addr(addr, hash, s_size, l_map, &idx, &sub_addr);
res->channel_idx = idx;
res->channel_addr = sub_addr;
/* Decode sub-channel/DIMM */
hash = readl(imc->window + CHANNEL_EHASH_OFFSET);
s_size = imc->dimm_s_size[idx];
l_map = imc->dimm_l_map[idx];
decode_addr(res->channel_addr, hash, s_size, l_map, &idx, &sub_addr);
res->sub_channel_idx = idx;
res->sub_channel_addr = sub_addr;
return 0;
}
static void igen6_output_error(struct decoded_addr *res,
struct mem_ctl_info *mci, u64 ecclog)
{
enum hw_event_mc_err_type type = ecclog & ECC_ERROR_LOG_UE ?
HW_EVENT_ERR_UNCORRECTED :
HW_EVENT_ERR_CORRECTED;
edac_mc_handle_error(type, mci, 1,
res->sys_addr >> PAGE_SHIFT,
res->sys_addr & ~PAGE_MASK,
ECC_ERROR_LOG_SYND(ecclog),
res->channel_idx, res->sub_channel_idx,
-1, "", "");
}
static struct gen_pool *ecclog_gen_pool_create(void)
{
struct gen_pool *pool;
pool = gen_pool_create(ilog2(sizeof(struct ecclog_node)), -1);
if (!pool)
return NULL;
if (gen_pool_add(pool, (unsigned long)ecclog_buf, ECCLOG_POOL_SIZE, -1)) {
gen_pool_destroy(pool);
return NULL;
}
return pool;
}
static int ecclog_gen_pool_add(int mc, u64 ecclog)
{
struct ecclog_node *node;
node = (void *)gen_pool_alloc(ecclog_pool, sizeof(*node));
if (!node)
return -ENOMEM;
node->mc = mc;
node->ecclog = ecclog;
llist_add(&node->llnode, &ecclog_llist);
return 0;
}
/*
* Either the memory-mapped I/O status register ECC_ERROR_LOG or the PCI
* configuration space status register ERRSTS can indicate whether a
* correctable error or an uncorrectable error occurred. We only use the
* ECC_ERROR_LOG register to check error type, but need to clear both
* registers to enable future error events.
*/
static u64 ecclog_read_and_clear(struct igen6_imc *imc)
{
u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);
if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
/* Clear CE/UE bits by writing 1s */
writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
return ecclog;
}
return 0;
}
static void errsts_clear(struct igen6_imc *imc)
{
u16 errsts;
if (pci_read_config_word(imc->pdev, ERRSTS_OFFSET, &errsts)) {
igen6_printk(KERN_ERR, "Failed to read ERRSTS\n");
return;
}
/* Clear CE/UE bits by writing 1s */
if (errsts & (ERRSTS_CE | ERRSTS_UE))
pci_write_config_word(imc->pdev, ERRSTS_OFFSET, errsts);
}
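/*
 * Enable/disable CE and UE reporting via the ERRCMD register in the host
 * bridge config space (all IMCs share the same PCI device, so imc[0] is
 * used).
 */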
static int errcmd_enable_error_reporting(bool enable)
{
struct igen6_imc *imc = &igen6_pvt->imc[0];
u16 errcmd;
int rc;
rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
if (rc)
return rc;
if (enable)
errcmd |= ERRCMD_CE | ERRCMD_UE;
else
errcmd &= ~(ERRCMD_CE | ERRCMD_UE);
rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
if (rc)
return rc;
return 0;
}
static int ecclog_handler(void)
{
struct igen6_imc *imc;
int i, n = 0;
u64 ecclog;
for (i = 0; i < res_cfg->num_imc; i++) {
imc = &igen6_pvt->imc[i];
/* errsts_clear() isn't NMI-safe. Delay it in the IRQ context */
ecclog = ecclog_read_and_clear(imc);
if (!ecclog)
continue;
if (!ecclog_gen_pool_add(i, ecclog))
irq_work_queue(&ecclog_irq_work);
n++;
}
return n;
}
static void ecclog_work_cb(struct work_struct *work)
{
struct ecclog_node *node, *tmp;
struct mem_ctl_info *mci;
struct llist_node *head;
struct decoded_addr res;
u64 eaddr;
head = llist_del_all(&ecclog_llist);
if (!head)
return;
llist_for_each_entry_safe(node, tmp, head, llnode) {
memset(&res, 0, sizeof(res));
eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
ECC_ERROR_LOG_ADDR_SHIFT;
res.mc = node->mc;
res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr, res.mc);
res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr, res.mc);
mci = igen6_pvt->imc[res.mc].mci;
edac_dbg(2, "MC %d, ecclog = 0x%llx\n", node->mc, node->ecclog);
igen6_mc_printk(mci, KERN_DEBUG, "HANDLING IBECC MEMORY ERROR\n");
igen6_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", res.sys_addr);
if (!igen6_decode(&res))
igen6_output_error(&res, mci, node->ecclog);
gen_pool_free(ecclog_pool, (unsigned long)node, sizeof(*node));
}
}
static void ecclog_irq_work_cb(struct irq_work *irq_work)
{
int i;
for (i = 0; i < res_cfg->num_imc; i++)
errsts_clear(&igen6_pvt->imc[i]);
if (!llist_empty(&ecclog_llist))
schedule_work(&ecclog_work);
}
static int ecclog_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
unsigned char reason;
if (!ecclog_handler())
return NMI_DONE;
/*
* Both In-Band ECC correctable error and uncorrectable error are
* reported by SERR# NMI. The NMI generic code (see pci_serr_error())
* doesn't clear the bit NMI_REASON_CLEAR_SERR (in port 0x61) to
* re-enable the SERR# NMI after NMI handling. So clear this bit here
* to re-enable SERR# NMI for receiving future In-Band ECC errors.
*/
reason = x86_platform.get_nmi_reason() & NMI_REASON_CLEAR_MASK;
reason |= NMI_REASON_CLEAR_SERR;
outb(reason, NMI_REASON_PORT);
reason &= ~NMI_REASON_CLEAR_SERR;
outb(reason, NMI_REASON_PORT);
return NMI_HANDLED;
}
static int ecclog_mce_handler(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *mce = (struct mce *)data;
char *type;
if (mce->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
/*
* Ignore unless this is a memory related error.
* We don't check the bit MCI_STATUS_ADDRV of MCi_STATUS here,
* since this bit isn't set on some CPU (e.g., Tiger Lake UP3).
*/
if ((mce->status & 0xefff) >> 7 != 1)
return NOTIFY_DONE;
if (mce->mcgstatus & MCG_STATUS_MCIP)
type = "Exception";
else
type = "Event";
edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
mce->extcpu, type, mce->mcgstatus,
mce->bank, mce->status);
edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
edac_dbg(0, "MISC 0x%llx\n", mce->misc);
edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
mce->cpuvendor, mce->cpuid, mce->time,
mce->socketid, mce->apicid);
/*
* We just use the Machine Check for the memory error notification.
* Each memory controller is associated with an IBECC instance.
* Directly read and clear the error information (error address and
* error type) on all the IBECC instances so that we know on which
* memory controller the memory error(s) occurred.
*/
if (!ecclog_handler())
return NOTIFY_DONE;
mce->kflags |= MCE_HANDLED_EDAC;
return NOTIFY_DONE;
}
static struct notifier_block ecclog_mce_dec = {
.notifier_call = ecclog_mce_handler,
.priority = MCE_PRIO_EDAC,
};
static bool igen6_check_ecc(struct igen6_imc *imc)
{
u32 activate = readl(imc->window + IBECC_ACTIVATE_OFFSET);
return !!(activate & IBECC_ACTIVATE_EN);
}
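/*
 * Walk the MAD (memory address decode) registers to size each channel and
 * DIMM, populate the EDAC dimm_info entries, and reject the controller if
 * DIMMs are present but In-Band ECC is not activated.
 */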
static int igen6_get_dimm_config(struct mem_ctl_info *mci)
{
struct igen6_imc *imc = mci->pvt_info;
u32 mad_inter, mad_intra, mad_dimm;
int i, j, ndimms, mc = imc->mc;
struct dimm_info *dimm;
enum mem_type mtype;
enum dev_type dtype;
u64 dsize;
bool ecc;
edac_dbg(2, "\n");
mad_inter = readl(imc->window + MAD_INTER_CHANNEL_OFFSET);
mtype = get_memory_type(mad_inter);
ecc = igen6_check_ecc(imc);
imc->ch_s_size = MAD_INTER_CHANNEL_CH_S_SIZE(mad_inter);
imc->ch_l_map = MAD_INTER_CHANNEL_CH_L_MAP(mad_inter);
for (i = 0; i < NUM_CHANNELS; i++) {
mad_intra = readl(imc->window + MAD_INTRA_CH0_OFFSET + i * 4);
mad_dimm = readl(imc->window + MAD_DIMM_CH0_OFFSET + i * 4);
imc->dimm_l_size[i] = MAD_DIMM_CH_DIMM_L_SIZE(mad_dimm);
imc->dimm_s_size[i] = MAD_DIMM_CH_DIMM_S_SIZE(mad_dimm);
imc->dimm_l_map[i] = MAD_INTRA_CH_DIMM_L_MAP(mad_intra);
imc->size += imc->dimm_s_size[i];
imc->size += imc->dimm_l_size[i];
ndimms = 0;
for (j = 0; j < NUM_DIMMS; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
if (j ^ imc->dimm_l_map[i]) {
dtype = get_width(0, mad_dimm);
dsize = imc->dimm_s_size[i];
} else {
dtype = get_width(1, mad_dimm);
dsize = imc->dimm_l_size[i];
}
if (!dsize)
continue;
dimm->grain = 64;
dimm->mtype = mtype;
dimm->dtype = dtype;
dimm->nr_pages = MiB_TO_PAGES(dsize >> 20);
dimm->edac_mode = EDAC_SECDED;
snprintf(dimm->label, sizeof(dimm->label),
"MC#%d_Chan#%d_DIMM#%d", mc, i, j);
edac_dbg(0, "MC %d, Channel %d, DIMM %d, Size %llu MiB (%u pages)\n",
mc, i, j, dsize >> 20, dimm->nr_pages);
ndimms++;
}
if (ndimms && !ecc) {
igen6_printk(KERN_ERR, "MC%d In-Band ECC is disabled\n", mc);
return -ENODEV;
}
}
edac_dbg(0, "MC %d, total size %llu MiB\n", mc, imc->size >> 20);
return 0;
}
#ifdef CONFIG_EDAC_DEBUG
/* Top of upper usable DRAM */
static u64 igen6_touud;
#define TOUUD_OFFSET 0xa8
static void igen6_reg_dump(struct igen6_imc *imc)
{
int i;
edac_dbg(2, "CHANNEL_HASH : 0x%x\n",
readl(imc->window + CHANNEL_HASH_OFFSET));
edac_dbg(2, "CHANNEL_EHASH : 0x%x\n",
readl(imc->window + CHANNEL_EHASH_OFFSET));
edac_dbg(2, "MAD_INTER_CHANNEL: 0x%x\n",
readl(imc->window + MAD_INTER_CHANNEL_OFFSET));
edac_dbg(2, "ECC_ERROR_LOG : 0x%llx\n",
readq(imc->window + ECC_ERROR_LOG_OFFSET));
for (i = 0; i < NUM_CHANNELS; i++) {
edac_dbg(2, "MAD_INTRA_CH%d : 0x%x\n", i,
readl(imc->window + MAD_INTRA_CH0_OFFSET + i * 4));
edac_dbg(2, "MAD_DIMM_CH%d : 0x%x\n", i,
readl(imc->window + MAD_DIMM_CH0_OFFSET + i * 4));
}
edac_dbg(2, "TOLUD : 0x%x", igen6_tolud);
edac_dbg(2, "TOUUD : 0x%llx", igen6_touud);
edac_dbg(2, "TOM : 0x%llx", igen6_tom);
}
static struct dentry *igen6_test;
static int debugfs_u64_set(void *data, u64 val)
{
u64 ecclog;
if ((val >= igen6_tolud && val < _4GB) || val >= igen6_touud) {
edac_dbg(0, "Address 0x%llx out of range\n", val);
return 0;
}
pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
val >>= ECC_ERROR_LOG_ADDR_SHIFT;
ecclog = (val << ECC_ERROR_LOG_ADDR_SHIFT) | ECC_ERROR_LOG_CE;
if (!ecclog_gen_pool_add(0, ecclog))
irq_work_queue(&ecclog_irq_work);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
static void igen6_debug_setup(void)
{
igen6_test = edac_debugfs_create_dir("igen6_test");
if (!igen6_test)
return;
if (!edac_debugfs_create_file("addr", 0200, igen6_test,
NULL, &fops_u64_wo)) {
debugfs_remove(igen6_test);
igen6_test = NULL;
}
}
static void igen6_debug_teardown(void)
{
debugfs_remove_recursive(igen6_test);
}
#else
static void igen6_reg_dump(struct igen6_imc *imc) {}
static void igen6_debug_setup(void) {}
static void igen6_debug_teardown(void) {}
#endif
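/*
 * Read TOLUD, TOM and MCHBAR from the host bridge config space. TOM and
 * MCHBAR are 64-bit registers, so they are assembled from two 32-bit
 * config reads; MCHBAR must have its enable bit set to be usable.
 */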
static int igen6_pci_setup(struct pci_dev *pdev, u64 *mchbar)
{
union {
u64 v;
struct {
u32 v_lo;
u32 v_hi;
};
} u;
edac_dbg(2, "\n");
if (!res_cfg->ibecc_available(pdev)) {
edac_dbg(2, "No In-Band ECC IP\n");
goto fail;
}
if (pci_read_config_dword(pdev, TOLUD_OFFSET, &igen6_tolud)) {
igen6_printk(KERN_ERR, "Failed to read TOLUD\n");
goto fail;
}
igen6_tolud &= GENMASK(31, 20);
if (pci_read_config_dword(pdev, TOM_OFFSET, &u.v_lo)) {
igen6_printk(KERN_ERR, "Failed to read lower TOM\n");
goto fail;
}
if (pci_read_config_dword(pdev, TOM_OFFSET + 4, &u.v_hi)) {
igen6_printk(KERN_ERR, "Failed to read upper TOM\n");
goto fail;
}
igen6_tom = u.v & GENMASK_ULL(38, 20);
if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) {
igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n");
goto fail;
}
if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) {
igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n");
goto fail;
}
if (!(u.v & MCHBAR_EN)) {
igen6_printk(KERN_ERR, "MCHBAR is disabled\n");
goto fail;
}
*mchbar = MCHBAR_BASE(u.v);
#ifdef CONFIG_EDAC_DEBUG
if (pci_read_config_dword(pdev, TOUUD_OFFSET, &u.v_lo))
edac_dbg(2, "Failed to read lower TOUUD\n");
else if (pci_read_config_dword(pdev, TOUUD_OFFSET + 4, &u.v_hi))
edac_dbg(2, "Failed to read upper TOUUD\n");
else
igen6_touud = u.v & GENMASK_ULL(38, 20);
#endif
return 0;
fail:
return -ENODEV;
}
static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
{
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct igen6_imc *imc;
void __iomem *window;
int rc;
edac_dbg(2, "\n");
mchbar += mc * MCHBAR_SIZE;
window = ioremap(mchbar, MCHBAR_SIZE);
if (!window) {
igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
return -ENODEV;
}
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = NUM_CHANNELS;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
layers[1].size = NUM_DIMMS;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
if (!mci) {
rc = -ENOMEM;
goto fail;
}
mci->ctl_name = kasprintf(GFP_KERNEL, "Intel_client_SoC MC#%d", mc);
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail2;
}
mci->mtype_cap = MEM_FLAG_LPDDR4 | MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(pdev);
mci->pvt_info = &igen6_pvt->imc[mc];
imc = mci->pvt_info;
device_initialize(&imc->dev);
/*
* The EDAC core uses mci->pdev (a pointer to struct device) as the
* memory controller ID. The client SoCs attach one or more memory
* controllers to a single pci_dev, so a single pci_dev->dev may serve
* multiple memory controllers.
*
* To keep mci->pdev unique, assign pci_dev->dev to mci->pdev for the
* first memory controller and a unique imc->dev to mci->pdev for each
* subsequent memory controller.
*/
mci->pdev = mc ? &imc->dev : &pdev->dev;
imc->mc = mc;
imc->pdev = pdev;
imc->window = window;
igen6_reg_dump(imc);
rc = igen6_get_dimm_config(mci);
if (rc)
goto fail3;
rc = edac_mc_add_mc(mci);
if (rc) {
igen6_printk(KERN_ERR, "Failed to register mci#%d\n", mc);
goto fail3;
}
imc->mci = mci;
return 0;
fail3:
kfree(mci->ctl_name);
fail2:
edac_mc_free(mci);
fail:
iounmap(window);
return rc;
}
static void igen6_unregister_mcis(void)
{
struct mem_ctl_info *mci;
struct igen6_imc *imc;
int i;
edac_dbg(2, "\n");
for (i = 0; i < res_cfg->num_imc; i++) {
imc = &igen6_pvt->imc[i];
mci = imc->mci;
if (!mci)
continue;
edac_mc_del_mc(mci->pdev);
kfree(mci->ctl_name);
edac_mc_free(mci);
iounmap(imc->window);
}
}
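/*
 * For parts with two IMCs, record which memory slice is smaller (its size
 * and mapping are needed for address decode) and, if the config defines a
 * CMF region, read the memory-slice hash from it.
 */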
static int igen6_mem_slice_setup(u64 mchbar)
{
struct igen6_imc *imc = &igen6_pvt->imc[0];
u64 base = mchbar + res_cfg->cmf_base;
u32 offset = res_cfg->ms_hash_offset;
u32 size = res_cfg->cmf_size;
u64 ms_s_size, ms_hash;
void __iomem *cmf;
int ms_l_map;
edac_dbg(2, "\n");
if (imc[0].size < imc[1].size) {
ms_s_size = imc[0].size;
ms_l_map = 1;
} else {
ms_s_size = imc[1].size;
ms_l_map = 0;
}
igen6_pvt->ms_s_size = ms_s_size;
igen6_pvt->ms_l_map = ms_l_map;
edac_dbg(0, "ms_s_size: %llu MiB, ms_l_map %d\n",
ms_s_size >> 20, ms_l_map);
if (!size)
return 0;
cmf = ioremap(base, size);
if (!cmf) {
igen6_printk(KERN_ERR, "Failed to ioremap cmf 0x%llx\n", base);
return -ENODEV;
}
ms_hash = readq(cmf + offset);
igen6_pvt->ms_hash = ms_hash;
edac_dbg(0, "MEM_SLICE_HASH: 0x%llx\n", ms_hash);
iounmap(cmf);
return 0;
}
static int register_err_handler(void)
{
int rc;
if (res_cfg->machine_check) {
mce_register_decode_chain(&ecclog_mce_dec);
return 0;
}
rc = register_nmi_handler(NMI_SERR, ecclog_nmi_handler,
0, IGEN6_NMI_NAME);
if (rc) {
igen6_printk(KERN_ERR, "Failed to register NMI handler\n");
return rc;
}
return 0;
}
static void unregister_err_handler(void)
{
if (res_cfg->machine_check) {
mce_unregister_decode_chain(&ecclog_mce_dec);
return;
}
unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u64 mchbar;
int i, rc;
edac_dbg(2, "\n");
igen6_pvt = kzalloc(sizeof(*igen6_pvt), GFP_KERNEL);
if (!igen6_pvt)
return -ENOMEM;
res_cfg = (struct res_config *)ent->driver_data;
rc = igen6_pci_setup(pdev, &mchbar);
if (rc)
goto fail;
for (i = 0; i < res_cfg->num_imc; i++) {
rc = igen6_register_mci(i, mchbar, pdev);
if (rc)
goto fail2;
}
if (res_cfg->num_imc > 1) {
rc = igen6_mem_slice_setup(mchbar);
if (rc)
goto fail2;
}
ecclog_pool = ecclog_gen_pool_create();
if (!ecclog_pool) {
rc = -ENOMEM;
goto fail2;
}
INIT_WORK(&ecclog_work, ecclog_work_cb);
init_irq_work(&ecclog_irq_work, ecclog_irq_work_cb);
rc = register_err_handler();
if (rc)
goto fail3;
/* Enable error reporting */
rc = errcmd_enable_error_reporting(true);
if (rc) {
igen6_printk(KERN_ERR, "Failed to enable error reporting\n");
goto fail4;
}
/* Check for errors left pending before/during the registration of the error handler */
ecclog_handler();
igen6_debug_setup();
return 0;
fail4:
unregister_err_handler();
fail3:
gen_pool_destroy(ecclog_pool);
fail2:
igen6_unregister_mcis();
fail:
kfree(igen6_pvt);
return rc;
}
static void igen6_remove(struct pci_dev *pdev)
{
edac_dbg(2, "\n");
igen6_debug_teardown();
errcmd_enable_error_reporting(false);
unregister_err_handler();
irq_work_sync(&ecclog_irq_work);
flush_work(&ecclog_work);
gen_pool_destroy(ecclog_pool);
igen6_unregister_mcis();
kfree(igen6_pvt);
}
static struct pci_driver igen6_driver = {
.name = EDAC_MOD_STR,
.probe = igen6_probe,
.remove = igen6_remove,
.id_table = igen6_pci_tbl,
};
static int __init igen6_init(void)
{
const char *owner;
int rc;
edac_dbg(2, "\n");
if (ghes_get_devices())
return -EBUSY;
owner = edac_get_owner();
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
edac_op_state = EDAC_OPSTATE_NMI;
rc = pci_register_driver(&igen6_driver);
if (rc)
return rc;
igen6_printk(KERN_INFO, "%s\n", IGEN6_REVISION);
return 0;
}
static void __exit igen6_exit(void)
{
edac_dbg(2, "\n");
pci_unregister_driver(&igen6_driver);
}
module_init(igen6_init);
module_exit(igen6_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qiuxu Zhuo");
MODULE_DESCRIPTION("MC Driver for Intel client SoC using In-Band ECC");
| linux-master | drivers/edac/igen6_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/edac.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include "edac_module.h"
/* Registers Offset */
#define AL_MC_ECC_CFG 0x70
#define AL_MC_ECC_CLEAR 0x7c
#define AL_MC_ECC_ERR_COUNT 0x80
#define AL_MC_ECC_CE_ADDR0 0x84
#define AL_MC_ECC_CE_ADDR1 0x88
#define AL_MC_ECC_UE_ADDR0 0xa4
#define AL_MC_ECC_UE_ADDR1 0xa8
#define AL_MC_ECC_CE_SYND0 0x8c
#define AL_MC_ECC_CE_SYND1 0x90
#define AL_MC_ECC_CE_SYND2 0x94
#define AL_MC_ECC_UE_SYND0 0xac
#define AL_MC_ECC_UE_SYND1 0xb0
#define AL_MC_ECC_UE_SYND2 0xb4
/* Registers Fields */
#define AL_MC_ECC_CFG_SCRUB_DISABLED BIT(4)
#define AL_MC_ECC_CLEAR_UE_COUNT BIT(3)
#define AL_MC_ECC_CLEAR_CE_COUNT BIT(2)
#define AL_MC_ECC_CLEAR_UE_ERR BIT(1)
#define AL_MC_ECC_CLEAR_CE_ERR BIT(0)
#define AL_MC_ECC_ERR_COUNT_UE GENMASK(31, 16)
#define AL_MC_ECC_ERR_COUNT_CE GENMASK(15, 0)
#define AL_MC_ECC_CE_ADDR0_RANK GENMASK(25, 24)
#define AL_MC_ECC_CE_ADDR0_ROW GENMASK(17, 0)
#define AL_MC_ECC_CE_ADDR1_BG GENMASK(25, 24)
#define AL_MC_ECC_CE_ADDR1_BANK GENMASK(18, 16)
#define AL_MC_ECC_CE_ADDR1_COLUMN GENMASK(11, 0)
#define AL_MC_ECC_UE_ADDR0_RANK GENMASK(25, 24)
#define AL_MC_ECC_UE_ADDR0_ROW GENMASK(17, 0)
#define AL_MC_ECC_UE_ADDR1_BG GENMASK(25, 24)
#define AL_MC_ECC_UE_ADDR1_BANK GENMASK(18, 16)
#define AL_MC_ECC_UE_ADDR1_COLUMN GENMASK(11, 0)
#define DRV_NAME "al_mc_edac"
#define AL_MC_EDAC_MSG_MAX 256
struct al_mc_edac {
void __iomem *mmio_base;
spinlock_t lock;
int irq_ce;
int irq_ue;
};
static void prepare_msg(char *message, size_t buffer_size,
enum hw_event_mc_err_type type,
u8 rank, u32 row, u8 bg, u8 bank, u16 column,
u32 syn0, u32 syn1, u32 syn2)
{
snprintf(message, buffer_size,
"%s rank=0x%x row=0x%x bg=0x%x bank=0x%x col=0x%x syn0: 0x%x syn1: 0x%x syn2: 0x%x",
type == HW_EVENT_ERR_UNCORRECTED ? "UE" : "CE",
rank, row, bg, bank, column, syn0, syn1, syn2);
}
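/*
 * Read the correctable-error count, address and syndrome registers, clear
 * the CE counter/status bits, then report the accumulated errors to the
 * EDAC core. Returns the number of errors handled (0 if none).
 */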
static int handle_ce(struct mem_ctl_info *mci)
{
u32 eccerrcnt, ecccaddr0, ecccaddr1, ecccsyn0, ecccsyn1, ecccsyn2, row;
struct al_mc_edac *al_mc = mci->pvt_info;
char msg[AL_MC_EDAC_MSG_MAX];
u16 ce_count, column;
unsigned long flags;
u8 rank, bg, bank;
eccerrcnt = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_ERR_COUNT);
ce_count = FIELD_GET(AL_MC_ECC_ERR_COUNT_CE, eccerrcnt);
if (!ce_count)
return 0;
ecccaddr0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_ADDR0);
ecccaddr1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_ADDR1);
ecccsyn0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND0);
ecccsyn1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND1);
ecccsyn2 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND2);
writel_relaxed(AL_MC_ECC_CLEAR_CE_COUNT | AL_MC_ECC_CLEAR_CE_ERR,
al_mc->mmio_base + AL_MC_ECC_CLEAR);
dev_dbg(mci->pdev, "eccuaddr0=0x%08x eccuaddr1=0x%08x\n",
ecccaddr0, ecccaddr1);
rank = FIELD_GET(AL_MC_ECC_CE_ADDR0_RANK, ecccaddr0);
row = FIELD_GET(AL_MC_ECC_CE_ADDR0_ROW, ecccaddr0);
bg = FIELD_GET(AL_MC_ECC_CE_ADDR1_BG, ecccaddr1);
bank = FIELD_GET(AL_MC_ECC_CE_ADDR1_BANK, ecccaddr1);
column = FIELD_GET(AL_MC_ECC_CE_ADDR1_COLUMN, ecccaddr1);
prepare_msg(msg, sizeof(msg), HW_EVENT_ERR_CORRECTED,
rank, row, bg, bank, column,
ecccsyn0, ecccsyn1, ecccsyn2);
spin_lock_irqsave(&al_mc->lock, flags);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
ce_count, 0, 0, 0, 0, 0, -1, mci->ctl_name, msg);
spin_unlock_irqrestore(&al_mc->lock, flags);
return ce_count;
}
static int handle_ue(struct mem_ctl_info *mci)
{
u32 eccerrcnt, eccuaddr0, eccuaddr1, eccusyn0, eccusyn1, eccusyn2, row;
struct al_mc_edac *al_mc = mci->pvt_info;
char msg[AL_MC_EDAC_MSG_MAX];
u16 ue_count, column;
unsigned long flags;
u8 rank, bg, bank;
eccerrcnt = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_ERR_COUNT);
ue_count = FIELD_GET(AL_MC_ECC_ERR_COUNT_UE, eccerrcnt);
if (!ue_count)
return 0;
eccuaddr0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_ADDR0);
eccuaddr1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_ADDR1);
eccusyn0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND0);
eccusyn1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND1);
eccusyn2 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND2);
writel_relaxed(AL_MC_ECC_CLEAR_UE_COUNT | AL_MC_ECC_CLEAR_UE_ERR,
al_mc->mmio_base + AL_MC_ECC_CLEAR);
dev_dbg(mci->pdev, "eccuaddr0=0x%08x eccuaddr1=0x%08x\n",
eccuaddr0, eccuaddr1);
rank = FIELD_GET(AL_MC_ECC_UE_ADDR0_RANK, eccuaddr0);
row = FIELD_GET(AL_MC_ECC_UE_ADDR0_ROW, eccuaddr0);
bg = FIELD_GET(AL_MC_ECC_UE_ADDR1_BG, eccuaddr1);
bank = FIELD_GET(AL_MC_ECC_UE_ADDR1_BANK, eccuaddr1);
column = FIELD_GET(AL_MC_ECC_UE_ADDR1_COLUMN, eccuaddr1);
prepare_msg(msg, sizeof(msg), HW_EVENT_ERR_UNCORRECTED,
rank, row, bg, bank, column,
eccusyn0, eccusyn1, eccusyn2);
spin_lock_irqsave(&al_mc->lock, flags);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
ue_count, 0, 0, 0, 0, 0, -1, mci->ctl_name, msg);
spin_unlock_irqrestore(&al_mc->lock, flags);
return ue_count;
}
static void al_mc_edac_check(struct mem_ctl_info *mci)
{
struct al_mc_edac *al_mc = mci->pvt_info;
if (al_mc->irq_ue <= 0)
handle_ue(mci);
if (al_mc->irq_ce <= 0)
handle_ce(mci);
}
static irqreturn_t al_mc_edac_irq_handler_ue(int irq, void *info)
{
struct platform_device *pdev = info;
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
if (handle_ue(mci))
return IRQ_HANDLED;
return IRQ_NONE;
}
static irqreturn_t al_mc_edac_irq_handler_ce(int irq, void *info)
{
struct platform_device *pdev = info;
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
if (handle_ce(mci))
return IRQ_HANDLED;
return IRQ_NONE;
}
static enum scrub_type get_scrub_mode(void __iomem *mmio_base)
{
u32 ecccfg0;
ecccfg0 = readl(mmio_base + AL_MC_ECC_CFG);
if (FIELD_GET(AL_MC_ECC_CFG_SCRUB_DISABLED, ecccfg0))
return SCRUB_NONE;
else
return SCRUB_HW_SRC;
}
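/*
 * devm actions so that edac_mc_free()/edac_mc_del_mc() run automatically,
 * in the reverse order of their registration, on probe failure or device
 * unbind.
 */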
static void devm_al_mc_edac_free(void *data)
{
edac_mc_free(data);
}
static void devm_al_mc_edac_del(void *data)
{
edac_mc_del_mc(data);
}
static int al_mc_edac_probe(struct platform_device *pdev)
{
struct edac_mc_layer layers[1];
struct mem_ctl_info *mci;
struct al_mc_edac *al_mc;
void __iomem *mmio_base;
struct dimm_info *dimm;
int ret;
mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio_base)) {
dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
PTR_ERR(mmio_base));
return PTR_ERR(mmio_base);
}
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 1;
layers[0].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(struct al_mc_edac));
if (!mci)
return -ENOMEM;
ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_free, mci);
if (ret)
return ret;
platform_set_drvdata(pdev, mci);
al_mc = mci->pvt_info;
al_mc->mmio_base = mmio_base;
al_mc->irq_ue = of_irq_get_byname(pdev->dev.of_node, "ue");
if (al_mc->irq_ue <= 0)
dev_dbg(&pdev->dev,
"no IRQ defined for UE - falling back to polling\n");
al_mc->irq_ce = of_irq_get_byname(pdev->dev.of_node, "ce");
if (al_mc->irq_ce <= 0)
dev_dbg(&pdev->dev,
"no IRQ defined for CE - falling back to polling\n");
/*
* If both interrupts (ue/ce) are found, use interrupt mode.
* If neither interrupt is found, use polling mode.
* If only one interrupt is found, use interrupt mode for it but
* keep polling enabled for the other.
*/
if (al_mc->irq_ue <= 0 || al_mc->irq_ce <= 0) {
edac_op_state = EDAC_OPSTATE_POLL;
mci->edac_check = al_mc_edac_check;
} else {
edac_op_state = EDAC_OPSTATE_INT;
}
spin_lock_init(&al_mc->lock);
mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = DRV_NAME;
mci->ctl_name = "al_mc";
mci->pdev = &pdev->dev;
mci->scrub_mode = get_scrub_mode(mmio_base);
dimm = *mci->dimms;
dimm->grain = 1;
ret = edac_mc_add_mc(mci);
if (ret < 0) {
dev_err(&pdev->dev,
"fail to add memory controller device (%d)\n",
ret);
return ret;
}
ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
if (ret)
return ret;
if (al_mc->irq_ue > 0) {
ret = devm_request_irq(&pdev->dev,
al_mc->irq_ue,
al_mc_edac_irq_handler_ue,
IRQF_SHARED,
pdev->name,
pdev);
if (ret != 0) {
dev_err(&pdev->dev,
"failed to request UE IRQ %d (%d)\n",
al_mc->irq_ue, ret);
return ret;
}
}
if (al_mc->irq_ce > 0) {
ret = devm_request_irq(&pdev->dev,
al_mc->irq_ce,
al_mc_edac_irq_handler_ce,
IRQF_SHARED,
pdev->name,
pdev);
if (ret != 0) {
dev_err(&pdev->dev,
"failed to request CE IRQ %d (%d)\n",
al_mc->irq_ce, ret);
return ret;
}
}
return 0;
}
static const struct of_device_id al_mc_edac_of_match[] = {
{ .compatible = "amazon,al-mc-edac", },
{},
};
MODULE_DEVICE_TABLE(of, al_mc_edac_of_match);
static struct platform_driver al_mc_edac_driver = {
.probe = al_mc_edac_probe,
.driver = {
.name = DRV_NAME,
.of_match_table = al_mc_edac_of_match,
},
};
module_platform_driver(al_mc_edac_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Talel Shenhar");
MODULE_DESCRIPTION("Amazon's Annapurna Lab's Memory Controller EDAC Driver");
| linux-master | drivers/edac/al_mc_edac.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018, 2019 Cisco Systems
*/
#include <linux/edac.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include "edac_module.h"
#define DRV_NAME "aspeed-edac"
#define ASPEED_MCR_PROT 0x00 /* protection key register */
#define ASPEED_MCR_CONF 0x04 /* configuration register */
#define ASPEED_MCR_INTR_CTRL 0x50 /* interrupt control/status register */
#define ASPEED_MCR_ADDR_UNREC 0x58 /* address of first un-recoverable error */
#define ASPEED_MCR_ADDR_REC 0x5c /* address of last recoverable error */
#define ASPEED_MCR_LAST ASPEED_MCR_ADDR_REC
#define ASPEED_MCR_PROT_PASSWD 0xfc600309
#define ASPEED_MCR_CONF_DRAM_TYPE BIT(4)
#define ASPEED_MCR_CONF_ECC BIT(7)
#define ASPEED_MCR_INTR_CTRL_CLEAR BIT(31)
#define ASPEED_MCR_INTR_CTRL_CNT_REC GENMASK(23, 16)
#define ASPEED_MCR_INTR_CTRL_CNT_UNREC GENMASK(15, 12)
#define ASPEED_MCR_INTR_CTRL_ENABLE (BIT(0) | BIT(1))
static struct regmap *aspeed_regmap;
static int regmap_reg_write(void *context, unsigned int reg, unsigned int val)
{
void __iomem *regs = (void __iomem *)context;
/* enable write to MCR register set */
writel(ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT);
writel(val, regs + reg);
/* disable write to MCR register set */
writel(~ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT);
return 0;
}
static int regmap_reg_read(void *context, unsigned int reg, unsigned int *val)
{
void __iomem *regs = (void __iomem *)context;
*val = readl(regs + reg);
return 0;
}
static bool regmap_is_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case ASPEED_MCR_PROT:
case ASPEED_MCR_INTR_CTRL:
case ASPEED_MCR_ADDR_UNREC:
case ASPEED_MCR_ADDR_REC:
return true;
default:
return false;
}
}
static const struct regmap_config aspeed_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = ASPEED_MCR_LAST,
.reg_write = regmap_reg_write,
.reg_read = regmap_reg_read,
.volatile_reg = regmap_is_volatile,
.fast_io = true,
};
static void count_rec(struct mem_ctl_info *mci, u8 rec_cnt, u32 rec_addr)
{
struct csrow_info *csrow = mci->csrows[0];
u32 page, offset, syndrome;
if (!rec_cnt)
return;
/* report the first few errors (if any) */
/* note: no addresses are recorded for them */
if (rec_cnt > 1) {
/* page, offset and syndrome are not available */
page = 0;
offset = 0;
syndrome = 0;
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, rec_cnt-1,
page, offset, syndrome, 0, 0, -1,
"address(es) not available", "");
}
/* report last error */
/* note: rec_addr is the last recoverable error addr */
page = rec_addr >> PAGE_SHIFT;
offset = rec_addr & ~PAGE_MASK;
/* syndrome is not available */
syndrome = 0;
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
csrow->first_page + page, offset, syndrome,
0, 0, -1, "", "");
}
static void count_un_rec(struct mem_ctl_info *mci, u8 un_rec_cnt,
u32 un_rec_addr)
{
struct csrow_info *csrow = mci->csrows[0];
u32 page, offset, syndrome;
if (!un_rec_cnt)
return;
/* report the first error */
/* note: un_rec_addr is the first unrecoverable error addr */
page = un_rec_addr >> PAGE_SHIFT;
offset = un_rec_addr & ~PAGE_MASK;
/* syndrome is not available */
syndrome = 0;
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
csrow->first_page + page, offset, syndrome,
0, 0, -1, "", "");
/* report any further errors */
/* note: no addresses are recorded */
if (un_rec_cnt > 1) {
/* page, offset and syndrome are not available */
page = 0;
offset = 0;
syndrome = 0;
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, un_rec_cnt-1,
page, offset, syndrome, 0, 0, -1,
"address(es) not available", "");
}
}
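/*
 * Interrupt handler: read the recoverable/unrecoverable error counts from
 * the interrupt control register, latch the recorded error addresses,
 * clear the flags and counters, then report the errors to the EDAC core.
 */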
static irqreturn_t mcr_isr(int irq, void *arg)
{
struct mem_ctl_info *mci = arg;
u32 rec_addr, un_rec_addr;
u32 reg50, reg5c, reg58;
u8 rec_cnt, un_rec_cnt;
regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, &reg50);
dev_dbg(mci->pdev, "received edac interrupt w/ mcr register 50: 0x%x\n",
reg50);
/* collect data about recoverable and unrecoverable errors */
rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_REC) >> 16;
un_rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_UNREC) >> 12;
dev_dbg(mci->pdev, "%d recoverable interrupts and %d unrecoverable interrupts\n",
rec_cnt, un_rec_cnt);
regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_UNREC, &reg58);
un_rec_addr = reg58;
regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_REC, &reg5c);
rec_addr = reg5c;
/* clear interrupt flags and error counters: */
regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
ASPEED_MCR_INTR_CTRL_CLEAR,
ASPEED_MCR_INTR_CTRL_CLEAR);
regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
ASPEED_MCR_INTR_CTRL_CLEAR, 0);
/* process recoverable and unrecoverable errors */
count_rec(mci, rec_cnt, rec_addr);
count_un_rec(mci, un_rec_cnt, un_rec_addr);
if (!rec_cnt && !un_rec_cnt)
dev_dbg(mci->pdev, "received edac interrupt, but did not find any ECC counters\n");
regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, &reg50);
dev_dbg(mci->pdev, "edac interrupt handled. mcr reg 50 is now: 0x%x\n",
reg50);
return IRQ_HANDLED;
}
static int config_irq(void *ctx, struct platform_device *pdev)
{
int irq;
int rc;
/* register interrupt handler */
irq = platform_get_irq(pdev, 0);
dev_dbg(&pdev->dev, "got irq %d\n", irq);
if (irq < 0)
return irq;
rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
DRV_NAME, ctx);
if (rc) {
dev_err(&pdev->dev, "unable to request irq %d\n", irq);
return rc;
}
/* enable interrupts */
regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
ASPEED_MCR_INTR_CTRL_ENABLE,
ASPEED_MCR_INTR_CTRL_ENABLE);
return 0;
}
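/*
 * The controller exposes no DIMM topology, so the single csrow/channel is
 * sized from the /memory device-tree node and the DRAM type is taken from
 * the configuration register.
 */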
static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = mci->csrows[0];
u32 nr_pages, dram_type;
struct dimm_info *dimm;
struct device_node *np;
struct resource r;
u32 reg04;
int rc;
/* retrieve info about physical memory from device tree */
np = of_find_node_by_name(NULL, "memory");
if (!np) {
dev_err(mci->pdev, "dt: missing /memory node\n");
return -ENODEV;
}
rc = of_address_to_resource(np, 0, &r);
of_node_put(np);
if (rc) {
dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
return rc;
}
dev_dbg(mci->pdev, "dt: /memory node resources: first page %pR, PAGE_SHIFT macro=0x%x\n",
&r, PAGE_SHIFT);
csrow->first_page = r.start >> PAGE_SHIFT;
nr_pages = resource_size(&r) >> PAGE_SHIFT;
csrow->last_page = csrow->first_page + nr_pages - 1;
regmap_read(aspeed_regmap, ASPEED_MCR_CONF, &reg04);
dram_type = (reg04 & ASPEED_MCR_CONF_DRAM_TYPE) ? MEM_DDR4 : MEM_DDR3;
dimm = csrow->channels[0]->dimm;
dimm->mtype = dram_type;
dimm->edac_mode = EDAC_SECDED;
dimm->nr_pages = nr_pages / csrow->nr_channels;
dev_dbg(mci->pdev, "initialized dimm with first_page=0x%lx and nr_pages=0x%x\n",
csrow->first_page, nr_pages);
return 0;
}
static int aspeed_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
void __iomem *regs;
u32 reg04;
int rc;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
aspeed_regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
&aspeed_regmap_config);
if (IS_ERR(aspeed_regmap))
return PTR_ERR(aspeed_regmap);
/* bail out if ECC mode is not configured */
regmap_read(aspeed_regmap, ASPEED_MCR_CONF, &reg04);
if (!(reg04 & ASPEED_MCR_CONF_ECC)) {
dev_err(&pdev->dev, "ECC mode is not configured in u-boot\n");
return -EPERM;
}
edac_op_state = EDAC_OPSTATE_INT;
/* allocate & init EDAC MC data structure */
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 1;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = 1;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->scrub_cap = SCRUB_FLAG_HW_SRC;
mci->scrub_mode = SCRUB_HW_SRC;
mci->mod_name = DRV_NAME;
mci->ctl_name = "MIC";
mci->dev_name = dev_name(&pdev->dev);
rc = init_csrows(mci);
if (rc) {
dev_err(&pdev->dev, "failed to init csrows\n");
goto probe_exit02;
}
platform_set_drvdata(pdev, mci);
/* register with edac core */
rc = edac_mc_add_mc(mci);
if (rc) {
dev_err(&pdev->dev, "failed to register with EDAC core\n");
goto probe_exit02;
}
/* register interrupt handler and enable interrupts */
rc = config_irq(mci, pdev);
if (rc) {
dev_err(&pdev->dev, "failed setting up irq\n");
goto probe_exit01;
}
return 0;
probe_exit01:
edac_mc_del_mc(&pdev->dev);
probe_exit02:
edac_mc_free(mci);
return rc;
}
static int aspeed_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
/* disable interrupts */
regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
ASPEED_MCR_INTR_CTRL_ENABLE, 0);
/* free resources */
mci = edac_mc_del_mc(&pdev->dev);
if (mci)
edac_mc_free(mci);
return 0;
}
static const struct of_device_id aspeed_of_match[] = {
{ .compatible = "aspeed,ast2400-sdram-edac" },
{ .compatible = "aspeed,ast2500-sdram-edac" },
{ .compatible = "aspeed,ast2600-sdram-edac" },
{},
};
MODULE_DEVICE_TABLE(of, aspeed_of_match);
static struct platform_driver aspeed_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = aspeed_of_match
},
.probe = aspeed_probe,
.remove = aspeed_remove
};
module_platform_driver(aspeed_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Schaeckeler <[email protected]>");
MODULE_DESCRIPTION("Aspeed BMC SoC EDAC driver");
MODULE_VERSION("1.0");
| linux-master | drivers/edac/aspeed_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/llcc-qcom.h>
#include "edac_mc.h"
#include "edac_device.h"
#define EDAC_LLCC "qcom_llcc"
#define LLCC_ERP_PANIC_ON_UE 1
#define TRP_SYN_REG_CNT 6
#define DRP_SYN_REG_CNT 8
#define LLCC_LB_CNT_MASK GENMASK(31, 28)
#define LLCC_LB_CNT_SHIFT 28
/* Mask and shift macros */
#define ECC_DB_ERR_COUNT_MASK GENMASK(4, 0)
#define ECC_DB_ERR_WAYS_MASK GENMASK(31, 16)
#define ECC_DB_ERR_WAYS_SHIFT BIT(4)
#define ECC_SB_ERR_COUNT_MASK GENMASK(23, 16)
#define ECC_SB_ERR_COUNT_SHIFT BIT(4)
#define ECC_SB_ERR_WAYS_MASK GENMASK(15, 0)
#define SB_ECC_ERROR BIT(0)
#define DB_ECC_ERROR BIT(1)
#define DRP_TRP_INT_CLEAR GENMASK(1, 0)
#define DRP_TRP_CNT_CLEAR GENMASK(1, 0)
#define SB_ERROR_THRESHOLD 0x1
#define SB_ERROR_THRESHOLD_SHIFT 24
#define SB_DB_TRP_INTERRUPT_ENABLE 0x3
#define TRP0_INTERRUPT_ENABLE 0x1
#define DRP0_INTERRUPT_ENABLE BIT(6)
#define SB_DB_DRP_INTERRUPT_ENABLE 0x3
#define ECC_POLL_MSEC 5000
enum {
LLCC_DRAM_CE = 0,
LLCC_DRAM_UE,
LLCC_TRAM_CE,
LLCC_TRAM_UE,
};
static const struct llcc_edac_reg_data edac_reg_data[] = {
[LLCC_DRAM_CE] = {
.name = "DRAM Single-bit",
.reg_cnt = DRP_SYN_REG_CNT,
.count_mask = ECC_SB_ERR_COUNT_MASK,
.ways_mask = ECC_SB_ERR_WAYS_MASK,
.count_shift = ECC_SB_ERR_COUNT_SHIFT,
},
[LLCC_DRAM_UE] = {
.name = "DRAM Double-bit",
.reg_cnt = DRP_SYN_REG_CNT,
.count_mask = ECC_DB_ERR_COUNT_MASK,
.ways_mask = ECC_DB_ERR_WAYS_MASK,
.ways_shift = ECC_DB_ERR_WAYS_SHIFT,
},
[LLCC_TRAM_CE] = {
.name = "TRAM Single-bit",
.reg_cnt = TRP_SYN_REG_CNT,
.count_mask = ECC_SB_ERR_COUNT_MASK,
.ways_mask = ECC_SB_ERR_WAYS_MASK,
.count_shift = ECC_SB_ERR_COUNT_SHIFT,
},
[LLCC_TRAM_UE] = {
.name = "TRAM Double-bit",
.reg_cnt = TRP_SYN_REG_CNT,
.count_mask = ECC_DB_ERR_COUNT_MASK,
.ways_mask = ECC_DB_ERR_WAYS_MASK,
.ways_shift = ECC_DB_ERR_WAYS_SHIFT,
},
};
static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_bcast_regmap)
{
u32 sb_err_threshold;
int ret;
/*
* Configure the interrupt enable registers so that Tag RAM and Data RAM
* related interrupts are propagated to the interrupt controller for
* servicing.
*/
ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
TRP0_INTERRUPT_ENABLE,
TRP0_INTERRUPT_ENABLE);
if (ret)
return ret;
ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->trp_interrupt_0_enable,
SB_DB_TRP_INTERRUPT_ENABLE,
SB_DB_TRP_INTERRUPT_ENABLE);
if (ret)
return ret;
sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_ecc_error_cfg,
sb_err_threshold);
if (ret)
return ret;
ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
DRP0_INTERRUPT_ENABLE,
DRP0_INTERRUPT_ENABLE);
if (ret)
return ret;
ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_interrupt_enable,
SB_DB_DRP_INTERRUPT_ENABLE);
return ret;
}
/* Clear the error interrupt and counter registers */
static int
qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
{
int ret;
switch (err_type) {
case LLCC_DRAM_CE:
case LLCC_DRAM_UE:
ret = regmap_write(drv->bcast_regmap,
drv->edac_reg_offset->drp_interrupt_clear,
DRP_TRP_INT_CLEAR);
if (ret)
return ret;
ret = regmap_write(drv->bcast_regmap,
drv->edac_reg_offset->drp_ecc_error_cntr_clear,
DRP_TRP_CNT_CLEAR);
if (ret)
return ret;
break;
case LLCC_TRAM_CE:
case LLCC_TRAM_UE:
ret = regmap_write(drv->bcast_regmap,
drv->edac_reg_offset->trp_interrupt_0_clear,
DRP_TRP_INT_CLEAR);
if (ret)
return ret;
ret = regmap_write(drv->bcast_regmap,
drv->edac_reg_offset->trp_ecc_error_cntr_clear,
DRP_TRP_CNT_CLEAR);
if (ret)
return ret;
break;
default:
ret = -EINVAL;
edac_printk(KERN_CRIT, EDAC_LLCC, "Unexpected error type: %d\n",
err_type);
}
return ret;
}
struct qcom_llcc_syn_regs {
u32 synd_reg;
u32 count_status_reg;
u32 ways_status_reg;
};
static void get_reg_offsets(struct llcc_drv_data *drv, int err_type,
struct qcom_llcc_syn_regs *syn_regs)
{
const struct llcc_edac_reg_offset *edac_reg_offset = drv->edac_reg_offset;
switch (err_type) {
case LLCC_DRAM_CE:
syn_regs->synd_reg = edac_reg_offset->drp_ecc_sb_err_syn0;
syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
break;
case LLCC_DRAM_UE:
syn_regs->synd_reg = edac_reg_offset->drp_ecc_db_err_syn0;
syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
break;
case LLCC_TRAM_CE:
syn_regs->synd_reg = edac_reg_offset->trp_ecc_sb_err_syn0;
syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
break;
case LLCC_TRAM_UE:
syn_regs->synd_reg = edac_reg_offset->trp_ecc_db_err_syn0;
syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
break;
}
}
/* Dump Syndrome registers data for Tag RAM, Data RAM bit errors*/
static int
dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
{
struct llcc_edac_reg_data reg_data = edac_reg_data[err_type];
struct qcom_llcc_syn_regs regs = { };
int err_cnt, err_ways, ret, i;
u32 synd_reg, synd_val;
get_reg_offsets(drv, err_type, ®s);
for (i = 0; i < reg_data.reg_cnt; i++) {
synd_reg = regs.synd_reg + (i * 4);
ret = regmap_read(drv->regmaps[bank], synd_reg,
&synd_val);
if (ret)
goto clear;
edac_printk(KERN_CRIT, EDAC_LLCC, "%s: ECC_SYN%d: 0x%8x\n",
reg_data.name, i, synd_val);
}
ret = regmap_read(drv->regmaps[bank], regs.count_status_reg,
&err_cnt);
if (ret)
goto clear;
err_cnt &= reg_data.count_mask;
err_cnt >>= reg_data.count_shift;
edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error count: 0x%4x\n",
reg_data.name, err_cnt);
ret = regmap_read(drv->regmaps[bank], regs.ways_status_reg,
&err_ways);
if (ret)
goto clear;
err_ways &= reg_data.ways_mask;
err_ways >>= reg_data.ways_shift;
edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error ways: 0x%4x\n",
reg_data.name, err_ways);
clear:
return qcom_llcc_clear_error_status(err_type, drv);
}
static int
dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
{
struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
int ret;
ret = dump_syn_reg_values(drv, bank, err_type);
if (ret)
return ret;
switch (err_type) {
case LLCC_DRAM_CE:
edac_device_handle_ce(edev_ctl, 0, bank,
"LLCC Data RAM correctable Error");
break;
case LLCC_DRAM_UE:
edac_device_handle_ue(edev_ctl, 0, bank,
"LLCC Data RAM uncorrectable Error");
break;
case LLCC_TRAM_CE:
edac_device_handle_ce(edev_ctl, 0, bank,
"LLCC Tag RAM correctable Error");
break;
case LLCC_TRAM_UE:
edac_device_handle_ue(edev_ctl, 0, bank,
"LLCC Tag RAM uncorrectable Error");
break;
default:
ret = -EINVAL;
edac_printk(KERN_CRIT, EDAC_LLCC, "Unexpected error type: %d\n",
err_type);
}
return ret;
}
static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
{
struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
irqreturn_t irq_rc = IRQ_NONE;
u32 drp_error, trp_error, i;
int ret;
/* Iterate over the banks and look for Tag RAM or Data RAM errors */
for (i = 0; i < drv->num_banks; i++) {
ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->drp_interrupt_status,
&drp_error);
if (!ret && (drp_error & SB_ECC_ERROR)) {
edac_printk(KERN_CRIT, EDAC_LLCC,
"Single Bit Error detected in Data RAM\n");
ret = dump_syn_reg(edev_ctl, LLCC_DRAM_CE, i);
} else if (!ret && (drp_error & DB_ECC_ERROR)) {
edac_printk(KERN_CRIT, EDAC_LLCC,
"Double Bit Error detected in Data RAM\n");
ret = dump_syn_reg(edev_ctl, LLCC_DRAM_UE, i);
}
if (!ret)
irq_rc = IRQ_HANDLED;
ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->trp_interrupt_0_status,
&trp_error);
if (!ret && (trp_error & SB_ECC_ERROR)) {
edac_printk(KERN_CRIT, EDAC_LLCC,
"Single Bit Error detected in Tag RAM\n");
ret = dump_syn_reg(edev_ctl, LLCC_TRAM_CE, i);
} else if (!ret && (trp_error & DB_ECC_ERROR)) {
edac_printk(KERN_CRIT, EDAC_LLCC,
"Double Bit Error detected in Tag RAM\n");
ret = dump_syn_reg(edev_ctl, LLCC_TRAM_UE, i);
}
if (!ret)
irq_rc = IRQ_HANDLED;
}
return irq_rc;
}
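/* Polling entry point: reuse the interrupt handler with a dummy IRQ number. */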
static void llcc_ecc_check(struct edac_device_ctl_info *edev_ctl)
{
llcc_ecc_irq_handler(0, edev_ctl);
}
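/*
 * Probe: enable the TRP/DRP error interrupts in the LLCC broadcast
 * register space, allocate an EDAC device covering all LLCC banks, and
 * use interrupt mode when the LLCC driver provides an ECC IRQ, falling
 * back to polling otherwise.
 */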
static int qcom_llcc_edac_probe(struct platform_device *pdev)
{
struct llcc_drv_data *llcc_driv_data = pdev->dev.platform_data;
struct edac_device_ctl_info *edev_ctl;
struct device *dev = &pdev->dev;
int ecc_irq;
int rc;
rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
if (rc)
return rc;
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank",
llcc_driv_data->num_banks, 1,
NULL, 0,
edac_device_alloc_index());
if (!edev_ctl)
return -ENOMEM;
edev_ctl->dev = dev;
edev_ctl->mod_name = dev_name(dev);
edev_ctl->dev_name = dev_name(dev);
edev_ctl->ctl_name = "llcc";
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
/* Check whether the LLCC driver passed an ECC IRQ */
ecc_irq = llcc_driv_data->ecc_irq;
if (ecc_irq > 0) {
/* Use interrupt mode if IRQ is available */
rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl);
if (!rc) {
edac_op_state = EDAC_OPSTATE_INT;
goto irq_done;
}
}
/* Fall back to polling mode otherwise */
edev_ctl->poll_msec = ECC_POLL_MSEC;
edev_ctl->edac_check = llcc_ecc_check;
edac_op_state = EDAC_OPSTATE_POLL;
irq_done:
rc = edac_device_add_device(edev_ctl);
if (rc) {
edac_device_free_ctl_info(edev_ctl);
return rc;
}
platform_set_drvdata(pdev, edev_ctl);
return rc;
}
static int qcom_llcc_edac_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *edev_ctl = dev_get_drvdata(&pdev->dev);
edac_device_del_device(edev_ctl->dev);
edac_device_free_ctl_info(edev_ctl);
return 0;
}
static const struct platform_device_id qcom_llcc_edac_id_table[] = {
{ .name = "qcom_llcc_edac" },
{}
};
MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table);
static struct platform_driver qcom_llcc_edac_driver = {
.probe = qcom_llcc_edac_probe,
.remove = qcom_llcc_edac_remove,
.driver = {
.name = "qcom_llcc_edac",
},
.id_table = qcom_llcc_edac_id_table,
};
module_platform_driver(qcom_llcc_edac_driver);
MODULE_DESCRIPTION("QCOM EDAC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/edac/qcom_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* amd8111_edac.c, AMD8111 Hyper Transport chip EDAC kernel module
*
* Copyright (c) 2008 Wind River Systems, Inc.
*
* Authors: Cao Qingtao <[email protected]>
* Benjamin Walsh <[email protected]>
* Hu Yongqi <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/edac.h>
#include <linux/pci_ids.h>
#include <asm/io.h>
#include "edac_module.h"
#include "amd8111_edac.h"
#define AMD8111_EDAC_REVISION " Ver: 1.0.0"
#define AMD8111_EDAC_MOD_STR "amd8111_edac"
#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
enum amd8111_edac_devs {
LPC_BRIDGE = 0,
};
enum amd8111_edac_pcis {
PCI_BRIDGE = 0,
};
/* Wrapper functions for accessing PCI configuration space */
static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
{
int ret;
ret = pci_read_config_dword(dev, reg, val32);
if (ret != 0)
printk(KERN_ERR AMD8111_EDAC_MOD_STR
" PCI Access Read Error at 0x%x\n", reg);
return ret;
}
static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8)
{
int ret;
ret = pci_read_config_byte(dev, reg, val8);
if (ret != 0)
printk(KERN_ERR AMD8111_EDAC_MOD_STR
" PCI Access Read Error at 0x%x\n", reg);
}
static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
{
int ret;
ret = pci_write_config_dword(dev, reg, val32);
if (ret != 0)
printk(KERN_ERR AMD8111_EDAC_MOD_STR
" PCI Access Write Error at 0x%x\n", reg);
}
static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8)
{
int ret;
ret = pci_write_config_byte(dev, reg, val8);
if (ret != 0)
printk(KERN_ERR AMD8111_EDAC_MOD_STR
" PCI Access Write Error at 0x%x\n", reg);
}
/*
* device-specific methods for amd8111 PCI Bridge Controller
*
* Error Reporting and Handling for amd8111 chipset could be found
* in its datasheet 3.1.2 section, P37
*/
static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info)
{
u32 val32;
struct pci_dev *dev = pci_info->dev;
/* First clear error detection flags on the host interface */
/* Clear SSE/SMA/STA flags in the global status register*/
edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
if (val32 & PCI_STSCMD_CLEAR_MASK)
edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
/* Clear CRC and Link Fail flags in HT Link Control reg */
edac_pci_read_dword(dev, REG_HT_LINK, &val32);
if (val32 & HT_LINK_CLEAR_MASK)
edac_pci_write_dword(dev, REG_HT_LINK, val32);
/* Second clear all fault on the secondary interface */
/* Clear error flags in the memory-base limit reg. */
edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
if (val32 & MEM_LIMIT_CLEAR_MASK)
edac_pci_write_dword(dev, REG_MEM_LIM, val32);
/* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */
edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK)
edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
/* Last enable error detections */
if (edac_op_state == EDAC_OPSTATE_POLL) {
/* Enable System Error reporting in global status register */
edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
val32 |= PCI_STSCMD_SERREN;
edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
/* Enable CRC Sync flood packets to HyperTransport Link */
edac_pci_read_dword(dev, REG_HT_LINK, &val32);
val32 |= HT_LINK_CRCFEN;
edac_pci_write_dword(dev, REG_HT_LINK, val32);
/* Enable SSE reporting etc in Interrupt control reg */
edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
val32 |= PCI_INTBRG_CTRL_POLL_MASK;
edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
}
}
static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info)
{
u32 val32;
struct pci_dev *dev = pci_info->dev;
if (edac_op_state == EDAC_OPSTATE_POLL) {
/* Disable System Error reporting */
edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
val32 &= ~PCI_STSCMD_SERREN;
edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
/* Disable CRC flood packets */
edac_pci_read_dword(dev, REG_HT_LINK, &val32);
val32 &= ~HT_LINK_CRCFEN;
edac_pci_write_dword(dev, REG_HT_LINK, val32);
/* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */
edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
val32 &= ~PCI_INTBRG_CTRL_POLL_MASK;
edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
}
}
static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev)
{
struct amd8111_pci_info *pci_info = edac_dev->pvt_info;
struct pci_dev *dev = pci_info->dev;
u32 val32;
/* Check out PCI Bridge Status and Command Register */
edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
if (val32 & PCI_STSCMD_CLEAR_MASK) {
printk(KERN_INFO "Error(s) in PCI bridge status and command"
"register on device %s\n", pci_info->ctl_name);
printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n",
(val32 & PCI_STSCMD_SSE) != 0,
(val32 & PCI_STSCMD_RMA) != 0,
(val32 & PCI_STSCMD_RTA) != 0);
val32 |= PCI_STSCMD_CLEAR_MASK;
edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check out HyperTransport Link Control Register */
edac_pci_read_dword(dev, REG_HT_LINK, &val32);
if (val32 & HT_LINK_LKFAIL) {
printk(KERN_INFO "Error(s) in hypertransport link control"
"register on device %s\n", pci_info->ctl_name);
printk(KERN_INFO "LKFAIL: %d\n",
(val32 & HT_LINK_LKFAIL) != 0);
val32 |= HT_LINK_LKFAIL;
edac_pci_write_dword(dev, REG_HT_LINK, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check out PCI Interrupt and Bridge Control Register */
edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
if (val32 & PCI_INTBRG_CTRL_DTSTAT) {
printk(KERN_INFO "Error(s) in PCI interrupt and bridge control"
"register on device %s\n", pci_info->ctl_name);
printk(KERN_INFO "DTSTAT: %d\n",
(val32 & PCI_INTBRG_CTRL_DTSTAT) != 0);
val32 |= PCI_INTBRG_CTRL_DTSTAT;
edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check out PCI Bridge Memory Base-Limit Register */
edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
if (val32 & MEM_LIMIT_CLEAR_MASK) {
printk(KERN_INFO
"Error(s) in mem limit register on %s device\n",
pci_info->ctl_name);
printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
"RTA: %d, STA: %d, MDPE: %d\n",
(val32 & MEM_LIMIT_DPE) != 0,
(val32 & MEM_LIMIT_RSE) != 0,
(val32 & MEM_LIMIT_RMA) != 0,
(val32 & MEM_LIMIT_RTA) != 0,
(val32 & MEM_LIMIT_STA) != 0,
(val32 & MEM_LIMIT_MDPE) != 0);
val32 |= MEM_LIMIT_CLEAR_MASK;
edac_pci_write_dword(dev, REG_MEM_LIM, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
}
static struct resource *legacy_io_res;
static int at_compat_reg_broken;
#define LEGACY_NR_PORTS 1
/* device-specific methods for amd8111 LPC Bridge device */
static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info)
{
u8 val8;
struct pci_dev *dev = dev_info->dev;
/* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */
legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS,
AMD8111_EDAC_MOD_STR);
if (!legacy_io_res)
printk(KERN_INFO "%s: failed to request legacy I/O region "
"start %d, len %d\n", __func__,
REG_AT_COMPAT, LEGACY_NR_PORTS);
else {
val8 = __do_inb(REG_AT_COMPAT);
if (val8 == 0xff) { /* buggy port */
printk(KERN_INFO "%s: port %d is buggy, not supported"
" by hardware?\n", __func__, REG_AT_COMPAT);
at_compat_reg_broken = 1;
release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
legacy_io_res = NULL;
} else {
u8 out8 = 0;
if (val8 & AT_COMPAT_SERR)
out8 = AT_COMPAT_CLRSERR;
if (val8 & AT_COMPAT_IOCHK)
out8 |= AT_COMPAT_CLRIOCHK;
if (out8 > 0)
__do_outb(out8, REG_AT_COMPAT);
}
}
/* Second clear error flags on LPC bridge */
edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
if (val8 & IO_CTRL_1_CLEAR_MASK)
edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
}
static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info)
{
if (legacy_io_res)
release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
}
static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev)
{
struct amd8111_dev_info *dev_info = edac_dev->pvt_info;
struct pci_dev *dev = dev_info->dev;
u8 val8;
edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
if (val8 & IO_CTRL_1_CLEAR_MASK) {
printk(KERN_INFO
"Error(s) in IO control register on %s device\n",
dev_info->ctl_name);
printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n",
(val8 & IO_CTRL_1_LPC_ERR) != 0,
(val8 & IO_CTRL_1_PW2LPC) != 0);
val8 |= IO_CTRL_1_CLEAR_MASK;
edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
if (at_compat_reg_broken == 0) {
u8 out8 = 0;
val8 = __do_inb(REG_AT_COMPAT);
if (val8 & AT_COMPAT_SERR)
out8 = AT_COMPAT_CLRSERR;
if (val8 & AT_COMPAT_IOCHK)
out8 |= AT_COMPAT_CLRIOCHK;
if (out8 > 0) {
__do_outb(out8, REG_AT_COMPAT);
edac_device_handle_ue(edac_dev, 0, 0,
edac_dev->ctl_name);
}
}
}
/* General devices represented by edac_device_ctl_info */
static struct amd8111_dev_info amd8111_devices[] = {
[LPC_BRIDGE] = {
.err_dev = PCI_DEVICE_ID_AMD_8111_LPC,
.ctl_name = "lpc",
.init = amd8111_lpc_bridge_init,
.exit = amd8111_lpc_bridge_exit,
.check = amd8111_lpc_bridge_check,
},
{0},
};
/* PCI controllers represented by edac_pci_ctl_info */
static struct amd8111_pci_info amd8111_pcis[] = {
[PCI_BRIDGE] = {
.err_dev = PCI_DEVICE_ID_AMD_8111_PCI,
.ctl_name = "AMD8111_PCI_Controller",
.init = amd8111_pci_bridge_init,
.exit = amd8111_pci_bridge_exit,
.check = amd8111_pci_bridge_check,
},
{0},
};
static int amd8111_dev_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
int ret = -ENODEV;
dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
dev_info->err_dev, NULL);
if (!dev_info->dev) {
printk(KERN_ERR "EDAC device not found:"
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, dev_info->err_dev,
dev_info->ctl_name);
goto err;
}
if (pci_enable_device(dev_info->dev)) {
printk(KERN_ERR "failed to enable:"
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, dev_info->err_dev,
dev_info->ctl_name);
goto err_dev_put;
}
/*
* we do not allocate extra private structure for
* edac_device_ctl_info, but make use of existing
* one instead.
*/
dev_info->edac_idx = edac_device_alloc_index();
dev_info->edac_dev =
edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
NULL, 0, 0,
NULL, 0, dev_info->edac_idx);
if (!dev_info->edac_dev) {
ret = -ENOMEM;
goto err_dev_put;
}
dev_info->edac_dev->pvt_info = dev_info;
dev_info->edac_dev->dev = &dev_info->dev->dev;
dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
dev_info->edac_dev->ctl_name = dev_info->ctl_name;
dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
dev_info->edac_dev->edac_check = dev_info->check;
if (dev_info->init)
dev_info->init(dev_info);
if (edac_device_add_device(dev_info->edac_dev) > 0) {
printk(KERN_ERR "failed to add edac_dev for %s\n",
dev_info->ctl_name);
goto err_edac_free_ctl;
}
printk(KERN_INFO "added one edac_dev on AMD8111 "
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, dev_info->err_dev,
dev_info->ctl_name);
return 0;
err_edac_free_ctl:
edac_device_free_ctl_info(dev_info->edac_dev);
err_dev_put:
pci_dev_put(dev_info->dev);
err:
return ret;
}
static void amd8111_dev_remove(struct pci_dev *dev)
{
struct amd8111_dev_info *dev_info;
for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++)
if (dev_info->dev->device == dev->device)
break;
if (!dev_info->err_dev) /* should never happen */
return;
if (dev_info->edac_dev) {
edac_device_del_device(dev_info->edac_dev->dev);
edac_device_free_ctl_info(dev_info->edac_dev);
}
if (dev_info->exit)
dev_info->exit(dev_info);
pci_dev_put(dev_info->dev);
}
static int amd8111_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
int ret = -ENODEV;
pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
pci_info->err_dev, NULL);
if (!pci_info->dev) {
printk(KERN_ERR "EDAC device not found:"
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, pci_info->err_dev,
pci_info->ctl_name);
goto err;
}
if (pci_enable_device(pci_info->dev)) {
printk(KERN_ERR "failed to enable:"
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, pci_info->err_dev,
pci_info->ctl_name);
goto err_dev_put;
}
/*
* we do not allocate extra private structure for
* edac_pci_ctl_info, but make use of existing
* one instead.
*/
pci_info->edac_idx = edac_pci_alloc_index();
pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
if (!pci_info->edac_dev) {
ret = -ENOMEM;
goto err_dev_put;
}
pci_info->edac_dev->pvt_info = pci_info;
pci_info->edac_dev->dev = &pci_info->dev->dev;
pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
pci_info->edac_dev->ctl_name = pci_info->ctl_name;
pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
pci_info->edac_dev->edac_check = pci_info->check;
if (pci_info->init)
pci_info->init(pci_info);
if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
printk(KERN_ERR "failed to add edac_pci for %s\n",
pci_info->ctl_name);
goto err_edac_free_ctl;
}
printk(KERN_INFO "added one edac_pci on AMD8111 "
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, pci_info->err_dev,
pci_info->ctl_name);
return 0;
err_edac_free_ctl:
edac_pci_free_ctl_info(pci_info->edac_dev);
err_dev_put:
pci_dev_put(pci_info->dev);
err:
return ret;
}
static void amd8111_pci_remove(struct pci_dev *dev)
{
struct amd8111_pci_info *pci_info;
for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++)
if (pci_info->dev->device == dev->device)
break;
if (!pci_info->err_dev) /* should never happen */
return;
if (pci_info->edac_dev) {
edac_pci_del_device(pci_info->edac_dev->dev);
edac_pci_free_ctl_info(pci_info->edac_dev);
}
if (pci_info->exit)
pci_info->exit(pci_info);
pci_dev_put(pci_info->dev);
}
/* PCI Device ID table for general EDAC device */
static const struct pci_device_id amd8111_edac_dev_tbl[] = {
{
PCI_VEND_DEV(AMD, 8111_LPC),
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = 0,
.class_mask = 0,
.driver_data = LPC_BRIDGE,
},
{
0,
} /* table is NULL-terminated */
};
MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl);
static struct pci_driver amd8111_edac_dev_driver = {
.name = "AMD8111_EDAC_DEV",
.probe = amd8111_dev_probe,
.remove = amd8111_dev_remove,
.id_table = amd8111_edac_dev_tbl,
};
/* PCI Device ID table for EDAC PCI controller */
static const struct pci_device_id amd8111_edac_pci_tbl[] = {
{
PCI_VEND_DEV(AMD, 8111_PCI),
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = 0,
.class_mask = 0,
.driver_data = PCI_BRIDGE,
},
{
0,
} /* table is NULL-terminated */
};
MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl);
static struct pci_driver amd8111_edac_pci_driver = {
.name = "AMD8111_EDAC_PCI",
.probe = amd8111_pci_probe,
.remove = amd8111_pci_remove,
.id_table = amd8111_edac_pci_tbl,
};
static int __init amd8111_edac_init(void)
{
int val;
printk(KERN_INFO "AMD8111 EDAC driver " AMD8111_EDAC_REVISION "\n");
printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
/* Only POLL mode supported so far */
edac_op_state = EDAC_OPSTATE_POLL;
val = pci_register_driver(&amd8111_edac_dev_driver);
val |= pci_register_driver(&amd8111_edac_pci_driver);
return val;
}
static void __exit amd8111_edac_exit(void)
{
pci_unregister_driver(&amd8111_edac_pci_driver);
pci_unregister_driver(&amd8111_edac_dev_driver);
}
module_init(amd8111_edac_init);
module_exit(amd8111_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <[email protected]>");
MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");
| linux-master | drivers/edac/amd8111_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel 7300 class Memory Controllers kernel module (Clarksboro)
*
* Copyright (c) 2010 by:
* Mauro Carvalho Chehab
*
* Red Hat Inc. https://www.redhat.com
*
* Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
* http://www.intel.com/Assets/PDF/datasheet/318082.pdf
*
 * TODO: The chipset also allows checking for PCI Express errors. Currently,
 * the driver covers only memory errors
*
* This driver uses "csrows" EDAC attribute to represent DIMM slot#
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include "edac_module.h"
/*
* Alter this version for the I7300 module when modifications are made
*/
#define I7300_REVISION " Ver: 1.0.0"
#define EDAC_MOD_STR "i7300_edac"
#define i7300_printk(level, fmt, arg...) \
edac_printk(level, "i7300", fmt, ##arg)
#define i7300_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)
/***********************************************
* i7300 Limit constants Structs and static vars
***********************************************/
/*
* Memory topology is organized as:
* Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
* Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
 * Each channel can have up to 8 DIMM sets (called SLOTS)
* Slots should generally be filled in pairs
* Except on Single Channel mode of operation
* just slot 0/channel0 filled on this mode
* On normal operation mode, the two channels on a branch should be
* filled together for the same SLOT#
 * When in mirrored mode, Branch 1 replicates the memory of Branch 0, so the four
* channels on both branches should be filled
*/
/* Limits for i7300 */
#define MAX_SLOTS 8
#define MAX_BRANCHES 2
#define MAX_CH_PER_BRANCH 2
#define MAX_CHANNELS (MAX_CH_PER_BRANCH * MAX_BRANCHES)
#define MAX_MIR 3
#define to_channel(ch, branch) ((((branch)) << 1) | (ch))
#define to_csrow(slot, ch, branch) \
(to_channel(ch, branch) | ((slot) << 2))
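/*
 * Worked example (for illustration only): slot 3 on channel 1 of branch 1
 * maps to to_channel(1, 1) = (1 << 1) | 1 = 3 and
 * to_csrow(3, 1, 1) = 3 | (3 << 2) = 15.
 */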
/* Device name and register DID (Device ID) */
struct i7300_dev_info {
const char *ctl_name; /* name for this device */
u16 fsb_mapping_errors; /* DID for the branchmap,control */
};
/* Table of devices attributes supported by this driver */
static const struct i7300_dev_info i7300_devs[] = {
{
.ctl_name = "I7300",
.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
},
};
struct i7300_dimm_info {
int megabytes; /* size, 0 means not present */
};
/* driver private data structure */
struct i7300_pvt {
struct pci_dev *pci_dev_16_0_fsb_ctlr; /* 16.0 */
struct pci_dev *pci_dev_16_1_fsb_addr_map; /* 16.1 */
struct pci_dev *pci_dev_16_2_fsb_err_regs; /* 16.2 */
struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES]; /* 21.0 and 22.0 */
u16 tolm; /* top of low memory */
u64 ambase; /* AMB BAR */
u32 mc_settings; /* Report several settings */
u32 mc_settings_a;
u16 mir[MAX_MIR]; /* Memory Interleave Reg*/
u16 mtr[MAX_SLOTS][MAX_BRANCHES]; /* Memory Technology Reg */
u16 ambpresent[MAX_CHANNELS]; /* AMB present regs */
/* DIMM information matrix, allocating architecture maximums */
struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];
/* Temporary buffer for use when preparing error messages */
char *tmp_prt_buffer;
};
/* FIXME: Why do we need to have this static? */
static struct edac_pci_ctl_info *i7300_pci;
/***************************************************
* i7300 Register definitions for memory enumeration
***************************************************/
/*
* Device 16,
* Function 0: System Address (not documented)
* Function 1: Memory Branch Map, Control, Errors Register
*/
/* OFFSETS for Function 0 */
#define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
#define MAXCH 0x56 /* Max Channel Number */
#define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
/* OFFSETS for Function 1 */
#define MC_SETTINGS 0x40
#define IS_MIRRORED(mc) ((mc) & (1 << 16))
#define IS_ECC_ENABLED(mc) ((mc) & (1 << 5))
#define IS_RETRY_ENABLED(mc) ((mc) & (1 << 31))
#define IS_SCRBALGO_ENHANCED(mc) ((mc) & (1 << 8))
#define MC_SETTINGS_A 0x58
#define IS_SINGLE_MODE(mca) ((mca) & (1 << 14))
#define TOLM 0x6C
#define MIR0 0x80
#define MIR1 0x84
#define MIR2 0x88
/*
 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 * seems that we cannot use this information directly for the same usage.
 * Each memory slot may have up to 2 AMB interfaces, one for the incoming and
 * another for the outgoing interface to the next slot.
 * For now, the driver just stores the AMB present registers, but relies only
 * on the MTR info to detect memory.
* Datasheet is also not clear about how to map each AMBPRESENT registers to
* one of the 4 available channels.
*/
#define AMBPRESENT_0 0x64
#define AMBPRESENT_1 0x66
static const u16 mtr_regs[MAX_SLOTS] = {
0x80, 0x84, 0x88, 0x8c,
0x82, 0x86, 0x8a, 0x8e
};
/*
 * Defines to extract the various fields from the
* MTRx - Memory Technology Registers
*/
#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 8))
#define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 7))
#define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 5)) ? 8 : 4)
#define MTR_DIMM_RANKS(mtr) (((mtr) & (1 << 4)) ? 1 : 0)
#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
#define MTR_DRAM_BANKS_ADDR_BITS 2
#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
/************************************************
* i7300 Register definitions for error detection
************************************************/
/*
* Device 16.1: FBD Error Registers
*/
#define FERR_FAT_FBD 0x98
static const char *ferr_fat_fbd_name[] = {
[22] = "Non-Redundant Fast Reset Timeout",
[2] = ">Tmid Thermal event with intelligent throttling disabled",
[1] = "Memory or FBD configuration CRC read error",
[0] = "Memory Write error on non-redundant retry or "
"FBD configuration Write error on retry",
};
#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3)
#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
#define FERR_NF_FBD 0xa0
static const char *ferr_nf_fbd_name[] = {
[24] = "DIMM-Spare Copy Completed",
[23] = "DIMM-Spare Copy Initiated",
[22] = "Redundant Fast Reset Timeout",
[21] = "Memory Write error on redundant retry",
[18] = "SPD protocol Error",
[17] = "FBD Northbound parity error on FBD Sync Status",
[16] = "Correctable Patrol Data ECC",
[15] = "Correctable Resilver- or Spare-Copy Data ECC",
[14] = "Correctable Mirrored Demand Data ECC",
[13] = "Correctable Non-Mirrored Demand Data ECC",
[11] = "Memory or FBD configuration CRC read error",
[10] = "FBD Configuration Write error on first attempt",
[9] = "Memory Write error on first attempt",
[8] = "Non-Aliased Uncorrectable Patrol Data ECC",
[7] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
[6] = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
[5] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
[4] = "Aliased Uncorrectable Patrol Data ECC",
[3] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
[2] = "Aliased Uncorrectable Mirrored Demand Data ECC",
[1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
[0] = "Uncorrectable Data ECC on Replay",
};
#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
(1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
(1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
(1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\
(1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\
(1 << 1) | (1 << 0))
#define EMASK_FBD 0xa8
#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
(1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
(1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
(1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
(1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\
(1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\
(1 << 1) | (1 << 0))
/*
* Device 16.2: Global Error Registers
*/
#define FERR_GLOBAL_HI 0x48
static const char *ferr_global_hi_name[] = {
[3] = "FSB 3 Fatal Error",
[2] = "FSB 2 Fatal Error",
[1] = "FSB 1 Fatal Error",
[0] = "FSB 0 Fatal Error",
};
#define ferr_global_hi_is_fatal(errno) 1
#define FERR_GLOBAL_LO 0x40
static const char *ferr_global_lo_name[] = {
[31] = "Internal MCH Fatal Error",
[30] = "Intel QuickData Technology Device Fatal Error",
[29] = "FSB1 Fatal Error",
[28] = "FSB0 Fatal Error",
[27] = "FBD Channel 3 Fatal Error",
[26] = "FBD Channel 2 Fatal Error",
[25] = "FBD Channel 1 Fatal Error",
[24] = "FBD Channel 0 Fatal Error",
[23] = "PCI Express Device 7Fatal Error",
[22] = "PCI Express Device 6 Fatal Error",
[21] = "PCI Express Device 5 Fatal Error",
[20] = "PCI Express Device 4 Fatal Error",
[19] = "PCI Express Device 3 Fatal Error",
[18] = "PCI Express Device 2 Fatal Error",
[17] = "PCI Express Device 1 Fatal Error",
[16] = "ESI Fatal Error",
[15] = "Internal MCH Non-Fatal Error",
[14] = "Intel QuickData Technology Device Non Fatal Error",
[13] = "FSB1 Non-Fatal Error",
[12] = "FSB 0 Non-Fatal Error",
[11] = "FBD Channel 3 Non-Fatal Error",
[10] = "FBD Channel 2 Non-Fatal Error",
[9] = "FBD Channel 1 Non-Fatal Error",
[8] = "FBD Channel 0 Non-Fatal Error",
[7] = "PCI Express Device 7 Non-Fatal Error",
[6] = "PCI Express Device 6 Non-Fatal Error",
[5] = "PCI Express Device 5 Non-Fatal Error",
[4] = "PCI Express Device 4 Non-Fatal Error",
[3] = "PCI Express Device 3 Non-Fatal Error",
[2] = "PCI Express Device 2 Non-Fatal Error",
[1] = "PCI Express Device 1 Non-Fatal Error",
[0] = "ESI Non-Fatal Error",
};
#define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1)
#define NRECMEMA 0xbe
#define NRECMEMA_BANK(v) (((v) >> 12) & 7)
#define NRECMEMA_RANK(v) (((v) >> 8) & 15)
#define NRECMEMB 0xc0
#define NRECMEMB_IS_WR(v) ((v) & (1 << 31))
#define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
#define NRECMEMB_RAS(v) ((v) & 0xffff)
#define REDMEMA 0xdc
#define REDMEMB 0x7c
#define RECMEMA 0xe0
#define RECMEMA_BANK(v) (((v) >> 12) & 7)
#define RECMEMA_RANK(v) (((v) >> 8) & 15)
#define RECMEMB 0xe4
#define RECMEMB_IS_WR(v) ((v) & (1 << 31))
#define RECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
#define RECMEMB_RAS(v) ((v) & 0xffff)
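/*
 * Example decode (for illustration only): a RECMEMB value of 0x80120034
 * would indicate a write access (bit 31 set) with
 * CAS = (0x80120034 >> 16) & 0x1fff = 0x12 and RAS = 0x0034.
 */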
/********************************************
* i7300 Functions related to error detection
********************************************/
/**
* get_err_from_table() - Gets the error message from a table
* @table: table name (array of char *)
* @size: number of elements at the table
* @pos: position of the element to be returned
*
* This is a small routine that gets the pos-th element of a table. If the
* element doesn't exist (or it is empty), it returns "reserved".
* Instead of calling it directly, the better is to call via the macro
* GET_ERR_FROM_TABLE(), that automatically checks the table size via
* ARRAY_SIZE() macro
*/
static const char *get_err_from_table(const char *table[], int size, int pos)
{
if (unlikely(pos >= size))
return "Reserved";
if (unlikely(!table[pos]))
return "Reserved";
return table[pos];
}
#define GET_ERR_FROM_TABLE(table, pos) \
get_err_from_table(table, ARRAY_SIZE(table), pos)
/**
* i7300_process_error_global() - Retrieve the hardware error information from
* the hardware global error registers and
 * send it to dmesg
* @mci: struct mem_ctl_info pointer
*/
static void i7300_process_error_global(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
u32 errnum, error_reg;
unsigned long errors;
const char *specific;
bool is_fatal;
pvt = mci->pvt_info;
/* read in the 1st FATAL error register */
pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
FERR_GLOBAL_HI, &error_reg);
if (unlikely(error_reg)) {
errors = error_reg;
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_global_hi_name));
specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
is_fatal = ferr_global_hi_is_fatal(errnum);
/* Clear the error bit */
pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
FERR_GLOBAL_HI, error_reg);
goto error_global;
}
pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
FERR_GLOBAL_LO, &error_reg);
if (unlikely(error_reg)) {
errors = error_reg;
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_global_lo_name));
specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
is_fatal = ferr_global_lo_is_fatal(errnum);
/* Clear the error bit */
pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
FERR_GLOBAL_LO, error_reg);
goto error_global;
}
return;
error_global:
i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
is_fatal ? "Fatal" : "NOT fatal", specific);
}
/**
* i7300_process_fbd_error() - Retrieve the hardware error information from
 * the FBD error registers and send it via
* EDAC error API calls
* @mci: struct mem_ctl_info pointer
*/
static void i7300_process_fbd_error(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
u32 errnum, value, error_reg;
u16 val16;
unsigned branch, channel, bank, rank, cas, ras;
u32 syndrome;
unsigned long errors;
const char *specific;
bool is_wr;
pvt = mci->pvt_info;
/* read in the 1st FATAL error register */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
FERR_FAT_FBD, &error_reg);
if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
errors = error_reg & FERR_FAT_FBD_ERR_MASK ;
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_fat_fbd_name));
specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
NRECMEMA, &val16);
bank = NRECMEMA_BANK(val16);
rank = NRECMEMA_RANK(val16);
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
NRECMEMB, &value);
is_wr = NRECMEMB_IS_WR(value);
cas = NRECMEMB_CAS(value);
ras = NRECMEMB_RAS(value);
/* Clean the error register */
pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
FERR_FAT_FBD, error_reg);
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
"Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
bank, ras, cas, errors, specific);
edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
branch, -1, rank,
is_wr ? "Write error" : "Read error",
pvt->tmp_prt_buffer);
}
/* read in the 1st NON-FATAL error register */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
FERR_NF_FBD, &error_reg);
if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
errors = error_reg & FERR_NF_FBD_ERR_MASK;
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_nf_fbd_name));
specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
REDMEMA, &syndrome);
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
RECMEMA, &val16);
bank = RECMEMA_BANK(val16);
rank = RECMEMA_RANK(val16);
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
RECMEMB, &value);
is_wr = RECMEMB_IS_WR(value);
cas = RECMEMB_CAS(value);
ras = RECMEMB_RAS(value);
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
REDMEMB, &value);
channel = (branch << 1);
/* Second channel ? */
channel += !!(value & BIT(17));
/* Clear the error bit */
pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
FERR_NF_FBD, error_reg);
/* Form out message */
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
"DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
bank, ras, cas, errors, specific);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
syndrome,
branch >> 1, channel % 2, rank,
is_wr ? "Write error" : "Read error",
pvt->tmp_prt_buffer);
}
return;
}
/**
* i7300_check_error() - Calls the error checking subroutines
* @mci: struct mem_ctl_info pointer
*/
static void i7300_check_error(struct mem_ctl_info *mci)
{
i7300_process_error_global(mci);
i7300_process_fbd_error(mci);
};
/**
* i7300_clear_error() - Clears the error registers
* @mci: struct mem_ctl_info pointer
*/
static void i7300_clear_error(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt = mci->pvt_info;
u32 value;
/*
* All error values are RWC - we need to read and write 1 to the
* bit that we want to cleanup
*/
/* Clear global error registers */
pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
FERR_GLOBAL_HI, &value);
pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
FERR_GLOBAL_HI, value);
pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
FERR_GLOBAL_LO, &value);
pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
FERR_GLOBAL_LO, value);
/* Clear FBD error registers */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
FERR_FAT_FBD, &value);
pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
FERR_FAT_FBD, value);
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
FERR_NF_FBD, &value);
pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
FERR_NF_FBD, value);
}
/**
* i7300_enable_error_reporting() - Enable the memory reporting logic at the
* hardware
* @mci: struct mem_ctl_info pointer
*/
static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt = mci->pvt_info;
u32 fbd_error_mask;
/* Read the FBD Error Mask Register */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
EMASK_FBD, &fbd_error_mask);
/* Enable with a '0' */
fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);
pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
EMASK_FBD, fbd_error_mask);
}
/************************************************
 * i7300 Functions related to memory enumeration
************************************************/
/**
* decode_mtr() - Decodes the MTR descriptor, filling the edac structs
* @pvt: pointer to the private data struct used by i7300 driver
* @slot: DIMM slot (0 to 7)
* @ch: Channel number within the branch (0 or 1)
* @branch: Branch number (0 or 1)
* @dinfo: Pointer to DIMM info where dimm size is stored
* @dimm: Pointer to the struct dimm_info that corresponds to that element
*/
static int decode_mtr(struct i7300_pvt *pvt,
int slot, int ch, int branch,
struct i7300_dimm_info *dinfo,
struct dimm_info *dimm)
{
int mtr, ans, addrBits, channel;
channel = to_channel(ch, branch);
mtr = pvt->mtr[slot][branch];
ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
slot, channel, ans ? "" : "NOT ");
/* Determine if there is a DIMM present in this DIMM slot */
if (!ans)
return 0;
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS;
/* Add the number of ROW bits */
addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
/* add the number of COLUMN bits */
addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
/* add the number of RANK bits */
addrBits += MTR_DIMM_RANKS(mtr);
addrBits += 6; /* add 64 bits per DIMM */
addrBits -= 20; /* divide by 2^20 */
addrBits -= 3; /* 8 bits per bytes */
dinfo->megabytes = 1 << addrBits;
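/*
 * Worked example (for illustration only): a dual-rank DIMM with 14 row bits
 * and 11 column bits gives addrBits = 2 + 14 + 11 + 1 = 28, then
 * 28 + 6 - 20 - 3 = 11, i.e. 1 << 11 = 2048 MB.
 */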
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
MTR_DIMM_RANKS(mtr) ? "double" : "single");
edac_dbg(2, "\t\tNUMROW: %s\n",
MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
"65,536 - 16 rows");
edac_dbg(2, "\t\tNUMCOL: %s\n",
MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
"reserved");
edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
/*
* The type of error detection actually depends of the
* mode of operation. When it is just one single memory chip, at
* socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code.
* In normal or mirrored mode, it uses Lockstep mode,
* with the possibility of using an extended algorithm for x8 memories
* See datasheet Sections 7.3.6 to 7.3.8
*/
dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
dimm->grain = 8;
dimm->mtype = MEM_FB_DDR2;
if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
dimm->edac_mode = EDAC_SECDED;
edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
} else {
edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
if (MTR_DRAM_WIDTH(mtr) == 8)
dimm->edac_mode = EDAC_S8ECD8ED;
else
dimm->edac_mode = EDAC_S4ECD4ED;
}
/* ask what device type on this row */
if (MTR_DRAM_WIDTH(mtr) == 8) {
edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
"enhanced" : "normal");
dimm->dtype = DEV_X8;
} else
dimm->dtype = DEV_X4;
return mtr;
}
/**
* print_dimm_size() - Prints dump of the memory organization
* @pvt: pointer to the private data struct used by i7300 driver
*
 * Useful for debug. If debug is disabled, this routine does nothing
*/
static void print_dimm_size(struct i7300_pvt *pvt)
{
#ifdef CONFIG_EDAC_DEBUG
struct i7300_dimm_info *dinfo;
char *p;
int space, n;
int channel, slot;
space = PAGE_SIZE;
p = pvt->tmp_prt_buffer;
n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < MAX_CHANNELS; channel++) {
n = snprintf(p, space, "channel %d | ", channel);
p += n;
space -= n;
}
edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
n = snprintf(p, space, "-------------------------------"
"------------------------------");
p += n;
space -= n;
edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
for (slot = 0; slot < MAX_SLOTS; slot++) {
n = snprintf(p, space, "csrow/SLOT %d ", slot);
p += n;
space -= n;
for (channel = 0; channel < MAX_CHANNELS; channel++) {
dinfo = &pvt->dimm_info[slot][channel];
n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
p += n;
space -= n;
}
edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
}
n = snprintf(p, space, "-------------------------------"
"------------------------------");
p += n;
space -= n;
edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
#endif
}
/**
* i7300_init_csrows() - Initialize the 'csrows' table within
* the mci control structure with the
* addressing of memory.
* @mci: struct mem_ctl_info pointer
*/
static int i7300_init_csrows(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
struct i7300_dimm_info *dinfo;
int rc = -ENODEV;
int mtr;
int ch, branch, slot, channel, max_channel, max_branch;
struct dimm_info *dimm;
pvt = mci->pvt_info;
edac_dbg(2, "Memory Technology Registers:\n");
if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
max_branch = 1;
max_channel = 1;
} else {
max_branch = MAX_BRANCHES;
max_channel = MAX_CH_PER_BRANCH;
}
/* Get the AMB present registers for the four channels */
for (branch = 0; branch < max_branch; branch++) {
/* Read and dump branch 0's MTRs */
channel = to_channel(0, branch);
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
AMBPRESENT_0,
&pvt->ambpresent[channel]);
edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
channel, pvt->ambpresent[channel]);
if (max_channel == 1)
continue;
channel = to_channel(1, branch);
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
AMBPRESENT_1,
&pvt->ambpresent[channel]);
edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
channel, pvt->ambpresent[channel]);
}
/* Get the set of MTR[0-7] regs by each branch */
for (slot = 0; slot < MAX_SLOTS; slot++) {
int where = mtr_regs[slot];
for (branch = 0; branch < max_branch; branch++) {
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
where,
&pvt->mtr[slot][branch]);
for (ch = 0; ch < max_channel; ch++) {
int channel = to_channel(ch, branch);
dimm = edac_get_dimm(mci, branch, ch, slot);
dinfo = &pvt->dimm_info[slot][channel];
mtr = decode_mtr(pvt, slot, ch, branch,
dinfo, dimm);
/* if no DIMMS on this row, continue */
if (!MTR_DIMMS_PRESENT(mtr))
continue;
rc = 0;
}
}
}
return rc;
}
/**
* decode_mir() - Decodes Memory Interleave Register (MIR) info
* @mir_no: number of the MIR register to decode
* @mir: array with the MIR data cached on the driver
*/
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
{
if (mir[mir_no] & 3)
edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
mir_no,
(mir[mir_no] >> 4) & 0xfff,
(mir[mir_no] & 1) ? "B0" : "",
(mir[mir_no] & 2) ? "B1" : "");
}
/**
* i7300_get_mc_regs() - Get the contents of the MC enumeration registers
* @mci: struct mem_ctl_info pointer
*
* Data read is cached internally for its usage when needed
*/
static int i7300_get_mc_regs(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
u32 actual_tolm;
int i, rc;
pvt = mci->pvt_info;
pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
(u32 *) &pvt->ambase);
edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
/* Get the Branch Map regs */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
pvt->tolm, pvt->tolm);
actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
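/*
 * Worked example (for illustration only): tolm = 8 (eight 256 MB regions)
 * gives actual_tolm = (1000 * 8) >> 2 = 2000, printed as "2.000 GB", and a
 * byte address of 8 << 28 = 0x80000000.
 */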
/* Get memory controller settings */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
&pvt->mc_settings);
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
&pvt->mc_settings_a);
if (IS_SINGLE_MODE(pvt->mc_settings_a))
edac_dbg(0, "Memory controller operating on single mode\n");
else
edac_dbg(0, "Memory controller operating on %smirrored mode\n",
IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
edac_dbg(0, "Error detection is %s\n",
IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
edac_dbg(0, "Retry is %s\n",
IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
/* Get Memory Interleave Range registers */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
&pvt->mir[0]);
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
&pvt->mir[1]);
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
&pvt->mir[2]);
/* Decode the MIR regs */
for (i = 0; i < MAX_MIR; i++)
decode_mir(i, pvt->mir);
rc = i7300_init_csrows(mci);
if (rc < 0)
return rc;
/* Go and determine the size of each DIMM and place in an
* orderly matrix */
print_dimm_size(pvt);
return 0;
}
/*************************************************
* i7300 Functions related to device probe/release
*************************************************/
/**
* i7300_put_devices() - Release the PCI devices
* @mci: struct mem_ctl_info pointer
*/
static void i7300_put_devices(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
int branch;
pvt = mci->pvt_info;
/* Decrement usage count for devices */
for (branch = 0; branch < MAX_CH_PER_BRANCH; branch++)
pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
}
/**
* i7300_get_devices() - Find and perform 'get' operation on the MCH's
* device/functions we want to reference for this driver
* @mci: struct mem_ctl_info pointer
*
* Access and prepare the several devices for usage:
* I7300 devices used by this driver:
* Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
* Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
* Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
*/
static int i7300_get_devices(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
struct pci_dev *pdev;
pvt = mci->pvt_info;
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
pdev))) {
/* Store device 16 funcs 1 and 2 */
switch (PCI_FUNC(pdev->devfn)) {
case 1:
if (!pvt->pci_dev_16_1_fsb_addr_map)
pvt->pci_dev_16_1_fsb_addr_map =
pci_dev_get(pdev);
break;
case 2:
if (!pvt->pci_dev_16_2_fsb_err_regs)
pvt->pci_dev_16_2_fsb_err_regs =
pci_dev_get(pdev);
break;
}
}
if (!pvt->pci_dev_16_1_fsb_addr_map ||
!pvt->pci_dev_16_2_fsb_err_regs) {
/* At least one device was not found */
i7300_printk(KERN_ERR,
"'system address,Process Bus' device not found:"
"vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
goto error;
}
edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_0_fsb_ctlr),
pvt->pci_dev_16_0_fsb_ctlr->vendor,
pvt->pci_dev_16_0_fsb_ctlr->device);
edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_1_fsb_addr_map),
pvt->pci_dev_16_1_fsb_addr_map->vendor,
pvt->pci_dev_16_1_fsb_addr_map->device);
edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_2_fsb_err_regs),
pvt->pci_dev_16_2_fsb_err_regs->vendor,
pvt->pci_dev_16_2_fsb_err_regs->device);
pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
NULL);
if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
i7300_printk(KERN_ERR,
"MC: 'BRANCH 0' device not found:"
"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
goto error;
}
pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
NULL);
if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
i7300_printk(KERN_ERR,
"MC: 'BRANCH 1' device not found:"
"vendor 0x%x device 0x%x Func 0 "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
goto error;
}
return 0;
error:
i7300_put_devices(mci);
return -ENODEV;
}
/**
* i7300_init_one() - Probe for one instance of the device
* @pdev: struct pci_dev pointer
* @id: struct pci_device_id pointer - currently unused
*/
static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[3];
struct i7300_pvt *pvt;
int rc;
/* wake up device */
rc = pci_enable_device(pdev);
if (rc == -EIO)
return rc;
edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
/* allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_BRANCH;
layers[0].size = MAX_BRANCHES;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = MAX_CH_PER_BRANCH;
layers[1].is_virt_csrow = true;
layers[2].type = EDAC_MC_LAYER_SLOT;
layers[2].size = MAX_SLOTS;
layers[2].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
edac_dbg(0, "MC: mci = %p\n", mci);
mci->pdev = &pdev->dev; /* record ptr to the generic device */
pvt = mci->pvt_info;
pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */
pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!pvt->tmp_prt_buffer) {
edac_mc_free(mci);
return -ENOMEM;
}
/* 'get' the pci devices we want to reserve for our use */
if (i7300_get_devices(mci))
goto fail0;
mci->mc_idx = 0;
mci->mtype_cap = MEM_FLAG_FB_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7300_edac.c";
mci->ctl_name = i7300_devs[0].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
/* Set the function pointer to an actual operation function */
mci->edac_check = i7300_check_error;
/* initialize the MC control structure 'csrows' table
* with the mapping and control information */
if (i7300_get_mc_regs(mci)) {
edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
} else {
edac_dbg(1, "MC: Enable error reporting now\n");
i7300_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
goto fail1;
}
i7300_clear_error(mci);
/* allocating generic PCI control info */
i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i7300_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
return 0;
/* Error exit unwinding stack */
fail1:
i7300_put_devices(mci);
fail0:
kfree(pvt->tmp_prt_buffer);
edac_mc_free(mci);
return -ENODEV;
}
/**
* i7300_remove_one() - Remove the driver
* @pdev: struct pci_dev pointer
*/
static void i7300_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
char *tmp;
edac_dbg(0, "\n");
if (i7300_pci)
edac_pci_release_generic_ctl(i7300_pci);
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
/* retrieve references to resources, and free those resources */
i7300_put_devices(mci);
kfree(tmp);
edac_mc_free(mci);
}
/*
* pci_device_id: table for which devices we are looking for
*
* Has only 8086:360c PCI ID
*/
static const struct pci_device_id i7300_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
{0,} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
/*
* i7300_driver: pci_driver structure for this module
*/
static struct pci_driver i7300_driver = {
.name = "i7300_edac",
.probe = i7300_init_one,
.remove = i7300_remove_one,
.id_table = i7300_pci_tbl,
};
/**
* i7300_init() - Registers the driver
*/
static int __init i7300_init(void)
{
int pci_rc;
edac_dbg(2, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i7300_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
/**
* i7300_exit() - Unregisters the driver
*/
static void __exit i7300_exit(void)
{
edac_dbg(2, "\n");
pci_unregister_driver(&i7300_driver);
}
module_init(i7300_init);
module_exit(i7300_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
I7300_REVISION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i7300_edac.c |
/*
* Radisys 82600 Embedded chipset Memory Controller kernel module
* (C) 2005 EADS Astrium
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Tim Small <[email protected]>, based on work by Thayne
* Harbaugh, Dan Hollis <goemon at anime dot net> and others.
*
* $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
*
* Written with reference to 82600 High Integration Dual PCI System
* Controller Data Book:
* www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
* references to this document given in []
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "r82600_edac"
#define r82600_printk(level, fmt, arg...) \
edac_printk(level, "r82600", fmt, ##arg)
#define r82600_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg)
/* Radisys say "The 82600 integrates a main memory SDRAM controller that
* supports up to four banks of memory. The four banks can support a mix of
* sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
* each of which can be any size from 16MB to 512MB. Both registered (control
* signals buffered) and unbuffered DIMM types are supported. Mixing of
* registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
* is not allowed. The 82600 SDRAM interface operates at the same frequency as
* the CPU bus, 66MHz, 100MHz or 133MHz."
*/
#define R82600_NR_CSROWS 4
#define R82600_NR_CHANS 1
#define R82600_NR_DIMMS 4
#define R82600_BRIDGE_ID 0x8200
/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
#define R82600_DRAMC 0x57 /* Various SDRAM related control bits
* all bits are R/W
*
* 7 SDRAM ISA Hole Enable
* 6 Flash Page Mode Enable
* 5 ECC Enable: 1=ECC 0=noECC
* 4 DRAM DIMM Type: 1=
* 3 BIOS Alias Disable
* 2 SDRAM BIOS Flash Write Enable
* 1:0 SDRAM Refresh Rate: 00=Disabled
* 01=7.8usec (256Mbit SDRAMs)
* 10=15.6us 11=125usec
*/
#define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
* More SDRAM related control bits
* all bits are R/W
*
* 15:8 Reserved.
*
* 7:5 Special SDRAM Mode Select
*
* 4 Force ECC
*
* 1=Drive ECC bits to 0 during
* write cycles (i.e. ECC test mode)
*
* 0=Normal ECC functioning
*
* 3 Enhanced Paging Enable
*
* 2 CAS# Latency 0=3clks 1=2clks
*
* 1 RAS# to CAS# Delay 0=3 1=2
*
* 0 RAS# Precharge 0=3 1=2
*/
#define R82600_EAP 0x80 /* ECC Error Address Pointer Register
*
* 31 Disable Hardware Scrubbing (RW)
* 0=Scrub on corrected read
* 1=Don't scrub on corrected read
*
* 30:12 Error Address Pointer (RO)
* Upper 19 bits of error address
*
* 11:4 Syndrome Bits (RO)
*
* 3 BSERR# on multibit error (RW)
* 1=enable 0=disable
*
 * 2 NMI on Single Bit Error (RW)
 * 1=NMI triggered by SBE n.b. other
 * prerequisites
* 0=NMI not triggered
*
* 1 MBE (R/WC)
* read 1=MBE at EAP (see above)
* read 0=no MBE, or SBE occurred first
* write 1=Clear MBE status (must also
* clear SBE)
* write 0=NOP
*
 * 0 SBE (R/WC)
* read 1=SBE at EAP (see above)
* read 0=no SBE, or MBE occurred first
* write 1=Clear SBE status (must also
* clear MBE)
* write 0=NOP
*/
#define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundary Address
* Registers
*
* 7:0 Address lines 30:24 - upper limit of
* each row [p57]
*/
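/*
 * Worked example (for illustration only): a DRBA value of 0x08 marks a row
 * boundary at 0x08 << 24 = 0x08000000 (128 MB); if the next row's DRBA reads
 * 0x18, that row spans 128 MB to 384 MB.
 */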
struct r82600_error_info {
u32 eapr;
};
static bool disable_hardware_scrub;
static struct edac_pci_ctl_info *r82600_pci;
static void r82600_get_error_info(struct mem_ctl_info *mci,
struct r82600_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
if (info->eapr & BIT(0))
/* Clear error to allow next error to be reported [p.62] */
pci_write_bits32(pdev, R82600_EAP,
((u32) BIT(0) & (u32) BIT(1)),
((u32) BIT(0) & (u32) BIT(1)));
if (info->eapr & BIT(1))
/* Clear error to allow next error to be reported [p.62] */
pci_write_bits32(pdev, R82600_EAP,
((u32) BIT(0) & (u32) BIT(1)),
((u32) BIT(0) & (u32) BIT(1)));
}
static int r82600_process_error_info(struct mem_ctl_info *mci,
struct r82600_error_info *info,
int handle_errors)
{
int error_found;
u32 eapaddr, page;
u32 syndrome;
error_found = 0;
/* bits 30:12 store the upper 19 bits of the 32 bit error address */
eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13;
/* Syndrome in bits 11:4 [p.62] */
syndrome = (info->eapr >> 4) & 0xFF;
/* the R82600 reports at less than page *
* granularity (upper 19 bits only) */
page = eapaddr >> PAGE_SHIFT;
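/*
 * Worked example (for illustration only): eapr = 0x004560a1 has BIT(0)
 * set (a CE), syndrome = (0x004560a1 >> 4) & 0xff = 0x0a and
 * eapaddr = ((0x004560a1 >> 12) & 0x7fff) << 13 = 0x456 << 13 = 0x8ac000;
 * with a 4 KiB page size (PAGE_SHIFT == 12) that is page 0x8ac.
 */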
if (info->eapr & BIT(0)) { /* CE? */
error_found = 1;
if (handle_errors)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, 0, syndrome,
edac_mc_find_csrow_by_page(mci, page),
0, -1,
mci->ctl_name, "");
}
if (info->eapr & BIT(1)) { /* UE? */
error_found = 1;
if (handle_errors)
/* 82600 doesn't give enough info */
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, 0, 0,
edac_mc_find_csrow_by_page(mci, page),
0, -1,
mci->ctl_name, "");
}
return error_found;
}
static void r82600_check(struct mem_ctl_info *mci)
{
struct r82600_error_info info;
r82600_get_error_info(mci, &info);
r82600_process_error_info(mci, &info, 1);
}
static inline int ecc_enabled(u8 dramcr)
{
return dramcr & BIT(5);
}
static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u8 dramcr)
{
struct csrow_info *csrow;
struct dimm_info *dimm;
int index;
u8 drbar; /* SDRAM Row Boundary Address Register */
u32 row_high_limit, row_high_limit_last;
u32 reg_sdram, ecc_on, row_base;
ecc_on = ecc_enabled(dramcr);
reg_sdram = dramcr & BIT(4);
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = mci->csrows[index];
dimm = csrow->channels[0]->dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
edac_dbg(1, "Row=%d DRBA = %#0x\n", index, drbar);
row_high_limit = ((u32) drbar << 24);
/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
edac_dbg(1, "Row=%d, Boundary Address=%#0x, Last = %#0x\n",
index, row_high_limit, row_high_limit_last);
/* Empty row [p.57] */
if (row_high_limit == row_high_limit_last)
continue;
row_base = row_high_limit_last;
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* Error address is top 19 bits - so granularity is *
* 14 bits */
dimm->grain = 1 << 14;
dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
/* FIXME - check that this is unknowable with this chipset */
dimm->dtype = DEV_UNKNOWN;
/* Mode is global on 82600 */
dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
row_high_limit_last = row_high_limit;
}
}
static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
u8 dramcr;
u32 eapr;
u32 scrub_disabled;
u32 sdram_refresh_rate;
struct r82600_error_info discard;
edac_dbg(0, "\n");
pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
pci_read_config_dword(pdev, R82600_EAP, &eapr);
scrub_disabled = eapr & BIT(31);
sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
edac_dbg(2, "sdram refresh rate = %#0x\n", sdram_refresh_rate);
edac_dbg(2, "DRAMC register = %#0x\n", dramcr);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = R82600_NR_CSROWS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = R82600_NR_CHANS;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
edac_dbg(0, "mci = %p\n", mci);
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
/* FIXME try to work out if the chip leads have been used for COM2
* instead on this board? [MA6?] MAYBE:
*/
/* On the R82600, the pins for memory bits 72:65 - i.e. the *
* EC bits are shared with the pins for COM2 (!), so if COM2 *
* is enabled, we assume COM2 is wired up, and thus no EDAC *
* is possible. */
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
if (ecc_enabled(dramcr)) {
if (scrub_disabled)
edac_dbg(3, "mci = %p - Scrubbing disabled! EAP: %#0x\n",
mci, eapr);
} else
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = "R82600";
mci->dev_name = pci_name(pdev);
mci->edac_check = r82600_check;
mci->ctl_page_to_phys = NULL;
r82600_init_csrows(mci, pdev, dramcr);
r82600_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
if (disable_hardware_scrub) {
edac_dbg(3, "Disabling Hardware Scrub (scrub on error)\n");
pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
}
/* allocating generic PCI control info */
r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!r82600_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
edac_dbg(3, "success\n");
return 0;
fail:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int r82600_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
/* don't need to call pci_enable_device() */
return r82600_probe1(pdev, ent->driver_data);
}
static void r82600_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
if (r82600_pci)
edac_pci_release_generic_ctl(r82600_pci);
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
edac_mc_free(mci);
}
static const struct pci_device_id r82600_pci_tbl[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
static struct pci_driver r82600_driver = {
.name = EDAC_MOD_STR,
.probe = r82600_init_one,
.remove = r82600_remove_one,
.id_table = r82600_pci_tbl,
};
static int __init r82600_init(void)
{
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
return pci_register_driver(&r82600_driver);
}
static void __exit r82600_exit(void)
{
pci_unregister_driver(&r82600_driver);
}
module_init(r82600_init);
module_exit(r82600_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Small <[email protected]> - WPAD Ltd. on behalf of EADS Astrium");
MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
module_param(disable_hardware_scrub, bool, 0644);
MODULE_PARM_DESC(disable_hardware_scrub,
"If set, disable the chipset's automatic scrub for CEs");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/r82600_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
* EDAC driver for DMC-520 memory controller.
*
* The driver supports 10 interrupt lines,
* though only dram_ecc_errc and dram_ecc_errd are currently handled.
*
* Authors: Rui Zhao <[email protected]>
* Lei Wang <[email protected]>
* Shiping Ji <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "edac_mc.h"
/* DMC-520 registers */
#define REG_OFFSET_FEATURE_CONFIG 0x130
#define REG_OFFSET_ECC_ERRC_COUNT_31_00 0x158
#define REG_OFFSET_ECC_ERRC_COUNT_63_32 0x15C
#define REG_OFFSET_ECC_ERRD_COUNT_31_00 0x160
#define REG_OFFSET_ECC_ERRD_COUNT_63_32 0x164
#define REG_OFFSET_INTERRUPT_CONTROL 0x500
#define REG_OFFSET_INTERRUPT_CLR 0x508
#define REG_OFFSET_INTERRUPT_STATUS 0x510
#define REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_31_00 0x528
#define REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_63_32 0x52C
#define REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_31_00 0x530
#define REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_63_32 0x534
#define REG_OFFSET_ADDRESS_CONTROL_NOW 0x1010
#define REG_OFFSET_MEMORY_TYPE_NOW 0x1128
#define REG_OFFSET_SCRUB_CONTROL0_NOW 0x1170
#define REG_OFFSET_FORMAT_CONTROL 0x18
/* DMC-520 types, masks and bitfields */
#define RAM_ECC_INT_CE_BIT BIT(0)
#define RAM_ECC_INT_UE_BIT BIT(1)
#define DRAM_ECC_INT_CE_BIT BIT(2)
#define DRAM_ECC_INT_UE_BIT BIT(3)
#define FAILED_ACCESS_INT_BIT BIT(4)
#define FAILED_PROG_INT_BIT BIT(5)
#define LINK_ERR_INT_BIT BIT(6)
#define TEMPERATURE_EVENT_INT_BIT BIT(7)
#define ARCH_FSM_INT_BIT BIT(8)
#define PHY_REQUEST_INT_BIT BIT(9)
#define MEMORY_WIDTH_MASK GENMASK(1, 0)
#define SCRUB_TRIGGER0_NEXT_MASK GENMASK(1, 0)
#define REG_FIELD_DRAM_ECC_ENABLED GENMASK(1, 0)
#define REG_FIELD_MEMORY_TYPE GENMASK(2, 0)
#define REG_FIELD_DEVICE_WIDTH GENMASK(9, 8)
#define REG_FIELD_ADDRESS_CONTROL_COL GENMASK(2, 0)
#define REG_FIELD_ADDRESS_CONTROL_ROW GENMASK(10, 8)
#define REG_FIELD_ADDRESS_CONTROL_BANK GENMASK(18, 16)
#define REG_FIELD_ADDRESS_CONTROL_RANK GENMASK(25, 24)
#define REG_FIELD_ERR_INFO_LOW_VALID BIT(0)
#define REG_FIELD_ERR_INFO_LOW_COL GENMASK(10, 1)
#define REG_FIELD_ERR_INFO_LOW_ROW GENMASK(28, 11)
#define REG_FIELD_ERR_INFO_LOW_RANK GENMASK(31, 29)
#define REG_FIELD_ERR_INFO_HIGH_BANK GENMASK(3, 0)
#define REG_FIELD_ERR_INFO_HIGH_VALID BIT(31)
#define DRAM_ADDRESS_CONTROL_MIN_COL_BITS 8
#define DRAM_ADDRESS_CONTROL_MIN_ROW_BITS 11
#define DMC520_SCRUB_TRIGGER_ERR_DETECT 2
#define DMC520_SCRUB_TRIGGER_IDLE 3
/* Driver settings */
/*
* The max-length message would be: "rank:7 bank:15 row:262143 col:1023".
* Max length is 34. Using a 40-size buffer is enough.
*/
#define DMC520_MSG_BUF_SIZE 40
#define EDAC_MOD_NAME "dmc520-edac"
#define EDAC_CTL_NAME "dmc520"
/* the data bus width for the attached memory chips. */
enum dmc520_mem_width {
MEM_WIDTH_X32 = 2,
MEM_WIDTH_X64 = 3
};
/* memory type */
enum dmc520_mem_type {
MEM_TYPE_DDR3 = 1,
MEM_TYPE_DDR4 = 2
};
/* memory device width */
enum dmc520_dev_width {
DEV_WIDTH_X4 = 0,
DEV_WIDTH_X8 = 1,
DEV_WIDTH_X16 = 2
};
struct ecc_error_info {
u32 col;
u32 row;
u32 bank;
u32 rank;
};
/* The interrupt config */
struct dmc520_irq_config {
char *name;
int mask;
};
/* The interrupt mappings */
static struct dmc520_irq_config dmc520_irq_configs[] = {
{
.name = "ram_ecc_errc",
.mask = RAM_ECC_INT_CE_BIT
},
{
.name = "ram_ecc_errd",
.mask = RAM_ECC_INT_UE_BIT
},
{
.name = "dram_ecc_errc",
.mask = DRAM_ECC_INT_CE_BIT
},
{
.name = "dram_ecc_errd",
.mask = DRAM_ECC_INT_UE_BIT
},
{
.name = "failed_access",
.mask = FAILED_ACCESS_INT_BIT
},
{
.name = "failed_prog",
.mask = FAILED_PROG_INT_BIT
},
{
.name = "link_err",
.mask = LINK_ERR_INT_BIT
},
{
.name = "temperature_event",
.mask = TEMPERATURE_EVENT_INT_BIT
},
{
.name = "arch_fsm",
.mask = ARCH_FSM_INT_BIT
},
{
.name = "phy_request",
.mask = PHY_REQUEST_INT_BIT
}
};
#define NUMBER_OF_IRQS ARRAY_SIZE(dmc520_irq_configs)
/*
* The EDAC driver private data.
* error_lock is to protect concurrent writes to the mci->error_desc through
* edac_mc_handle_error().
*/
struct dmc520_edac {
void __iomem *reg_base;
spinlock_t error_lock;
u32 mem_width_in_bytes;
int irqs[NUMBER_OF_IRQS];
int masks[NUMBER_OF_IRQS];
};
static int dmc520_mc_idx;
static u32 dmc520_read_reg(struct dmc520_edac *pvt, u32 offset)
{
return readl(pvt->reg_base + offset);
}
static void dmc520_write_reg(struct dmc520_edac *pvt, u32 val, u32 offset)
{
writel(val, pvt->reg_base + offset);
}
static u32 dmc520_calc_dram_ecc_error(u32 value)
{
u32 total = 0;
/* Each rank's error counter takes one byte. */
while (value > 0) {
total += (value & 0xFF);
value >>= 8;
}
return total;
}
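/*
* Worked example (hypothetical register value, for illustration only):
* a counter reading of 0x00020301 holds per-rank bytes 0x01, 0x03, 0x02
* and 0x00, so dmc520_calc_dram_ecc_error() returns 1 + 3 + 2 + 0 = 6.
*/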
static u32 dmc520_get_dram_ecc_error_count(struct dmc520_edac *pvt,
bool is_ce)
{
u32 reg_offset_low, reg_offset_high;
u32 err_low, err_high;
u32 err_count;
reg_offset_low = is_ce ? REG_OFFSET_ECC_ERRC_COUNT_31_00 :
REG_OFFSET_ECC_ERRD_COUNT_31_00;
reg_offset_high = is_ce ? REG_OFFSET_ECC_ERRC_COUNT_63_32 :
REG_OFFSET_ECC_ERRD_COUNT_63_32;
err_low = dmc520_read_reg(pvt, reg_offset_low);
err_high = dmc520_read_reg(pvt, reg_offset_high);
/* Reset error counters */
dmc520_write_reg(pvt, 0, reg_offset_low);
dmc520_write_reg(pvt, 0, reg_offset_high);
err_count = dmc520_calc_dram_ecc_error(err_low) +
dmc520_calc_dram_ecc_error(err_high);
return err_count;
}
static void dmc520_get_dram_ecc_error_info(struct dmc520_edac *pvt,
bool is_ce,
struct ecc_error_info *info)
{
u32 reg_offset_low, reg_offset_high;
u32 reg_val_low, reg_val_high;
bool valid;
reg_offset_low = is_ce ? REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_31_00 :
REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_31_00;
reg_offset_high = is_ce ? REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_63_32 :
REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_63_32;
reg_val_low = dmc520_read_reg(pvt, reg_offset_low);
reg_val_high = dmc520_read_reg(pvt, reg_offset_high);
valid = (FIELD_GET(REG_FIELD_ERR_INFO_LOW_VALID, reg_val_low) != 0) &&
(FIELD_GET(REG_FIELD_ERR_INFO_HIGH_VALID, reg_val_high) != 0);
if (valid) {
info->col = FIELD_GET(REG_FIELD_ERR_INFO_LOW_COL, reg_val_low);
info->row = FIELD_GET(REG_FIELD_ERR_INFO_LOW_ROW, reg_val_low);
info->rank = FIELD_GET(REG_FIELD_ERR_INFO_LOW_RANK, reg_val_low);
info->bank = FIELD_GET(REG_FIELD_ERR_INFO_HIGH_BANK, reg_val_high);
} else {
memset(info, 0, sizeof(*info));
}
}
static bool dmc520_is_ecc_enabled(void __iomem *reg_base)
{
u32 reg_val = readl(reg_base + REG_OFFSET_FEATURE_CONFIG);
return FIELD_GET(REG_FIELD_DRAM_ECC_ENABLED, reg_val);
}
static enum scrub_type dmc520_get_scrub_type(struct dmc520_edac *pvt)
{
enum scrub_type type = SCRUB_NONE;
u32 reg_val, scrub_cfg;
reg_val = dmc520_read_reg(pvt, REG_OFFSET_SCRUB_CONTROL0_NOW);
scrub_cfg = FIELD_GET(SCRUB_TRIGGER0_NEXT_MASK, reg_val);
if (scrub_cfg == DMC520_SCRUB_TRIGGER_ERR_DETECT ||
scrub_cfg == DMC520_SCRUB_TRIGGER_IDLE)
type = SCRUB_HW_PROG;
return type;
}
/* Get the memory data bus width, in number of bytes. */
static u32 dmc520_get_memory_width(struct dmc520_edac *pvt)
{
enum dmc520_mem_width mem_width_field;
u32 mem_width_in_bytes = 0;
u32 reg_val;
reg_val = dmc520_read_reg(pvt, REG_OFFSET_FORMAT_CONTROL);
mem_width_field = FIELD_GET(MEMORY_WIDTH_MASK, reg_val);
if (mem_width_field == MEM_WIDTH_X32)
mem_width_in_bytes = 4;
else if (mem_width_field == MEM_WIDTH_X64)
mem_width_in_bytes = 8;
return mem_width_in_bytes;
}
static enum mem_type dmc520_get_mtype(struct dmc520_edac *pvt)
{
enum mem_type mt = MEM_UNKNOWN;
enum dmc520_mem_type type;
u32 reg_val;
reg_val = dmc520_read_reg(pvt, REG_OFFSET_MEMORY_TYPE_NOW);
type = FIELD_GET(REG_FIELD_MEMORY_TYPE, reg_val);
switch (type) {
case MEM_TYPE_DDR3:
mt = MEM_DDR3;
break;
case MEM_TYPE_DDR4:
mt = MEM_DDR4;
break;
}
return mt;
}
static enum dev_type dmc520_get_dtype(struct dmc520_edac *pvt)
{
enum dmc520_dev_width device_width;
enum dev_type dt = DEV_UNKNOWN;
u32 reg_val;
reg_val = dmc520_read_reg(pvt, REG_OFFSET_MEMORY_TYPE_NOW);
device_width = FIELD_GET(REG_FIELD_DEVICE_WIDTH, reg_val);
switch (device_width) {
case DEV_WIDTH_X4:
dt = DEV_X4;
break;
case DEV_WIDTH_X8:
dt = DEV_X8;
break;
case DEV_WIDTH_X16:
dt = DEV_X16;
break;
}
return dt;
}
static u32 dmc520_get_rank_count(void __iomem *reg_base)
{
u32 reg_val, rank_bits;
reg_val = readl(reg_base + REG_OFFSET_ADDRESS_CONTROL_NOW);
rank_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_RANK, reg_val);
return BIT(rank_bits);
}
static u64 dmc520_get_rank_size(struct dmc520_edac *pvt)
{
u32 reg_val, col_bits, row_bits, bank_bits;
reg_val = dmc520_read_reg(pvt, REG_OFFSET_ADDRESS_CONTROL_NOW);
col_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_COL, reg_val) +
DRAM_ADDRESS_CONTROL_MIN_COL_BITS;
row_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_ROW, reg_val) +
DRAM_ADDRESS_CONTROL_MIN_ROW_BITS;
bank_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_BANK, reg_val);
return (u64)pvt->mem_width_in_bytes << (col_bits + row_bits + bank_bits);
}
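/*
* Worked example (hypothetical field values, not from the TRM): with a
* 64-bit bus (mem_width_in_bytes = 8), a column field of 2 (8 + 2 = 10
* column bits), a row field of 4 (11 + 4 = 15 row bits) and 3 bank bits,
* dmc520_get_rank_size() returns 8 << (10 + 15 + 3) = 2 GiB per rank.
*/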
static void dmc520_handle_dram_ecc_errors(struct mem_ctl_info *mci,
bool is_ce)
{
struct dmc520_edac *pvt = mci->pvt_info;
char message[DMC520_MSG_BUF_SIZE];
struct ecc_error_info info;
u32 cnt;
dmc520_get_dram_ecc_error_info(pvt, is_ce, &info);
cnt = dmc520_get_dram_ecc_error_count(pvt, is_ce);
if (!cnt)
return;
snprintf(message, ARRAY_SIZE(message),
"rank:%d bank:%d row:%d col:%d",
info.rank, info.bank,
info.row, info.col);
spin_lock(&pvt->error_lock);
edac_mc_handle_error((is_ce ? HW_EVENT_ERR_CORRECTED :
HW_EVENT_ERR_UNCORRECTED),
mci, cnt, 0, 0, 0, info.rank, -1, -1,
message, "");
spin_unlock(&pvt->error_lock);
}
static irqreturn_t dmc520_edac_dram_ecc_isr(int irq, struct mem_ctl_info *mci,
bool is_ce)
{
struct dmc520_edac *pvt = mci->pvt_info;
u32 i_mask;
i_mask = is_ce ? DRAM_ECC_INT_CE_BIT : DRAM_ECC_INT_UE_BIT;
dmc520_handle_dram_ecc_errors(mci, is_ce);
dmc520_write_reg(pvt, i_mask, REG_OFFSET_INTERRUPT_CLR);
return IRQ_HANDLED;
}
static irqreturn_t dmc520_edac_dram_all_isr(int irq, struct mem_ctl_info *mci,
u32 irq_mask)
{
struct dmc520_edac *pvt = mci->pvt_info;
irqreturn_t irq_ret = IRQ_NONE;
u32 status;
status = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_STATUS);
if ((irq_mask & DRAM_ECC_INT_CE_BIT) &&
(status & DRAM_ECC_INT_CE_BIT))
irq_ret = dmc520_edac_dram_ecc_isr(irq, mci, true);
if ((irq_mask & DRAM_ECC_INT_UE_BIT) &&
(status & DRAM_ECC_INT_UE_BIT))
irq_ret = dmc520_edac_dram_ecc_isr(irq, mci, false);
return irq_ret;
}
static irqreturn_t dmc520_isr(int irq, void *data)
{
struct mem_ctl_info *mci = data;
struct dmc520_edac *pvt = mci->pvt_info;
u32 mask = 0;
int idx;
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
if (pvt->irqs[idx] == irq) {
mask = pvt->masks[idx];
break;
}
}
return dmc520_edac_dram_all_isr(irq, mci, mask);
}
static void dmc520_init_csrow(struct mem_ctl_info *mci)
{
struct dmc520_edac *pvt = mci->pvt_info;
struct csrow_info *csi;
struct dimm_info *dimm;
u32 pages_per_rank;
enum dev_type dt;
enum mem_type mt;
int row, ch;
u64 rs;
dt = dmc520_get_dtype(pvt);
mt = dmc520_get_mtype(pvt);
rs = dmc520_get_rank_size(pvt);
pages_per_rank = rs >> PAGE_SHIFT;
for (row = 0; row < mci->nr_csrows; row++) {
csi = mci->csrows[row];
for (ch = 0; ch < csi->nr_channels; ch++) {
dimm = csi->channels[ch]->dimm;
dimm->grain = pvt->mem_width_in_bytes;
dimm->dtype = dt;
dimm->mtype = mt;
dimm->edac_mode = EDAC_SECDED;
dimm->nr_pages = pages_per_rank / csi->nr_channels;
}
}
}
static int dmc520_edac_probe(struct platform_device *pdev)
{
bool registered[NUMBER_OF_IRQS] = { false };
int irqs[NUMBER_OF_IRQS] = { -ENXIO };
int masks[NUMBER_OF_IRQS] = { 0 };
struct edac_mc_layer layers[1];
struct dmc520_edac *pvt = NULL;
struct mem_ctl_info *mci;
void __iomem *reg_base;
u32 irq_mask_all = 0;
struct resource *res;
struct device *dev;
int ret, idx, irq;
u32 reg_val;
/* Parse the device node */
dev = &pdev->dev;
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
irq = platform_get_irq_byname_optional(pdev, dmc520_irq_configs[idx].name);
irqs[idx] = irq;
masks[idx] = dmc520_irq_configs[idx].mask;
if (irq >= 0) {
irq_mask_all |= dmc520_irq_configs[idx].mask;
edac_dbg(0, "Discovered %s, irq: %d.\n", dmc520_irq_configs[idx].name, irq);
}
}
if (!irq_mask_all) {
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"At least one valid interrupt line is expected.\n");
return -EINVAL;
}
/* Initialize dmc520 edac */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
if (!dmc520_is_ecc_enabled(reg_base))
return -ENXIO;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = dmc520_get_rank_count(reg_base);
layers[0].is_virt_csrow = true;
mci = edac_mc_alloc(dmc520_mc_idx++, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"Failed to allocate memory for mc instance\n");
ret = -ENOMEM;
goto err;
}
pvt = mci->pvt_info;
pvt->reg_base = reg_base;
spin_lock_init(&pvt->error_lock);
memcpy(pvt->irqs, irqs, sizeof(irqs));
memcpy(pvt->masks, masks, sizeof(masks));
platform_set_drvdata(pdev, mci);
mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->scrub_cap = SCRUB_FLAG_HW_SRC;
mci->scrub_mode = dmc520_get_scrub_type(pvt);
mci->ctl_name = EDAC_CTL_NAME;
mci->dev_name = dev_name(mci->pdev);
mci->mod_name = EDAC_MOD_NAME;
edac_op_state = EDAC_OPSTATE_INT;
pvt->mem_width_in_bytes = dmc520_get_memory_width(pvt);
dmc520_init_csrow(mci);
/* Clear interrupts, not affecting other unrelated interrupts */
reg_val = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_CONTROL);
dmc520_write_reg(pvt, reg_val & (~irq_mask_all),
REG_OFFSET_INTERRUPT_CONTROL);
dmc520_write_reg(pvt, irq_mask_all, REG_OFFSET_INTERRUPT_CLR);
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
irq = irqs[idx];
if (irq >= 0) {
ret = devm_request_irq(&pdev->dev, irq,
dmc520_isr, IRQF_SHARED,
dev_name(&pdev->dev), mci);
if (ret < 0) {
edac_printk(KERN_ERR, EDAC_MC,
"Failed to request irq %d\n", irq);
goto err;
}
registered[idx] = true;
}
}
/* Reset DRAM CE/UE counters */
if (irq_mask_all & DRAM_ECC_INT_CE_BIT)
dmc520_get_dram_ecc_error_count(pvt, true);
if (irq_mask_all & DRAM_ECC_INT_UE_BIT)
dmc520_get_dram_ecc_error_count(pvt, false);
ret = edac_mc_add_mc(mci);
if (ret) {
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"Failed to register with EDAC core\n");
goto err;
}
/* Enable interrupts, not affecting other unrelated interrupts */
dmc520_write_reg(pvt, reg_val | irq_mask_all,
REG_OFFSET_INTERRUPT_CONTROL);
return 0;
err:
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
if (registered[idx])
devm_free_irq(&pdev->dev, pvt->irqs[idx], mci);
}
if (mci)
edac_mc_free(mci);
return ret;
}
static int dmc520_edac_remove(struct platform_device *pdev)
{
u32 reg_val, idx, irq_mask_all = 0;
struct mem_ctl_info *mci;
struct dmc520_edac *pvt;
mci = platform_get_drvdata(pdev);
pvt = mci->pvt_info;
/* Collect the mask of every interrupt line that was requested */
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
if (pvt->irqs[idx] >= 0)
irq_mask_all |= pvt->masks[idx];
}
/* Disable interrupts */
reg_val = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_CONTROL);
dmc520_write_reg(pvt, reg_val & (~irq_mask_all),
REG_OFFSET_INTERRUPT_CONTROL);
/* free irq's */
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
if (pvt->irqs[idx] >= 0)
devm_free_irq(&pdev->dev, pvt->irqs[idx], mci);
}
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
static const struct of_device_id dmc520_edac_driver_id[] = {
{ .compatible = "arm,dmc-520", },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, dmc520_edac_driver_id);
static struct platform_driver dmc520_edac_driver = {
.driver = {
.name = "dmc520",
.of_match_table = dmc520_edac_driver_id,
},
.probe = dmc520_edac_probe,
.remove = dmc520_edac_remove
};
module_platform_driver(dmc520_edac_driver);
MODULE_AUTHOR("Rui Zhao <[email protected]>");
MODULE_AUTHOR("Lei Wang <[email protected]>");
MODULE_AUTHOR("Shiping Ji <[email protected]>");
MODULE_DESCRIPTION("DMC-520 ECC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/edac/dmc520_edac.c |
/*
* AMD 76x Memory Controller kernel module
* (C) 2003 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Thayne Harbaugh
* Based on work by Dan Hollis <goemon at anime dot net> and others.
* http://www.anime.net/~goemon/linux-ecc/
*
* $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "amd76x_edac"
#define amd76x_printk(level, fmt, arg...) \
edac_printk(level, "amd76x", fmt, ##arg)
#define amd76x_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
#define AMD76X_NR_CSROWS 8
#define AMD76X_NR_DIMMS 4
/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
*
* 31:16 reserved
* 15:14 SERR enabled: x1=ue 1x=ce
* 13 reserved
* 12 diag: disabled, enabled
* 11:10 mode: dis, EC, ECC, ECC+scrub
* 9:8 status: x1=ue 1x=ce
* 7:4 UE cs row
* 3:0 CE cs row
*/
#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
*
* 31:26 clock disable 5 - 0
* 25 SDRAM init
* 24 reserved
* 23 mode register service
* 22:21 suspend to RAM
* 20 burst refresh enable
* 19 refresh disable
* 18 reserved
* 17:16 cycles-per-refresh
* 15:8 reserved
* 7:0 x4 mode enable 7 - 0
*/
#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
*
* 31:23 chip-select base
* 22:16 reserved
* 15:7 chip-select mask
* 6:3 reserved
* 2:1 address mode
* 0 chip-select enable
*/
struct amd76x_error_info {
u32 ecc_mode_status;
};
enum amd76x_chips {
AMD761 = 0,
AMD762
};
struct amd76x_dev_info {
const char *ctl_name;
};
static const struct amd76x_dev_info amd76x_devs[] = {
[AMD761] = {
.ctl_name = "AMD761"},
[AMD762] = {
.ctl_name = "AMD762"},
};
static struct edac_pci_ctl_info *amd76x_pci;
/**
* amd76x_get_error_info - fetch error information
* @mci: Memory controller
* @info: Info to fill in
*
* Fetch and store the AMD76x ECC status. Clear pending status
* on the chip so that further errors will be reported
*/
static void amd76x_get_error_info(struct mem_ctl_info *mci,
struct amd76x_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
&info->ecc_mode_status);
if (info->ecc_mode_status & BIT(8))
pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
(u32) BIT(8), (u32) BIT(8));
if (info->ecc_mode_status & BIT(9))
pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
(u32) BIT(9), (u32) BIT(9));
}
/**
* amd76x_process_error_info - Error check
* @mci: Memory controller
* @info: Previously fetched information from chip
* @handle_errors: 1 if we should do recovery
*
* Process the chip state and decide if an error has occurred.
* A return of 1 indicates an error. Also if handle_errors is true
* then attempt to handle and clean up after the error
*/
static int amd76x_process_error_info(struct mem_ctl_info *mci,
struct amd76x_error_info *info,
int handle_errors)
{
int error_found;
u32 row;
error_found = 0;
/*
* Check for an uncorrectable error
*/
if (info->ecc_mode_status & BIT(8)) {
error_found = 1;
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
mci->csrows[row]->first_page, 0, 0,
row, 0, -1,
mci->ctl_name, "");
}
}
/*
* Check for a correctable error
*/
if (info->ecc_mode_status & BIT(9)) {
error_found = 1;
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
mci->csrows[row]->first_page, 0, 0,
row, 0, -1,
mci->ctl_name, "");
}
}
return error_found;
}
/**
* amd76x_check - Poll the controller
* @mci: Memory controller
*
* Called by the poll handlers this function reads the status
* from the controller and checks for errors.
*/
static void amd76x_check(struct mem_ctl_info *mci)
{
struct amd76x_error_info info;
amd76x_get_error_info(mci, &info);
amd76x_process_error_info(mci, &info, 1);
}
static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
struct dimm_info *dimm;
u32 mba, mba_base, mba_mask, dms;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = mci->csrows[index];
dimm = csrow->channels[0]->dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
if (!(mba & BIT(0)))
continue;
mba_base = mba & 0xff800000UL;
mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
csrow->first_page = mba_base >> PAGE_SHIFT;
dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
csrow->page_mask = mba_mask >> PAGE_SHIFT;
dimm->grain = dimm->nr_pages << PAGE_SHIFT;
dimm->mtype = MEM_RDDR;
dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
dimm->edac_mode = edac_mode;
}
}
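/*
* Illustrative decode of one AMD76X_MEM_BASE_ADDR value (hypothetical,
* assuming 4 KiB pages): mba = 0x00000781 has the enable bit set, a
* chip-select base of 0 and a mask field of 0xf (bits 15:7 = 0x0780), so
* mba_base = 0x00000000, mba_mask = 0x07ffffff and the row covers 128 MiB:
* first_page = 0, nr_pages = 0x8000, last_page = 0x7fff.
*/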
/**
* amd76x_probe1 - Perform set up for detected device
* @pdev: PCI device detected
* @dev_idx: Device type index
*
* We have found an AMD76x and now need to set up the memory
* controller status reporting. We configure and set up the
* memory controller reporting and claim the device.
*/
static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
{
static const enum edac_type ems_modes[] = {
EDAC_NONE,
EDAC_EC,
EDAC_SECDED,
EDAC_SECDED
};
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
u32 ems;
u32 ems_mode;
struct amd76x_error_info discard;
edac_dbg(0, "\n");
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = AMD76X_NR_CSROWS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = 1;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
edac_dbg(0, "mci = %p\n", mci);
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = ems_mode ?
(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = amd76x_check;
mci->ctl_page_to_phys = NULL;
amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
amd76x_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
/* allocating generic PCI control info */
amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!amd76x_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
/* get this far and it's successful */
edac_dbg(3, "success\n");
return 0;
fail:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int amd76x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
/* don't need to call pci_enable_device() */
return amd76x_probe1(pdev, ent->driver_data);
}
/**
* amd76x_remove_one - driver shutdown
* @pdev: PCI device being handed back
*
* Called when the driver is unloaded. Find the matching mci
* structure for the device then delete the mci and free the
* resources.
*/
static void amd76x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
if (amd76x_pci)
edac_pci_release_generic_ctl(amd76x_pci);
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
edac_mc_free(mci);
}
static const struct pci_device_id amd76x_pci_tbl[] = {
{
PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
AMD762},
{
PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
AMD761},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
static struct pci_driver amd76x_driver = {
.name = EDAC_MOD_STR,
.probe = amd76x_init_one,
.remove = amd76x_remove_one,
.id_table = amd76x_pci_tbl,
};
static int __init amd76x_init(void)
{
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
return pci_register_driver(&amd76x_driver);
}
static void __exit amd76x_exit(void)
{
pci_unregister_driver(&amd76x_driver);
}
module_init(amd76x_init);
module_exit(amd76x_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/amd76x_edac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel E3-1200
* Copyright (C) 2014 Jason Baron <[email protected]>
*
* Support for the E3-1200 processor family. Heavily based on previous
* Intel EDAC drivers.
*
* Since the DRAM controller is on the cpu chip, we can use its PCI device
* id to identify these processors.
*
* PCI DRAM controller device ids (Taken from The PCI ID Repository - https://pci-ids.ucw.cz/)
*
* 0108: Xeon E3-1200 Processor Family DRAM Controller
* 010c: Xeon E3-1200/2nd Generation Core Processor Family DRAM Controller
* 0150: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
* 0158: Xeon E3-1200 v2/Ivy Bridge DRAM Controller
* 015c: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
* 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
* 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
* 5918: Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
* 190f: 6th Gen Core Dual-Core Processor Host Bridge/DRAM Registers
* 191f: 6th Gen Core Quad-Core Processor Host Bridge/DRAM Registers
* 3e..: 8th/9th Gen Core Processor Host Bridge/DRAM Registers
*
* Based on Intel specification:
* https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
* https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/desktop-6th-gen-core-family-datasheet-vol-2.pdf
* https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v6-vol-2-datasheet.pdf
* https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html
* https://www.intel.com/content/www/us/en/products/docs/processors/core/8th-gen-core-family-datasheet-vol-2.html
*
* According to the above datasheet (p.16):
* "
* 6. Software must not access B0/D0/F0 32-bit memory-mapped registers with
* requests that cross a DW boundary.
* "
*
* Thus, we make use of the explicit: lo_hi_readq(), which breaks the readq into
* 2 readl() calls. This restriction may be lifted in subsequent chip releases,
* but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
#define ie31200_printk(level, fmt, arg...) \
edac_printk(level, "ie31200", fmt, ##arg)
#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108
#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c
#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150
#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158
#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x190F
#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x1918
#define PCI_DEVICE_ID_INTEL_IE31200_HB_10 0x191F
#define PCI_DEVICE_ID_INTEL_IE31200_HB_11 0x5918
/* Coffee Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK 0x3e00
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_1 0x3e0f
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_2 0x3e18
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_3 0x3e1f
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_4 0x3e30
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_5 0x3e31
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_6 0x3e32
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_7 0x3e33
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_8 0x3ec2
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9 0x3ec6
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca
/* Test if HB is for Skylake or later. */
#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
(((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
(((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
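/*
* For example (illustrative only), a Coffee Lake-S host bridge id such as
* 0x3e1f matches through the mask term, since (0x3e1f & 0x3e00) == 0x3e00,
* so DEVICE_ID_SKYLAKE_OR_LATER() is true for it as well as for the
* explicitly listed Skylake/Kaby Lake ids.
*/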
#define IE31200_DIMMS 4
#define IE31200_RANKS 8
#define IE31200_RANKS_PER_CHANNEL 4
#define IE31200_DIMMS_PER_CHANNEL 2
#define IE31200_CHANNELS 2
/* Intel IE31200 register addresses - device 0 function 0 - DRAM Controller */
#define IE31200_MCHBAR_LOW 0x48
#define IE31200_MCHBAR_HIGH 0x4c
#define IE31200_MCHBAR_MASK GENMASK_ULL(38, 15)
#define IE31200_MMR_WINDOW_SIZE BIT(15)
/*
* Error Status Register (16b)
*
* 15 reserved
* 14 Isochronous TBWRR Run Behind FIFO Full
* (ITCV)
* 13 Isochronous TBWRR Run Behind FIFO Put
* (ITSTV)
* 12 reserved
* 11 MCH Thermal Sensor Event
* for SMI/SCI/SERR (GTSE)
* 10 reserved
* 9 LOCK to non-DRAM Memory Flag (LCKF)
* 8 reserved
* 7 DRAM Throttle Flag (DTF)
* 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
#define IE31200_ERRSTS 0xc8
#define IE31200_ERRSTS_UE BIT(1)
#define IE31200_ERRSTS_CE BIT(0)
#define IE31200_ERRSTS_BITS (IE31200_ERRSTS_UE | IE31200_ERRSTS_CE)
/*
* Channel 0 ECC Error Log (64b)
*
* 63:48 Error Column Address (ERRCOL)
* 47:32 Error Row Address (ERRROW)
* 31:29 Error Bank Address (ERRBANK)
* 28:27 Error Rank Address (ERRRANK)
* 26:24 reserved
* 23:16 Error Syndrome (ERRSYND)
* 15: 2 reserved
* 1 Multiple Bit Error Status (MERRSTS)
* 0 Correctable Error Status (CERRSTS)
*/
#define IE31200_C0ECCERRLOG 0x40c8
#define IE31200_C1ECCERRLOG 0x44c8
#define IE31200_C0ECCERRLOG_SKL 0x4048
#define IE31200_C1ECCERRLOG_SKL 0x4448
#define IE31200_ECCERRLOG_CE BIT(0)
#define IE31200_ECCERRLOG_UE BIT(1)
#define IE31200_ECCERRLOG_RANK_BITS GENMASK_ULL(28, 27)
#define IE31200_ECCERRLOG_RANK_SHIFT 27
#define IE31200_ECCERRLOG_SYNDROME_BITS GENMASK_ULL(23, 16)
#define IE31200_ECCERRLOG_SYNDROME_SHIFT 16
#define IE31200_ECCERRLOG_SYNDROME(log) \
((log & IE31200_ECCERRLOG_SYNDROME_BITS) >> \
IE31200_ECCERRLOG_SYNDROME_SHIFT)
#define IE31200_CAPID0 0xe4
#define IE31200_CAPID0_PDCD BIT(4)
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
#define IE31200_MAD_DIMM_0_OFFSET 0x5004
#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
#define IE31200_MAD_DIMM_A_RANK BIT(17)
#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT 10
#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
#define IE31200_MAD_DIMM_A_WIDTH_SHIFT 19
#define IE31200_MAD_DIMM_A_WIDTH_SKL GENMASK_ULL(9, 8)
#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT 8
/* Skylake reports 1GB increments, everything else is 256MB */
#define IE31200_PAGES(n, skl) \
(n << (28 + (2 * skl) - PAGE_SHIFT))
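/*
* Worked example (assuming 4 KiB pages, PAGE_SHIFT = 12): for a size field
* n, IE31200_PAGES(n, 0) = n << 16 pages = n * 256 MiB, while
* IE31200_PAGES(n, 1) = n << 18 pages = n * 1 GiB, matching the
* per-generation granularities noted above.
*/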
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
struct ie31200_priv {
void __iomem *window;
void __iomem *c0errlog;
void __iomem *c1errlog;
};
enum ie31200_chips {
IE31200 = 0,
};
struct ie31200_dev_info {
const char *ctl_name;
};
struct ie31200_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[IE31200_CHANNELS];
};
static const struct ie31200_dev_info ie31200_devs[] = {
[IE31200] = {
.ctl_name = "IE31200"
},
};
struct dimm_data {
u8 size; /* in multiples of 256MB, except Skylake is 1GB */
u8 dual_rank : 1,
x16_width : 2; /* 0 means x8 width */
};
static int how_many_channels(struct pci_dev *pdev)
{
int n_channels;
unsigned char capid0_2b; /* 2nd byte of CAPID0 */
pci_read_config_byte(pdev, IE31200_CAPID0 + 1, &capid0_2b);
/* check PDCD: Dual Channel Disable */
if (capid0_2b & IE31200_CAPID0_PDCD) {
edac_dbg(0, "In single channel mode\n");
n_channels = 1;
} else {
edac_dbg(0, "In dual channel mode\n");
n_channels = 2;
}
/* check DDPCD - check if both channels are filled */
if (capid0_2b & IE31200_CAPID0_DDPCD)
edac_dbg(0, "2 DIMMS per channel disabled\n");
else
edac_dbg(0, "2 DIMMS per channel enabled\n");
return n_channels;
}
static bool ecc_capable(struct pci_dev *pdev)
{
unsigned char capid0_4b; /* 4th byte of CAPID0 */
pci_read_config_byte(pdev, IE31200_CAPID0 + 3, &capid0_4b);
if (capid0_4b & IE31200_CAPID0_ECC)
return false;
return true;
}
static int eccerrlog_row(u64 log)
{
return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
IE31200_ECCERRLOG_RANK_SHIFT);
}
static void ie31200_clear_error_info(struct mem_ctl_info *mci)
{
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(to_pci_dev(mci->pdev), IE31200_ERRSTS,
IE31200_ERRSTS_BITS, IE31200_ERRSTS_BITS);
}
static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
struct pci_dev *pdev;
struct ie31200_priv *priv = mci->pvt_info;
pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
* registers at once and the registers can transition from CE being
* overwritten by UE.
*/
pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts);
if (!(info->errsts & IE31200_ERRSTS_BITS))
return;
info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
if (nr_channels == 2)
info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts2);
/*
* If the error is the same for both reads then the first set
* of reads is valid. If there is a change then there is a CE
* with no info and the second set of reads is valid and
* should be UE info.
*/
if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
if (nr_channels == 2)
info->eccerrlog[1] =
lo_hi_readq(priv->c1errlog);
}
ie31200_clear_error_info(mci);
}
static void ie31200_process_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
int channel;
u64 log;
if (!(info->errsts & IE31200_ERRSTS_BITS))
return;
if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1, "UE overwrote CE", "");
info->errsts = info->errsts2;
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
if (log & IE31200_ECCERRLOG_UE) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, 0,
eccerrlog_row(log),
channel, -1,
"ie31200 UE", "");
} else if (log & IE31200_ECCERRLOG_CE) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0,
IE31200_ECCERRLOG_SYNDROME(log),
eccerrlog_row(log),
channel, -1,
"ie31200 CE", "");
}
}
}
static void ie31200_check(struct mem_ctl_info *mci)
{
struct ie31200_error_info info;
ie31200_get_and_clear_error_info(mci, &info);
ie31200_process_error_info(mci, &info);
}
static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
{
union {
u64 mchbar;
struct {
u32 mchbar_low;
u32 mchbar_high;
};
} u;
void __iomem *window;
pci_read_config_dword(pdev, IE31200_MCHBAR_LOW, &u.mchbar_low);
pci_read_config_dword(pdev, IE31200_MCHBAR_HIGH, &u.mchbar_high);
u.mchbar &= IE31200_MCHBAR_MASK;
if (u.mchbar != (resource_size_t)u.mchbar) {
ie31200_printk(KERN_ERR, "mmio space beyond accessible range (0x%llx)\n",
(unsigned long long)u.mchbar);
return NULL;
}
window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
return window;
}
static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
int chan)
{
dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
(IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
}
static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
int chan)
{
dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
}
static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
bool skl)
{
if (skl)
__skl_populate_dimm_info(dd, addr_decode, chan);
else
__populate_dimm_info(dd, addr_decode, chan);
}
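/*
* Illustrative decode (hypothetical register value): on Skylake, an
* addr_decode of 0x00000608 for channel 0 yields size = 0x08 (8 GiB at the
* 1 GiB granularity), dual_rank = 1 (bit 10 set) and x16_width = 2
* (bits 9:8 = 0b10); channel 1 uses the same layout shifted up 16 bits.
*/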
static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
{
int i, j, ret;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
void __iomem *window;
struct ie31200_priv *priv;
u32 addr_decode, mad_offset;
/*
* Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
* this logic when adding new CPU support.
*/
bool skl = DEVICE_ID_SKYLAKE_OR_LATER(pdev->device);
edac_dbg(0, "MC:\n");
if (!ecc_capable(pdev)) {
ie31200_printk(KERN_INFO, "No ECC support\n");
return -ENODEV;
}
nr_channels = how_many_channels(pdev);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = IE31200_DIMMS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(struct ie31200_priv));
if (!mci)
return -ENOMEM;
window = ie31200_map_mchbar(pdev);
if (!window) {
ret = -ENODEV;
goto fail_free;
}
edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
if (skl)
mci->mtype_cap = MEM_FLAG_DDR4;
else
mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = ie31200_check;
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
if (skl) {
priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
} else {
priv->c0errlog = window + IE31200_C0ECCERRLOG;
priv->c1errlog = window + IE31200_C1ECCERRLOG;
mad_offset = IE31200_MAD_DIMM_0_OFFSET;
}
/* populate DIMM info */
for (i = 0; i < IE31200_CHANNELS; i++) {
addr_decode = readl(window + mad_offset +
(i * 4));
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
populate_dimm_info(&dimm_info[i][j], addr_decode, j,
skl);
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
dimm_info[i][j].size,
dimm_info[i][j].dual_rank,
dimm_info[i][j].x16_width);
}
}
/*
* Register the DIMMs decoded above with EDAC. The MAD DIMM size
* fields are in 256MB units (1GB on Skylake and later), and a
* dual-rank DIMM has its pages split evenly across two
* chip-select rows.
*/
for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
for (j = 0; j < IE31200_CHANNELS; j++) {
struct dimm_info *dimm;
unsigned long nr_pages;
nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
if (nr_pages == 0)
continue;
if (dimm_info[j][i].dual_rank) {
nr_pages = nr_pages / 2;
dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* just a guess */
if (skl)
dimm->mtype = MEM_DDR4;
else
dimm->mtype = MEM_DDR3;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
dimm = edac_get_dimm(mci, i * 2, j, 0);
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* same guess */
if (skl)
dimm->mtype = MEM_DDR4;
else
dimm->mtype = MEM_DDR3;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
}
ie31200_clear_error_info(mci);
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
ret = -ENODEV;
goto fail_unmap;
}
/* get this far and it's successful */
edac_dbg(3, "MC: success\n");
return 0;
fail_unmap:
iounmap(window);
fail_free:
edac_mc_free(mci);
return ret;
}
static int ie31200_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc;
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = ie31200_probe1(pdev, ent->driver_data);
if (rc == 0 && !mci_pdev)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void ie31200_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct ie31200_priv *priv;
edac_dbg(0, "\n");
pci_dev_put(mci_pdev);
mci_pdev = NULL;
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
priv = mci->pvt_info;
iounmap(priv->window);
edac_mc_free(mci);
}
static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
static struct pci_driver ie31200_driver = {
.name = EDAC_MOD_STR,
.probe = ie31200_init_one,
.remove = ie31200_remove_one,
.id_table = ie31200_pci_tbl,
};
static int __init ie31200_init(void)
{
int pci_rc, i;
edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&ie31200_driver);
if (pci_rc < 0)
goto fail0;
if (!mci_pdev) {
ie31200_registered = 0;
for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) {
mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor,
ie31200_pci_tbl[i].device,
NULL);
if (mci_pdev)
break;
}
if (!mci_pdev) {
edac_dbg(0, "ie31200 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
if (pci_rc < 0) {
edac_dbg(0, "ie31200 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&ie31200_driver);
fail0:
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit ie31200_exit(void)
{
edac_dbg(3, "MC:\n");
pci_unregister_driver(&ie31200_driver);
if (!ie31200_registered)
ie31200_remove_one(mci_pdev);
}
module_init(ie31200_init);
module_exit(ie31200_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jason Baron <[email protected]>");
MODULE_DESCRIPTION("MC support for Intel Processor E31200 memory hub controllers");
| linux-master | drivers/edac/ie31200_edac.c |
/*
* Intel 3000/3010 Memory Controller kernel module
* Copyright (C) 2007 Akamai Technologies, Inc.
* Shamelessly copied from:
* Intel D82875P Memory Controller kernel module
* (C) 2003 Linux Networx (http://lnxi.com)
*
* This file may be distributed under the terms of the
* GNU General Public License.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "i3000_edac"
#define I3000_RANKS 8
#define I3000_RANKS_PER_CHANNEL 4
#define I3000_CHANNELS 2
/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */
#define I3000_MCHBAR 0x44 /* MCH Memory Mapped Register BAR */
#define I3000_MCHBAR_MASK 0xffffc000
#define I3000_MMR_WINDOW_SIZE 16384
#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
*
* 7:1 reserved
* 0 bit 32 of address
*/
#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
*
* 31:7 address
* 6:1 reserved
* 0 Error channel 0/1
*/
#define I3000_DEAP_GRAIN (1 << 7)
/*
* Helper functions to decode the DEAP/EDEAP hardware registers.
*
* The type promotion here is deliberate; we're deriving an
* unsigned long pfn and offset from hardware regs which are u8/u32.
*/
static inline unsigned long deap_pfn(u8 edeap, u32 deap)
{
deap >>= PAGE_SHIFT;
deap |= (edeap & 1) << (32 - PAGE_SHIFT);
return deap;
}
static inline unsigned long deap_offset(u32 deap)
{
return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
}
static inline int deap_channel(u32 deap)
{
return deap & 1;
}
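/*
* Worked example (hypothetical register contents, assuming 4 KiB pages):
* with edeap = 0x01 and deap = 0x00010081, deap_channel() returns 1,
* deap_offset() returns 0x80 (the address rounded down to the 128-byte
* DEAP grain, within the page) and deap_pfn() returns 0x100010, i.e.
* address bit 32 from EDEAP folded in above the 20 low pfn bits.
*/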
#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
*
* 7:0 DRAM ECC Syndrome
*/
#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
*
* 15:12 reserved
* 11 MCH Thermal Sensor Event
* for SMI/SCI/SERR
* 10 reserved
* 9 LOCK to non-DRAM Memory Flag (LCKF)
* 8 Received Refresh Timeout Flag (RRTOF)
* 7:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
#define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */
#define I3000_ERRSTS_UE 0x0002
#define I3000_ERRSTS_CE 0x0001
#define I3000_ERRCMD 0xca /* Error Command (16b)
*
* 15:12 reserved
* 11 SERR on MCH Thermal Sensor Event
* (TSESERR)
* 10 reserved
* 9 SERR on LOCK to non-DRAM Memory
* (LCKERR)
* 8 SERR on DRAM Refresh Timeout
* (DRTOERR)
* 7:2 reserved
* 1 SERR Multi-Bit DRAM ECC Error
* (DMERR)
* 0 SERR on Single-Bit ECC Error
* (DSERR)
*/
/* Intel MMIO register space - device 0 function 0 - MMR space */
#define I3000_DRB_SHIFT 25 /* 32MiB grain */
#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
*
* 7:0 Channel 0 DRAM Rank Boundary Address
*/
#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
*
* 7:0 Channel 1 DRAM Rank Boundary Address
*/
#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
*
* 7 reserved
* 6:4 DRAM odd Rank Attribute
* 3 reserved
* 2:0 DRAM even Rank Attribute
*
* Each attribute defines the page
* size of the corresponding rank:
* 000: unpopulated
* 001: reserved
* 010: 4 KB
* 011: 8 KB
* 100: 16 KB
* Others: reserved
*/
#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
static inline unsigned char odd_rank_attrib(unsigned char dra)
{
return (dra & 0x70) >> 4;
}
static inline unsigned char even_rank_attrib(unsigned char dra)
{
return dra & 0x07;
}
#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
*
* 31:30 reserved
* 29 Initialization Complete (IC)
* 28:11 reserved
* 10:8 Refresh Mode Select (RMS)
* 7 reserved
* 6:4 Mode Select (SMS)
* 3:2 reserved
* 1:0 DRAM Type (DT)
*/
#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
*
* 31 Enhanced Addressing Enable (ENHADE)
* 30:0 reserved
*/
enum i3000p_chips {
I3000 = 0,
};
struct i3000_dev_info {
const char *ctl_name;
};
struct i3000_error_info {
u16 errsts;
u8 derrsyn;
u8 edeap;
u32 deap;
u16 errsts2;
};
static const struct i3000_dev_info i3000_devs[] = {
[I3000] = {
.ctl_name = "i3000"},
};
static struct pci_dev *mci_pdev;
static int i3000_registered = 1;
static struct edac_pci_ctl_info *i3000_pci;
static void i3000_get_error_info(struct mem_ctl_info *mci,
struct i3000_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
* registers at once and the registers can transition from CE being
* overwritten by UE.
*/
pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
if (!(info->errsts & I3000_ERRSTS_BITS))
return;
pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);
/*
* If the error is the same for both reads then the first set
* of reads is valid. If there is a change then there is a CE
* with no info and the second set of reads is valid and
* should be UE info.
*/
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
}
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
I3000_ERRSTS_BITS);
}
static int i3000_process_error_info(struct mem_ctl_info *mci,
struct i3000_error_info *info,
int handle_errors)
{
int row, multi_chan, channel;
unsigned long pfn, offset;
multi_chan = mci->csrows[0]->nr_channels - 1;
if (!(info->errsts & I3000_ERRSTS_BITS))
return 0;
if (!handle_errors)
return 1;
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1,
"UE overwrote CE", "");
info->errsts = info->errsts2;
}
pfn = deap_pfn(info->edeap, info->deap);
offset = deap_offset(info->deap);
channel = deap_channel(info->deap);
row = edac_mc_find_csrow_by_page(mci, pfn);
if (info->errsts & I3000_ERRSTS_UE)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
pfn, offset, 0,
row, -1, -1,
"i3000 UE", "");
else
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
pfn, offset, info->derrsyn,
row, multi_chan ? channel : 0, -1,
"i3000 CE", "");
return 1;
}
static void i3000_check(struct mem_ctl_info *mci)
{
struct i3000_error_info info;
i3000_get_error_info(mci, &info);
i3000_process_error_info(mci, &info, 1);
}
static int i3000_is_interleaved(const unsigned char *c0dra,
const unsigned char *c1dra,
const unsigned char *c0drb,
const unsigned char *c1drb)
{
int i;
/*
* If the channels aren't populated identically then
* we're not interleaved.
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
even_rank_attrib(c0dra[i]) !=
even_rank_attrib(c1dra[i]))
return 0;
/*
* If the rank boundaries for the two channels are different
* then we're not interleaved.
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
if (c0drb[i] != c1drb[i])
return 0;
return 1;
}
static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
int i, j;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
unsigned long last_cumul_size, nr_pages;
int interleaved, nr_channels;
unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
unsigned long mchbar;
void __iomem *window;
edac_dbg(0, "MC:\n");
pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
mchbar &= I3000_MCHBAR_MASK;
window = ioremap(mchbar, I3000_MMR_WINDOW_SIZE);
if (!window) {
printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
mchbar);
return -ENODEV;
}
c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
c1dra[1] = readb(window + I3000_C1DRA + 1); /* ranks 2,3 */
for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
c0drb[i] = readb(window + I3000_C0DRB + i);
c1drb[i] = readb(window + I3000_C1DRB + i);
}
iounmap(window);
/*
* Figure out how many channels we have.
*
* If we have what the datasheet calls "asymmetric channels"
* (essentially the same as what was called "virtual single
* channel mode" in the i82875) then it's a single channel as
* far as EDAC is concerned.
*/
interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
nr_channels = interleaved ? 2 : 1;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = I3000_RANKS / nr_channels;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = i3000_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i3000_check;
mci->ctl_page_to_phys = NULL;
/*
* The dram rank boundary (DRB) reg values are boundary addresses
* for each DRAM rank with a granularity of 32MB. DRB regs are
* cumulative; the last one will contain the total memory
* contained in all ranks.
*
* If we're in interleaved mode then we're only walking through
* the ranks of controller 0, so we double all the values we see.
*/
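/*
* For instance (hypothetical DRB readings, assuming 4 KiB pages): DRB
* values of 0x10, 0x20 decode as cumulative boundaries of 512 MiB and
* 1 GiB, giving csrow 0 pages 0x00000-0x1ffff and csrow 1 pages
* 0x20000-0x3ffff; in interleaved mode each boundary is doubled first.
*/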
for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
u8 value;
u32 cumul_size;
struct csrow_info *csrow = mci->csrows[i];
value = drb[i];
cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
if (interleaved)
cumul_size <<= 1;
edac_dbg(3, "MC: (%d) cumul_size 0x%x\n", i, cumul_size);
if (cumul_size == last_cumul_size)
continue;
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
for (j = 0; j < nr_channels; j++) {
struct dimm_info *dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / nr_channels;
dimm->grain = I3000_DEAP_GRAIN;
dimm->mtype = MEM_DDR2;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
}
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
I3000_ERRSTS_BITS);
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
/* allocating generic PCI control info */
i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i3000_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
/* get this far and it's successful */
edac_dbg(3, "MC: success\n");
return 0;
fail:
if (mci)
edac_mc_free(mci);
return rc;
}
/* returns count (>= 0), or negative on error */
static int i3000_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = i3000_probe1(pdev, ent->driver_data);
if (!mci_pdev)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void i3000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
edac_dbg(0, "\n");
if (i3000_pci)
edac_pci_release_generic_ctl(i3000_pci);
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
edac_mc_free(mci);
}
static const struct pci_device_id i3000_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I3000},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
static struct pci_driver i3000_driver = {
.name = EDAC_MOD_STR,
.probe = i3000_init_one,
.remove = i3000_remove_one,
.id_table = i3000_pci_tbl,
};
static int __init i3000_init(void)
{
int pci_rc;
edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i3000_driver);
if (pci_rc < 0)
goto fail0;
if (!mci_pdev) {
i3000_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3000_HB, NULL);
if (!mci_pdev) {
edac_dbg(0, "i3000 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
if (pci_rc < 0) {
edac_dbg(0, "i3000 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&i3000_driver);
fail0:
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit i3000_exit(void)
{
edac_dbg(3, "MC:\n");
pci_unregister_driver(&i3000_driver);
if (!i3000_registered) {
i3000_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
}
}
module_init(i3000_init);
module_exit(i3000_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i3000_edac.c |
/*
* edac_mc kernel module
* (C) 2005-2007 Linux Networx (http://lnxi.com)
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Doug Thompson <[email protected]> www.softwarebitmaker.com
*
* (c) 2012-2013 - Mauro Carvalho Chehab
* The entire API was re-written and ported to use struct device
*
*/
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/bug.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include "edac_mc.h"
#include "edac_module.h"
/* MC EDAC controls, settable by module parameter and sysfs */
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
static unsigned int edac_mc_poll_msec = 1000;
/* Getter functions for above */
int edac_mc_get_log_ue(void)
{
return edac_mc_log_ue;
}
int edac_mc_get_log_ce(void)
{
return edac_mc_log_ce;
}
int edac_mc_get_panic_on_ue(void)
{
return edac_mc_panic_on_ue;
}
/* this is temporary */
unsigned int edac_mc_get_poll_msec(void)
{
return edac_mc_poll_msec;
}
static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
{
unsigned int i;
int ret;
if (!val)
return -EINVAL;
ret = kstrtouint(val, 0, &i);
if (ret)
return ret;
if (i < 1000)
return -EINVAL;
*((unsigned int *)kp->arg) = i;
/* notify edac_mc engine to reset the poll period */
edac_mc_reset_delay_period(i);
return 0;
}
/* Parameter declarations for above */
module_param(edac_mc_panic_on_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
module_param(edac_mc_log_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ue,
"Log uncorrectable error to console: 0=off 1=on");
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
"Log correctable error to console: 0=off 1=on");
module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
&edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
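/*
* Illustrative usage (the path assumes the usual edac_core module name
* and may differ on a given system): the poll period can be changed at
* run time, but values below 1000 ms are rejected by
* edac_set_poll_msec().
*
*   echo 5000 > /sys/module/edac_core/parameters/edac_mc_poll_msec
*
* or set once on the kernel command line:
*
*   edac_core.edac_mc_poll_msec=5000
*/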
static struct device *mci_pdev;
/*
* various constants for Memory Controllers
*/
static const char * const dev_types[] = {
[DEV_UNKNOWN] = "Unknown",
[DEV_X1] = "x1",
[DEV_X2] = "x2",
[DEV_X4] = "x4",
[DEV_X8] = "x8",
[DEV_X16] = "x16",
[DEV_X32] = "x32",
[DEV_X64] = "x64"
};
static const char * const edac_caps[] = {
[EDAC_UNKNOWN] = "Unknown",
[EDAC_NONE] = "None",
[EDAC_RESERVED] = "Reserved",
[EDAC_PARITY] = "PARITY",
[EDAC_EC] = "EC",
[EDAC_SECDED] = "SECDED",
[EDAC_S2ECD2ED] = "S2ECD2ED",
[EDAC_S4ECD4ED] = "S4ECD4ED",
[EDAC_S8ECD8ED] = "S8ECD8ED",
[EDAC_S16ECD16ED] = "S16ECD16ED"
};
#ifdef CONFIG_EDAC_LEGACY_SYSFS
/*
* EDAC sysfs CSROW data structures and methods
*/
#define to_csrow(k) container_of(k, struct csrow_info, dev)
/*
* We need it to avoid namespace conflicts between the legacy API
* and the per-dimm/per-rank one
*/
#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
struct dev_ch_attribute {
struct device_attribute attr;
unsigned int channel;
};
#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
static struct dev_ch_attribute dev_attr_legacy_##_name = \
{ __ATTR(_name, _mode, _show, _store), (_var) }
#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
/* Set of more default csrow<id> attribute show/store functions */
static ssize_t csrow_ue_count_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
return sprintf(data, "%u\n", csrow->ue_count);
}
static ssize_t csrow_ce_count_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
return sprintf(data, "%u\n", csrow->ce_count);
}
static ssize_t csrow_size_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
int i;
u32 nr_pages = 0;
for (i = 0; i < csrow->nr_channels; i++)
nr_pages += csrow->channels[i]->dimm->nr_pages;
return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}
static ssize_t csrow_mem_type_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
return sprintf(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
}
static ssize_t csrow_dev_type_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}
static ssize_t csrow_edac_mode_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct csrow_info *csrow = to_csrow(dev);
return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
static ssize_t channel_dimm_label_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct csrow_info *csrow = to_csrow(dev);
unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
/* if field has not been initialized, there is nothing to send */
if (!rank->dimm->label[0])
return 0;
return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
rank->dimm->label);
}
static ssize_t channel_dimm_label_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct csrow_info *csrow = to_csrow(dev);
unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
size_t copy_count = count;
if (count == 0)
return -EINVAL;
if (data[count - 1] == '\0' || data[count - 1] == '\n')
copy_count -= 1;
if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
return -EINVAL;
strncpy(rank->dimm->label, data, copy_count);
rank->dimm->label[copy_count] = '\0';
return count;
}
/* show function for dynamic chX_ce_count attribute */
static ssize_t channel_ce_count_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
return sprintf(data, "%u\n", rank->ce_count);
}
/* csrow<id> attribute files */
DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
/* default attributes of the CSROW<id> object */
static struct attribute *csrow_attrs[] = {
&dev_attr_legacy_dev_type.attr,
&dev_attr_legacy_mem_type.attr,
&dev_attr_legacy_edac_mode.attr,
&dev_attr_legacy_size_mb.attr,
&dev_attr_legacy_ue_count.attr,
&dev_attr_legacy_ce_count.attr,
NULL,
};
static const struct attribute_group csrow_attr_grp = {
.attrs = csrow_attrs,
};
static const struct attribute_group *csrow_attr_groups[] = {
&csrow_attr_grp,
NULL
};
static const struct device_type csrow_attr_type = {
.groups = csrow_attr_groups,
};
/*
* possible dynamic channel DIMM Label attribute files
*
*/
DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 1);
DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 2);
DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 3);
DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 5);
DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 6);
DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 7);
DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 8);
DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 9);
DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 10);
DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 11);
/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
&dev_attr_legacy_ch0_dimm_label.attr.attr,
&dev_attr_legacy_ch1_dimm_label.attr.attr,
&dev_attr_legacy_ch2_dimm_label.attr.attr,
&dev_attr_legacy_ch3_dimm_label.attr.attr,
&dev_attr_legacy_ch4_dimm_label.attr.attr,
&dev_attr_legacy_ch5_dimm_label.attr.attr,
&dev_attr_legacy_ch6_dimm_label.attr.attr,
&dev_attr_legacy_ch7_dimm_label.attr.attr,
&dev_attr_legacy_ch8_dimm_label.attr.attr,
&dev_attr_legacy_ch9_dimm_label.attr.attr,
&dev_attr_legacy_ch10_dimm_label.attr.attr,
&dev_attr_legacy_ch11_dimm_label.attr.attr,
NULL
};
/* possible dynamic channel ce_count attribute files */
DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 0);
DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 1);
DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 2);
DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 3);
DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 5);
DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 6);
DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 7);
DEVICE_CHANNEL(ch8_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 8);
DEVICE_CHANNEL(ch9_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 9);
DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 10);
DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 11);
/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
&dev_attr_legacy_ch0_ce_count.attr.attr,
&dev_attr_legacy_ch1_ce_count.attr.attr,
&dev_attr_legacy_ch2_ce_count.attr.attr,
&dev_attr_legacy_ch3_ce_count.attr.attr,
&dev_attr_legacy_ch4_ce_count.attr.attr,
&dev_attr_legacy_ch5_ce_count.attr.attr,
&dev_attr_legacy_ch6_ce_count.attr.attr,
&dev_attr_legacy_ch7_ce_count.attr.attr,
&dev_attr_legacy_ch8_ce_count.attr.attr,
&dev_attr_legacy_ch9_ce_count.attr.attr,
&dev_attr_legacy_ch10_ce_count.attr.attr,
&dev_attr_legacy_ch11_ce_count.attr.attr,
NULL
};
static umode_t csrow_dev_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
if (idx >= csrow->nr_channels)
return 0;
if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
WARN_ONCE(1, "idx: %d\n", idx);
return 0;
}
/* Only expose populated DIMMs */
if (!csrow->channels[idx]->dimm->nr_pages)
return 0;
return attr->mode;
}
static const struct attribute_group csrow_dev_dimm_group = {
.attrs = dynamic_csrow_dimm_attr,
.is_visible = csrow_dev_is_visible,
};
static const struct attribute_group csrow_dev_ce_count_group = {
.attrs = dynamic_csrow_ce_count_attr,
.is_visible = csrow_dev_is_visible,
};
static const struct attribute_group *csrow_dev_groups[] = {
&csrow_dev_dimm_group,
&csrow_dev_ce_count_group,
NULL
};
static void csrow_release(struct device *dev)
{
/*
* Nothing to do, just unregister sysfs here. The mci
* device owns the data and will also release it.
*/
}
static inline int nr_pages_per_csrow(struct csrow_info *csrow)
{
int chan, nr_pages = 0;
for (chan = 0; chan < csrow->nr_channels; chan++)
nr_pages += csrow->channels[chan]->dimm->nr_pages;
return nr_pages;
}
/* Create a CSROW object under the specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
int err;
csrow->dev.type = &csrow_attr_type;
csrow->dev.groups = csrow_dev_groups;
csrow->dev.release = csrow_release;
device_initialize(&csrow->dev);
csrow->dev.parent = &mci->dev;
csrow->mci = mci;
dev_set_name(&csrow->dev, "csrow%d", index);
dev_set_drvdata(&csrow->dev, csrow);
err = device_add(&csrow->dev);
if (err) {
edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
put_device(&csrow->dev);
return err;
}
edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));
return 0;
}
/* Create the CSROW objects under the specified edac_mc_device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
int err, i;
struct csrow_info *csrow;
for (i = 0; i < mci->nr_csrows; i++) {
csrow = mci->csrows[i];
if (!nr_pages_per_csrow(csrow))
continue;
err = edac_create_csrow_object(mci, mci->csrows[i], i);
if (err < 0)
goto error;
}
return 0;
error:
for (--i; i >= 0; i--) {
if (device_is_registered(&mci->csrows[i]->dev))
device_unregister(&mci->csrows[i]->dev);
}
return err;
}
static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
{
int i;
for (i = 0; i < mci->nr_csrows; i++) {
if (device_is_registered(&mci->csrows[i]->dev))
device_unregister(&mci->csrows[i]->dev);
}
}
#endif
/*
* Per-dimm (or per-rank) devices
*/
#define to_dimm(k) container_of(k, struct dimm_info, dev)
/* show/store functions for DIMM Label attributes */
static ssize_t dimmdev_location_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct dimm_info *dimm = to_dimm(dev);
ssize_t count;
count = edac_dimm_info_location(dimm, data, PAGE_SIZE);
count += scnprintf(data + count, PAGE_SIZE - count, "\n");
return count;
}
static ssize_t dimmdev_label_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct dimm_info *dimm = to_dimm(dev);
/* if field has not been initialized, there is nothing to send */
if (!dimm->label[0])
return 0;
return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
}
static ssize_t dimmdev_label_store(struct device *dev,
struct device_attribute *mattr,
const char *data,
size_t count)
{
struct dimm_info *dimm = to_dimm(dev);
size_t copy_count = count;
if (count == 0)
return -EINVAL;
if (data[count - 1] == '\0' || data[count - 1] == '\n')
copy_count -= 1;
if (copy_count == 0 || copy_count >= sizeof(dimm->label))
return -EINVAL;
strncpy(dimm->label, data, copy_count);
dimm->label[copy_count] = '\0';
return count;
}
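/*
* Illustrative usage (the exact mc/dimm numbers depend on the system):
* a trailing newline from echo is stripped, and the label must fit in
* dimm->label, otherwise -EINVAL is returned.
*
*   echo "CPU0_DIMM_A1" > /sys/devices/system/edac/mc/mc0/dimm0/dimm_label
*/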
static ssize_t dimmdev_size_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct dimm_info *dimm = to_dimm(dev);
return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}
static ssize_t dimmdev_mem_type_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct dimm_info *dimm = to_dimm(dev);
return sprintf(data, "%s\n", edac_mem_types[dimm->mtype]);
}
static ssize_t dimmdev_dev_type_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct dimm_info *dimm = to_dimm(dev);
return sprintf(data, "%s\n", dev_types[dimm->dtype]);
}
static ssize_t dimmdev_edac_mode_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct dimm_info *dimm = to_dimm(dev);
return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
}
static ssize_t dimmdev_ce_count_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct dimm_info *dimm = to_dimm(dev);
return sprintf(data, "%u\n", dimm->ce_count);
}
static ssize_t dimmdev_ue_count_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct dimm_info *dimm = to_dimm(dev);
return sprintf(data, "%u\n", dimm->ue_count);
}
/* dimm/rank attribute files */
static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
dimmdev_label_show, dimmdev_label_store);
static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
static DEVICE_ATTR(dimm_ce_count, S_IRUGO, dimmdev_ce_count_show, NULL);
static DEVICE_ATTR(dimm_ue_count, S_IRUGO, dimmdev_ue_count_show, NULL);
/* attributes of the dimm<id>/rank<id> object */
static struct attribute *dimm_attrs[] = {
&dev_attr_dimm_label.attr,
&dev_attr_dimm_location.attr,
&dev_attr_size.attr,
&dev_attr_dimm_mem_type.attr,
&dev_attr_dimm_dev_type.attr,
&dev_attr_dimm_edac_mode.attr,
&dev_attr_dimm_ce_count.attr,
&dev_attr_dimm_ue_count.attr,
NULL,
};
static const struct attribute_group dimm_attr_grp = {
.attrs = dimm_attrs,
};
static const struct attribute_group *dimm_attr_groups[] = {
&dimm_attr_grp,
NULL
};
static const struct device_type dimm_attr_type = {
.groups = dimm_attr_groups,
};
static void dimm_release(struct device *dev)
{
/*
* Nothing to do, just unregister sysfs here. The mci
* device owns the data and will also release it.
*/
}
/* Create a DIMM object under the specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
struct dimm_info *dimm)
{
int err;
dimm->mci = mci;
dimm->dev.type = &dimm_attr_type;
dimm->dev.release = dimm_release;
device_initialize(&dimm->dev);
dimm->dev.parent = &mci->dev;
if (mci->csbased)
dev_set_name(&dimm->dev, "rank%d", dimm->idx);
else
dev_set_name(&dimm->dev, "dimm%d", dimm->idx);
dev_set_drvdata(&dimm->dev, dimm);
pm_runtime_forbid(&mci->dev);
err = device_add(&dimm->dev);
if (err) {
edac_dbg(1, "failure: create device %s\n", dev_name(&dimm->dev));
put_device(&dimm->dev);
return err;
}
if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
char location[80];
edac_dimm_info_location(dimm, location, sizeof(location));
edac_dbg(0, "device %s created at location %s\n",
dev_name(&dimm->dev), location);
}
return 0;
}
/*
* Memory controller device
*/
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
static ssize_t mci_reset_counters_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct dimm_info *dimm;
int row, chan;
mci->ue_mc = 0;
mci->ce_mc = 0;
mci->ue_noinfo_count = 0;
mci->ce_noinfo_count = 0;
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *ri = mci->csrows[row];
ri->ue_count = 0;
ri->ce_count = 0;
for (chan = 0; chan < ri->nr_channels; chan++)
ri->channels[chan]->ce_count = 0;
}
mci_for_each_dimm(mci, dimm) {
dimm->ue_count = 0;
dimm->ce_count = 0;
}
mci->start_time = jiffies;
return count;
}
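/*
* Illustrative usage: reset_counters is write-only and any write resets
* the counters, e.g. (the mc number depends on the system):
*
*   echo 1 > /sys/devices/system/edac/mc/mc0/reset_counters
*/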
/* Memory scrubbing interface:
*
* An MC driver can limit the scrubbing bandwidth based on the CPU type.
* Therefore, ->set_sdram_scrub_rate should return the bandwidth that was
* actually accepted, or 0 when scrubbing is disabled.
*
* A negative value still means that an error occurred while setting
* the scrub rate.
*/
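/*
* Minimal driver-side sketch of the contract described above. This is
* not a real controller: EXAMPLE_MAX_SCRUB_BW and the "program the
* hardware" step are made-up placeholders, and the block is kept out of
* the build.
*/
#if 0	/* illustrative only, never built */
#define EXAMPLE_MAX_SCRUB_BW 3000	/* hypothetical hardware limit */
static int example_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
if (bw > EXAMPLE_MAX_SCRUB_BW)
bw = EXAMPLE_MAX_SCRUB_BW;	/* clamp to what the hardware accepts */
/* ... program the controller's scrub rate register here ... */
return bw;	/* bandwidth actually accepted; 0 means disabled */
}
#endif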
static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
unsigned long bandwidth = 0;
int new_bw = 0;
if (kstrtoul(data, 10, &bandwidth) < 0)
return -EINVAL;
new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
if (new_bw < 0) {
edac_printk(KERN_WARNING, EDAC_MC,
"Error setting scrub rate to: %lu\n", bandwidth);
return -EINVAL;
}
return count;
}
/*
* ->get_sdram_scrub_rate() return value semantics same as above.
*/
static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
int bandwidth = 0;
bandwidth = mci->get_sdram_scrub_rate(mci);
if (bandwidth < 0) {
edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
return bandwidth;
}
return sprintf(data, "%d\n", bandwidth);
}
/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
return sprintf(data, "%u\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
return sprintf(data, "%u\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
return sprintf(data, "%u\n", mci->ce_noinfo_count);
}
static ssize_t mci_ue_noinfo_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
return sprintf(data, "%u\n", mci->ue_noinfo_count);
}
static ssize_t mci_seconds_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}
static ssize_t mci_ctl_name_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
return sprintf(data, "%s\n", mci->ctl_name);
}
static ssize_t mci_size_mb_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
int total_pages = 0, csrow_idx, j;
for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
struct csrow_info *csrow = mci->csrows[csrow_idx];
for (j = 0; j < csrow->nr_channels; j++) {
struct dimm_info *dimm = csrow->channels[j]->dimm;
total_pages += dimm->nr_pages;
}
}
return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
}
static ssize_t mci_max_location_show(struct device *dev,
struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
int len = PAGE_SIZE;
char *p = data;
int i, n;
for (i = 0; i < mci->n_layers; i++) {
n = scnprintf(p, len, "%s %d ",
edac_layer_name[mci->layers[i].type],
mci->layers[i].size - 1);
len -= n;
if (len <= 0)
goto out;
p += n;
}
p += scnprintf(p, len, "\n");
out:
return p - data;
}
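/*
* Example output (illustrative): for a controller registered with a
* csrow layer of size 8 and a channel layer of size 2, max_location
* reads back as "csrow 7 channel 1", i.e. the highest index of each
* layer.
*/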
/* default Control file */
static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
/* default Attribute files */
static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
/* memory scrubber attribute file */
static DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
mci_sdram_scrub_rate_store); /* umode set later in is_visible */
static struct attribute *mci_attrs[] = {
&dev_attr_reset_counters.attr,
&dev_attr_mc_name.attr,
&dev_attr_size_mb.attr,
&dev_attr_seconds_since_reset.attr,
&dev_attr_ue_noinfo_count.attr,
&dev_attr_ce_noinfo_count.attr,
&dev_attr_ue_count.attr,
&dev_attr_ce_count.attr,
&dev_attr_max_location.attr,
&dev_attr_sdram_scrub_rate.attr,
NULL
};
static umode_t mci_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct mem_ctl_info *mci = to_mci(dev);
umode_t mode = 0;
if (attr != &dev_attr_sdram_scrub_rate.attr)
return attr->mode;
if (mci->get_sdram_scrub_rate)
mode |= S_IRUGO;
if (mci->set_sdram_scrub_rate)
mode |= S_IWUSR;
return mode;
}
static const struct attribute_group mci_attr_grp = {
.attrs = mci_attrs,
.is_visible = mci_attr_is_visible,
};
static const struct attribute_group *mci_attr_groups[] = {
&mci_attr_grp,
NULL
};
static const struct device_type mci_attr_type = {
.groups = mci_attr_groups,
};
/*
* Create a new Memory Controller kobject instance,
* mc<id> under the 'mc' directory
*
* Return:
* 0 Success
* !0 Failure
*/
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups)
{
struct dimm_info *dimm;
int err;
/* get the /sys/devices/system/edac subsys reference */
mci->dev.type = &mci_attr_type;
mci->dev.parent = mci_pdev;
mci->dev.groups = groups;
dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
dev_set_drvdata(&mci->dev, mci);
pm_runtime_forbid(&mci->dev);
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
/* no put_device() here, free mci with _edac_mc_free() */
return err;
}
edac_dbg(0, "device %s created\n", dev_name(&mci->dev));
/*
* Create the dimm/rank devices
*/
mci_for_each_dimm(mci, dimm) {
/* Only expose populated DIMMs */
if (!dimm->nr_pages)
continue;
err = edac_create_dimm_object(mci, dimm);
if (err)
goto fail;
}
#ifdef CONFIG_EDAC_LEGACY_SYSFS
err = edac_create_csrow_objects(mci);
if (err < 0)
goto fail;
#endif
edac_create_debugfs_nodes(mci);
return 0;
fail:
edac_remove_sysfs_mci_device(mci);
return err;
}
/*
* remove a Memory Controller instance
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
struct dimm_info *dimm;
if (!device_is_registered(&mci->dev))
return;
edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
edac_debugfs_remove_recursive(mci->debugfs);
#endif
#ifdef CONFIG_EDAC_LEGACY_SYSFS
edac_delete_csrow_objects(mci);
#endif
mci_for_each_dimm(mci, dimm) {
if (!device_is_registered(&dimm->dev))
continue;
edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
device_unregister(&dimm->dev);
}
/* only remove the device, but keep mci */
device_del(&mci->dev);
}
static void mc_attr_release(struct device *dev)
{
/*
* There's no container structure here, as this is just the mci
* parent device, used to create the /sys/devices/system/edac/mc sysfs node.
* So, there are no attributes on it.
*/
edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(dev);
}
/*
* Init/exit code for the module. Basically, creates/removes the 'mc'
* parent device under /sys/devices/system/edac.
*/
int __init edac_mc_sysfs_init(void)
{
int err;
mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
if (!mci_pdev)
return -ENOMEM;
mci_pdev->bus = edac_get_sysfs_subsys();
mci_pdev->release = mc_attr_release;
mci_pdev->init_name = "mc";
err = device_register(mci_pdev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev));
put_device(mci_pdev);
return err;
}
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
}
void edac_mc_sysfs_exit(void)
{
device_unregister(mci_pdev);
}
| linux-master | drivers/edac/edac_mc_sysfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module
*
* Copyright (c) 2008 Wind River Systems, Inc.
*
* Authors: Cao Qingtao <[email protected]>
* Benjamin Walsh <[email protected]>
* Hu Yongqi <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/edac.h>
#include <linux/pci_ids.h>
#include "edac_module.h"
#include "amd8131_edac.h"
#define AMD8131_EDAC_REVISION " Ver: 1.0.0"
#define AMD8131_EDAC_MOD_STR "amd8131_edac"
/* Wrapper functions for accessing PCI configuration space */
static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
{
int ret;
ret = pci_read_config_dword(dev, reg, val32);
if (ret != 0)
printk(KERN_ERR AMD8131_EDAC_MOD_STR
" PCI Access Read Error at 0x%x\n", reg);
}
static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
{
int ret;
ret = pci_write_config_dword(dev, reg, val32);
if (ret != 0)
printk(KERN_ERR AMD8131_EDAC_MOD_STR
" PCI Access Write Error at 0x%x\n", reg);
}
/* Support up to two AMD8131 chipsets on a platform */
static struct amd8131_dev_info amd8131_devices[] = {
{
.inst = NORTH_A,
.devfn = DEVFN_PCIX_BRIDGE_NORTH_A,
.ctl_name = "AMD8131_PCIX_NORTH_A",
},
{
.inst = NORTH_B,
.devfn = DEVFN_PCIX_BRIDGE_NORTH_B,
.ctl_name = "AMD8131_PCIX_NORTH_B",
},
{
.inst = SOUTH_A,
.devfn = DEVFN_PCIX_BRIDGE_SOUTH_A,
.ctl_name = "AMD8131_PCIX_SOUTH_A",
},
{
.inst = SOUTH_B,
.devfn = DEVFN_PCIX_BRIDGE_SOUTH_B,
.ctl_name = "AMD8131_PCIX_SOUTH_B",
},
{.inst = NO_BRIDGE,},
};
static void amd8131_pcix_init(struct amd8131_dev_info *dev_info)
{
u32 val32;
struct pci_dev *dev = dev_info->dev;
/* First clear error detection flags */
edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
if (val32 & MEM_LIMIT_MASK)
edac_pci_write_dword(dev, REG_MEM_LIM, val32);
/* Clear Discard Timer Timedout flag */
edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
if (val32 & INT_CTLR_DTS)
edac_pci_write_dword(dev, REG_INT_CTLR, val32);
/* Clear CRC Error flag on link side A */
edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
if (val32 & LNK_CTRL_CRCERR_A)
edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
/* Clear CRC Error flag on link side B */
edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
if (val32 & LNK_CTRL_CRCERR_B)
edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
/*
* Then enable all error detections.
*
* Setup Discard Timer Sync Flood Enable,
* System Error Enable and Parity Error Enable.
*/
edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE;
edac_pci_write_dword(dev, REG_INT_CTLR, val32);
/* Enable overall SERR Error detection */
edac_pci_read_dword(dev, REG_STS_CMD, &val32);
val32 |= STS_CMD_SERREN;
edac_pci_write_dword(dev, REG_STS_CMD, val32);
/* Setup CRC Flood Enable for link side A */
edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
val32 |= LNK_CTRL_CRCFEN;
edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
/* Setup CRC Flood Enable for link side B */
edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
val32 |= LNK_CTRL_CRCFEN;
edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
}
static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info)
{
u32 val32;
struct pci_dev *dev = dev_info->dev;
/* Disable SERR, PERR and DTSE Error detection */
edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE);
edac_pci_write_dword(dev, REG_INT_CTLR, val32);
/* Disable overall System Error detection */
edac_pci_read_dword(dev, REG_STS_CMD, &val32);
val32 &= ~STS_CMD_SERREN;
edac_pci_write_dword(dev, REG_STS_CMD, val32);
/* Disable CRC Sync Flood on link side A */
edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
val32 &= ~LNK_CTRL_CRCFEN;
edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
/* Disable CRC Sync Flood on link side B */
edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
val32 &= ~LNK_CTRL_CRCFEN;
edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
}
static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev)
{
struct amd8131_dev_info *dev_info = edac_dev->pvt_info;
struct pci_dev *dev = dev_info->dev;
u32 val32;
/* Check PCI-X Bridge Memory Base-Limit Register for errors */
edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
if (val32 & MEM_LIMIT_MASK) {
printk(KERN_INFO "Error(s) in mem limit register "
"on %s bridge\n", dev_info->ctl_name);
printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
"RTA: %d, STA: %d, MDPE: %d\n",
val32 & MEM_LIMIT_DPE,
val32 & MEM_LIMIT_RSE,
val32 & MEM_LIMIT_RMA,
val32 & MEM_LIMIT_RTA,
val32 & MEM_LIMIT_STA,
val32 & MEM_LIMIT_MDPE);
val32 |= MEM_LIMIT_MASK;
edac_pci_write_dword(dev, REG_MEM_LIM, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check if Discard Timer timed out */
edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
if (val32 & INT_CTLR_DTS) {
printk(KERN_INFO "Error(s) in interrupt and control register "
"on %s bridge\n", dev_info->ctl_name);
printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS);
val32 |= INT_CTLR_DTS;
edac_pci_write_dword(dev, REG_INT_CTLR, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check if CRC error happens on link side A */
edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
if (val32 & LNK_CTRL_CRCERR_A) {
printk(KERN_INFO "Error(s) in link conf and control register "
"on %s bridge\n", dev_info->ctl_name);
printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A);
val32 |= LNK_CTRL_CRCERR_A;
edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check if CRC error happens on link side B */
edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
if (val32 & LNK_CTRL_CRCERR_B) {
printk(KERN_INFO "Error(s) in link conf and control register "
"on %s bridge\n", dev_info->ctl_name);
printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B);
val32 |= LNK_CTRL_CRCERR_B;
edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
}
static struct amd8131_info amd8131_chipset = {
.err_dev = PCI_DEVICE_ID_AMD_8131_APIC,
.devices = amd8131_devices,
.init = amd8131_pcix_init,
.exit = amd8131_pcix_exit,
.check = amd8131_pcix_check,
};
/*
* There are 4 PCI-X bridges on the ATCA-6101 that share the same PCI
* device ID, so the kernel calls amd8131_probe() 4 times, each time
* with a different pci_dev pointer.
*/
static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct amd8131_dev_info *dev_info;
for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
dev_info++)
if (dev_info->devfn == dev->devfn)
break;
if (dev_info->inst == NO_BRIDGE) /* should never happen */
return -ENODEV;
/*
* We can't look the bridge up with pci_get_device() as we usually do,
* because there are 4 of them with the same ID; take a reference on the
* probed device with pci_dev_get() instead.
*/
dev_info->dev = pci_dev_get(dev);
if (pci_enable_device(dev_info->dev)) {
pci_dev_put(dev_info->dev);
printk(KERN_ERR "failed to enable:"
"vendor %x, device %x, devfn %x, name %s\n",
PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
dev_info->devfn, dev_info->ctl_name);
return -ENODEV;
}
/*
* We do not allocate an extra private structure for the
* edac_pci_ctl_info, but make use of the existing
* amd8131_dev_info instead.
*/
dev_info->edac_idx = edac_pci_alloc_index();
dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
if (!dev_info->edac_dev)
return -ENOMEM;
dev_info->edac_dev->pvt_info = dev_info;
dev_info->edac_dev->dev = &dev_info->dev->dev;
dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
dev_info->edac_dev->ctl_name = dev_info->ctl_name;
dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
dev_info->edac_dev->edac_check = amd8131_chipset.check;
if (amd8131_chipset.init)
amd8131_chipset.init(dev_info);
if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
dev_info->ctl_name);
edac_pci_free_ctl_info(dev_info->edac_dev);
return -ENODEV;
}
printk(KERN_INFO "added one device on AMD8131 "
"vendor %x, device %x, devfn %x, name %s\n",
PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
dev_info->devfn, dev_info->ctl_name);
return 0;
}
static void amd8131_remove(struct pci_dev *dev)
{
struct amd8131_dev_info *dev_info;
for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
dev_info++)
if (dev_info->devfn == dev->devfn)
break;
if (dev_info->inst == NO_BRIDGE) /* should never happen */
return;
if (dev_info->edac_dev) {
edac_pci_del_device(dev_info->edac_dev->dev);
edac_pci_free_ctl_info(dev_info->edac_dev);
}
if (amd8131_chipset.exit)
amd8131_chipset.exit(dev_info);
pci_dev_put(dev_info->dev);
}
static const struct pci_device_id amd8131_edac_pci_tbl[] = {
{
PCI_VEND_DEV(AMD, 8131_BRIDGE),
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = 0,
.class_mask = 0,
.driver_data = 0,
},
{
0,
} /* table is NULL-terminated */
};
MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);
static struct pci_driver amd8131_edac_driver = {
.name = AMD8131_EDAC_MOD_STR,
.probe = amd8131_probe,
.remove = amd8131_remove,
.id_table = amd8131_edac_pci_tbl,
};
static int __init amd8131_edac_init(void)
{
printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
/* Only POLL mode supported so far */
edac_op_state = EDAC_OPSTATE_POLL;
return pci_register_driver(&amd8131_edac_driver);
}
static void __exit amd8131_edac_exit(void)
{
pci_unregister_driver(&amd8131_edac_driver);
}
module_init(amd8131_edac_init);
module_exit(amd8131_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <[email protected]>");
MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
| linux-master | drivers/edac/amd8131_edac.c |
/*
* Freescale MPC85xx Memory Controller kernel module
*
* Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
*
* Author: Dave Jiang <[email protected]>
*
* 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/fsl/edac.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include "edac_module.h"
#include "mpc85xx_edac.h"
#include "fsl_ddr_edac.h"
static int edac_dev_idx;
#ifdef CONFIG_PCI
static int edac_pci_idx;
#endif
/*
* PCI Err defines
*/
#ifdef CONFIG_PCI
static u32 orig_pci_err_cap_dr;
static u32 orig_pci_err_en;
#endif
static u32 orig_l2_err_disable;
/**************************** PCI Err device ***************************/
#ifdef CONFIG_PCI
static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
{
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
u32 err_detect;
err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
/* master aborts can happen during PCI config cycles */
if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
return;
}
pr_err("PCI error(s) detected\n");
pr_err("PCI/X ERR_DR register: %#08x\n", err_detect);
pr_err("PCI/X ERR_ATTRIB register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
pr_err("PCI/X ERR_ADDR register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
pr_err("PCI/X ERR_EXT_ADDR register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
pr_err("PCI/X ERR_DL register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
pr_err("PCI/X ERR_DH register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
/* clear error bits */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
if (err_detect & PCI_EDE_PERR_MASK)
edac_pci_handle_pe(pci, pci->ctl_name);
if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
edac_pci_handle_npe(pci, pci->ctl_name);
}
static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
{
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
u32 err_detect, err_cap_stat;
err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
err_cap_stat = in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR);
pr_err("PCIe error(s) detected\n");
pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect);
pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n", err_cap_stat);
pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0));
pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R1));
pr_err("PCIe ERR_CAP_R2 register: 0x%08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R2));
pr_err("PCIe ERR_CAP_R3 register: 0x%08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R3));
/* clear error bits */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
/* reset error capture */
out_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR, err_cap_stat | 0x1);
}
static int mpc85xx_pcie_find_capability(struct device_node *np)
{
struct pci_controller *hose;
if (!np)
return -EINVAL;
hose = pci_find_hose_for_OF_device(np);
return early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
}
static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
{
struct edac_pci_ctl_info *pci = dev_id;
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
u32 err_detect;
err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
if (!err_detect)
return IRQ_NONE;
if (pdata->is_pcie)
mpc85xx_pcie_check(pci);
else
mpc85xx_pci_check(pci);
return IRQ_HANDLED;
}
static int mpc85xx_pci_err_probe(struct platform_device *op)
{
struct edac_pci_ctl_info *pci;
struct mpc85xx_pci_pdata *pdata;
struct mpc85xx_edac_pci_plat_data *plat_data;
struct device_node *of_node;
struct resource r;
int res = 0;
if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
return -ENOMEM;
pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
if (!pci)
return -ENOMEM;
/* make sure error reporting method is sane */
switch (edac_op_state) {
case EDAC_OPSTATE_POLL:
case EDAC_OPSTATE_INT:
break;
default:
edac_op_state = EDAC_OPSTATE_INT;
break;
}
pdata = pci->pvt_info;
pdata->name = "mpc85xx_pci_err";
plat_data = op->dev.platform_data;
if (!plat_data) {
dev_err(&op->dev, "no platform data");
res = -ENXIO;
goto err;
}
of_node = plat_data->of_node;
if (mpc85xx_pcie_find_capability(of_node) > 0)
pdata->is_pcie = true;
dev_set_drvdata(&op->dev, pci);
pci->dev = &op->dev;
pci->mod_name = EDAC_MOD_STR;
pci->ctl_name = pdata->name;
pci->dev_name = dev_name(&op->dev);
if (edac_op_state == EDAC_OPSTATE_POLL) {
if (pdata->is_pcie)
pci->edac_check = mpc85xx_pcie_check;
else
pci->edac_check = mpc85xx_pci_check;
}
pdata->edac_idx = edac_pci_idx++;
res = of_address_to_resource(of_node, 0, &r);
if (res) {
pr_err("%s: Unable to get resource for PCI err regs\n", __func__);
goto err;
}
/* we only need the error registers */
r.start += 0xe00;
if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
pdata->name)) {
pr_err("%s: Error while requesting mem region\n", __func__);
res = -EBUSY;
goto err;
}
pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
if (!pdata->pci_vbase) {
pr_err("%s: Unable to setup PCI err regs\n", __func__);
res = -ENOMEM;
goto err;
}
if (pdata->is_pcie) {
orig_pci_err_cap_dr =
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR);
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, ~0);
orig_pci_err_en =
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, 0);
} else {
orig_pci_err_cap_dr =
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
/* PCI master abort is expected during config cycles */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
orig_pci_err_en =
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
/* disable master abort reporting */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
}
/* clear error bits */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
/* reset error capture */
out_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR, 0x1);
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
edac_dbg(3, "failed edac_pci_add_device()\n");
goto err;
}
if (edac_op_state == EDAC_OPSTATE_INT) {
pdata->irq = irq_of_parse_and_map(of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_pci_isr,
IRQF_SHARED,
"[EDAC] PCI err", pci);
if (res < 0) {
pr_err("%s: Unable to request irq %d for MPC85xx PCI err\n",
__func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
goto err2;
}
pr_info(EDAC_MOD_STR " acquired irq %d for PCI Err\n",
pdata->irq);
}
if (pdata->is_pcie) {
/*
* Enable all PCIe error interrupts and error detection, except the
* invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA access interrupt-generation
* and detection enable bits. The PCIe bus code performs some invalid
* PEX_CONFIG_ADDR/PEX_CONFIG_DATA accesses while initializing and
* configuring devices at boot, which would make the EDAC driver print
* a flood of notices, so that detection is disabled to avoid the noisy
* output.
*/
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0
& ~PEX_ERR_ICCAIE_EN_BIT);
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, 0
| PEX_ERR_ICCAD_DISR_BIT);
}
devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
edac_dbg(3, "success\n");
pr_info(EDAC_MOD_STR " PCI err registered\n");
return 0;
err2:
edac_pci_del_device(&op->dev);
err:
edac_pci_free_ctl_info(pci);
devres_release_group(&op->dev, mpc85xx_pci_err_probe);
return res;
}
static int mpc85xx_pci_err_remove(struct platform_device *op)
{
struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
edac_dbg(0, "\n");
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, orig_pci_err_cap_dr);
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
edac_pci_del_device(&op->dev);
edac_pci_free_ctl_info(pci);
return 0;
}
static const struct platform_device_id mpc85xx_pci_err_match[] = {
{
.name = "mpc85xx-pci-edac"
},
{}
};
static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
.remove = mpc85xx_pci_err_remove,
.id_table = mpc85xx_pci_err_match,
.driver = {
.name = "mpc85xx_pci_err",
.suppress_bind_attrs = true,
},
};
#endif /* CONFIG_PCI */
/**************************** L2 Err device ***************************/
/************************ L2 SYSFS parts ***********************************/
static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
*edac_dev, char *data)
{
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
return sprintf(data, "0x%08x",
in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
}
static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
*edac_dev, char *data)
{
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
return sprintf(data, "0x%08x",
in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
}
static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
*edac_dev, char *data)
{
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
return sprintf(data, "0x%08x",
in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
}
static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
*edac_dev, const char *data,
size_t count)
{
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
if (isdigit(*data)) {
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
simple_strtoul(data, NULL, 0));
return count;
}
return 0;
}
static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
*edac_dev, const char *data,
size_t count)
{
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
if (isdigit(*data)) {
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
simple_strtoul(data, NULL, 0));
return count;
}
return 0;
}
static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
*edac_dev, const char *data,
size_t count)
{
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
if (isdigit(*data)) {
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
simple_strtoul(data, NULL, 0));
return count;
}
return 0;
}
static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
{
.attr = {
.name = "inject_data_hi",
.mode = (S_IRUGO | S_IWUSR)
},
.show = mpc85xx_l2_inject_data_hi_show,
.store = mpc85xx_l2_inject_data_hi_store},
{
.attr = {
.name = "inject_data_lo",
.mode = (S_IRUGO | S_IWUSR)
},
.show = mpc85xx_l2_inject_data_lo_show,
.store = mpc85xx_l2_inject_data_lo_store},
{
.attr = {
.name = "inject_ctrl",
.mode = (S_IRUGO | S_IWUSR)
},
.show = mpc85xx_l2_inject_ctrl_show,
.store = mpc85xx_l2_inject_ctrl_store},
/* End of list */
{
.attr = {.name = NULL}
}
};
static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
*edac_dev)
{
edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
}
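/*
* Illustrative use of the injection attributes above (the sysfs
* location depends on how the edac_device is named on a given board;
* the values are written verbatim to the corresponding L2 registers,
* whose bit meanings are not described here):
*
*   echo 0x00000001 > .../inject_data_lo
*   echo 0x<ctrl>   > .../inject_ctrl
*
* The store helpers only accept strings starting with a digit and parse
* them with simple_strtoul(..., 0), so hex values need the 0x prefix.
*/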
/***************************** L2 ops ***********************************/
static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
{
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
u32 err_detect;
err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
if (!(err_detect & L2_EDE_MASK))
return;
pr_err("ECC Error in CPU L2 cache\n");
pr_err("L2 Error Detect Register: 0x%08x\n", err_detect);
pr_err("L2 Error Capture Data High Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
pr_err("L2 Error Capture Data Lo Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
pr_err("L2 Error Syndrome Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
pr_err("L2 Error Attributes Capture Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
pr_err("L2 Error Address Capture Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
/* clear error detect register */
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
if (err_detect & L2_EDE_CE_MASK)
edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
if (err_detect & L2_EDE_UE_MASK)
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
{
struct edac_device_ctl_info *edac_dev = dev_id;
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
u32 err_detect;
err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
if (!(err_detect & L2_EDE_MASK))
return IRQ_NONE;
mpc85xx_l2_check(edac_dev);
return IRQ_HANDLED;
}
static int mpc85xx_l2_err_probe(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev;
struct mpc85xx_l2_pdata *pdata;
struct resource r;
int res;
if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
return -ENOMEM;
edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
"cpu", 1, "L", 1, 2, NULL, 0,
edac_dev_idx);
if (!edac_dev) {
devres_release_group(&op->dev, mpc85xx_l2_err_probe);
return -ENOMEM;
}
pdata = edac_dev->pvt_info;
pdata->name = "mpc85xx_l2_err";
edac_dev->dev = &op->dev;
dev_set_drvdata(edac_dev->dev, edac_dev);
edac_dev->ctl_name = pdata->name;
edac_dev->dev_name = pdata->name;
res = of_address_to_resource(op->dev.of_node, 0, &r);
if (res) {
pr_err("%s: Unable to get resource for L2 err regs\n", __func__);
goto err;
}
/* we only need the error registers */
r.start += 0xe00;
if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
pdata->name)) {
pr_err("%s: Error while requesting mem region\n", __func__);
res = -EBUSY;
goto err;
}
pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
if (!pdata->l2_vbase) {
pr_err("%s: Unable to setup L2 err regs\n", __func__);
res = -ENOMEM;
goto err;
}
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
/* clear the err_dis */
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
edac_dev->mod_name = EDAC_MOD_STR;
if (edac_op_state == EDAC_OPSTATE_POLL)
edac_dev->edac_check = mpc85xx_l2_check;
mpc85xx_set_l2_sysfs_attributes(edac_dev);
pdata->edac_idx = edac_dev_idx++;
if (edac_device_add_device(edac_dev) > 0) {
edac_dbg(3, "failed edac_device_add_device()\n");
goto err;
}
if (edac_op_state == EDAC_OPSTATE_INT) {
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_l2_isr, IRQF_SHARED,
"[EDAC] L2 err", edac_dev);
if (res < 0) {
pr_err("%s: Unable to request irq %d for MPC85xx L2 err\n",
__func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
goto err2;
}
pr_info(EDAC_MOD_STR " acquired irq %d for L2 Err\n", pdata->irq);
edac_dev->op_state = OP_RUNNING_INTERRUPT;
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
}
devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
edac_dbg(3, "success\n");
pr_info(EDAC_MOD_STR " L2 err registered\n");
return 0;
err2:
edac_device_del_device(&op->dev);
err:
devres_release_group(&op->dev, mpc85xx_l2_err_probe);
edac_device_free_ctl_info(edac_dev);
return res;
}
static int mpc85xx_l2_err_remove(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
edac_dbg(0, "\n");
if (edac_op_state == EDAC_OPSTATE_INT) {
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
irq_dispose_mapping(pdata->irq);
}
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
edac_device_del_device(&op->dev);
edac_device_free_ctl_info(edac_dev);
return 0;
}
static const struct of_device_id mpc85xx_l2_err_of_match[] = {
{ .compatible = "fsl,mpc8536-l2-cache-controller", },
{ .compatible = "fsl,mpc8540-l2-cache-controller", },
{ .compatible = "fsl,mpc8541-l2-cache-controller", },
{ .compatible = "fsl,mpc8544-l2-cache-controller", },
{ .compatible = "fsl,mpc8548-l2-cache-controller", },
{ .compatible = "fsl,mpc8555-l2-cache-controller", },
{ .compatible = "fsl,mpc8560-l2-cache-controller", },
{ .compatible = "fsl,mpc8568-l2-cache-controller", },
{ .compatible = "fsl,mpc8569-l2-cache-controller", },
{ .compatible = "fsl,mpc8572-l2-cache-controller", },
{ .compatible = "fsl,p1020-l2-cache-controller", },
{ .compatible = "fsl,p1021-l2-cache-controller", },
{ .compatible = "fsl,p2020-l2-cache-controller", },
{ .compatible = "fsl,t2080-l2-cache-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
static struct platform_driver mpc85xx_l2_err_driver = {
.probe = mpc85xx_l2_err_probe,
.remove = mpc85xx_l2_err_remove,
.driver = {
.name = "mpc85xx_l2_err",
.of_match_table = mpc85xx_l2_err_of_match,
},
};
static const struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,mpc8536-memory-controller", },
{ .compatible = "fsl,mpc8540-memory-controller", },
{ .compatible = "fsl,mpc8541-memory-controller", },
{ .compatible = "fsl,mpc8544-memory-controller", },
{ .compatible = "fsl,mpc8548-memory-controller", },
{ .compatible = "fsl,mpc8555-memory-controller", },
{ .compatible = "fsl,mpc8560-memory-controller", },
{ .compatible = "fsl,mpc8568-memory-controller", },
{ .compatible = "fsl,mpc8569-memory-controller", },
{ .compatible = "fsl,mpc8572-memory-controller", },
{ .compatible = "fsl,mpc8349-memory-controller", },
{ .compatible = "fsl,p1020-memory-controller", },
{ .compatible = "fsl,p1021-memory-controller", },
{ .compatible = "fsl,p2020-memory-controller", },
{ .compatible = "fsl,qoriq-memory-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
static struct platform_driver mpc85xx_mc_err_driver = {
.probe = fsl_mc_err_probe,
.remove = fsl_mc_err_remove,
.driver = {
.name = "mpc85xx_mc_err",
.of_match_table = mpc85xx_mc_err_of_match,
},
};
static struct platform_driver * const drivers[] = {
&mpc85xx_mc_err_driver,
&mpc85xx_l2_err_driver,
#ifdef CONFIG_PCI
&mpc85xx_pci_err_driver,
#endif
};
static int __init mpc85xx_mc_init(void)
{
int res = 0;
u32 __maybe_unused pvr = 0;
pr_info("Freescale(R) MPC85xx EDAC driver, (C) 2006 Montavista Software\n");
/* make sure error reporting method is sane */
switch (edac_op_state) {
case EDAC_OPSTATE_POLL:
case EDAC_OPSTATE_INT:
break;
default:
edac_op_state = EDAC_OPSTATE_INT;
break;
}
res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (res)
pr_warn(EDAC_MOD_STR " drivers failed to register\n");
return 0;
}
module_init(mpc85xx_mc_init);
static void __exit mpc85xx_mc_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(mpc85xx_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
| linux-master | drivers/edac/mpc85xx_edac.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Cavium, Inc.
*
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/edac.h>
#include "edac_module.h"
#include <asm/octeon/cvmx.h>
#include <asm/mipsregs.h>
extern int register_co_cache_error_notifier(struct notifier_block *nb);
extern int unregister_co_cache_error_notifier(struct notifier_block *nb);
extern unsigned long long cache_err_dcache[NR_CPUS];
struct co_cache_error {
struct notifier_block notifier;
struct edac_device_ctl_info *ed;
};
/**
* EDAC CPU cache error callback
*
* @event: non-zero if unrecoverable.
*/
static int co_cache_error_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct co_cache_error *p = container_of(this, struct co_cache_error,
notifier);
unsigned int core = cvmx_get_core_num();
unsigned int cpu = smp_processor_id();
u64 icache_err = read_octeon_c0_icacheerr();
u64 dcache_err;
if (event) {
dcache_err = cache_err_dcache[core];
cache_err_dcache[core] = 0;
} else {
dcache_err = read_octeon_c0_dcacheerr();
}
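	/* Bit 0 of the I-/D-cache error values is treated as the error-present flag below. */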
if (icache_err & 1) {
edac_device_printk(p->ed, KERN_ERR,
"CacheErr (Icache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n",
(unsigned long long)icache_err, core, cpu,
read_c0_errorepc());
write_octeon_c0_icacheerr(0);
edac_device_handle_ce(p->ed, cpu, 1, "icache");
}
if (dcache_err & 1) {
edac_device_printk(p->ed, KERN_ERR,
"CacheErr (Dcache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n",
(unsigned long long)dcache_err, core, cpu,
read_c0_errorepc());
if (event)
edac_device_handle_ue(p->ed, cpu, 0, "dcache");
else
edac_device_handle_ce(p->ed, cpu, 0, "dcache");
/* Clear the error indication */
if (OCTEON_IS_OCTEON2())
write_octeon_c0_dcacheerr(1);
else
write_octeon_c0_dcacheerr(0);
}
return NOTIFY_STOP;
}
static int co_cache_error_probe(struct platform_device *pdev)
{
struct co_cache_error *p = devm_kzalloc(&pdev->dev, sizeof(*p),
GFP_KERNEL);
if (!p)
return -ENOMEM;
p->notifier.notifier_call = co_cache_error_event;
platform_set_drvdata(pdev, p);
p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(),
"cache", 2, 0, NULL, 0,
edac_device_alloc_index());
if (!p->ed)
goto err;
p->ed->dev = &pdev->dev;
p->ed->dev_name = dev_name(&pdev->dev);
p->ed->mod_name = "octeon-cpu";
p->ed->ctl_name = "cache";
if (edac_device_add_device(p->ed)) {
pr_err("%s: edac_device_add_device() failed\n", __func__);
goto err1;
}
register_co_cache_error_notifier(&p->notifier);
return 0;
err1:
edac_device_free_ctl_info(p->ed);
err:
return -ENXIO;
}
static int co_cache_error_remove(struct platform_device *pdev)
{
struct co_cache_error *p = platform_get_drvdata(pdev);
unregister_co_cache_error_notifier(&p->notifier);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(p->ed);
return 0;
}
static struct platform_driver co_cache_error_driver = {
.probe = co_cache_error_probe,
.remove = co_cache_error_remove,
.driver = {
.name = "octeon_pc_edac",
}
};
module_platform_driver(co_cache_error_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <[email protected]>");
| linux-master | drivers/edac/octeon_edac-pc.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "amd64_edac.h"
#include <asm/amd_nb.h>
static struct edac_pci_ctl_info *pci_ctl;
/*
* Set by command line parameter. If BIOS has enabled the ECC, this override is
* cleared to prevent re-enabling the hardware by this driver.
*/
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
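/*
 * Translate a UMC channel register offset to its DDR5-era variant on
 * systems that use the version 2 register layout (zn_regs_v2).
 */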
static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
{
if (!pvt->flags.zn_regs_v2)
return reg;
switch (reg) {
case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5;
case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5;
case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5;
}
WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
return 0;
}
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
/* Device for the PCI component */
static struct device *pci_ctl_dev;
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
* or higher value'.
*
 * FIXME: Produce a better mapping/linearisation.
*/
static const struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
{ 0x01, 1600000000UL},
{ 0x02, 800000000UL},
{ 0x03, 400000000UL},
{ 0x04, 200000000UL},
{ 0x05, 100000000UL},
{ 0x06, 50000000UL},
{ 0x07, 25000000UL},
{ 0x08, 12284069UL},
{ 0x09, 6274509UL},
{ 0x0A, 3121951UL},
{ 0x0B, 1560975UL},
{ 0x0C, 781440UL},
{ 0x0D, 390720UL},
{ 0x0E, 195300UL},
{ 0x0F, 97650UL},
{ 0x10, 48854UL},
{ 0x11, 24427UL},
{ 0x12, 12213UL},
{ 0x13, 6101UL},
{ 0x14, 3051UL},
{ 0x15, 1523UL},
{ 0x16, 761UL},
{ 0x00, 0UL}, /* scrubbing off */
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
u32 *val, const char *func)
{
int err = 0;
err = pci_read_config_dword(pdev, offset, val);
if (err)
amd64_warn("%s: error reading F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
return err;
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
u32 val, const char *func)
{
int err = 0;
err = pci_write_config_dword(pdev, offset, val);
if (err)
amd64_warn("%s: error writing to F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
return err;
}
/*
* Select DCT to which PCI cfg accesses are routed
*/
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
u32 reg = 0;
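	/* DctCfgSel is a two-bit field on model 0x30 and a single bit elsewhere. */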
	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
reg &= (pvt->model == 0x30) ? ~3 : ~1;
reg |= dct;
amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
/*
*
* Depending on the family, F2 DCT reads need special handling:
*
* K8: has a single DCT only and no address offsets >= 0x100
*
* F10h: each DCT has its own set of regs
* DCT0 -> F2x040..
* DCT1 -> F2x140..
*
* F16h: has only 1 DCT
*
* F15h: we select which DCT we access using F1x10C[DctCfgSel]
*/
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
int offset, u32 *val)
{
switch (pvt->fam) {
case 0xf:
if (dct || offset >= 0x100)
return -EINVAL;
break;
case 0x10:
if (dct) {
/*
* Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
* return 0. (cf. Section 2.8.1 F10h BKDG)
*/
if (dct_ganging_enabled(pvt))
return 0;
offset += 0x100;
}
break;
case 0x15:
/*
* F15h: F2x1xx addresses do not map explicitly to DCT1.
* We should select which DCT we access using F1x10C[DctCfgSel]
*/
dct = (dct && pvt->model == 0x30) ? 3 : dct;
f15h_select_dct(pvt, dct);
break;
case 0x16:
if (dct)
return -EINVAL;
break;
default:
break;
}
return amd64_read_pci_cfg(pvt->F2, offset, val);
}
/*
* Memory scrubber control interface. For K8, memory scrubbing is handled by
* hardware and can involve L2 cache, dcache as well as the main memory. With
* F10, this is extended to L3 cache scrubbing on CPU models sporting that
* functionality.
*
* This causes the "units" for the scrubbing speed to vary from 64 byte blocks
* (dram) over to cache lines. This is nasty, so we will use bandwidth in
* bytes/sec for the setting.
*
* Currently, we only do dram scrubbing. If the scrubbing is done in software on
* other archs, we might not have access to the caches directly.
*/
/*
* Scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
*/
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
u32 scrubval;
int i;
/*
* map the configured rate (new_bw) to a value specific to the AMD64
* memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting requested
* and program that. If at last entry, turn off DRAM scrubbing.
*
* If no suitable bandwidth is found, turn off DRAM scrubbing entirely
* by falling back to the last element in scrubrates[].
*/
for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
/*
* skip scrub rates which aren't recommended
* (see F10 BKDG, F3x58)
*/
if (scrubrates[i].scrubval < min_rate)
continue;
if (scrubrates[i].bandwidth <= new_bw)
break;
}
scrubval = scrubrates[i].scrubval;
if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
f15h_select_dct(pvt, 1);
pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
} else {
pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
}
if (scrubval)
return scrubrates[i].bandwidth;
return 0;
}
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 min_scrubrate = 0x5;
if (pvt->fam == 0xf)
min_scrubrate = 0x0;
if (pvt->fam == 0x15) {
/* Erratum #505 */
if (pvt->model < 0x10)
f15h_select_dct(pvt, 0);
if (pvt->model == 0x60)
min_scrubrate = 0x6;
}
return __set_scrub_rate(pvt, bw, min_scrubrate);
}
static int get_scrub_rate(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
int i, retval = -EINVAL;
u32 scrubval = 0;
if (pvt->fam == 0x15) {
/* Erratum #505 */
if (pvt->model < 0x10)
f15h_select_dct(pvt, 0);
if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
else
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
} else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
}
scrubval = scrubval & 0x001F;
for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
if (scrubrates[i].scrubval == scrubval) {
retval = scrubrates[i].bandwidth;
break;
}
}
return retval;
}
/*
* returns true if the SysAddr given by sys_addr matches the
* DRAM base/limit associated with node_id
*/
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
u64 addr;
/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
* all ones if the most significant implemented address bit is 1.
* Here we discard bits 63-40. See section 3.4.2 of AMD publication
* 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
* Application Programming.
*/
addr = sys_addr & 0x000000ffffffffffull;
return ((addr >= get_dram_base(pvt, nid)) &&
(addr <= get_dram_limit(pvt, nid)));
}
/*
* Attempt to map a SysAddr to a node. On success, return a pointer to the
* mem_ctl_info structure for the node that the SysAddr maps to.
*
* On failure, return NULL.
*/
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
u8 node_id;
u32 intlv_en, bits;
/*
* Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
* 3.4.4.2) registers to map the SysAddr to a node ID.
*/
pvt = mci->pvt_info;
/*
* The value of this field should be the same for all DRAM Base
* registers. Therefore we arbitrarily choose to read it from the
* register for node 0.
*/
intlv_en = dram_intlv_en(pvt, 0);
if (intlv_en == 0) {
for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
if (base_limit_match(pvt, sys_addr, node_id))
goto found;
}
goto err_no_match;
}
if (unlikely((intlv_en != 0x01) &&
(intlv_en != 0x03) &&
(intlv_en != 0x07))) {
amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
return NULL;
}
bits = (((u32) sys_addr) >> 12) & intlv_en;
for (node_id = 0; ; ) {
if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
break; /* intlv_sel field matches */
if (++node_id >= DRAM_RANGES)
goto err_no_match;
}
/* sanity test for sys_addr */
if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
"range for node %d with node interleaving enabled.\n",
__func__, sys_addr, node_id);
return NULL;
}
found:
return edac_mc_find((int)node_id);
err_no_match:
edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
(unsigned long)sys_addr);
return NULL;
}
/*
* compute the CS base address of the @csrow on the DRAM controller @dct.
* For details see F2x[5C:40] in the processor's BKDG
*/
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
u64 *base, u64 *mask)
{
u64 csbase, csmask, base_bits, mask_bits;
u8 addr_shift;
if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow];
base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
addr_shift = 4;
/*
* F16h and F15h, models 30h and later need two addr_shift values:
* 8 for high and 6 for low (cf. F16h BKDG).
*/
} else if (pvt->fam == 0x16 ||
(pvt->fam == 0x15 && pvt->model >= 0x30)) {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow >> 1];
*base = (csbase & GENMASK_ULL(15, 5)) << 6;
*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
*mask = ~0ULL;
/* poke holes for the csmask */
*mask &= ~((GENMASK_ULL(15, 5) << 6) |
(GENMASK_ULL(30, 19) << 8));
*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
return;
} else {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow >> 1];
addr_shift = 8;
if (pvt->fam == 0x15)
base_bits = mask_bits =
GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
else
base_bits = mask_bits =
GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
}
*base = (csbase & base_bits) << addr_shift;
*mask = ~0ULL;
/* poke holes for the csmask */
*mask &= ~(mask_bits << addr_shift);
/* OR them in */
*mask |= (csmask & mask_bits) << addr_shift;
}
#define for_each_chip_select(i, dct, pvt) \
for (i = 0; i < pvt->csels[dct].b_cnt; i++)
#define chip_select_base(i, dct, pvt) \
pvt->csels[dct].csbases[i]
#define for_each_chip_select_mask(i, dct, pvt) \
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
#define for_each_umc(i) \
for (i = 0; i < pvt->max_mcs; i++)
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
*/
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
struct amd64_pvt *pvt;
int csrow;
u64 base, mask;
pvt = mci->pvt_info;
for_each_chip_select(csrow, 0, pvt) {
if (!csrow_enabled(csrow, 0, pvt))
continue;
get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
(unsigned long)input_addr, csrow,
pvt->mc_node_id);
return csrow;
}
}
edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
(unsigned long)input_addr, pvt->mc_node_id);
return -1;
}
/*
* Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
* for the node represented by mci. Info is passed back in *hole_base,
* *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
* info is invalid. Info may be invalid for either of the following reasons:
*
* - The revision of the node is not E or greater. In this case, the DRAM Hole
* Address Register does not exist.
*
* - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
* indicating that its contents are not valid.
*
* The values passed back in *hole_base, *hole_offset, and *hole_size are
* complete 32-bit values despite the fact that the bitfields in the DHAR
* only represent bits 31-24 of the base and offset values.
*/
static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size)
{
struct amd64_pvt *pvt = mci->pvt_info;
/* only revE and later have the DRAM Hole Address Register */
if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
edac_dbg(1, " revision %d for node %d does not support DHAR\n",
pvt->ext_model, pvt->mc_node_id);
return 1;
}
/* valid for Fam10h and above */
if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
if (!dhar_valid(pvt)) {
edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
pvt->mc_node_id);
return 1;
}
/* This node has Memory Hoisting */
/* +------------------+--------------------+--------------------+-----
* | memory | DRAM hole | relocated |
* | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
* | | | DRAM hole |
* | | | [0x100000000, |
* | | | (0x100000000+ |
* | | | (0xffffffff-x))] |
* +------------------+--------------------+--------------------+-----
*
* Above is a diagram of physical memory showing the DRAM hole and the
* relocated addresses from the DRAM hole. As shown, the DRAM hole
* starts at address x (the base address) and extends through address
* 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
* addresses in the hole so that they start at 0x100000000.
*/
*hole_base = dhar_base(pvt);
*hole_size = (1ULL << 32) - *hole_base;
*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
: k8_dhar_offset(pvt);
edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
pvt->mc_node_id, (unsigned long)*hole_base,
(unsigned long)*hole_offset, (unsigned long)*hole_size);
return 0;
}
#ifdef CONFIG_EDAC_DEBUG
#define EDAC_DCT_ATTR_SHOW(reg) \
static ssize_t reg##_show(struct device *dev, \
struct device_attribute *mattr, char *data) \
{ \
struct mem_ctl_info *mci = to_mci(dev); \
struct amd64_pvt *pvt = mci->pvt_info; \
\
return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
}
EDAC_DCT_ATTR_SHOW(dhar);
EDAC_DCT_ATTR_SHOW(dbam0);
EDAC_DCT_ATTR_SHOW(top_mem);
EDAC_DCT_ATTR_SHOW(top_mem2);
static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
u64 hole_base = 0;
u64 hole_offset = 0;
u64 hole_size = 0;
get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
hole_size);
}
/*
* update NUM_DBG_ATTRS in case you add new members
*/
static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
static DEVICE_ATTR_RO(dram_hole);
static struct attribute *dbg_attrs[] = {
&dev_attr_dhar.attr,
&dev_attr_dbam.attr,
&dev_attr_topmem.attr,
&dev_attr_topmem2.attr,
&dev_attr_dram_hole.attr,
NULL
};
static const struct attribute_group dbg_group = {
.attrs = dbg_attrs,
};
static ssize_t inject_section_show(struct device *dev,
struct device_attribute *mattr, char *buf)
{
struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(buf, "0x%x\n", pvt->injection.section);
}
/*
* store error injection section value which refers to one of 4 16-byte sections
* within a 64-byte cacheline
*
* range: 0..3
*/
static ssize_t inject_section_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret;
ret = kstrtoul(data, 10, &value);
if (ret < 0)
return ret;
if (value > 3) {
amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
return -EINVAL;
}
pvt->injection.section = (u32) value;
return count;
}
static ssize_t inject_word_show(struct device *dev,
struct device_attribute *mattr, char *buf)
{
struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(buf, "0x%x\n", pvt->injection.word);
}
/*
 * store error injection word value which refers to one of 9 16-bit words of the
* 16-byte (128-bit + ECC bits) section
*
* range: 0..8
*/
static ssize_t inject_word_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret;
ret = kstrtoul(data, 10, &value);
if (ret < 0)
return ret;
if (value > 8) {
amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
return -EINVAL;
}
pvt->injection.word = (u32) value;
return count;
}
static ssize_t inject_ecc_vector_show(struct device *dev,
struct device_attribute *mattr,
char *buf)
{
struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
}
/*
* store 16 bit error injection vector which enables injecting errors to the
* corresponding bit within the error injection word above. When used during a
 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
*/
static ssize_t inject_ecc_vector_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret;
ret = kstrtoul(data, 16, &value);
if (ret < 0)
return ret;
if (value & 0xFFFF0000) {
amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
return -EINVAL;
}
pvt->injection.bit_map = (u32) value;
return count;
}
/*
* Do a DRAM ECC read. Assemble staged values in the pvt area, format into
* fields needed by the injection registers and read the NB Array Data Port.
*/
static ssize_t inject_read_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
u32 section, word_bits;
int ret;
ret = kstrtoul(data, 10, &value);
if (ret < 0)
return ret;
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
/* Issue 'word' and 'bit' along with the READ request */
amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
return count;
}
/*
* Do a DRAM ECC write. Assemble staged values in the pvt area and format into
* fields needed by the injection registers.
*/
static ssize_t inject_write_store(struct device *dev,
struct device_attribute *mattr,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
u32 section, word_bits, tmp;
unsigned long value;
int ret;
ret = kstrtoul(data, 10, &value);
if (ret < 0)
return ret;
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
pr_notice_once("Don't forget to decrease MCE polling interval in\n"
"/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
"so that you can get the error report faster.\n");
on_each_cpu(disable_caches, NULL, 1);
/* Issue 'word' and 'bit' along with the READ request */
amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
retry:
/* wait until injection happens */
amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
if (tmp & F10_NB_ARR_ECC_WR_REQ) {
cpu_relax();
goto retry;
}
on_each_cpu(enable_caches, NULL, 1);
edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
return count;
}
/*
* update NUM_INJ_ATTRS in case you add new members
*/
static DEVICE_ATTR_RW(inject_section);
static DEVICE_ATTR_RW(inject_word);
static DEVICE_ATTR_RW(inject_ecc_vector);
static DEVICE_ATTR_WO(inject_write);
static DEVICE_ATTR_WO(inject_read);
static struct attribute *inj_attrs[] = {
&dev_attr_inject_section.attr,
&dev_attr_inject_word.attr,
&dev_attr_inject_ecc_vector.attr,
&dev_attr_inject_write.attr,
&dev_attr_inject_read.attr,
NULL
};
static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
struct amd64_pvt *pvt = mci->pvt_info;
/* Families which have that injection hw */
if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
return attr->mode;
return 0;
}
static const struct attribute_group inj_group = {
.attrs = inj_attrs,
.is_visible = inj_is_visible,
};
#endif /* CONFIG_EDAC_DEBUG */
/*
* Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
* assumed that sys_addr maps to the node given by mci.
*
* The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
* 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
* SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
* then it is also involved in translating a SysAddr to a DramAddr. Sections
* 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
* These parts of the documentation are unclear. I interpret them as follows:
*
* When node n receives a SysAddr, it processes the SysAddr as follows:
*
* 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
* Limit registers for node n. If the SysAddr is not within the range
* specified by the base and limit values, then node n ignores the Sysaddr
* (since it does not map to node n). Otherwise continue to step 2 below.
*
* 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
* disabled so skip to step 3 below. Otherwise see if the SysAddr is within
* the range of relocated addresses (starting at 0x100000000) from the DRAM
* hole. If not, skip to step 3 below. Else get the value of the
* DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
* offset defined by this value from the SysAddr.
*
* 3. Obtain the base address for node n from the DRAMBase field of the DRAM
* Base register for node n. To obtain the DramAddr, subtract the base
* address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
*/
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
int ret;
dram_base = get_dram_base(pvt, pvt->mc_node_id);
ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
if (!ret) {
if ((sys_addr >= (1ULL << 32)) &&
(sys_addr < ((1ULL << 32) + hole_size))) {
/* use DHAR to translate SysAddr to DramAddr */
dram_addr = sys_addr - hole_offset;
edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
(unsigned long)sys_addr,
(unsigned long)dram_addr);
return dram_addr;
}
}
/*
* Translate the SysAddr to a DramAddr as shown near the start of
* section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
* only deals with 40-bit values. Therefore we discard bits 63-40 of
* sys_addr below. If bit 39 of sys_addr is 1 then the bits we
* discard are all 1s. Otherwise the bits we discard are all 0s. See
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
(unsigned long)sys_addr, (unsigned long)dram_addr);
return dram_addr;
}
/*
* @intlv_en is the value of the IntlvEn field from a DRAM Base register
* (section 3.4.4.1). Return the number of bits from a SysAddr that are used
* for node interleaving.
*/
static int num_node_interleave_bits(unsigned intlv_en)
{
static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
int n;
BUG_ON(intlv_en > 7);
n = intlv_shift_table[intlv_en];
return n;
}
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
struct amd64_pvt *pvt;
int intlv_shift;
u64 input_addr;
pvt = mci->pvt_info;
/*
* See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
* concerning translating a DramAddr to an InputAddr.
*/
intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
(dram_addr & 0xfff);
edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
intlv_shift, (unsigned long)dram_addr,
(unsigned long)input_addr);
return input_addr;
}
/*
* Translate the SysAddr represented by @sys_addr to an InputAddr. It is
* assumed that @sys_addr maps to the node given by mci.
*/
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
u64 input_addr;
input_addr =
dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
(unsigned long)sys_addr, (unsigned long)input_addr);
return input_addr;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
struct err_info *err)
{
err->page = (u32) (error_address >> PAGE_SHIFT);
err->offset = ((u32) error_address) & ~PAGE_MASK;
}
/*
* @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
* Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
* of a node that detected an ECC memory error. mci represents the node that
* the error address maps to (possibly different from the node that detected
* the error). Return the number of the csrow that sys_addr maps to, or -1 on
* error.
*/
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
int csrow;
csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
if (csrow == -1)
amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
"address 0x%lx\n", (unsigned long)sys_addr);
return csrow;
}
/*
* See AMD PPR DF::LclNodeTypeMap
*
* This register gives information for nodes of the same type within a system.
*
* Reading this register from a GPU node will tell how many GPU nodes are in the
* system and what the lowest AMD Node ID value is for the GPU nodes. Use this
* info to fixup the Linux logical "Node ID" value set in the AMD NB code and EDAC.
*/
static struct local_node_map {
u16 node_count;
u16 base_node_id;
} gpu_node_map;
#define PCI_DEVICE_ID_AMD_MI200_DF_F1 0x14d1
#define REG_LOCAL_NODE_TYPE_MAP 0x144
/* Local Node Type Map (LNTM) fields */
#define LNTM_NODE_COUNT GENMASK(27, 16)
#define LNTM_BASE_NODE_ID GENMASK(11, 0)
static int gpu_get_node_map(void)
{
struct pci_dev *pdev;
int ret;
u32 tmp;
/*
* Node ID 0 is reserved for CPUs.
* Therefore, a non-zero Node ID means we've already cached the values.
*/
if (gpu_node_map.base_node_id)
return 0;
pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F1, NULL);
if (!pdev) {
ret = -ENODEV;
goto out;
}
ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
if (ret)
goto out;
gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
out:
pci_dev_put(pdev);
return ret;
}
static int fixup_node_id(int node_id, struct mce *m)
{
	/* MCA_IPID[InstanceIdHi] gives the AMD Node ID for the bank. */
u8 nid = (m->ipid >> 44) & 0xF;
if (smca_get_bank_type(m->extcpu, m->bank) != SMCA_UMC_V2)
return node_id;
/* Nodes below the GPU base node are CPU nodes and don't need a fixup. */
if (nid < gpu_node_map.base_node_id)
return node_id;
/* Convert the hardware-provided AMD Node ID to a Linux logical one. */
return nid - gpu_node_map.base_node_id + 1;
}
/* Protect the PCI config register pairs used for DF indirect access. */
static DEFINE_MUTEX(df_indirect_mutex);
/*
* Data Fabric Indirect Access uses FICAA/FICAD.
*
* Fabric Indirect Configuration Access Address (FICAA): Constructed based
* on the device's Instance Id and the PCI function and register offset of
* the desired register.
*
* Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
* and FICAD HI registers but so far we only need the LO register.
*
* Use Instance Id 0xFF to indicate a broadcast read.
*/
#define DF_BROADCAST 0xFF
static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
struct pci_dev *F4;
u32 ficaa;
int err = -ENODEV;
if (node >= amd_nb_num())
goto out;
F4 = node_to_amd_nb(node)->link;
if (!F4)
goto out;
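	/*
	 * Assemble the FICAA value: bit 0 selects an instance (rather than
	 * broadcast) access, bits [9:2] carry the dword-aligned register
	 * offset, bits [13:11] the PCI function and bits [23:16] the
	 * instance ID.
	 */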
ficaa = (instance_id == DF_BROADCAST) ? 0 : 1;
ficaa |= reg & 0x3FC;
ficaa |= (func & 0x7) << 11;
ficaa |= instance_id << 16;
mutex_lock(&df_indirect_mutex);
err = pci_write_config_dword(F4, 0x5C, ficaa);
if (err) {
pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
goto out_unlock;
}
err = pci_read_config_dword(F4, 0x98, lo);
if (err)
pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
out_unlock:
mutex_unlock(&df_indirect_mutex);
out:
return err;
}
static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
return __df_indirect_read(node, func, reg, instance_id, lo);
}
static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
{
return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
}
struct addr_ctx {
u64 ret_addr;
u32 tmp;
u16 nid;
u8 inst_id;
};
static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
u64 dram_base_addr, dram_limit_addr, dram_hole_base;
u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
u8 intlv_addr_sel, intlv_addr_bit;
u8 num_intlv_bits, hashed_bit;
u8 lgcy_mmio_hole_en, base = 0;
u8 cs_mask, cs_id = 0;
bool hash_enabled = false;
struct addr_ctx ctx;
memset(&ctx, 0, sizeof(ctx));
/* Start from the normalized address */
ctx.ret_addr = norm_addr;
ctx.nid = nid;
ctx.inst_id = umc;
/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
goto out_err;
/* Remove HiAddrOffset from normalized address, if enabled: */
if (ctx.tmp & BIT(0)) {
u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
if (norm_addr >= hi_addr_offset) {
ctx.ret_addr -= hi_addr_offset;
base = 1;
}
}
/* Read D18F0x110 (DramBaseAddress). */
if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
goto out_err;
/* Check if address range is valid. */
if (!(ctx.tmp & BIT(0))) {
pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
__func__, ctx.tmp);
goto out_err;
}
lgcy_mmio_hole_en = ctx.tmp & BIT(1);
intlv_num_chan = (ctx.tmp >> 4) & 0xF;
intlv_addr_sel = (ctx.tmp >> 8) & 0x7;
dram_base_addr = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
if (intlv_addr_sel > 3) {
pr_err("%s: Invalid interleave address select %d.\n",
__func__, intlv_addr_sel);
goto out_err;
}
/* Read D18F0x114 (DramLimitAddress). */
if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
goto out_err;
intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
intlv_num_dies = (ctx.tmp >> 10) & 0x3;
dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
intlv_addr_bit = intlv_addr_sel + 8;
/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
switch (intlv_num_chan) {
case 0: intlv_num_chan = 0; break;
case 1: intlv_num_chan = 1; break;
case 3: intlv_num_chan = 2; break;
case 5: intlv_num_chan = 3; break;
case 7: intlv_num_chan = 4; break;
case 8: intlv_num_chan = 1;
hash_enabled = true;
break;
default:
pr_err("%s: Invalid number of interleaved channels %d.\n",
__func__, intlv_num_chan);
goto out_err;
}
num_intlv_bits = intlv_num_chan;
if (intlv_num_dies > 2) {
pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
__func__, intlv_num_dies);
goto out_err;
}
num_intlv_bits += intlv_num_dies;
/* Add a bit if sockets are interleaved. */
num_intlv_bits += intlv_num_sockets;
/* Assert num_intlv_bits <= 4 */
if (num_intlv_bits > 4) {
pr_err("%s: Invalid interleave bits %d.\n",
__func__, num_intlv_bits);
goto out_err;
}
if (num_intlv_bits > 0) {
u64 temp_addr_x, temp_addr_i, temp_addr_y;
u8 die_id_bit, sock_id_bit, cs_fabric_id;
/*
* Read FabricBlockInstanceInformation3_CS[BlockFabricID].
* This is the fabric id for this coherent slave. Use
* umc/channel# as instance id of the coherent slave
* for FICAA.
*/
if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
goto out_err;
cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
die_id_bit = 0;
/* If interleaved over more than 1 channel: */
if (intlv_num_chan) {
die_id_bit = intlv_num_chan;
cs_mask = (1 << die_id_bit) - 1;
cs_id = cs_fabric_id & cs_mask;
}
sock_id_bit = die_id_bit;
/* Read D18F1x208 (SystemFabricIdMask). */
if (intlv_num_dies || intlv_num_sockets)
if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
goto out_err;
/* If interleaved over more than 1 die. */
if (intlv_num_dies) {
sock_id_bit = die_id_bit + intlv_num_dies;
die_id_shift = (ctx.tmp >> 24) & 0xF;
die_id_mask = (ctx.tmp >> 8) & 0xFF;
cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
}
/* If interleaved over more than 1 socket. */
if (intlv_num_sockets) {
socket_id_shift = (ctx.tmp >> 28) & 0xF;
socket_id_mask = (ctx.tmp >> 16) & 0xFF;
cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
}
/*
* The pre-interleaved address consists of XXXXXXIIIYYYYY
* where III is the ID for this CS, and XXXXXXYYYYY are the
* address bits from the post-interleaved address.
* "num_intlv_bits" has been calculated to tell us how many "I"
* bits there are. "intlv_addr_bit" tells us how many "Y" bits
* there are (where "I" starts).
*/
temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
temp_addr_i = (cs_id << intlv_addr_bit);
temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
}
/* Add dram base address */
ctx.ret_addr += dram_base_addr;
/* If legacy MMIO hole enabled */
if (lgcy_mmio_hole_en) {
if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
goto out_err;
dram_hole_base = ctx.tmp & GENMASK(31, 24);
if (ctx.ret_addr >= dram_hole_base)
ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
}
if (hash_enabled) {
/* Save some parentheses and grab ls-bit at the end. */
hashed_bit = (ctx.ret_addr >> 12) ^
(ctx.ret_addr >> 18) ^
(ctx.ret_addr >> 21) ^
(ctx.ret_addr >> 30) ^
cs_id;
hashed_bit &= BIT(0);
if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
ctx.ret_addr ^= BIT(intlv_addr_bit);
}
	/* Is the calculated system address above the DRAM limit address? */
if (ctx.ret_addr > dram_limit_addr)
goto out_err;
*sys_addr = ctx.ret_addr;
return 0;
out_err:
return -EINVAL;
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
{
unsigned long edac_cap = EDAC_FLAG_NONE;
u8 bit;
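	/* The DIMM ECC-capable flag sits at DCLR bit 19 on revF and later, bit 17 before that. */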
bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
? 19
: 17;
if (pvt->dclr0 & BIT(bit))
edac_cap = EDAC_FLAG_SECDED;
return edac_cap;
}
static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
{
u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
unsigned long edac_cap = EDAC_FLAG_NONE;
for_each_umc(i) {
if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
continue;
umc_en_mask |= BIT(i);
/* UMC Configuration bit 12 (DimmEccEn) */
if (pvt->umc[i].umc_cfg & BIT(12))
dimm_ecc_en_mask |= BIT(i);
}
if (umc_en_mask == dimm_ecc_en_mask)
edac_cap = EDAC_FLAG_SECDED;
return edac_cap;
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and their
* CSROWs
*/
static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
int dimm, size0, size1;
if (pvt->fam == 0xf) {
/* K8 families < revF not supported yet */
if (pvt->ext_model < K8_REV_F)
return;
WARN_ON(ctrl != 0);
}
if (pvt->fam == 0x10) {
dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
: pvt->dbam0;
dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
pvt->csels[1].csbases :
pvt->csels[0].csbases;
} else if (ctrl) {
dbam = pvt->dbam0;
dcsb = pvt->csels[1].csbases;
}
edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
ctrl, dbam);
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
/* Dump memory sizes for DIMM and its CSROWs */
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
/*
* For F15m60h, we need multiplier for LRDIMM cs_size
* calculation. We pass dimm value to the dbam_to_cs
* mapper so we can find the multiplier from the
* corresponding DCSM.
*/
size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
DBAM_DIMM(dimm, dbam),
dimm);
size1 = 0;
if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
DBAM_DIMM(dimm, dbam),
dimm);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0,
dimm * 2 + 1, size1);
}
}
static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
if (pvt->dram_type == MEM_LRDDR3) {
u32 dcsm = pvt->csels[chan].csmasks[0];
/*
* It's assumed all LRDIMMs in a DCT are going to be of
* same 'type' until proven otherwise. So, use a cs
* value of '0' here to get dcsm value.
*/
edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
}
edac_dbg(1, "All DIMMs support ECC:%s\n",
(dclr & BIT(19)) ? "yes" : "no");
edac_dbg(1, " PAR/ERR parity: %s\n",
(dclr & BIT(8)) ? "enabled" : "disabled");
if (pvt->fam == 0x10)
edac_dbg(1, " DCT 128bit mode width: %s\n",
(dclr & BIT(11)) ? "128b" : "64b");
edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
(dclr & BIT(12)) ? "yes" : "no",
(dclr & BIT(13)) ? "yes" : "no",
(dclr & BIT(14)) ? "yes" : "no",
(dclr & BIT(15)) ? "yes" : "no");
}
#define CS_EVEN_PRIMARY BIT(0)
#define CS_ODD_PRIMARY BIT(1)
#define CS_EVEN_SECONDARY BIT(2)
#define CS_ODD_SECONDARY BIT(3)
#define CS_3R_INTERLEAVE BIT(4)
#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
u8 base, count = 0;
int cs_mode = 0;
if (csrow_enabled(2 * dimm, ctrl, pvt))
cs_mode |= CS_EVEN_PRIMARY;
if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_PRIMARY;
/* Asymmetric dual-rank DIMM support. */
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
/*
	 * 3 Rank interleaving support.
* There should be only three bases enabled and their two masks should
* be equal.
*/
for_each_chip_select(base, ctrl, pvt)
count += csrow_enabled(base, ctrl, pvt);
if (count == 3 &&
pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
edac_dbg(1, "3R interleaving in use.\n");
cs_mode |= CS_3R_INTERLEAVE;
}
return cs_mode;
}
static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
int csrow_nr, int dimm)
{
u32 msb, weight, num_zero_bits;
u32 addr_mask_deinterleaved;
int size = 0;
/*
* The number of zero bits in the mask is equal to the number of bits
* in a full mask minus the number of bits in the current mask.
*
* The MSB is the number of bits in the full mask because BIT[0] is
* always 0.
*
* In the special 3 Rank interleaving case, a single bit is flipped
* without swapping with the most significant bit. This can be handled
* by keeping the MSB where it is and ignoring the single zero bit.
*/
msb = fls(addr_mask_orig) - 1;
weight = hweight_long(addr_mask_orig);
num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
/* Register [31:1] = Address [39:9]. Size is in kBs here. */
size = (addr_mask_deinterleaved >> 2) + 1;
/* Return size in MBs. */
return size >> 10;
}
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
int cs_mask_nr = csrow_nr;
u32 addr_mask_orig;
int dimm, size = 0;
/* No Chip Selects are enabled. */
if (!cs_mode)
return size;
/* Requested size of an even CS but none are enabled. */
if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
return size;
/* Requested size of an odd CS but none are enabled. */
if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
return size;
/*
* Family 17h introduced systems with one mask per DIMM,
* and two Chip Selects per DIMM.
*
* CS0 and CS1 -> MASK0 / DIMM0
* CS2 and CS3 -> MASK1 / DIMM1
*
* Family 19h Model 10h introduced systems with one mask per Chip Select,
* and two Chip Selects per DIMM.
*
* CS0 -> MASK0 -> DIMM0
* CS1 -> MASK1 -> DIMM0
* CS2 -> MASK2 -> DIMM1
* CS3 -> MASK3 -> DIMM1
*
* Keep the mask number equal to the Chip Select number for newer systems,
* and shift the mask number for older systems.
*/
dimm = csrow_nr >> 1;
if (!pvt->flags.zn_regs_v2)
cs_mask_nr >>= 1;
/* Asymmetric dual-rank DIMM support. */
if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
else
addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
}
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, cs0, cs1, cs_mode;
edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
for (dimm = 0; dimm < 2; dimm++) {
cs0 = dimm * 2;
cs1 = dimm * 2 + 1;
cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);
size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
cs1, size1);
}
}
static void umc_dump_misc_regs(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
u32 i, tmp, umc_base;
for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
(umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
amd_smn_read(pvt->mc_node_id,
umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
&tmp);
edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
i, 1 << ((tmp >> 4) & 0x3));
}
umc_debug_display_dimm_sizes(pvt, i);
}
}
static void dct_dump_misc_regs(struct amd64_pvt *pvt)
{
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
edac_dbg(1, " NB two channel DRAM capable: %s\n",
(pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
(pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
(pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
pvt->dhar, dhar_base(pvt),
(pvt->fam == 0xf) ? k8_dhar_offset(pvt)
: f10_dhar_offset(pvt));
dct_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (pvt->fam == 0xf)
return;
dct_debug_display_dimm_sizes(pvt, 1);
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
* See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
static void dct_prep_chip_selects(struct amd64_pvt *pvt)
{
if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
} else {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
static void umc_prep_chip_selects(struct amd64_pvt *pvt)
{
int umc;
for_each_umc(umc) {
pvt->csels[umc].b_cnt = 4;
pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
}
}
static void umc_read_base_mask(struct amd64_pvt *pvt)
{
u32 umc_base_reg, umc_base_reg_sec;
u32 umc_mask_reg, umc_mask_reg_sec;
u32 base_reg, base_reg_sec;
u32 mask_reg, mask_reg_sec;
u32 *base, *base_sec;
u32 *mask, *mask_sec;
int cs, umc;
for_each_umc(umc) {
umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
for_each_chip_select(cs, umc, pvt) {
base = &pvt->csels[umc].csbases[cs];
base_sec = &pvt->csels[umc].csbases_sec[cs];
base_reg = umc_base_reg + (cs * 4);
base_reg_sec = umc_base_reg_sec + (cs * 4);
if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *base, base_reg);
if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *base_sec, base_reg_sec);
}
umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
for_each_chip_select_mask(cs, umc, pvt) {
mask = &pvt->csels[umc].csmasks[cs];
mask_sec = &pvt->csels[umc].csmasks_sec[cs];
mask_reg = umc_mask_reg + (cs * 4);
mask_reg_sec = umc_mask_reg_sec + (cs * 4);
if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *mask, mask_reg);
if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *mask_sec, mask_reg_sec);
}
}
}
/*
* Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
static void dct_read_base_mask(struct amd64_pvt *pvt)
{
int cs;
for_each_chip_select(cs, 0, pvt) {
int reg0 = DCSB0 + (cs * 4);
int reg1 = DCSB1 + (cs * 4);
u32 *base0 = &pvt->csels[0].csbases[cs];
u32 *base1 = &pvt->csels[1].csbases[cs];
if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
cs, *base0, reg0);
if (pvt->fam == 0xf)
continue;
if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
cs, *base1, (pvt->fam == 0x10) ? reg1
: reg0);
}
for_each_chip_select_mask(cs, 0, pvt) {
int reg0 = DCSM0 + (cs * 4);
int reg1 = DCSM1 + (cs * 4);
u32 *mask0 = &pvt->csels[0].csmasks[cs];
u32 *mask1 = &pvt->csels[1].csmasks[cs];
if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
cs, *mask0, reg0);
if (pvt->fam == 0xf)
continue;
if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
cs, *mask1, (pvt->fam == 0x10) ? reg1
: reg0);
}
}
static void umc_determine_memory_type(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
u32 i;
for_each_umc(i) {
umc = &pvt->umc[i];
if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
umc->dram_type = MEM_EMPTY;
continue;
}
/*
* Check if the system supports the "DDR Type" field in UMC Config
* and has DDR5 DIMMs in use.
*/
if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
if (umc->dimm_cfg & BIT(5))
umc->dram_type = MEM_LRDDR5;
else if (umc->dimm_cfg & BIT(4))
umc->dram_type = MEM_RDDR5;
else
umc->dram_type = MEM_DDR5;
} else {
if (umc->dimm_cfg & BIT(5))
umc->dram_type = MEM_LRDDR4;
else if (umc->dimm_cfg & BIT(4))
umc->dram_type = MEM_RDDR4;
else
umc->dram_type = MEM_DDR4;
}
edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
}
}
static void dct_determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
switch (pvt->fam) {
case 0xf:
if (pvt->ext_model >= K8_REV_F)
goto ddr3;
pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
return;
case 0x10:
if (pvt->dchr0 & DDR3_MODE)
goto ddr3;
pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
return;
case 0x15:
if (pvt->model < 0x60)
goto ddr3;
/*
* Model 0x60h needs special handling:
*
* We use a Chip Select value of '0' to obtain dcsm.
* Theoretically, it is possible to populate LRDIMMs of different
* 'Rank' value on a DCT. But this is not the common case. So,
* it's reasonable to assume all DIMMs are going to be of same
* 'type' until proven otherwise.
*/
amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
dcsm = pvt->csels[0].csmasks[0];
if (((dram_ctrl >> 8) & 0x7) == 0x2)
pvt->dram_type = MEM_DDR4;
else if (pvt->dclr0 & BIT(16))
pvt->dram_type = MEM_DDR3;
else if (dcsm & 0x3)
pvt->dram_type = MEM_LRDDR3;
else
pvt->dram_type = MEM_RDDR3;
return;
case 0x16:
goto ddr3;
default:
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
}
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
return;
ddr3:
pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
u16 mce_nid = topology_die_id(m->extcpu);
struct mem_ctl_info *mci;
u8 start_bit = 1;
u8 end_bit = 47;
u64 addr;
mci = edac_mc_find(mce_nid);
if (!mci)
return 0;
pvt = mci->pvt_info;
if (pvt->fam == 0xf) {
start_bit = 3;
end_bit = 39;
}
addr = m->addr & GENMASK_ULL(end_bit, start_bit);
/*
* Erratum 637 workaround
*/
if (pvt->fam == 0x15) {
u64 cc6_base, tmp_addr;
u32 tmp;
u8 intlv_en;
if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
return addr;
amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
intlv_en = tmp >> 21 & 0x7;
/* add [47:27] + 3 trailing bits */
cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
/* reverse and add DramIntlvEn */
cc6_base |= intlv_en ^ 0x7;
/* pin at [47:24] */
cc6_base <<= 24;
if (!intlv_en)
return cc6_base | (addr & GENMASK_ULL(23, 0));
amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
/* faster log2 */
tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
/* OR DramIntlvSel into bits [14:12] */
tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
/* add remaining [11:0] bits from original MC4_ADDR */
tmp_addr |= addr & GENMASK_ULL(11, 0);
return cc6_base | tmp_addr;
}
return addr;
}
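/*
 * Find the PCI device with the given vendor/device ID that sits in the same
 * domain, on the same bus and in the same slot as @related, i.e. another
 * function of the same northbridge device.
 */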
static struct pci_dev *pci_get_related_function(unsigned int vendor,
unsigned int device,
struct pci_dev *related)
{
struct pci_dev *dev = NULL;
while ((dev = pci_get_device(vendor, device, dev))) {
if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
(dev->bus->number == related->bus->number) &&
(PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
break;
}
return dev;
}
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
struct amd_northbridge *nb;
struct pci_dev *f1 = NULL;
unsigned int pci_func;
int off = range << 3;
u32 llim;
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
if (pvt->fam == 0xf)
return;
if (!dram_rw(pvt, range))
return;
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
/* F15h: factor in CC6 save area by reading dst node's limit reg */
if (pvt->fam != 0x15)
return;
nb = node_to_amd_nb(dram_dst_node(pvt, range));
if (WARN_ON(!nb))
return;
if (pvt->model == 0x60)
pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
else if (pvt->model == 0x30)
pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
else
pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
if (WARN_ON(!f1))
return;
amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
/* {[39:27],111b} */
pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
/* [47:40] */
pvt->ranges[range].lim.hi |= llim >> 13;
pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *err)
{
struct amd64_pvt *pvt = mci->pvt_info;
error_address_to_page_and_offset(sys_addr, err);
/*
* Find out which node the error address belongs to. This may be
* different from the node that detected the error.
*/
err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
if (!err->src_mci) {
amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
(unsigned long)sys_addr);
err->err_code = ERR_NODE;
return;
}
/* Now map the sys_addr to a CSROW */
err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
if (err->csrow < 0) {
err->err_code = ERR_CSROW;
return;
}
/* CHIPKILL enabled */
if (pvt->nbcfg & NBCFG_CHIPKILL) {
err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
if (err->channel < 0) {
/*
* Syndrome didn't map, so we don't know which of the
* 2 DIMMs is in error. So we need to ID 'both' of them
* as suspect.
*/
amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
"possible error reporting race\n",
err->syndrome);
err->err_code = ERR_CHANNEL;
return;
}
} else {
/*
* non-chipkill ecc mode
*
* The k8 documentation is unclear about how to determine the
* channel number when using non-chipkill memory. This method
* was obtained from email communication with someone at AMD.
* (Wish the email was placed in this comment - norsk)
*/
err->channel = ((sys_addr & BIT(3)) != 0);
}
}
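/*
 * Map a DBAM cs_mode encoding to the corresponding DDR2 chip select size in
 * MB. The size doubles when the DCT runs in 128-bit mode (dct_width).
 */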
static int ddr2_cs_size(unsigned i, bool dct_width)
{
unsigned shift = 0;
if (i <= 2)
shift = i;
else if (!(i & 0x1))
shift = i >> 1;
else
shift = (i + 1) >> 1;
return 128 << (shift + !!dct_width);
}
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
unsigned cs_mode, int cs_mask_nr)
{
u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
if (pvt->ext_model >= K8_REV_F) {
WARN_ON(cs_mode > 11);
return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
else if (pvt->ext_model >= K8_REV_D) {
unsigned diff;
WARN_ON(cs_mode > 10);
/*
* the below calculation, besides trying to win an obfuscated C
* contest, maps cs_mode values to DIMM chip select sizes. The
* mappings are:
*
* cs_mode CS size (mb)
* ======= ============
* 0 32
* 1 64
* 2 128
* 3 128
* 4 256
* 5 512
* 6 256
* 7 512
* 8 1024
* 9 1024
* 10 2048
*
* Basically, it calculates a value with which to shift the
* smallest CS size of 32MB.
*
* ddr[23]_cs_size have a similar purpose.
*/
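/* e.g. cs_mode == 7: diff = 7/3 + 1 = 3, so 32 << (7 - 3) = 512MB */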
diff = cs_mode/3 + (unsigned)(cs_mode > 5);
return 32 << (cs_mode - diff);
}
else {
WARN_ON(cs_mode > 6);
return 32 << cs_mode;
}
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
unsigned shift = 0;
int cs_size = 0;
if (i == 0 || i == 3 || i == 4)
cs_size = -1;
else if (i <= 2)
shift = i;
else if (i == 12)
shift = 7;
else if (!(i & 0x1))
shift = i >> 1;
else
shift = (i + 1) >> 1;
if (cs_size != -1)
cs_size = (128 * (1 << !!dct_width)) << shift;
return cs_size;
}
static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
unsigned shift = 0;
int cs_size = 0;
if (i < 4 || i == 6)
cs_size = -1;
else if (i == 12)
shift = 7;
else if (!(i & 0x1))
shift = i >> 1;
else
shift = (i + 1) >> 1;
if (cs_size != -1)
cs_size = rank_multiply * (128 << shift);
return cs_size;
}
static int ddr4_cs_size(unsigned i)
{
int cs_size = 0;
if (i == 0)
cs_size = -1;
else if (i == 1)
cs_size = 1024;
else
/* Min cs_size = 1G */
cs_size = 1024 * (1 << (i >> 1));
return cs_size;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
unsigned cs_mode, int cs_mask_nr)
{
u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
WARN_ON(cs_mode > 11);
if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
else
return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
* F15h supports only 64bit DCT interfaces
*/
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
unsigned cs_mode, int cs_mask_nr)
{
WARN_ON(cs_mode > 12);
return ddr3_cs_size(cs_mode, false);
}
/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
unsigned cs_mode, int cs_mask_nr)
{
int cs_size;
u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
WARN_ON(cs_mode > 12);
if (pvt->dram_type == MEM_DDR4) {
if (cs_mode > 9)
return -1;
cs_size = ddr4_cs_size(cs_mode);
} else if (pvt->dram_type == MEM_LRDDR3) {
unsigned rank_multiply = dcsm & 0xf;
if (rank_multiply == 3)
rank_multiply = 4;
cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
} else {
/* Minimum cs size is 512MB for F15h M60h */
if (cs_mode == 0x1)
return -1;
cs_size = ddr3_cs_size(cs_mode, false);
}
return cs_size;
}
/*
* F16h and F15h model 30h have only limited cs_modes.
*/
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
unsigned cs_mode, int cs_mask_nr)
{
WARN_ON(cs_mode > 12);
if (cs_mode == 6 || cs_mode == 8 ||
cs_mode == 9 || cs_mode == 12)
return -1;
else
return ddr3_cs_size(cs_mode, false);
}
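/*
 * Read the DCT select low/high configuration registers and dump the DCT
 * operating mode (ganged/unganged, high range, interleaving) for debugging.
 * K8 (family 0xf) has no such registers.
 */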
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
if (pvt->fam == 0xf)
return;
if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
edac_dbg(0, " DCTs operate in %s mode\n",
(dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
edac_dbg(0, " Address range split per DCT: %s\n",
(dct_high_range_enabled(pvt) ? "yes" : "no"));
edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
(dct_memory_cleared(pvt) ? "yes" : "no"));
edac_dbg(0, " channel interleave: %s, "
"interleave bits selector: 0x%x\n",
(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
dct_sel_interleave_addr(pvt));
}
amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
* Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
* 2.10.12 Memory Interleaving Modes).
*/
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
u8 intlv_en, int num_dcts_intlv,
u32 dct_sel)
{
u8 channel = 0;
u8 select;
if (!(intlv_en))
return (u8)(dct_sel);
if (num_dcts_intlv == 2) {
select = (sys_addr >> 8) & 0x3;
channel = select ? 0x3 : 0;
} else if (num_dcts_intlv == 4) {
u8 intlv_addr = dct_sel_interleave_addr(pvt);
switch (intlv_addr) {
case 0x4:
channel = (sys_addr >> 8) & 0x3;
break;
case 0x5:
channel = (sys_addr >> 9) & 0x3;
break;
}
}
return channel;
}
/*
* Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
* Interleaving Modes.
*/
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
bool hi_range_sel, u8 intlv_en)
{
u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
if (dct_ganging_enabled(pvt))
return 0;
if (hi_range_sel)
return dct_sel_high;
/*
* see F2x110[DctSelIntLvAddr] - channel interleave mode
*/
if (dct_interleave_enabled(pvt)) {
u8 intlv_addr = dct_sel_interleave_addr(pvt);
/* return DCT select function: 0=DCT0, 1=DCT1 */
if (!intlv_addr)
return sys_addr >> 6 & 1;
if (intlv_addr & 0x2) {
u8 shift = intlv_addr & 0x1 ? 9 : 6;
u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
return ((sys_addr >> shift) & 1) ^ temp;
}
if (intlv_addr & 0x4) {
u8 shift = intlv_addr & 0x1 ? 9 : 8;
return (sys_addr >> shift) & 1;
}
return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
}
if (dct_high_range_enabled(pvt))
return ~dct_sel_high & 1;
return 0;
}
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 sys_addr, bool hi_rng,
u32 dct_sel_base_addr)
{
u64 chan_off;
u64 dram_base = get_dram_base(pvt, range);
u64 hole_off = f10_dhar_offset(pvt);
u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
if (hi_rng) {
/*
* if
* base address of high range is below 4Gb
* (bits [47:27] at [31:11])
* DRAM address space on this DCT is hoisted above 4Gb &&
* sys_addr > 4Gb
*
* remove hole offset from sys_addr
* else
* remove high range offset from sys_addr
*/
if ((!(dct_sel_base_addr >> 16) ||
dct_sel_base_addr < dhar_base(pvt)) &&
dhar_valid(pvt) &&
(sys_addr >= BIT_64(32)))
chan_off = hole_off;
else
chan_off = dct_sel_base_off;
} else {
/*
* if
* we have a valid hole &&
* sys_addr > 4Gb
*
* remove hole
* else
* remove dram base to normalize to DCT address
*/
if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
chan_off = hole_off;
else
chan_off = dram_base;
}
return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
}
/*
* Check whether the csrow passed in is marked as SPARED; if so, return the
* new spare row.
*/
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
int tmp_cs;
if (online_spare_swap_done(pvt, dct) &&
csrow == online_spare_bad_dramcs(pvt, dct)) {
for_each_chip_select(tmp_cs, dct, pvt) {
if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
csrow = tmp_cs;
break;
}
}
}
return csrow;
}
/*
* Iterate over the DRAM DCT "base" and "mask" registers looking for a
* SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
*
* Return:
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
u64 cs_base, cs_mask;
int cs_found = -EINVAL;
int csrow;
mci = edac_mc_find(nid);
if (!mci)
return cs_found;
pvt = mci->pvt_info;
edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
for_each_chip_select(csrow, dct, pvt) {
if (!csrow_enabled(csrow, dct, pvt))
continue;
get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
csrow, cs_base, cs_mask);
cs_mask = ~cs_mask;
edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
(in_addr & cs_mask), (cs_base & cs_mask));
if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
if (pvt->fam == 0x15 && pvt->model >= 0x30) {
cs_found = csrow;
break;
}
cs_found = f10_process_possible_spare(pvt, dct, csrow);
edac_dbg(1, " MATCH csrow=%d\n", cs_found);
break;
}
}
return cs_found;
}
/*
* See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
* swapped with a region located at the bottom of memory so that the GPU can use
* the interleaved region and thus two channels.
*/
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
if (pvt->fam == 0x10) {
/* only revC3 and revE have that feature */
if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
return sys_addr;
}
amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
if (!(swap_reg & 0x1))
return sys_addr;
swap_base = (swap_reg >> 3) & 0x7f;
swap_limit = (swap_reg >> 11) & 0x7f;
rgn_size = (swap_reg >> 20) & 0x7f;
tmp_addr = sys_addr >> 27;
if (!(sys_addr >> 34) &&
(((tmp_addr >= swap_base) &&
(tmp_addr <= swap_limit)) ||
(tmp_addr < rgn_size)))
return sys_addr ^ (u64)swap_base << 27;
return sys_addr;
}
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
u64 sys_addr, int *chan_sel)
{
int cs_found = -EINVAL;
u64 chan_addr;
u32 dct_sel_base;
u8 channel;
bool high_range = false;
u8 node_id = dram_dst_node(pvt, range);
u8 intlv_en = dram_intlv_en(pvt, range);
u32 intlv_sel = dram_intlv_sel(pvt, range);
edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
range, sys_addr, get_dram_limit(pvt, range));
if (dhar_valid(pvt) &&
dhar_base(pvt) <= sys_addr &&
sys_addr < BIT_64(32)) {
amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
sys_addr);
return -EINVAL;
}
if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
return -EINVAL;
sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
dct_sel_base = dct_sel_baseaddr(pvt);
/*
* check whether addresses >= DctSelBaseAddr[47:27] are to be used to
* select between DCT0 and DCT1.
*/
if (dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt) &&
((sys_addr >> 27) >= (dct_sel_base >> 11)))
high_range = true;
channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
high_range, dct_sel_base);
/* Remove node interleaving, see F1x120 */
if (intlv_en)
chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
(chan_addr & 0xfff);
/* remove channel interleave */
if (dct_interleave_enabled(pvt) &&
!dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt)) {
if (dct_sel_interleave_addr(pvt) != 1) {
if (dct_sel_interleave_addr(pvt) == 0x3)
/* hash 9 */
chan_addr = ((chan_addr >> 10) << 9) |
(chan_addr & 0x1ff);
else
/* A[6] or hash 6 */
chan_addr = ((chan_addr >> 7) << 6) |
(chan_addr & 0x3f);
} else
/* A[12] */
chan_addr = ((chan_addr >> 13) << 12) |
(chan_addr & 0xfff);
}
edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
if (cs_found >= 0)
*chan_sel = channel;
return cs_found;
}
static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
u64 sys_addr, int *chan_sel)
{
int cs_found = -EINVAL;
int num_dcts_intlv = 0;
u64 chan_addr, chan_offset;
u64 dct_base, dct_limit;
u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
u64 dhar_offset = f10_dhar_offset(pvt);
u8 intlv_addr = dct_sel_interleave_addr(pvt);
u8 node_id = dram_dst_node(pvt, range);
u8 intlv_en = dram_intlv_en(pvt, range);
amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
range, sys_addr, get_dram_limit(pvt, range));
if (!(get_dram_base(pvt, range) <= sys_addr) &&
!(get_dram_limit(pvt, range) >= sys_addr))
return -EINVAL;
if (dhar_valid(pvt) &&
dhar_base(pvt) <= sys_addr &&
sys_addr < BIT_64(32)) {
amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
sys_addr);
return -EINVAL;
}
/* Verify sys_addr is within DCT Range. */
dct_base = (u64) dct_sel_baseaddr(pvt);
dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
if (!(dct_cont_base_reg & BIT(0)) &&
!(dct_base <= (sys_addr >> 27) &&
dct_limit >= (sys_addr >> 27)))
return -EINVAL;
/* Verify number of dct's that participate in channel interleaving. */
num_dcts_intlv = (int) hweight8(intlv_en);
if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
return -EINVAL;
if (pvt->model >= 0x60)
channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
else
channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
num_dcts_intlv, dct_sel);
/* Verify we stay within the MAX number of channels allowed */
if (channel > 3)
return -EINVAL;
leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
/* Get normalized DCT addr */
if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
chan_offset = dhar_offset;
else
chan_offset = dct_base << 27;
chan_addr = sys_addr - chan_offset;
/* remove channel interleave */
if (num_dcts_intlv == 2) {
if (intlv_addr == 0x4)
chan_addr = ((chan_addr >> 9) << 8) |
(chan_addr & 0xff);
else if (intlv_addr == 0x5)
chan_addr = ((chan_addr >> 10) << 9) |
(chan_addr & 0x1ff);
else
return -EINVAL;
} else if (num_dcts_intlv == 4) {
if (intlv_addr == 0x4)
chan_addr = ((chan_addr >> 10) << 8) |
(chan_addr & 0xff);
else if (intlv_addr == 0x5)
chan_addr = ((chan_addr >> 11) << 9) |
(chan_addr & 0x1ff);
else
return -EINVAL;
}
if (dct_offset_en) {
amd64_read_pci_cfg(pvt->F1,
DRAM_CONT_HIGH_OFF + (int) channel * 4,
&tmp);
chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
}
f15h_select_dct(pvt, channel);
edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
/*
* Find Chip select:
* if channel = 3, then alias it to 1. This is because, in F15 M30h,
* there is support for 4 DCT's, but only 2 are currently functional.
* They are DCT0 and DCT3. But we have read all registers of DCT3 into
* pvt->csels[1]. So we need to use '1' here to get correct info.
* Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
*/
alias_channel = (channel == 3) ? 1 : channel;
cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
if (cs_found >= 0)
*chan_sel = alias_channel;
return cs_found;
}
static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
u64 sys_addr,
int *chan_sel)
{
int cs_found = -EINVAL;
unsigned range;
for (range = 0; range < DRAM_RANGES; range++) {
if (!dram_rw(pvt, range))
continue;
if (pvt->fam == 0x15 && pvt->model >= 0x30)
cs_found = f15_m30h_match_to_this_node(pvt, range,
sys_addr,
chan_sel);
else if ((get_dram_base(pvt, range) <= sys_addr) &&
(get_dram_limit(pvt, range) >= sys_addr)) {
cs_found = f1x_match_to_this_node(pvt, range,
sys_addr, chan_sel);
if (cs_found >= 0)
break;
}
}
return cs_found;
}
/*
* For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
* a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
*
* The @sys_addr is usually an error address received from the hardware
* (MCX_ADDR).
*/
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *err)
{
struct amd64_pvt *pvt = mci->pvt_info;
error_address_to_page_and_offset(sys_addr, err);
err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
if (err->csrow < 0) {
err->err_code = ERR_CSROW;
return;
}
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
* this point.
*/
if (dct_ganging_enabled(pvt))
err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
* uses those to find the symbol in error and thus the DIMM.
*
* Algorithm courtesy of Ross LaFetra from AMD.
*/
static const u16 x4_vectors[] = {
0x2f57, 0x1afe, 0x66cc, 0xdd88,
0x11eb, 0x3396, 0x7f4c, 0xeac8,
0x0001, 0x0002, 0x0004, 0x0008,
0x1013, 0x3032, 0x4044, 0x8088,
0x106b, 0x30d6, 0x70fc, 0xe0a8,
0x4857, 0xc4fe, 0x13cc, 0x3288,
0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
0x1f39, 0x251e, 0xbd6c, 0x6bd8,
0x15c1, 0x2a42, 0x89ac, 0x4758,
0x2b03, 0x1602, 0x4f0c, 0xca08,
0x1f07, 0x3a0e, 0x6b04, 0xbd08,
0x8ba7, 0x465e, 0x244c, 0x1cc8,
0x2b87, 0x164e, 0x642c, 0xdc18,
0x40b9, 0x80de, 0x1094, 0x20e8,
0x27db, 0x1eb6, 0x9dac, 0x7b58,
0x11c1, 0x2242, 0x84ac, 0x4c58,
0x1be5, 0x2d7a, 0x5e34, 0xa718,
0x4b39, 0x8d1e, 0x14b4, 0x28d8,
0x4c97, 0xc87e, 0x11fc, 0x33a8,
0x8e97, 0x497e, 0x2ffc, 0x1aa8,
0x16b3, 0x3d62, 0x4f34, 0x8518,
0x1e2f, 0x391a, 0x5cac, 0xf858,
0x1d9f, 0x3b7a, 0x572c, 0xfe18,
0x15f5, 0x2a5a, 0x5264, 0xa3b8,
0x1dbb, 0x3b66, 0x715c, 0xe3f8,
0x4397, 0xc27e, 0x17fc, 0x3ea8,
0x1617, 0x3d3e, 0x6464, 0xb8b8,
0x23ff, 0x12aa, 0xab6c, 0x56d8,
0x2dfb, 0x1ba6, 0x913c, 0x7328,
0x185d, 0x2ca6, 0x7914, 0x9e28,
0x171b, 0x3e36, 0x7d7c, 0xebe8,
0x4199, 0x82ee, 0x19f4, 0x2e58,
0x4807, 0xc40e, 0x130c, 0x3208,
0x1905, 0x2e0a, 0x5804, 0xac08,
0x213f, 0x132a, 0xadfc, 0x5ba8,
0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
static const u16 x8_vectors[] = {
0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
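/*
 * Reduce @syndrome against each group of @v_dim eigenvectors: matching vector
 * components are XORed out bit by bit. The group which reduces the syndrome
 * to zero identifies the symbol in error; return its index, or -1 if no group
 * matches.
 */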
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
unsigned v_dim)
{
unsigned int i, err_sym;
for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
u16 s = syndrome;
unsigned v_idx = err_sym * v_dim;
unsigned v_end = (err_sym + 1) * v_dim;
/* walk over all 16 bits of the syndrome */
for (i = 1; i < (1U << 16); i <<= 1) {
/* if bit is set in that eigenvector... */
if (v_idx < v_end && vectors[v_idx] & i) {
u16 ev_comp = vectors[v_idx++];
/* ... and bit set in the modified syndrome, */
if (s & i) {
/* remove it. */
s ^= ev_comp;
if (!s)
return err_sym;
}
} else if (s & i)
/* can't get to zero, move to next symbol */
break;
}
}
edac_dbg(0, "syndrome(%x) not found\n", syndrome);
return -1;
}
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
if (sym_size == 4)
switch (err_sym) {
case 0x20:
case 0x21:
return 0;
case 0x22:
case 0x23:
return 1;
default:
return err_sym >> 4;
}
/* x8 symbols */
else
switch (err_sym) {
/* imaginary bits not in a DIMM */
case 0x10:
WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
err_sym);
return -1;
case 0x11:
return 0;
case 0x12:
return 1;
default:
return err_sym >> 3;
}
return -1;
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
struct amd64_pvt *pvt = mci->pvt_info;
int err_sym = -1;
if (pvt->ecc_sym_sz == 8)
err_sym = decode_syndrome(syndrome, x8_vectors,
ARRAY_SIZE(x8_vectors),
pvt->ecc_sym_sz);
else if (pvt->ecc_sym_sz == 4)
err_sym = decode_syndrome(syndrome, x4_vectors,
ARRAY_SIZE(x4_vectors),
pvt->ecc_sym_sz);
else {
amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
return err_sym;
}
return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
u8 ecc_type)
{
enum hw_event_mc_err_type err_type;
const char *string;
if (ecc_type == 2)
err_type = HW_EVENT_ERR_CORRECTED;
else if (ecc_type == 1)
err_type = HW_EVENT_ERR_UNCORRECTED;
else if (ecc_type == 3)
err_type = HW_EVENT_ERR_DEFERRED;
else {
WARN(1, "Something is rotten in the state of Denmark.\n");
return;
}
switch (err->err_code) {
case DECODE_OK:
string = "";
break;
case ERR_NODE:
string = "Failed to map error addr to a node";
break;
case ERR_CSROW:
string = "Failed to map error addr to a csrow";
break;
case ERR_CHANNEL:
string = "Unknown syndrome - possible error reporting race";
break;
case ERR_SYND:
string = "MCA_SYND not valid - unknown syndrome and csrow";
break;
case ERR_NORM_ADDR:
string = "Cannot decode normalized address";
break;
default:
string = "WTF error";
break;
}
edac_mc_handle_error(err_type, mci, 1,
err->page, err->offset, err->syndrome,
err->csrow, err->channel, -1,
string, "");
}
static inline void decode_bus_error(int node_id, struct mce *m)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
u8 ecc_type = (m->status >> 45) & 0x3;
u8 xec = XEC(m->status, 0x1f);
u16 ec = EC(m->status);
u64 sys_addr;
struct err_info err;
mci = edac_mc_find(node_id);
if (!mci)
return;
pvt = mci->pvt_info;
/* Bail out early if this was an 'observed' error */
if (PP(ec) == NBSL_PP_OBS)
return;
/* Do only ECC errors */
if (xec && xec != F10_NBSL_EXT_ERR_ECC)
return;
memset(&err, 0, sizeof(err));
sys_addr = get_error_address(pvt, m);
if (ecc_type == 2)
err.syndrome = extract_syndrome(m->status);
pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
__log_ecc_error(mci, &err, ecc_type);
}
/*
* To find the UMC channel represented by this bank we need to match on its
* instance_id. The instance_id of a bank is held in the lower 32 bits of its
* IPID.
*
* Currently, we can derive the channel number by looking at the 6th nibble in
* the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
* number.
*
* For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
* the MCA_SYND[ErrorInformation] field.
*/
static void umc_get_err_info(struct mce *m, struct err_info *err)
{
err->channel = (m->ipid & GENMASK(31, 0)) >> 20;
err->csrow = m->synd & 0x7;
}
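/*
 * Decode a DRAM ECC error reported through a UMC bank: extract the syndrome,
 * derive channel and chip select, translate the normalized address back to a
 * system address and log the result.
 */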
static void decode_umc_error(int node_id, struct mce *m)
{
u8 ecc_type = (m->status >> 45) & 0x3;
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
struct err_info err;
u64 sys_addr;
node_id = fixup_node_id(node_id, m);
mci = edac_mc_find(node_id);
if (!mci)
return;
pvt = mci->pvt_info;
memset(&err, 0, sizeof(err));
if (m->status & MCI_STATUS_DEFERRED)
ecc_type = 3;
if (!(m->status & MCI_STATUS_SYNDV)) {
err.err_code = ERR_SYND;
goto log_error;
}
if (ecc_type == 2) {
u8 length = (m->synd >> 18) & 0x3f;
if (length)
err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
else
err.err_code = ERR_CHANNEL;
}
pvt->ops->get_err_info(m, &err);
if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
err.err_code = ERR_NORM_ADDR;
goto log_error;
}
error_address_to_page_and_offset(sys_addr, &err);
log_error:
__log_ecc_error(mci, &err, ecc_type);
}
/*
* Use pvt->F3 which contains the F3 CPU PCI device to get the related
* F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
*/
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
/* Reserve the ADDRESS MAP Device */
pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F1) {
edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
return -ENODEV;
}
/* Reserve the DCT Device */
pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
if (!pvt->F2) {
pci_dev_put(pvt->F1);
pvt->F1 = NULL;
edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
return -ENODEV;
}
if (!pci_ctl_dev)
pci_ctl_dev = &pvt->F2->dev;
edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
return 0;
}
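/*
 * Determine the ECC symbol size: x4 by default; F10h revD and later parts can
 * use x8 symbols, indicated by a bit in the extended NB MCA config register.
 */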
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
pvt->ecc_sym_sz = 4;
if (pvt->fam >= 0x10) {
u32 tmp;
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
/* F16h has only DCT0, so no need to read dbam1. */
if (pvt->fam != 0x16)
amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
/* F10h, revD and later can do x8 ECC too. */
if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
pvt->ecc_sym_sz = 8;
}
}
/*
* Retrieve the hardware registers of the memory controller.
*/
static void umc_read_mc_regs(struct amd64_pvt *pvt)
{
u8 nid = pvt->mc_node_id;
struct amd64_umc *umc;
u32 i, umc_base;
/* Read registers from each UMC */
for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
}
}
/*
* Retrieve the hardware registers of the memory controller (this includes the
* 'Address Map' and 'Misc' device regs)
*/
static void dct_read_mc_regs(struct amd64_pvt *pvt)
{
unsigned int range;
u64 msr_val;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
* those are Read-As-Zero.
*/
rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* Check first whether TOP_MEM2 is enabled: */
rdmsrl(MSR_AMD64_SYSCFG, msr_val);
if (msr_val & BIT(21)) {
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else {
edac_dbg(0, " TOP_MEM2 disabled\n");
}
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
read_dram_ctl_register(pvt);
for (range = 0; range < DRAM_RANGES; range++) {
u8 rw;
/* read settings for this DRAM range */
read_dram_base_limit_regs(pvt, range);
rw = dram_rw(pvt, range);
if (!rw)
continue;
edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
range,
get_dram_base(pvt, range),
get_dram_limit(pvt, range));
edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
(rw & 0x1) ? "R" : "-",
(rw & 0x2) ? "W" : "-",
dram_intlv_sel(pvt, range),
dram_dst_node(pvt, range));
}
amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
if (!dct_ganging_enabled(pvt)) {
amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
}
determine_ecc_sym_sz(pvt);
}
/*
* NOTE: CPU Revision Dependent code
*
* Input:
* @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
* k8 private pointer to -->
* DRAM Bank Address mapping register
* node_id
* DCL register where dual_channel_active is
*
* The DBAM register consists of 4 sets of 4 bits each definitions:
*
* Bits: CSROWs
* 0-3 CSROWs 0 and 1
* 4-7 CSROWs 2 and 3
* 8-11 CSROWs 4 and 5
* 12-15 CSROWs 6 and 7
*
* Values range from: 0 to 15
* The meaning of the values depends on CPU revision and dual-channel state,
* see the relevant BKDG for more info.
*
* The memory controller provides for a total of only 8 CSROWs in its current
* architecture. Each "pair" of CSROWs normally represents just one DIMM in
* single channel or two (2) DIMMs in dual channel mode.
*
* The following code logic collapses the various tables for CSROW based on CPU
* revision.
*
* Returns:
* The number of PAGE_SIZE pages on the specified CSROW number it
* encompasses
*
*/
static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
u32 cs_mode, nr_pages;
csrow_nr >>= 1;
cs_mode = DBAM_DIMM(csrow_nr, dbam);
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
csrow_nr, dct, cs_mode);
edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;
}
static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
edac_dbg(0, "csrow: %d, channel: %d, cs_mode %d\n",
csrow_nr_orig, dct, cs_mode);
edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;
}
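/*
 * Initialize the DIMM info for every enabled chip select on each UMC, using
 * the strongest ECC mode advertised in mci->edac_ctl_cap.
 */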
static void umc_init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
enum edac_type edac_mode = EDAC_NONE;
enum dev_type dev_type = DEV_UNKNOWN;
struct dimm_info *dimm;
u8 umc, cs;
if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
edac_mode = EDAC_S16ECD16ED;
dev_type = DEV_X16;
} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
edac_mode = EDAC_S8ECD8ED;
dev_type = DEV_X8;
} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
edac_mode = EDAC_S4ECD4ED;
dev_type = DEV_X4;
} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
edac_mode = EDAC_SECDED;
}
for_each_umc(umc) {
for_each_chip_select(cs, umc, pvt) {
if (!csrow_enabled(cs, umc, pvt))
continue;
dimm = mci->csrows[cs]->channels[umc]->dimm;
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, cs);
dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
dimm->mtype = pvt->umc[umc].dram_type;
dimm->edac_mode = edac_mode;
dimm->dtype = dev_type;
dimm->grain = 64;
}
}
}
/*
* Initialize the array of csrow attribute instances, based on the values
* from pci config hardware registers.
*/
static void dct_init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
enum edac_type edac_mode = EDAC_NONE;
struct csrow_info *csrow;
struct dimm_info *dimm;
int nr_pages = 0;
int i, j;
u32 val;
amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
pvt->mc_node_id, val,
!!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
/*
* We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
*/
for_each_chip_select(i, 0, pvt) {
bool row_dct0 = !!csrow_enabled(i, 0, pvt);
bool row_dct1 = false;
if (pvt->fam != 0xf)
row_dct1 = !!csrow_enabled(i, 1, pvt);
if (!row_dct0 && !row_dct1)
continue;
csrow = mci->csrows[i];
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, i);
if (row_dct0) {
nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
csrow->channels[0]->dimm->nr_pages = nr_pages;
}
/* K8 has only one DCT */
if (pvt->fam != 0xf && row_dct1) {
int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
nr_pages += row_dct1_pages;
}
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
/* Determine DIMM ECC mode: */
if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
? EDAC_S4ECD4ED
: EDAC_SECDED;
}
for (j = 0; j < pvt->max_mcs; j++) {
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
dimm->grain = 64;
}
}
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
int cpu;
for_each_online_cpu(cpu)
if (topology_die_id(cpu) == nid)
cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
cpumask_var_t mask;
int cpu, nbe;
bool ret = false;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
amd64_warn("%s: Error allocating mask\n", __func__);
return false;
}
get_cpus_on_this_dct_cpumask(mask, nid);
rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
for_each_cpu(cpu, mask) {
struct msr *reg = per_cpu_ptr(msrs, cpu);
nbe = reg->l & MSR_MCGCTL_NBE;
edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
cpu, reg->q,
(nbe ? "enabled" : "disabled"));
if (!nbe)
goto out;
}
ret = true;
out:
free_cpumask_var(mask);
return ret;
}
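/*
 * Set or clear the NB MCE reporting bit in MCG_CTL on all cores of node @nid,
 * remembering the previous state so it can be restored on driver teardown.
 */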
static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
cpumask_var_t cmask;
int cpu;
if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
amd64_warn("%s: error allocating mask\n", __func__);
return -ENOMEM;
}
get_cpus_on_this_dct_cpumask(cmask, nid);
rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
for_each_cpu(cpu, cmask) {
struct msr *reg = per_cpu_ptr(msrs, cpu);
if (on) {
if (reg->l & MSR_MCGCTL_NBE)
s->flags.nb_mce_enable = 1;
reg->l |= MSR_MCGCTL_NBE;
} else {
/*
* Turn off NB MCE reporting only when it was off before
*/
if (!s->flags.nb_mce_enable)
reg->l &= ~MSR_MCGCTL_NBE;
}
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
free_cpumask_var(cmask);
return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
bool ret = true;
u32 value, mask = 0x3; /* UECC/CECC enable */
if (toggle_ecc_err_reporting(s, nid, ON)) {
amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
return false;
}
amd64_read_pci_cfg(F3, NBCTL, &value);
s->old_nbctl = value & mask;
s->nbctl_valid = true;
value |= mask;
amd64_write_pci_cfg(F3, NBCTL, value);
amd64_read_pci_cfg(F3, NBCFG, &value);
edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
nid, value, !!(value & NBCFG_ECC_ENABLE));
if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
s->flags.nb_ecc_prev = 0;
/* Attempt to turn on DRAM ECC Enable */
value |= NBCFG_ECC_ENABLE;
amd64_write_pci_cfg(F3, NBCFG, value);
amd64_read_pci_cfg(F3, NBCFG, &value);
if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("Hardware rejected DRAM ECC enable,"
"check memory DIMM configuration.\n");
ret = false;
} else {
amd64_info("Hardware accepted DRAM ECC Enable\n");
}
} else {
s->flags.nb_ecc_prev = 1;
}
edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
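/*
 * Undo the NBCTL/NBCFG changes made by enable_ecc_error_reporting() and
 * restore the original NB MCE reporting state.
 */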
static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
u32 value, mask = 0x3; /* UECC/CECC enable */
if (!s->nbctl_valid)
return;
amd64_read_pci_cfg(F3, NBCTL, &value);
value &= ~mask;
value |= s->old_nbctl;
amd64_write_pci_cfg(F3, NBCTL, value);
/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
if (!s->flags.nb_ecc_prev) {
amd64_read_pci_cfg(F3, NBCFG, &value);
value &= ~NBCFG_ECC_ENABLE;
amd64_write_pci_cfg(F3, NBCFG, value);
}
/* restore the NB Enable MCGCTL bit */
if (toggle_ecc_err_reporting(s, nid, OFF))
amd64_warn("Error restoring NB MCGCTL settings!\n");
}
static bool dct_ecc_enabled(struct amd64_pvt *pvt)
{
u16 nid = pvt->mc_node_id;
bool nb_mce_en = false;
u8 ecc_en = 0;
u32 value;
amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
ecc_en = !!(value & NBCFG_ECC_ENABLE);
nb_mce_en = nb_mce_bank_enabled_on_node(nid);
if (!nb_mce_en)
edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, nid);
edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
if (!ecc_en || !nb_mce_en)
return false;
else
return true;
}
static bool umc_ecc_enabled(struct amd64_pvt *pvt)
{
u8 umc_en_mask = 0, ecc_en_mask = 0;
u16 nid = pvt->mc_node_id;
struct amd64_umc *umc;
u8 ecc_en = 0, i;
for_each_umc(i) {
umc = &pvt->umc[i];
/* Only check enabled UMCs. */
if (!(umc->sdp_ctrl & UMC_SDP_INIT))
continue;
umc_en_mask |= BIT(i);
if (umc->umc_cap_hi & UMC_ECC_ENABLED)
ecc_en_mask |= BIT(i);
}
/* Check whether at least one UMC is enabled: */
if (umc_en_mask)
ecc_en = umc_en_mask == ecc_en_mask;
else
edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
if (!ecc_en)
return false;
else
return true;
}
static inline void
umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
for_each_umc(i) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
}
}
/* Set chipkill only if ECC is enabled: */
if (ecc_en) {
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
if (!cpk_en)
return;
if (dev_x4)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
else if (dev_x16)
mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
else
mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
}
}
static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
if (pvt->nbcap & NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
mci->edac_cap = dct_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = pvt->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
/* memory scrubber interface */
mci->set_sdram_scrub_rate = set_scrub_rate;
mci->get_sdram_scrub_rate = get_scrub_rate;
dct_init_csrows(mci);
}
static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
umc_determine_edac_ctl_cap(mci, pvt);
mci->edac_cap = umc_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = pvt->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
umc_init_csrows(mci);
}
static int dct_hw_info_get(struct amd64_pvt *pvt)
{
int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);
if (ret)
return ret;
dct_prep_chip_selects(pvt);
dct_read_base_mask(pvt);
dct_read_mc_regs(pvt);
dct_determine_memory_type(pvt);
return 0;
}
static int umc_hw_info_get(struct amd64_pvt *pvt)
{
pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc)
return -ENOMEM;
umc_prep_chip_selects(pvt);
umc_read_base_mask(pvt);
umc_read_mc_regs(pvt);
umc_determine_memory_type(pvt);
return 0;
}
/*
* The CPUs have one channel per UMC, so UMC number is equivalent to a
* channel number. The GPUs have 8 channels per UMC, so the UMC number no
* longer works as a channel number.
*
* The channel number within a GPU UMC is given in MCA_IPID[15:12].
* However, the IDs are split such that two UMC values go to one UMC, and
* the channel numbers are split in two groups of four.
*
* Refer to comment on gpu_get_umc_base().
*
* For example,
* UMC0 CH[3:0] = 0x0005[3:0]000
* UMC0 CH[7:4] = 0x0015[3:0]000
* UMC1 CH[3:0] = 0x0025[3:0]000
* UMC1 CH[7:4] = 0x0035[3:0]000
*/
static void gpu_get_err_info(struct mce *m, struct err_info *err)
{
u8 ch = (m->ipid & GENMASK(31, 0)) >> 20;
u8 phy = ((m->ipid >> 12) & 0xf);
err->channel = ch % 2 ? phy + 4 : phy;
err->csrow = phy;
}
static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
}
static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int size, cs_mode, cs = 0;
edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
for_each_chip_select(cs, ctrl, pvt) {
size = gpu_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs);
amd64_info(EDAC_MC ": %d: %5dMB\n", cs, size);
}
}
static void gpu_dump_misc_regs(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
u32 i;
for_each_umc(i) {
umc = &pvt->umc[i];
edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
edac_dbg(1, "UMC%d All HBMs support ECC: yes\n", i);
gpu_debug_display_dimm_sizes(pvt, i);
}
}
static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 nr_pages;
int cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
nr_pages = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
edac_dbg(0, "csrow: %d, channel: %d\n", csrow_nr, dct);
edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;
}
static void gpu_init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
struct dimm_info *dimm;
u8 umc, cs;
for_each_umc(umc) {
for_each_chip_select(cs, umc, pvt) {
if (!csrow_enabled(cs, umc, pvt))
continue;
dimm = mci->csrows[umc]->channels[cs]->dimm;
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, cs);
dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
dimm->edac_mode = EDAC_SECDED;
dimm->mtype = MEM_HBM2;
dimm->dtype = DEV_X16;
dimm->grain = 64;
}
}
}
static void gpu_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_HBM2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_EC;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = pvt->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
gpu_init_csrows(mci);
}
/* ECC is enabled by default on GPU nodes */
static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
{
return true;
}
static inline u32 gpu_get_umc_base(u8 umc, u8 channel)
{
/*
* On CPUs, there is one channel per UMC, so UMC numbering equals
* channel numbering. On GPUs, there are eight channels per UMC,
* so the channel numbering is different from UMC numbering.
*
* On CPU nodes channels are selected in 6th nibble
* UMC chY[3:0]= [(chY*2 + 1) : (chY*2)]50000;
*
* On GPU nodes channels are selected in 3rd nibble
* HBM chX[3:0]= [Y ]5X[3:0]000;
* HBM chX[7:4]= [Y+1]5X[3:0]000
*/
umc *= 2;
if (channel >= 4)
umc++;
return 0x50000 + (umc << 20) + ((channel % 4) << 12);
}
static void gpu_read_mc_regs(struct amd64_pvt *pvt)
{
u8 nid = pvt->mc_node_id;
struct amd64_umc *umc;
u32 i, umc_base;
/* Read registers from each UMC */
for_each_umc(i) {
umc_base = gpu_get_umc_base(i, 0);
umc = &pvt->umc[i];
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
}
}
static void gpu_read_base_mask(struct amd64_pvt *pvt)
{
u32 base_reg, mask_reg;
u32 *base, *mask;
int umc, cs;
for_each_umc(umc) {
for_each_chip_select(cs, umc, pvt) {
base_reg = gpu_get_umc_base(umc, cs) + UMCCH_BASE_ADDR;
base = &pvt->csels[umc].csbases[cs];
if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *base, base_reg);
}
mask_reg = gpu_get_umc_base(umc, cs) + UMCCH_ADDR_MASK;
mask = &pvt->csels[umc].csmasks[cs];
if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *mask, mask_reg);
}
}
}
}
static void gpu_prep_chip_selects(struct amd64_pvt *pvt)
{
int umc;
for_each_umc(umc) {
pvt->csels[umc].b_cnt = 8;
pvt->csels[umc].m_cnt = 8;
}
}
static int gpu_hw_info_get(struct amd64_pvt *pvt)
{
int ret;
ret = gpu_get_node_map();
if (ret)
return ret;
pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc)
return -ENOMEM;
gpu_prep_chip_selects(pvt);
gpu_read_base_mask(pvt);
gpu_read_mc_regs(pvt);
return 0;
}
static void hw_info_put(struct amd64_pvt *pvt)
{
pci_dev_put(pvt->F1);
pci_dev_put(pvt->F2);
kfree(pvt->umc);
}
static struct low_ops umc_ops = {
.hw_info_get = umc_hw_info_get,
.ecc_enabled = umc_ecc_enabled,
.setup_mci_misc_attrs = umc_setup_mci_misc_attrs,
.dump_misc_regs = umc_dump_misc_regs,
.get_err_info = umc_get_err_info,
};
static struct low_ops gpu_ops = {
.hw_info_get = gpu_hw_info_get,
.ecc_enabled = gpu_ecc_enabled,
.setup_mci_misc_attrs = gpu_setup_mci_misc_attrs,
.dump_misc_regs = gpu_dump_misc_regs,
.get_err_info = gpu_get_err_info,
};
/* Use Family 16h versions for defaults and adjust as needed below. */
static struct low_ops dct_ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
.hw_info_get = dct_hw_info_get,
.ecc_enabled = dct_ecc_enabled,
.setup_mci_misc_attrs = dct_setup_mci_misc_attrs,
.dump_misc_regs = dct_dump_misc_regs,
};
static int per_family_init(struct amd64_pvt *pvt)
{
pvt->ext_model = boot_cpu_data.x86_model >> 4;
pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
pvt->fam = boot_cpu_data.x86;
pvt->max_mcs = 2;
/*
* Decide on which ops group to use here and do any family/model
* overrides below.
*/
if (pvt->fam >= 0x17)
pvt->ops = &umc_ops;
else
pvt->ops = &dct_ops;
switch (pvt->fam) {
case 0xf:
pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ?
"K8 revF or later" : "K8 revE or earlier";
pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow;
pvt->ops->dbam_to_cs = k8_dbam_to_chip_select;
break;
case 0x10:
pvt->ctl_name = "F10h";
pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
break;
case 0x15:
switch (pvt->model) {
case 0x30:
pvt->ctl_name = "F15h_M30h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
break;
case 0x60:
pvt->ctl_name = "F15h_M60h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
break;
case 0x13:
/* Richland is only client */
return -ENODEV;
default:
pvt->ctl_name = "F15h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
break;
}
break;
case 0x16:
switch (pvt->model) {
case 0x30:
pvt->ctl_name = "F16h_M30h";
pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
break;
default:
pvt->ctl_name = "F16h";
pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
break;
}
break;
case 0x17:
switch (pvt->model) {
case 0x10 ... 0x2f:
pvt->ctl_name = "F17h_M10h";
break;
case 0x30 ... 0x3f:
pvt->ctl_name = "F17h_M30h";
pvt->max_mcs = 8;
break;
case 0x60 ... 0x6f:
pvt->ctl_name = "F17h_M60h";
break;
case 0x70 ... 0x7f:
pvt->ctl_name = "F17h_M70h";
break;
default:
pvt->ctl_name = "F17h";
break;
}
break;
case 0x18:
pvt->ctl_name = "F18h";
break;
case 0x19:
switch (pvt->model) {
case 0x00 ... 0x0f:
pvt->ctl_name = "F19h";
pvt->max_mcs = 8;
break;
case 0x10 ... 0x1f:
pvt->ctl_name = "F19h_M10h";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
case 0x20 ... 0x2f:
pvt->ctl_name = "F19h_M20h";
break;
case 0x30 ... 0x3f:
if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
pvt->ctl_name = "MI200";
pvt->max_mcs = 4;
pvt->ops = &gpu_ops;
} else {
pvt->ctl_name = "F19h_M30h";
pvt->max_mcs = 8;
}
break;
case 0x50 ... 0x5f:
pvt->ctl_name = "F19h_M50h";
break;
case 0x60 ... 0x6f:
pvt->ctl_name = "F19h_M60h";
pvt->flags.zn_regs_v2 = 1;
break;
case 0x70 ... 0x7f:
pvt->ctl_name = "F19h_M70h";
pvt->flags.zn_regs_v2 = 1;
break;
case 0xa0 ... 0xaf:
pvt->ctl_name = "F19h_MA0h";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
}
break;
case 0x1A:
switch (pvt->model) {
case 0x00 ... 0x1f:
pvt->ctl_name = "F1Ah";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
case 0x40 ... 0x4f:
pvt->ctl_name = "F1Ah_M40h";
pvt->flags.zn_regs_v2 = 1;
break;
}
break;
default:
amd64_err("Unsupported family!\n");
return -ENODEV;
}
return 0;
}
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
&dbg_group,
&inj_group,
#endif
NULL
};
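/*
 * Allocate and register an EDAC memory controller instance for this node.
 * On heterogeneous (GPU) nodes the chip select and channel layer sizes are
 * swapped, as explained below.
 */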
static int init_one_instance(struct amd64_pvt *pvt)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
int ret = -ENOMEM;
/*
* For Heterogeneous family EDAC CHIP_SELECT and CHANNEL layers should
* be swapped to fit into the layers.
*/
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
pvt->max_mcs : pvt->csels[0].b_cnt;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
pvt->csels[0].b_cnt : pvt->max_mcs;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return ret;
mci->pvt_info = pvt;
mci->pdev = &pvt->F3->dev;
pvt->ops->setup_mci_misc_attrs(mci);
ret = -ENODEV;
if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
edac_dbg(1, "failed edac_mc_add_mc()\n");
edac_mc_free(mci);
return ret;
}
return 0;
}
static bool instance_has_memory(struct amd64_pvt *pvt)
{
bool cs_enabled = false;
int cs = 0, dct = 0;
for (dct = 0; dct < pvt->max_mcs; dct++) {
for_each_chip_select(cs, dct, pvt)
cs_enabled |= csrow_enabled(cs, dct, pvt);
}
return cs_enabled;
}
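/*
 * Probe node @nid: allocate the per-node private data, run the family
 * specific setup, read the hardware registers and, provided the node has
 * memory and ECC is (or can be forced) enabled, register an EDAC instance.
 */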
static int probe_one_instance(unsigned int nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct amd64_pvt *pvt = NULL;
struct ecc_settings *s;
int ret;
ret = -ENOMEM;
s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
if (!s)
goto err_out;
ecc_stngs[nid] = s;
pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
if (!pvt)
goto err_settings;
pvt->mc_node_id = nid;
pvt->F3 = F3;
ret = per_family_init(pvt);
if (ret < 0)
goto err_enable;
ret = pvt->ops->hw_info_get(pvt);
if (ret < 0)
goto err_enable;
ret = 0;
if (!instance_has_memory(pvt)) {
amd64_info("Node %d: No DIMMs detected.\n", nid);
goto err_enable;
}
if (!pvt->ops->ecc_enabled(pvt)) {
ret = -ENODEV;
if (!ecc_enable_override)
goto err_enable;
if (boot_cpu_data.x86 >= 0x17) {
amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
goto err_enable;
} else
amd64_warn("Forcing ECC on!\n");
if (!enable_ecc_error_reporting(s, nid, F3))
goto err_enable;
}
ret = init_one_instance(pvt);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
if (boot_cpu_data.x86 < 0x17)
restore_ecc_error_reporting(s, nid, F3);
goto err_enable;
}
amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
/* Display and decode various registers for debug purposes. */
pvt->ops->dump_misc_regs(pvt);
return ret;
err_enable:
hw_info_put(pvt);
kfree(pvt);
err_settings:
kfree(s);
ecc_stngs[nid] = NULL;
err_out:
return ret;
}
static void remove_one_instance(unsigned int nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid];
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&F3->dev);
if (!mci)
return;
pvt = mci->pvt_info;
restore_ecc_error_reporting(s, nid, F3);
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
/* Free the EDAC CORE resources */
mci->pvt_info = NULL;
hw_info_put(pvt);
kfree(pvt);
edac_mc_free(mci);
}
static void setup_pci_device(void)
{
if (pci_ctl)
return;
pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
if (!pci_ctl) {
pr_warn("%s(): Unable to create PCI control\n", __func__);
pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
}
}
static const struct x86_cpu_id amd64_cpuids[] = {
X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x1A, NULL),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
const char *owner;
int err = -ENODEV;
int i;
if (ghes_get_devices())
return -EBUSY;
owner = edac_get_owner();
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (!x86_match_cpu(amd64_cpuids))
return -ENODEV;
if (!amd_nb_num())
return -ENODEV;
opstate_init();
err = -ENOMEM;
ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
if (!ecc_stngs)
goto err_free;
msrs = msrs_alloc();
if (!msrs)
goto err_free;
for (i = 0; i < amd_nb_num(); i++) {
err = probe_one_instance(i);
if (err) {
/* unwind properly */
while (--i >= 0)
remove_one_instance(i);
goto err_pci;
}
}
if (!edac_has_mcs()) {
err = -ENODEV;
goto err_pci;
}
/* register stuff with EDAC MCE */
if (boot_cpu_data.x86 >= 0x17) {
amd_register_ecc_decoder(decode_umc_error);
} else {
amd_register_ecc_decoder(decode_bus_error);
setup_pci_device();
}
#ifdef CONFIG_X86_32
amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif
return 0;
err_pci:
pci_ctl_dev = NULL;
msrs_free(msrs);
msrs = NULL;
err_free:
kfree(ecc_stngs);
ecc_stngs = NULL;
return err;
}
static void __exit amd64_edac_exit(void)
{
int i;
if (pci_ctl)
edac_pci_release_generic_ctl(pci_ctl);
/* unregister from EDAC MCE */
if (boot_cpu_data.x86 >= 0x17)
amd_unregister_ecc_decoder(decode_umc_error);
else
amd_unregister_ecc_decoder(decode_bus_error);
for (i = 0; i < amd_nb_num(); i++)
remove_one_instance(i);
kfree(ecc_stngs);
ecc_stngs = NULL;
pci_ctl_dev = NULL;
msrs_free(msrs);
msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/amd64_edac.c |
/*
* Intel 82975X Memory Controller kernel module
* (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com)
* (C) 2007 jetzbroadband (http://jetzbroadband.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Arvind R.
* Copied from i82875p_edac.c source:
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "i82975x_edac"
#define i82975x_printk(level, fmt, arg...) \
edac_printk(level, "i82975x", fmt, ##arg)
#define i82975x_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_82975_0
#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
#define I82975X_NR_DIMMS 8
#define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans))
/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
*
* 31:7 128 byte cache-line address
* 6:1 reserved
* 0 0: CH0; 1: CH1
*/
#define I82975X_DERRSYN 0x5c /* Dram Error SYNdrome (8b)
*
* 7:0 DRAM ECC Syndrome
*/
#define I82975X_DES 0x5d /* Dram ERRor DeSTination (8b)
* 0h: Processor Memory Reads
* 1h:7h reserved
* More - See Page 65 of Intel DocSheet.
*/
#define I82975X_ERRSTS 0xc8 /* Error Status Register (16b)
*
* 15:12 reserved
* 11 Thermal Sensor Event
* 10 reserved
* 9 non-DRAM lock error (ndlock)
* 8 Refresh Timeout
* 7:2 reserved
* 1 ECC UE (multibit DRAM error)
* 0 ECC CE (singlebit DRAM error)
*/
/* Error Reporting is supported by 3 mechanisms:
1. DMI SERR generation ( ERRCMD )
2. SMI DMI generation ( SMICMD )
3. SCI DMI generation ( SCICMD )
NOTE: Only ONE of the three must be enabled
*/
#define I82975X_ERRCMD 0xca /* Error Command (16b)
*
* 15:12 reserved
* 11 Thermal Sensor Event
* 10 reserved
* 9 non-DRAM lock error (ndlock)
* 8 Refresh Timeout
* 7:2 reserved
* 1 ECC UE (multibit DRAM error)
* 0 ECC CE (singlebit DRAM error)
*/
#define I82975X_SMICMD 0xcc /* Error Command (16b)
*
* 15:2 reserved
* 1 ECC UE (multibit DRAM error)
* 0 ECC CE (singlebit DRAM error)
*/
#define I82975X_SCICMD 0xce /* Error Command (16b)
*
* 15:2 reserved
* 1 ECC UE (multibit DRAM error)
* 0 ECC CE (singlebit DRAM error)
*/
#define I82975X_XEAP 0xfc /* Extended Dram Error Address Pointer (8b)
*
* 7:1 reserved
* 0 Bit32 of the Dram Error Address
*/
#define I82975X_MCHBAR 0x44 /*
*
* 31:14 Base Addr of 16K memory-mapped
* configuration space
* 13:1 reserved
* 0 mem-mapped config space enable
*/
/* NOTE: Following addresses have to indexed using MCHBAR offset (44h, 32b) */
/* Intel 82975x memory mapped register space */
#define I82975X_DRB_SHIFT 25 /* fixed 32MiB grain */
#define I82975X_DRB 0x100 /* DRAM Row Boundary (8b x 8)
*
* 7 set to 1 in highest DRB of
* channel if 4GB in ch.
* 6:2 upper boundary of rank in
* 32MB grains
* 1:0 set to 0
*/
#define I82975X_DRB_CH0R0 0x100
#define I82975X_DRB_CH0R1 0x101
#define I82975X_DRB_CH0R2 0x102
#define I82975X_DRB_CH0R3 0x103
#define I82975X_DRB_CH1R0 0x180
#define I82975X_DRB_CH1R1 0x181
#define I82975X_DRB_CH1R2 0x182
#define I82975X_DRB_CH1R3 0x183
#define I82975X_DRA 0x108 /* DRAM Row Attribute (4b x 8)
* defines the PAGE SIZE to be used
* for the rank
* 7 reserved
* 6:4 row attr of odd rank, i.e. 1
* 3 reserved
* 2:0 row attr of even rank, i.e. 0
*
* 000 = unpopulated
* 001 = reserved
* 010 = 4KiB
* 011 = 8KiB
* 100 = 16KiB
* others = reserved
*/
#define I82975X_DRA_CH0R01 0x108
#define I82975X_DRA_CH0R23 0x109
#define I82975X_DRA_CH1R01 0x188
#define I82975X_DRA_CH1R23 0x189
#define I82975X_BNKARC 0x10e /* Type of device in each rank - Bank Arch (16b)
*
* 15:8 reserved
* 7:6 Rank 3 architecture
* 5:4 Rank 2 architecture
* 3:2 Rank 1 architecture
* 1:0 Rank 0 architecture
*
* 00 => 4 banks
* 01 => 8 banks
*/
#define I82975X_C0BNKARC 0x10e
#define I82975X_C1BNKARC 0x18e
#define I82975X_DRC 0x120 /* DRAM Controller Mode0 (32b)
*
* 31:30 reserved
* 29 init complete
* 28:11 reserved, according to Intel
* 22:21 number of channels
* 00=1 01=2 in 82875
* seems to be ECC mode
* bits in 82975 in Asus
* P5W
* 19:18 Data Integ Mode
* 00=none 01=ECC in 82875
* 10:8 refresh mode
* 7 reserved
* 6:4 mode select
* 3:2 reserved
* 1:0 DRAM type 10=Second Revision
* DDR2 SDRAM
* 00, 01, 11 reserved
*/
#define I82975X_DRC_CH0M0 0x120
#define I82975X_DRC_CH1M0 0x1A0
#define I82975X_DRC_M1 0x124 /* DRAM Controller Mode1 (32b)
* 31 0=Standard Address Map
* 1=Enhanced Address Map
* 30:0 reserved
*/
#define I82975X_DRC_CH0M1 0x124
#define I82975X_DRC_CH1M1 0x1A4
enum i82975x_chips {
I82975X = 0,
};
struct i82975x_pvt {
void __iomem *mch_window;
};
struct i82975x_dev_info {
const char *ctl_name;
};
struct i82975x_error_info {
u16 errsts;
u32 eap;
u8 des;
u8 derrsyn;
u16 errsts2;
u8 chan; /* the channel is bit 0 of EAP */
u8 xeap; /* extended eap bit */
};
static const struct i82975x_dev_info i82975x_devs[] = {
[I82975X] = {
.ctl_name = "i82975x"
},
};
static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code has
					 * already registered a driver
*/
static int i82975x_registered = 1;
static void i82975x_get_error_info(struct mem_ctl_info *mci,
struct i82975x_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
* registers at once and the registers can transition from CE being
* overwritten by UE.
*/
pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts);
pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
pci_read_config_byte(pdev, I82975X_DES, &info->des);
pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn);
pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2);
pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003);
/*
	 * If the error bits are the same for both reads then the first
	 * set of reads is valid.  If they differ, a later error (the UE)
	 * overwrote the CE log, so the detail for the CE is lost and the
	 * second set of reads should be taken as the UE info.
*/
if (!(info->errsts2 & 0x0003))
return;
if ((info->errsts ^ info->errsts2) & 0x0003) {
pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
pci_read_config_byte(pdev, I82975X_DES, &info->des);
pci_read_config_byte(pdev, I82975X_DERRSYN,
&info->derrsyn);
}
}
static int i82975x_process_error_info(struct mem_ctl_info *mci,
struct i82975x_error_info *info, int handle_errors)
{
int row, chan;
unsigned long offst, page;
if (!(info->errsts2 & 0x0003))
return 0;
if (!handle_errors)
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1, "UE overwrote CE", "");
info->errsts = info->errsts2;
}
page = (unsigned long) info->eap;
page >>= 1;
if (info->xeap & 1)
page |= 0x80000000;
page >>= (PAGE_SHIFT - 1);
row = edac_mc_find_csrow_by_page(mci, page);
if (row == -1) {
i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n"
"\tXEAP=%u\n"
"\t EAP=0x%08x\n"
"\tPAGE=0x%08x\n",
(info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
return 0;
}
chan = (mci->csrows[row]->nr_channels == 1) ? 0 : info->eap & 1;
offst = info->eap
& ((1 << PAGE_SHIFT) -
(1 << mci->csrows[row]->channels[chan]->dimm->grain));
if (info->errsts & 0x0002)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, offst, 0,
row, -1, -1,
"i82975x UE", "");
else
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offst, info->derrsyn,
row, chan ? chan : 0, -1,
"i82975x CE", "");
return 1;
}
static void i82975x_check(struct mem_ctl_info *mci)
{
struct i82975x_error_info info;
i82975x_get_error_info(mci, &info);
i82975x_process_error_info(mci, &info, 1);
}
/* Return 1 if dual channel mode is active. Else return 0. */
static int dual_channel_active(void __iomem *mch_window)
{
/*
	 * We treat an interleaved-symmetric configuration as dual-channel:
	 * EAP's bit 0 gives the channel of the error location.
	 *
	 * All other configurations are treated as single channel; EAP's
	 * bit 0 still resolves correctly in the symmetric area of mixed
	 * (symmetric/asymmetric) configurations.
*/
u8 drb[4][2];
int row;
int dualch;
for (dualch = 1, row = 0; dualch && (row < 4); row++) {
drb[row][0] = readb(mch_window + I82975X_DRB + row);
drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80);
dualch = dualch && (drb[row][0] == drb[row][1]);
}
return dualch;
}
static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev, void __iomem *mch_window)
{
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
u32 cumul_size, nr_pages;
int index, chan;
struct dimm_info *dimm;
last_cumul_size = 0;
/*
* 82875 comment:
* The dram row boundary (DRB) reg values are boundary address
* for each DRAM row with a granularity of 32 or 64MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
* contain the total memory contained in all rows.
*
*/
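	/*
	 * Worked example with hypothetical values: assuming PAGE_SHIFT == 12,
	 * a DRB byte of 0x10 marks a cumulative boundary of
	 * 16 * 32 MiB = 512 MiB, i.e.
	 * 0x10 << (I82975X_DRB_SHIFT - PAGE_SHIFT) = 0x20000 4 KiB pages
	 * (doubled below when the csrow spans two channels).
	 */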
for (index = 0; index < mci->nr_csrows; index++) {
csrow = mci->csrows[index];
value = readb(mch_window + I82975X_DRB + index +
((index >= 4) ? 0x80 : 0));
cumul_size = value;
cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
/*
* Adjust cumul_size w.r.t number of channels
*
*/
if (csrow->nr_channels > 1)
cumul_size <<= 1;
edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
nr_pages = cumul_size - last_cumul_size;
if (!nr_pages)
continue;
/*
* Initialise dram labels
* index values:
* [0-7] for single-channel; i.e. csrow->nr_channels = 1
* [0-3] for dual-channel; i.e. csrow->nr_channels = 2
*/
for (chan = 0; chan < csrow->nr_channels; chan++) {
dimm = mci->csrows[index]->channels[chan]->dimm;
dimm->nr_pages = nr_pages / csrow->nr_channels;
snprintf(csrow->channels[chan]->dimm->label, EDAC_MC_LABEL_LEN, "DIMM %c%d",
(chan == 0) ? 'A' : 'B',
index);
dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
			/* ECC is possible on i82975x ONLY with DEV_X8.  */
dimm->dtype = DEV_X8;
dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
dimm->edac_mode = EDAC_SECDED; /* only supported */
}
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
last_cumul_size = cumul_size;
}
}
/* #define i82975x_DEBUG_IOMEM */
#ifdef i82975x_DEBUG_IOMEM
static void i82975x_print_dram_timings(void __iomem *mch_window)
{
/*
* The register meanings are from Intel specs;
* (shows 13-5-5-5 for 800-DDR2)
* Asus P5W Bios reports 15-5-4-4
* What's your religion?
*/
static const int caslats[4] = { 5, 4, 3, 6 };
u32 dtreg[2];
dtreg[0] = readl(mch_window + 0x114);
dtreg[1] = readl(mch_window + 0x194);
i82975x_printk(KERN_INFO, "DRAM Timings : Ch0 Ch1\n"
" RAS Active Min = %d %d\n"
" CAS latency = %d %d\n"
" RAS to CAS = %d %d\n"
" RAS precharge = %d %d\n",
		(dtreg[0] >> 19) & 0x0f,
(dtreg[1] >> 19) & 0x0f,
caslats[(dtreg[0] >> 8) & 0x03],
caslats[(dtreg[1] >> 8) & 0x03],
((dtreg[0] >> 4) & 0x07) + 2,
((dtreg[1] >> 4) & 0x07) + 2,
(dtreg[0] & 0x07) + 2,
(dtreg[1] & 0x07) + 2
);
}
#endif
static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct i82975x_pvt *pvt;
void __iomem *mch_window;
u32 mchbar;
u32 drc[2];
struct i82975x_error_info discard;
int chans;
#ifdef i82975x_DEBUG_IOMEM
u8 c0drb[4];
u8 c1drb[4];
#endif
edac_dbg(0, "\n");
pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
if (!(mchbar & 1)) {
edac_dbg(3, "failed, MCHBAR disabled!\n");
goto fail0;
}
mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
mch_window = ioremap(mchbar, 0x1000);
if (!mch_window) {
edac_dbg(3, "error ioremapping MCHBAR!\n");
goto fail0;
}
#ifdef i82975x_DEBUG_IOMEM
i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
mchbar, mch_window);
c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0);
c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1);
c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2);
c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3);
c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0);
c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1);
c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2);
c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3);
i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]);
i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]);
i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]);
i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]);
i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]);
i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]);
i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]);
i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]);
#endif
drc[0] = readl(mch_window + I82975X_DRC_CH0M0);
drc[1] = readl(mch_window + I82975X_DRC_CH1M0);
#ifdef i82975x_DEBUG_IOMEM
i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0],
((drc[0] >> 21) & 3) == 1 ?
"ECC enabled" : "ECC disabled");
i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1],
((drc[1] >> 21) & 3) == 1 ?
"ECC enabled" : "ECC disabled");
i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n",
readw(mch_window + I82975X_C0BNKARC));
i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n",
readw(mch_window + I82975X_C1BNKARC));
i82975x_print_dram_timings(mch_window);
goto fail1;
#endif
if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) {
i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n");
goto fail1;
}
chans = dual_channel_active(mch_window) + 1;
/* assuming only one controller, index thus is 0 */
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = I82975X_NR_DIMMS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = I82975X_NR_CSROWS(chans);
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail1;
}
edac_dbg(3, "init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82975x_check;
mci->ctl_page_to_phys = NULL;
edac_dbg(3, "init pvt\n");
pvt = (struct i82975x_pvt *) mci->pvt_info;
pvt->mch_window = mch_window;
i82975x_init_csrows(mci, pdev, mch_window);
mci->scrub_mode = SCRUB_HW_SRC;
i82975x_get_error_info(mci, &discard); /* clear counters */
/* finalize this instance of memory controller with edac core */
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail2;
}
/* get this far and it's successful */
edac_dbg(3, "success\n");
return 0;
fail2:
edac_mc_free(mci);
fail1:
iounmap(mch_window);
fail0:
return rc;
}
/* returns count (>= 0), or negative on error */
static int i82975x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc;
edac_dbg(0, "\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = i82975x_probe1(pdev, ent->driver_data);
if (mci_pdev == NULL)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void i82975x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i82975x_pvt *pvt;
edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (mci == NULL)
return;
pvt = mci->pvt_info;
if (pvt->mch_window)
		iounmap(pvt->mch_window);
edac_mc_free(mci);
}
static const struct pci_device_id i82975x_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82975X
},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
static struct pci_driver i82975x_driver = {
.name = EDAC_MOD_STR,
.probe = i82975x_init_one,
.remove = i82975x_remove_one,
.id_table = i82975x_pci_tbl,
};
static int __init i82975x_init(void)
{
int pci_rc;
edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i82975x_driver);
if (pci_rc < 0)
goto fail0;
if (mci_pdev == NULL) {
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82975_0, NULL);
if (!mci_pdev) {
edac_dbg(0, "i82975x pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
if (pci_rc < 0) {
edac_dbg(0, "i82975x init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&i82975x_driver);
fail0:
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit i82975x_exit(void)
{
edac_dbg(3, "\n");
pci_unregister_driver(&i82975x_driver);
if (!i82975x_registered) {
i82975x_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
}
}
module_init(i82975x_init);
module_exit(i82975x_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arvind R. <[email protected]>");
MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i82975x_edac.c |
/*
* Intel e7xxx Memory Controller kernel module
* (C) 2003 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
* See "enum e7xxx_chips" below for supported chipsets
*
* Written by Thayne Harbaugh
* Based on work by Dan Hollis <goemon at anime dot net> and others.
* http://www.anime.net/~goemon/linux-ecc/
*
* Datasheet:
* http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
*
* Contributors:
* Eric Biederman (Linux Networx)
* Tom Zimmerman (Linux Networx)
* Jim Garlick (Lawrence Livermore National Labs)
* Dave Peterson (Lawrence Livermore National Labs)
* That One Guy (Some other place)
* Wang Zhenyu (intel.com)
*
* $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "e7xxx_edac"
#define e7xxx_printk(level, fmt, arg...) \
edac_printk(level, "e7xxx", fmt, ##arg)
#define e7xxx_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_7205_0
#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#endif /* PCI_DEVICE_ID_INTEL_7205_0 */
#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
#endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_7500_0
#define PCI_DEVICE_ID_INTEL_7500_0 0x2540
#endif /* PCI_DEVICE_ID_INTEL_7500_0 */
#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
#endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_7501_0
#define PCI_DEVICE_ID_INTEL_7501_0 0x254c
#endif /* PCI_DEVICE_ID_INTEL_7501_0 */
#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
#endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_7505_0
#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
#endif /* PCI_DEVICE_ID_INTEL_7505_0 */
#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
#define E7XXX_NR_CSROWS 8 /* number of csrows */
#define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */
/* E7XXX register addresses - device 0 function 0 */
#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
/*
* 31 Device width row 7 0=x8 1=x4
* 27 Device width row 6
* 23 Device width row 5
* 19 Device width row 4
* 15 Device width row 3
* 11 Device width row 2
* 7 Device width row 1
* 3 Device width row 0
*/
#define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
/*
* 22 Number channels 0=1,1=2
* 19:18 DRB Granularity 32/64MB
*/
#define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
#define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
#define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
/* E7XXX register addresses - device 0 function 1 */
#define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
#define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
#define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
/* error address register (32b) */
/*
* 31:28 Reserved
* 27:6 CE address (4k block 33:12)
* 5:0 Reserved
*/
#define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
/* error address register (32b) */
/*
* 31:28 Reserved
 * 27:6  UE address (4k block 33:12)
* 5:0 Reserved
*/
#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
/* error syndrome register (16b) */
enum e7xxx_chips {
E7500 = 0,
E7501,
E7505,
E7205,
};
struct e7xxx_pvt {
struct pci_dev *bridge_ck;
u32 tolm;
u32 remapbase;
u32 remaplimit;
const struct e7xxx_dev_info *dev_info;
};
struct e7xxx_dev_info {
u16 err_dev;
const char *ctl_name;
};
struct e7xxx_error_info {
u8 dram_ferr;
u8 dram_nerr;
u32 dram_celog_add;
u16 dram_celog_syndrome;
u32 dram_uelog_add;
};
static struct edac_pci_ctl_info *e7xxx_pci;
static const struct e7xxx_dev_info e7xxx_devs[] = {
[E7500] = {
.err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
.ctl_name = "E7500"},
[E7501] = {
.err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
.ctl_name = "E7501"},
[E7505] = {
.err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
.ctl_name = "E7505"},
[E7205] = {
.err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
.ctl_name = "E7205"},
};
/* FIXME - is this valid for both SECDED and S4ECD4ED? */
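/*
 * Added note (not from the original driver): the checks below appear to
 * assume the low byte of the syndrome belongs to channel 0 and the high
 * byte to channel 1, so a syndrome confined to (or dominated by) the low
 * byte is attributed to channel 0, otherwise to channel 1.
 */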
static inline int e7xxx_find_channel(u16 syndrome)
{
edac_dbg(3, "\n");
if ((syndrome & 0xff00) == 0)
return 0;
if ((syndrome & 0x00ff) == 0)
return 1;
if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
return 0;
return 1;
}
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
unsigned long page)
{
u32 remap;
struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
edac_dbg(3, "\n");
if ((page < pvt->tolm) ||
((page >= 0x100000) && (page < pvt->remapbase)))
return page;
remap = (page - pvt->tolm) + pvt->remapbase;
if (remap < pvt->remaplimit)
return remap;
e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
return pvt->tolm - 1;
}
static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
{
u32 error_1b, page;
u16 syndrome;
int row;
int channel;
edac_dbg(3, "\n");
/* read the error address */
error_1b = info->dram_celog_add;
/* FIXME - should use PAGE_SHIFT */
page = error_1b >> 6; /* convert the address to 4k page */
/* read the syndrome */
syndrome = info->dram_celog_syndrome;
/* FIXME - check for -1 */
row = edac_mc_find_csrow_by_page(mci, page);
/* convert syndrome to channel */
channel = e7xxx_find_channel(syndrome);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome,
row, channel, -1, "e7xxx CE", "");
}
static void process_ce_no_info(struct mem_ctl_info *mci)
{
edac_dbg(3, "\n");
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
"e7xxx CE log register overflow", "");
}
static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
{
u32 error_2b, block_page;
int row;
edac_dbg(3, "\n");
/* read the error address */
error_2b = info->dram_uelog_add;
/* FIXME - should use PAGE_SHIFT */
block_page = error_2b >> 6; /* convert to 4k address */
row = edac_mc_find_csrow_by_page(mci, block_page);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, 0, 0,
row, -1, -1, "e7xxx UE", "");
}
static void process_ue_no_info(struct mem_ctl_info *mci)
{
edac_dbg(3, "\n");
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
"e7xxx UE log register overflow", "");
}
static void e7xxx_get_error_info(struct mem_ctl_info *mci,
struct e7xxx_error_info *info)
{
struct e7xxx_pvt *pvt;
pvt = (struct e7xxx_pvt *)mci->pvt_info;
pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);
if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
&info->dram_celog_add);
pci_read_config_word(pvt->bridge_ck,
E7XXX_DRAM_CELOG_SYNDROME,
&info->dram_celog_syndrome);
}
if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
&info->dram_uelog_add);
if (info->dram_ferr & 3)
pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
if (info->dram_nerr & 3)
pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
}
static int e7xxx_process_error_info(struct mem_ctl_info *mci,
struct e7xxx_error_info *info,
int handle_errors)
{
int error_found;
error_found = 0;
/* decode and report errors */
if (info->dram_ferr & 1) { /* check first error correctable */
error_found = 1;
if (handle_errors)
process_ce(mci, info);
}
if (info->dram_ferr & 2) { /* check first error uncorrectable */
error_found = 1;
if (handle_errors)
process_ue(mci, info);
}
if (info->dram_nerr & 1) { /* check next error correctable */
error_found = 1;
if (handle_errors) {
if (info->dram_ferr & 1)
process_ce_no_info(mci);
else
process_ce(mci, info);
}
}
if (info->dram_nerr & 2) { /* check next error uncorrectable */
error_found = 1;
if (handle_errors) {
if (info->dram_ferr & 2)
process_ue_no_info(mci);
else
process_ue(mci, info);
}
}
return error_found;
}
static void e7xxx_check(struct mem_ctl_info *mci)
{
struct e7xxx_error_info info;
e7xxx_get_error_info(mci, &info);
e7xxx_process_error_info(mci, &info, 1);
}
/* Return 1 if dual channel mode is active. Else return 0. */
static inline int dual_channel_active(u32 drc, int dev_idx)
{
return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
}
/* Return DRB granularity (0=32mb, 1=64mb). */
static inline int drb_granularity(u32 drc, int dev_idx)
{
/* only e7501 can be single channel */
return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
}
static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int dev_idx, u32 drc)
{
unsigned long last_cumul_size;
int index, j;
u8 value;
u32 dra, cumul_size, nr_pages;
int drc_chan, drc_drbg, drc_ddim, mem_dev;
struct csrow_info *csrow;
struct dimm_info *dimm;
enum edac_type edac_mode;
pci_read_config_dword(pdev, E7XXX_DRA, &dra);
drc_chan = dual_channel_active(drc, dev_idx);
drc_drbg = drb_granularity(drc, dev_idx);
drc_ddim = (drc >> 20) & 0x3;
last_cumul_size = 0;
/* The dram row boundary (DRB) reg values are boundary address
* for each DRAM row with a granularity of 32 or 64MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
* contain the total memory contained in all eight rows.
*/
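	/* Worked example with hypothetical values: with 64 MiB granularity
	 * (drc_drbg == 1) and PAGE_SHIFT == 12, a DRB byte of 0x08 marks a
	 * cumulative boundary of 8 * 64 MiB = 512 MiB, i.e.
	 * 0x08 << (25 + 1 - 12) = 0x20000 4 KiB pages.
	 */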
for (index = 0; index < mci->nr_csrows; index++) {
/* mem_dev 0=x8, 1=x4 */
mem_dev = (dra >> (index * 4 + 3)) & 0x1;
csrow = mci->csrows[index];
pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
/* convert a 64 or 32 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
/*
* if single channel or x8 devices then SECDED
* if dual channel and x4 then S4ECD4ED
*/
if (drc_ddim) {
if (drc_chan && mem_dev) {
edac_mode = EDAC_S4ECD4ED;
mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
} else {
edac_mode = EDAC_SECDED;
mci->edac_cap |= EDAC_FLAG_SECDED;
}
} else
edac_mode = EDAC_NONE;
for (j = 0; j < drc_chan + 1; j++) {
dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / (drc_chan + 1);
dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
dimm->mtype = MEM_RDDR; /* only one type supported */
dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
dimm->edac_mode = edac_mode;
}
}
}
static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
u16 pci_data;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
struct e7xxx_pvt *pvt = NULL;
u32 drc;
int drc_chan;
struct e7xxx_error_info discard;
edac_dbg(0, "mci\n");
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
drc_chan = dual_channel_active(drc, dev_idx);
/*
	 * According to the datasheet, this device has a maximum of
	 * 4 DIMMs per channel, either single-rank or dual-rank. So the
	 * total number of DIMMs is 8 (E7XXX_NR_DIMMS).
	 * That means that a DIMM is mapped as a CSROW, and the channel
	 * maps the rank. So an error on either channel should be
	 * attributed to the same DIMM.
*/
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = E7XXX_NR_CSROWS;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = drc_chan + 1;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->pdev = &pdev->dev;
edac_dbg(3, "init pvt\n");
pvt = (struct e7xxx_pvt *)mci->pvt_info;
pvt->dev_info = &e7xxx_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev, pvt->bridge_ck);
if (!pvt->bridge_ck) {
e7xxx_printk(KERN_ERR, "error reporting device not found:"
"vendor %x device 0x%x (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
goto fail0;
}
edac_dbg(3, "more mci init\n");
mci->ctl_name = pvt->dev_info->ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = e7xxx_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
e7xxx_init_csrows(mci, pdev, dev_idx, drc);
mci->edac_cap |= EDAC_FLAG_NONE;
edac_dbg(3, "tolm, remapbase, remaplimit\n");
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
pvt->tolm = ((u32) pci_data) << 4;
pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data);
pvt->remapbase = ((u32) pci_data) << 14;
pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
pvt->remaplimit = ((u32) pci_data) << 14;
e7xxx_printk(KERN_INFO,
"tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
pvt->remapbase, pvt->remaplimit);
/* clear any pending errors, or initial state bits */
e7xxx_get_error_info(mci, &discard);
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail1;
}
/* allocating generic PCI control info */
e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!e7xxx_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
/* get this far and it's successful */
edac_dbg(3, "success\n");
return 0;
fail1:
pci_dev_put(pvt->bridge_ck);
fail0:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
/* wake up and enable device */
return pci_enable_device(pdev) ?
-EIO : e7xxx_probe1(pdev, ent->driver_data);
}
static void e7xxx_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct e7xxx_pvt *pvt;
edac_dbg(0, "\n");
if (e7xxx_pci)
edac_pci_release_generic_ctl(e7xxx_pci);
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
pvt = (struct e7xxx_pvt *)mci->pvt_info;
pci_dev_put(pvt->bridge_ck);
edac_mc_free(mci);
}
static const struct pci_device_id e7xxx_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7205},
{
PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7500},
{
PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7501},
{
PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7505},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
static struct pci_driver e7xxx_driver = {
.name = EDAC_MOD_STR,
.probe = e7xxx_init_one,
.remove = e7xxx_remove_one,
.id_table = e7xxx_pci_tbl,
};
static int __init e7xxx_init(void)
{
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
return pci_register_driver(&e7xxx_driver);
}
static void __exit e7xxx_exit(void)
{
pci_unregister_driver(&e7xxx_driver);
}
module_init(e7xxx_init);
module_exit(e7xxx_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al");
MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/e7xxx_edac.c |
// SPDX-License-Identifier: GPL-2.0
/*
*
* Shared code by both skx_edac and i10nm_edac. Originally split out
* from the skx_edac driver.
*
* This file is linked into both skx_edac and i10nm_edac drivers. In
* order to avoid link errors, this file must be like a pure library
* without including symbols and defines which would otherwise conflict,
* when linked once into a module and into a built-in object, at the
* same time. For example, __this_module symbol references when that
* file is being linked into a built-in object.
*
* Copyright (c) 2018, Intel Corporation.
*/
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/adxl.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"
static const char * const component_names[] = {
[INDEX_SOCKET] = "ProcessorSocketId",
[INDEX_MEMCTRL] = "MemoryControllerId",
[INDEX_CHANNEL] = "ChannelId",
[INDEX_DIMM] = "DimmSlotId",
[INDEX_CS] = "ChipSelect",
[INDEX_NM_MEMCTRL] = "NmMemoryControllerId",
[INDEX_NM_CHANNEL] = "NmChannelId",
[INDEX_NM_DIMM] = "NmDimmSlotId",
[INDEX_NM_CS] = "NmChipSelect",
};
static int component_indices[ARRAY_SIZE(component_names)];
static int adxl_component_count;
static const char * const *adxl_component_names;
static u64 *adxl_values;
static char *adxl_msg;
static unsigned long adxl_nm_bitmap;
static char skx_msg[MSG_SIZE];
static skx_decode_f driver_decode;
static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
static bool skx_mem_cfg_2lm;
int __init skx_adxl_get(void)
{
const char * const *names;
int i, j;
names = adxl_get_component_names();
if (!names) {
skx_printk(KERN_NOTICE, "No firmware support for address translation.\n");
return -ENODEV;
}
for (i = 0; i < INDEX_MAX; i++) {
for (j = 0; names[j]; j++) {
if (!strcmp(component_names[i], names[j])) {
component_indices[i] = j;
if (i >= INDEX_NM_FIRST)
adxl_nm_bitmap |= 1 << i;
break;
}
}
if (!names[j] && i < INDEX_NM_FIRST)
goto err;
}
if (skx_mem_cfg_2lm) {
if (!adxl_nm_bitmap)
skx_printk(KERN_NOTICE, "Not enough ADXL components for 2-level memory.\n");
else
edac_dbg(2, "adxl_nm_bitmap: 0x%lx\n", adxl_nm_bitmap);
}
adxl_component_names = names;
while (*names++)
adxl_component_count++;
adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values),
GFP_KERNEL);
if (!adxl_values) {
adxl_component_count = 0;
return -ENOMEM;
}
adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL);
if (!adxl_msg) {
adxl_component_count = 0;
kfree(adxl_values);
return -ENOMEM;
}
return 0;
err:
skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ",
component_names[i]);
for (j = 0; names[j]; j++)
skx_printk(KERN_CONT, "%s ", names[j]);
skx_printk(KERN_CONT, "\n");
return -ENODEV;
}
void __exit skx_adxl_put(void)
{
kfree(adxl_values);
kfree(adxl_msg);
}
static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
{
struct skx_dev *d;
int i, len = 0;
if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
res->addr < BIT_ULL(32))) {
edac_dbg(0, "Address 0x%llx out of range\n", res->addr);
return false;
}
if (adxl_decode(res->addr, adxl_values)) {
edac_dbg(0, "Failed to decode 0x%llx\n", res->addr);
return false;
}
res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]];
if (error_in_1st_level_mem) {
res->imc = (adxl_nm_bitmap & BIT_NM_MEMCTRL) ?
(int)adxl_values[component_indices[INDEX_NM_MEMCTRL]] : -1;
res->channel = (adxl_nm_bitmap & BIT_NM_CHANNEL) ?
(int)adxl_values[component_indices[INDEX_NM_CHANNEL]] : -1;
res->dimm = (adxl_nm_bitmap & BIT_NM_DIMM) ?
(int)adxl_values[component_indices[INDEX_NM_DIMM]] : -1;
res->cs = (adxl_nm_bitmap & BIT_NM_CS) ?
(int)adxl_values[component_indices[INDEX_NM_CS]] : -1;
} else {
res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
res->cs = (int)adxl_values[component_indices[INDEX_CS]];
}
if (res->imc > NUM_IMC - 1 || res->imc < 0) {
skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
return false;
}
list_for_each_entry(d, &dev_edac_list, list) {
if (d->imc[0].src_id == res->socket) {
res->dev = d;
break;
}
}
if (!res->dev) {
skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
res->socket, res->imc);
return false;
}
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
continue;
len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx",
adxl_component_names[i], adxl_values[i]);
if (MSG_SIZE - len <= 0)
break;
}
res->decoded_by_adxl = true;
return true;
}
void skx_set_mem_cfg(bool mem_cfg_2lm)
{
skx_mem_cfg_2lm = mem_cfg_2lm;
}
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
driver_decode = decode;
skx_show_retry_rd_err_log = show_retry_log;
}
int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
u32 reg;
if (pci_read_config_dword(d->util_all, off, ®)) {
skx_printk(KERN_ERR, "Failed to read src id\n");
return -ENODEV;
}
*id = GET_BITFIELD(reg, 12, 14);
return 0;
}
int skx_get_node_id(struct skx_dev *d, u8 *id)
{
u32 reg;
if (pci_read_config_dword(d->util_all, 0xf4, ®)) {
skx_printk(KERN_ERR, "Failed to read node id\n");
return -ENODEV;
}
*id = GET_BITFIELD(reg, 0, 2);
return 0;
}
static int get_width(u32 mtr)
{
switch (GET_BITFIELD(mtr, 8, 9)) {
case 0:
return DEV_X4;
case 1:
return DEV_X8;
case 2:
return DEV_X16;
}
return DEV_UNKNOWN;
}
/*
* We use the per-socket device @cfg->did to count how many sockets are present,
 * and to determine which PCI buses are associated with each socket. Allocate
* and build the full list of all the skx_dev structures that we need here.
*/
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
struct pci_dev *pdev, *prev;
struct skx_dev *d;
u32 reg;
int ndev = 0;
prev = NULL;
for (;;) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, cfg->decs_did, prev);
if (!pdev)
break;
ndev++;
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
pci_dev_put(pdev);
return -ENOMEM;
}
if (pci_read_config_dword(pdev, cfg->busno_cfg_offset, ®)) {
kfree(d);
pci_dev_put(pdev);
skx_printk(KERN_ERR, "Failed to read bus idx\n");
return -ENODEV;
}
d->bus[0] = GET_BITFIELD(reg, 0, 7);
d->bus[1] = GET_BITFIELD(reg, 8, 15);
if (cfg->type == SKX) {
d->seg = pci_domain_nr(pdev->bus);
d->bus[2] = GET_BITFIELD(reg, 16, 23);
d->bus[3] = GET_BITFIELD(reg, 24, 31);
} else {
d->seg = GET_BITFIELD(reg, 16, 23);
}
edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n",
d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
}
if (list)
*list = &dev_edac_list;
return ndev;
}
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
{
struct pci_dev *pdev;
u32 reg;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
if (!pdev) {
edac_dbg(2, "Can't get tolm/tohm\n");
return -ENODEV;
}
if (pci_read_config_dword(pdev, off[0], ®)) {
skx_printk(KERN_ERR, "Failed to read tolm\n");
goto fail;
}
skx_tolm = reg;
if (pci_read_config_dword(pdev, off[1], ®)) {
skx_printk(KERN_ERR, "Failed to read lower tohm\n");
goto fail;
}
skx_tohm = reg;
if (pci_read_config_dword(pdev, off[2], ®)) {
skx_printk(KERN_ERR, "Failed to read upper tohm\n");
goto fail;
}
skx_tohm |= (u64)reg << 32;
pci_dev_put(pdev);
*tolm = skx_tolm;
*tohm = skx_tohm;
edac_dbg(2, "tolm = 0x%llx tohm = 0x%llx\n", skx_tolm, skx_tohm);
return 0;
fail:
pci_dev_put(pdev);
return -ENODEV;
}
static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
int minval, int maxval, const char *name)
{
u32 val = GET_BITFIELD(reg, lobit, hibit);
if (val < minval || val > maxval) {
edac_dbg(2, "bad %s = %d (raw=0x%x)\n", name, val, reg);
return -EINVAL;
}
return val + add;
}
#define numrank(reg) skx_get_dimm_attr(reg, 12, 13, 0, 0, 2, "ranks")
#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")
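/*
 * Added note (not from the original driver): the macros above add a fixed
 * offset to the raw bitfield, e.g. a raw "rows" value of 5 means
 * 5 + 12 = 17 row address bits and a raw "cols" value of 0 means
 * 0 + 10 = 10 column address bits.
 */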
int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno,
struct res_config *cfg)
{
int banks, ranks, rows, cols, npages;
enum mem_type mtype;
u64 size;
ranks = numrank(mtr);
rows = numrow(mtr);
cols = imc->hbm_mc ? 6 : numcol(mtr);
if (imc->hbm_mc) {
banks = 32;
mtype = MEM_HBM2;
} else if (cfg->support_ddr5 && (amap & 0x8)) {
banks = 32;
mtype = MEM_DDR5;
} else {
banks = 16;
mtype = MEM_DDR4;
}
/*
* Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
*/
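	/*
	 * Worked example with hypothetical values: rows = 17, cols = 10,
	 * ranks = 1 (i.e. two ranks) and banks = 16 give
	 * (1ULL << 28) * 16 = 2^32 eight-byte words; shifting right by
	 * (20 - 3) yields 32768 MiB, i.e. a 32 GiB DIMM.
	 */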
size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: 0x%x, col: 0x%x\n",
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0);
imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9);
imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
imc->chan[chan].dimms[dimmno].rowbits = rows;
imc->chan[chan].dimms[dimmno].colbits = cols;
dimm->nr_pages = npages;
dimm->grain = 32;
dimm->dtype = get_width(mtr);
dimm->mtype = mtype;
dimm->edac_mode = EDAC_SECDED; /* likely better than this */
if (imc->hbm_mc)
snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_HBMC#%u_Chan#%u",
imc->src_id, imc->lmc, chan);
else
snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
imc->src_id, imc->lmc, chan, dimmno);
return 1;
}
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
int chan, int dimmno, const char *mod_str)
{
int smbios_handle;
u32 dev_handle;
u16 flags;
u64 size = 0;
dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc,
imc->src_id, 0);
smbios_handle = nfit_get_smbios_id(dev_handle, &flags);
if (smbios_handle == -EOPNOTSUPP) {
pr_warn_once("%s: Can't find size of NVDIMM. Try enabling CONFIG_ACPI_NFIT\n", mod_str);
goto unknown_size;
}
if (smbios_handle < 0) {
skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=0x%x\n", dev_handle);
goto unknown_size;
}
if (flags & ACPI_NFIT_MEM_MAP_FAILED) {
skx_printk(KERN_ERR, "NVDIMM ADR=0x%x is not mapped\n", dev_handle);
goto unknown_size;
}
size = dmi_memdev_size(smbios_handle);
if (size == ~0ull)
skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=0x%x/SMBIOS=0x%x\n",
dev_handle, smbios_handle);
unknown_size:
dimm->nr_pages = size >> PAGE_SHIFT;
dimm->grain = 32;
dimm->dtype = DEV_UNKNOWN;
dimm->mtype = MEM_NVDIMM;
dimm->edac_mode = EDAC_SECDED; /* likely better than this */
edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n",
imc->mc, chan, dimmno, size >> 20, dimm->nr_pages);
snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
imc->src_id, imc->lmc, chan, dimmno);
return (size == 0 || size == ~0ull) ? 0 : 1;
}
int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
const char *ctl_name, const char *mod_str,
get_dimm_config_f get_dimm_config,
struct res_config *cfg)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct skx_pvt *pvt;
int rc;
/* Allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = NUM_CHANNELS;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
layers[1].size = NUM_DIMMS;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
sizeof(struct skx_pvt));
if (unlikely(!mci))
return -ENOMEM;
edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);
/* Associate skx_dev and mci for future usage */
imc->mci = mci;
pvt = mci->pvt_info;
pvt->imc = imc;
mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name,
imc->node_id, imc->lmc);
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail0;
}
mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_NVDIMM;
if (cfg->support_ddr5)
mci->mtype_cap |= MEM_FLAG_DDR5;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = mod_str;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
rc = get_dimm_config(mci, cfg);
if (rc < 0)
goto fail;
/* Record ptr to the generic device */
mci->pdev = &pdev->dev;
/* Add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
rc = -EINVAL;
goto fail;
}
return 0;
fail:
kfree(mci->ctl_name);
fail0:
edac_mc_free(mci);
imc->mci = NULL;
return rc;
}
static void skx_unregister_mci(struct skx_imc *imc)
{
struct mem_ctl_info *mci = imc->mci;
if (!mci)
return;
edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->pdev);
edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
kfree(mci->ctl_name);
edac_mc_free(mci);
}
static void skx_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m,
struct decoded_addr *res)
{
enum hw_event_mc_err_type tp_event;
char *optype;
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
bool scrub_err = false;
bool recoverable;
int len;
u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
u32 mscod = GET_BITFIELD(m->status, 16, 31);
u32 errcode = GET_BITFIELD(m->status, 0, 15);
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
recoverable = GET_BITFIELD(m->status, 56, 56);
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
tp_event = HW_EVENT_ERR_UNCORRECTED;
} else {
tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
}
switch (optypenum) {
case 0:
optype = "generic undef request error";
break;
case 1:
optype = "memory read error";
break;
case 2:
optype = "memory write error";
break;
case 3:
optype = "addr/cmd error";
break;
case 4:
optype = "memory scrubbing error";
scrub_err = true;
break;
default:
optype = "reserved";
break;
}
if (res->decoded_by_adxl) {
len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode, adxl_msg);
} else {
len = snprintf(skx_msg, MSG_SIZE,
"%s%s err_code:0x%04x:0x%04x ProcessorSocketId:0x%x MemoryControllerId:0x%x PhysicalRankId:0x%x Row:0x%x Column:0x%x Bank:0x%x BankGroup:0x%x",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode,
res->socket, res->imc, res->rank,
res->row, res->column, res->bank_address, res->bank_group);
}
if (skx_show_retry_rd_err_log)
skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len, scrub_err);
edac_dbg(0, "%s\n", skx_msg);
/* Call the helper to output message */
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
res->channel, res->dimm, -1,
optype, skx_msg);
}
static bool skx_error_in_1st_level_mem(const struct mce *m)
{
u32 errcode;
if (!skx_mem_cfg_2lm)
return false;
errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
return errcode == MCACOD_EXT_MEM_ERR;
}
static bool skx_error_in_mem(const struct mce *m)
{
u32 errcode;
errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
return (errcode == MCACOD_MEM_CTL_ERR || errcode == MCACOD_EXT_MEM_ERR);
}
int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *mce = (struct mce *)data;
struct decoded_addr res;
struct mem_ctl_info *mci;
char *type;
if (mce->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
/* Ignore unless this is memory related with an address */
if (!skx_error_in_mem(mce) || !(mce->status & MCI_STATUS_ADDRV))
return NOTIFY_DONE;
memset(&res, 0, sizeof(res));
res.mce = mce;
res.addr = mce->addr & MCI_ADDR_PHYSADDR;
/* Try driver decoder first */
if (!(driver_decode && driver_decode(&res))) {
/* Then try firmware decoder (ACPI DSM methods) */
if (!(adxl_component_count && skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce))))
return NOTIFY_DONE;
}
mci = res.dev->imc[res.imc].mci;
if (!mci)
return NOTIFY_DONE;
if (mce->mcgstatus & MCG_STATUS_MCIP)
type = "Exception";
else
type = "Event";
skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: 0x%llx "
"Bank %d: 0x%llx\n", mce->extcpu, type,
mce->mcgstatus, mce->bank, mce->status);
skx_mc_printk(mci, KERN_DEBUG, "TSC 0x%llx ", mce->tsc);
skx_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", mce->addr);
skx_mc_printk(mci, KERN_DEBUG, "MISC 0x%llx ", mce->misc);
skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:0x%x TIME %llu SOCKET "
"%u APIC 0x%x\n", mce->cpuvendor, mce->cpuid,
mce->time, mce->socketid, mce->apicid);
skx_mce_output_error(mci, mce, &res);
mce->kflags |= MCE_HANDLED_EDAC;
return NOTIFY_DONE;
}
void skx_remove(void)
{
int i, j;
struct skx_dev *d, *tmp;
edac_dbg(0, "\n");
list_for_each_entry_safe(d, tmp, &dev_edac_list, list) {
list_del(&d->list);
for (i = 0; i < NUM_IMC; i++) {
if (d->imc[i].mci)
skx_unregister_mci(&d->imc[i]);
if (d->imc[i].mdev)
pci_dev_put(d->imc[i].mdev);
if (d->imc[i].mbase)
iounmap(d->imc[i].mbase);
for (j = 0; j < NUM_CHANNELS; j++) {
if (d->imc[i].chan[j].cdev)
pci_dev_put(d->imc[i].chan[j].cdev);
}
}
if (d->util_all)
pci_dev_put(d->util_all);
if (d->pcu_cr3)
pci_dev_put(d->pcu_cr3);
if (d->sad_all)
pci_dev_put(d->sad_all);
if (d->uracu)
pci_dev_put(d->uracu);
kfree(d);
}
}
| linux-master | drivers/edac/skx_common.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Cavium, Inc.
*
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <asm/octeon/cvmx.h>
#include "edac_module.h"
#define EDAC_MOD_STR "octeon-l2c"
static void octeon_l2c_poll_oct1(struct edac_device_ctl_info *l2c)
{
union cvmx_l2t_err l2t_err, l2t_err_reset;
union cvmx_l2d_err l2d_err, l2d_err_reset;
l2t_err_reset.u64 = 0;
l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
if (l2t_err.s.sec_err) {
edac_device_handle_ce(l2c, 0, 0,
"Tag Single bit error (corrected)");
l2t_err_reset.s.sec_err = 1;
}
if (l2t_err.s.ded_err) {
edac_device_handle_ue(l2c, 0, 0,
"Tag Double bit error (detected)");
l2t_err_reset.s.ded_err = 1;
}
if (l2t_err_reset.u64)
cvmx_write_csr(CVMX_L2T_ERR, l2t_err_reset.u64);
l2d_err_reset.u64 = 0;
l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
if (l2d_err.s.sec_err) {
edac_device_handle_ce(l2c, 0, 1,
"Data Single bit error (corrected)");
l2d_err_reset.s.sec_err = 1;
}
if (l2d_err.s.ded_err) {
edac_device_handle_ue(l2c, 0, 1,
"Data Double bit error (detected)");
l2d_err_reset.s.ded_err = 1;
}
if (l2d_err_reset.u64)
cvmx_write_csr(CVMX_L2D_ERR, l2d_err_reset.u64);
}
static void _octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c, int tad)
{
union cvmx_l2c_err_tdtx err_tdtx, err_tdtx_reset;
union cvmx_l2c_err_ttgx err_ttgx, err_ttgx_reset;
char buf1[64];
char buf2[80];
err_tdtx_reset.u64 = 0;
err_tdtx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TDTX(tad));
if (err_tdtx.s.dbe || err_tdtx.s.sbe ||
err_tdtx.s.vdbe || err_tdtx.s.vsbe)
snprintf(buf1, sizeof(buf1),
"type:%d, syn:0x%x, way:%d",
err_tdtx.s.type, err_tdtx.s.syn, err_tdtx.s.wayidx);
if (err_tdtx.s.dbe) {
snprintf(buf2, sizeof(buf2),
"L2D Double bit error (detected):%s", buf1);
err_tdtx_reset.s.dbe = 1;
edac_device_handle_ue(l2c, tad, 1, buf2);
}
if (err_tdtx.s.sbe) {
snprintf(buf2, sizeof(buf2),
"L2D Single bit error (corrected):%s", buf1);
err_tdtx_reset.s.sbe = 1;
edac_device_handle_ce(l2c, tad, 1, buf2);
}
if (err_tdtx.s.vdbe) {
snprintf(buf2, sizeof(buf2),
"VBF Double bit error (detected):%s", buf1);
err_tdtx_reset.s.vdbe = 1;
edac_device_handle_ue(l2c, tad, 1, buf2);
}
if (err_tdtx.s.vsbe) {
snprintf(buf2, sizeof(buf2),
"VBF Single bit error (corrected):%s", buf1);
err_tdtx_reset.s.vsbe = 1;
edac_device_handle_ce(l2c, tad, 1, buf2);
}
if (err_tdtx_reset.u64)
cvmx_write_csr(CVMX_L2C_ERR_TDTX(tad), err_tdtx_reset.u64);
err_ttgx_reset.u64 = 0;
err_ttgx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TTGX(tad));
if (err_ttgx.s.dbe || err_ttgx.s.sbe)
snprintf(buf1, sizeof(buf1),
"type:%d, syn:0x%x, way:%d",
err_ttgx.s.type, err_ttgx.s.syn, err_ttgx.s.wayidx);
if (err_ttgx.s.dbe) {
snprintf(buf2, sizeof(buf2),
"Tag Double bit error (detected):%s", buf1);
err_ttgx_reset.s.dbe = 1;
edac_device_handle_ue(l2c, tad, 0, buf2);
}
if (err_ttgx.s.sbe) {
snprintf(buf2, sizeof(buf2),
"Tag Single bit error (corrected):%s", buf1);
err_ttgx_reset.s.sbe = 1;
edac_device_handle_ce(l2c, tad, 0, buf2);
}
if (err_ttgx_reset.u64)
cvmx_write_csr(CVMX_L2C_ERR_TTGX(tad), err_ttgx_reset.u64);
}
static void octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c)
{
int i;
for (i = 0; i < l2c->nr_instances; i++)
_octeon_l2c_poll_oct2(l2c, i);
}
static int octeon_l2c_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *l2c;
int num_tads = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : 1;
/* 'Tags' are block 0, 'Data' is block 1*/
l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0,
NULL, 0, edac_device_alloc_index());
if (!l2c)
return -ENOMEM;
l2c->dev = &pdev->dev;
platform_set_drvdata(pdev, l2c);
l2c->dev_name = dev_name(&pdev->dev);
l2c->mod_name = "octeon-l2c";
l2c->ctl_name = "octeon_l2c_err";
if (OCTEON_IS_OCTEON1PLUS()) {
union cvmx_l2t_err l2t_err;
union cvmx_l2d_err l2d_err;
l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
l2t_err.s.sec_intena = 0; /* We poll */
l2t_err.s.ded_intena = 0;
cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
l2d_err.s.sec_intena = 0; /* We poll */
l2d_err.s.ded_intena = 0;
		cvmx_write_csr(CVMX_L2D_ERR, l2d_err.u64);
l2c->edac_check = octeon_l2c_poll_oct1;
} else {
/* OCTEON II */
l2c->edac_check = octeon_l2c_poll_oct2;
}
if (edac_device_add_device(l2c) > 0) {
pr_err("%s: edac_device_add_device() failed\n", __func__);
goto err;
}
return 0;
err:
edac_device_free_ctl_info(l2c);
return -ENXIO;
}
static int octeon_l2c_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(l2c);
return 0;
}
static struct platform_driver octeon_l2c_driver = {
.probe = octeon_l2c_probe,
.remove = octeon_l2c_remove,
.driver = {
.name = "octeon_l2c_edac",
}
};
module_platform_driver(octeon_l2c_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <[email protected]>");
| linux-master | drivers/edac/octeon_edac-l2c.c |
/*
* file for managing the edac_device subsystem of devices for EDAC
*
* (C) 2007 SoftwareBitMaker
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written Doug Thompson <[email protected]>
*
*/
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include "edac_device.h"
#include "edac_module.h"
#define EDAC_DEVICE_SYMLINK "device"
#define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj)
#define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr)
/*
* Set of edac_device_ctl_info attribute store/show functions
*/
/* 'log_ue' */
static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info
*ctl_info, char *data)
{
return sprintf(data, "%u\n", ctl_info->log_ue);
}
static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info
*ctl_info, const char *data,
size_t count)
{
/* if parameter is zero, turn off flag, if non-zero turn on flag */
ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0);
return count;
}
/* 'log_ce' */
static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info
*ctl_info, char *data)
{
return sprintf(data, "%u\n", ctl_info->log_ce);
}
static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info
*ctl_info, const char *data,
size_t count)
{
/* if parameter is zero, turn off flag, if non-zero turn on flag */
ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0);
return count;
}
/* 'panic_on_ue' */
static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
*ctl_info, char *data)
{
return sprintf(data, "%u\n", ctl_info->panic_on_ue);
}
static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
*ctl_info, const char *data,
size_t count)
{
/* if parameter is zero, turn off flag, if non-zero turn on flag */
ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0);
return count;
}
/* 'poll_msec' show and store functions*/
static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info
*ctl_info, char *data)
{
return sprintf(data, "%u\n", ctl_info->poll_msec);
}
static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info
*ctl_info, const char *data,
size_t count)
{
unsigned long value;
	/* Get the value and enforce that it is non-zero: the delay period
	 * between scans must be at least one millisecond.
	 * Then cancel the last outstanding delayed work request and set a
	 * new one.
*/
value = simple_strtoul(data, NULL, 0);
edac_device_reset_delay_period(ctl_info, value);
return count;
}
/* edac_device_ctl_info specific attribute structure */
struct ctl_info_attribute {
struct attribute attr;
ssize_t(*show) (struct edac_device_ctl_info *, char *);
ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t);
};
#define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj)
#define to_ctl_info_attr(a) container_of(a,struct ctl_info_attribute,attr)
/* Function to 'show' fields from the edac_dev 'ctl_info' structure */
static ssize_t edac_dev_ctl_info_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
if (ctl_info_attr->show)
return ctl_info_attr->show(edac_dev, buffer);
return -EIO;
}
/* Function to 'store' fields into the edac_dev 'ctl_info' structure */
static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer, size_t count)
{
struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
if (ctl_info_attr->store)
return ctl_info_attr->store(edac_dev, buffer, count);
return -EIO;
}
/* edac_dev file operations for an 'ctl_info' */
static const struct sysfs_ops device_ctl_info_ops = {
.show = edac_dev_ctl_info_show,
.store = edac_dev_ctl_info_store
};
#define CTL_INFO_ATTR(_name,_mode,_show,_store) \
static struct ctl_info_attribute attr_ctl_info_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
};
/* Declare the various ctl_info attributes here and their respective ops */
CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR,
edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
edac_device_ctl_panic_on_ue_show,
edac_device_ctl_panic_on_ue_store);
CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
/* Base Attributes of the EDAC_DEVICE ECC object */
static struct attribute *device_ctrl_attrs[] = {
&attr_ctl_info_panic_on_ue.attr,
&attr_ctl_info_log_ue.attr,
&attr_ctl_info_log_ce.attr,
&attr_ctl_info_poll_msec.attr,
NULL,
};
ATTRIBUTE_GROUPS(device_ctrl);
/*
* edac_device_ctrl_master_release
*
* called when the reference count for the 'main' kobj
* for a edac_device control struct reaches zero
*
* Reference count model:
* One 'main' kobject for each control structure allocated.
 *		That main kobj's count is initially set to one AND
 *		the reference count for the EDAC 'core' module is
 *		bumped by one, thus adding a 'keep in memory' dependency.
*
* Each new internal kobj (in instances and blocks) then
* bumps the 'main' kobject.
*
* When they are released their release functions decrement
* the 'main' kobj.
*
* When the main kobj reaches zero (0) then THIS function
* is called which then decrements the EDAC 'core' module.
* When the module reference count reaches zero then the
* module no longer has dependency on keeping the release
* function code in memory and module can be unloaded.
*
* This will support several control objects as well, each
* with its own 'main' kobj.
*/
static void edac_device_ctrl_master_release(struct kobject *kobj)
{
struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
edac_dbg(4, "control index=%d\n", edac_dev->dev_idx);
/* decrement the EDAC CORE module ref count */
module_put(edac_dev->owner);
__edac_device_free_ctl_info(edac_dev);
}
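/*
 * Illustrative sketch of the resulting get/put pairing (not a new API,
 * just the calls made elsewhere in this file):
 *
 *	kobject_get(&edac_dev->kobj);		<- each instance/block created
 *	...
 *	kobject_put(&instance->ctl->kobj);	<- instance/block release()
 *	...
 *	kobject_put(&edac_dev->kobj);		<- main kobj unregistered
 *	-> edac_device_ctrl_master_release()	<- refcount reached zero
 *	   -> module_put(edac_dev->owner)
 *	   -> __edac_device_free_ctl_info(edac_dev)
 */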
/* ktype for the main (master) kobject */
static struct kobj_type ktype_device_ctrl = {
.release = edac_device_ctrl_master_release,
.sysfs_ops = &device_ctl_info_ops,
.default_groups = device_ctrl_groups,
};
/*
* edac_device_register_sysfs_main_kobj
*
* perform the high level setup for the new edac_device instance
*
* Return: 0 SUCCESS
* !0 FAILURE
*/
int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
{
struct device *dev_root;
struct bus_type *edac_subsys;
int err = -ENODEV;
edac_dbg(1, "\n");
/* get the /sys/devices/system/edac reference */
edac_subsys = edac_get_sysfs_subsys();
/* Point to the 'edac_subsys' this instance 'reports' to */
edac_dev->edac_subsys = edac_subsys;
	/* Init the device's kobject */
memset(&edac_dev->kobj, 0, sizeof(struct kobject));
/* Record which module 'owns' this control structure
* and bump the ref count of the module
*/
edac_dev->owner = THIS_MODULE;
if (!try_module_get(edac_dev->owner))
goto err_out;
/* register */
dev_root = bus_get_dev_root(edac_subsys);
if (dev_root) {
err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
&dev_root->kobj, "%s", edac_dev->name);
put_device(dev_root);
}
if (err) {
edac_dbg(1, "Failed to register '.../edac/%s'\n",
edac_dev->name);
goto err_kobj_reg;
}
kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
/* At this point, to 'free' the control struct,
* edac_device_unregister_sysfs_main_kobj() must be used
*/
edac_dbg(4, "Registered '.../edac/%s' kobject\n", edac_dev->name);
return 0;
/* Error exit stack */
err_kobj_reg:
kobject_put(&edac_dev->kobj);
module_put(edac_dev->owner);
err_out:
return err;
}
/*
* edac_device_unregister_sysfs_main_kobj:
 *	unregister the '..../edac/<name>' kobject
*/
void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
{
edac_dbg(0, "\n");
edac_dbg(4, "name of kobject is: %s\n", kobject_name(&dev->kobj));
/*
* Unregister the edac device's kobject and
* allow for reference count to reach 0 at which point
* the callback will be called to:
* a) module_put() this module
* b) 'kfree' the memory
*/
kobject_put(&dev->kobj);
}
/* edac_dev -> instance information */
/*
* Set of low-level instance attribute show functions
*/
static ssize_t instance_ue_count_show(struct edac_device_instance *instance,
char *data)
{
return sprintf(data, "%u\n", instance->counters.ue_count);
}
static ssize_t instance_ce_count_show(struct edac_device_instance *instance,
char *data)
{
return sprintf(data, "%u\n", instance->counters.ce_count);
}
#define to_instance(k) container_of(k, struct edac_device_instance, kobj)
#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
/* DEVICE instance kobject release() function */
static void edac_device_ctrl_instance_release(struct kobject *kobj)
{
struct edac_device_instance *instance;
edac_dbg(1, "\n");
/* map from this kobj to the main control struct
* and then dec the main kobj count
*/
instance = to_instance(kobj);
kobject_put(&instance->ctl->kobj);
}
/* instance specific attribute structure */
struct instance_attribute {
struct attribute attr;
ssize_t(*show) (struct edac_device_instance *, char *);
ssize_t(*store) (struct edac_device_instance *, const char *, size_t);
};
/* Function to 'show' fields from the edac_dev 'instance' structure */
static ssize_t edac_dev_instance_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct edac_device_instance *instance = to_instance(kobj);
struct instance_attribute *instance_attr = to_instance_attr(attr);
if (instance_attr->show)
return instance_attr->show(instance, buffer);
return -EIO;
}
/* Function to 'store' fields into the edac_dev 'instance' structure */
static ssize_t edac_dev_instance_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer, size_t count)
{
struct edac_device_instance *instance = to_instance(kobj);
struct instance_attribute *instance_attr = to_instance_attr(attr);
if (instance_attr->store)
return instance_attr->store(instance, buffer, count);
return -EIO;
}
/* edac_dev file operations for an 'instance' */
static const struct sysfs_ops device_instance_ops = {
.show = edac_dev_instance_show,
.store = edac_dev_instance_store
};
#define INSTANCE_ATTR(_name,_mode,_show,_store) \
static struct instance_attribute attr_instance_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
};
/*
* Define attributes visible for the edac_device instance object
* Each contains a pointer to a show and an optional set
* function pointer that does the low level output/input
*/
INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);
/* list of edac_dev 'instance' attributes */
static struct attribute *device_instance_attrs[] = {
&attr_instance_ce_count.attr,
&attr_instance_ue_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(device_instance);
/* The 'ktype' for each edac_dev 'instance' */
static struct kobj_type ktype_instance_ctrl = {
.release = edac_device_ctrl_instance_release,
.sysfs_ops = &device_instance_ops,
.default_groups = device_instance_groups,
};
/* edac_dev -> instance -> block information */
#define to_block(k) container_of(k, struct edac_device_block, kobj)
#define to_block_attr(a) \
container_of(a, struct edac_dev_sysfs_block_attribute, attr)
/*
* Set of low-level block attribute show functions
*/
static ssize_t block_ue_count_show(struct kobject *kobj,
struct attribute *attr, char *data)
{
struct edac_device_block *block = to_block(kobj);
return sprintf(data, "%u\n", block->counters.ue_count);
}
static ssize_t block_ce_count_show(struct kobject *kobj,
struct attribute *attr, char *data)
{
struct edac_device_block *block = to_block(kobj);
return sprintf(data, "%u\n", block->counters.ce_count);
}
/* DEVICE block kobject release() function */
static void edac_device_ctrl_block_release(struct kobject *kobj)
{
struct edac_device_block *block;
edac_dbg(1, "\n");
/* get the container of the kobj */
block = to_block(kobj);
/* map from 'block kobj' to 'block->instance->controller->main_kobj'
* now 'release' the block kobject
*/
kobject_put(&block->instance->ctl->kobj);
}
/* Function to 'show' fields from the edac_dev 'block' structure */
static ssize_t edac_dev_block_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct edac_dev_sysfs_block_attribute *block_attr =
to_block_attr(attr);
if (block_attr->show)
return block_attr->show(kobj, attr, buffer);
return -EIO;
}
/* Function to 'store' fields into the edac_dev 'block' structure */
static ssize_t edac_dev_block_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer, size_t count)
{
struct edac_dev_sysfs_block_attribute *block_attr;
block_attr = to_block_attr(attr);
if (block_attr->store)
return block_attr->store(kobj, attr, buffer, count);
return -EIO;
}
/* edac_dev file operations for a 'block' */
static const struct sysfs_ops device_block_ops = {
.show = edac_dev_block_show,
.store = edac_dev_block_store
};
#define BLOCK_ATTR(_name,_mode,_show,_store) \
static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
};
BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
/* list of edac_dev 'block' attributes */
static struct attribute *device_block_attrs[] = {
&attr_block_ce_count.attr,
&attr_block_ue_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(device_block);
/* The 'ktype' for each edac_dev 'block' */
static struct kobj_type ktype_block_ctrl = {
.release = edac_device_ctrl_block_release,
.sysfs_ops = &device_block_ops,
.default_groups = device_block_groups,
};
/* block ctor/dtor code */
/*
* edac_device_create_block
*/
static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
struct edac_device_instance *instance,
struct edac_device_block *block)
{
int i;
int err;
struct edac_dev_sysfs_block_attribute *sysfs_attrib;
struct kobject *main_kobj;
edac_dbg(4, "Instance '%s' inst_p=%p block '%s' block_p=%p\n",
instance->name, instance, block->name, block);
edac_dbg(4, "block kobj=%p block kobj->parent=%p\n",
&block->kobj, &block->kobj.parent);
/* init this block's kobject */
memset(&block->kobj, 0, sizeof(struct kobject));
/* bump the main kobject's reference count for this controller
* and this instance is dependent on the main
*/
main_kobj = kobject_get(&edac_dev->kobj);
if (!main_kobj) {
err = -ENODEV;
goto err_out;
}
/* Add this block's kobject */
err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl,
&instance->kobj,
"%s", block->name);
if (err) {
edac_dbg(1, "Failed to register instance '%s'\n", block->name);
kobject_put(main_kobj);
err = -ENODEV;
goto err_out;
}
	/* If there are driver-level block attributes, then add them
* to the block kobject
*/
sysfs_attrib = block->block_attributes;
if (sysfs_attrib && block->nr_attribs) {
for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
edac_dbg(4, "creating block attrib='%s' attrib->%p to kobj=%p\n",
sysfs_attrib->attr.name,
sysfs_attrib, &block->kobj);
/* Create each block_attribute file */
err = sysfs_create_file(&block->kobj,
&sysfs_attrib->attr);
if (err)
goto err_on_attrib;
}
}
kobject_uevent(&block->kobj, KOBJ_ADD);
return 0;
/* Error unwind stack */
err_on_attrib:
kobject_put(&block->kobj);
err_out:
return err;
}
/*
* edac_device_delete_block(edac_dev,block);
*/
static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
struct edac_device_block *block)
{
struct edac_dev_sysfs_block_attribute *sysfs_attrib;
int i;
/* if this block has 'attributes' then we need to iterate over the list
* and 'remove' the attributes on this block
*/
sysfs_attrib = block->block_attributes;
if (sysfs_attrib && block->nr_attribs) {
for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
/* remove each block_attrib file */
sysfs_remove_file(&block->kobj,
(struct attribute *) sysfs_attrib);
}
}
/* unregister this block's kobject, SEE:
* edac_device_ctrl_block_release() callback operation
*/
kobject_put(&block->kobj);
}
/* instance ctor/dtor code */
/*
* edac_device_create_instance
* create just one instance of an edac_device 'instance'
*/
static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
int idx)
{
int i, j;
int err;
struct edac_device_instance *instance;
struct kobject *main_kobj;
instance = &edac_dev->instances[idx];
/* Init the instance's kobject */
memset(&instance->kobj, 0, sizeof(struct kobject));
instance->ctl = edac_dev;
/* bump the main kobject's reference count for this controller
* and this instance is dependent on the main
*/
main_kobj = kobject_get(&edac_dev->kobj);
if (!main_kobj) {
err = -ENODEV;
goto err_out;
}
/* Formally register this instance's kobject under the edac_device */
err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
&edac_dev->kobj, "%s", instance->name);
if (err != 0) {
edac_dbg(2, "Failed to register instance '%s'\n",
instance->name);
kobject_put(main_kobj);
goto err_out;
}
edac_dbg(4, "now register '%d' blocks for instance %d\n",
instance->nr_blocks, idx);
/* register all blocks of this instance */
for (i = 0; i < instance->nr_blocks; i++) {
err = edac_device_create_block(edac_dev, instance,
&instance->blocks[i]);
if (err) {
/* If any fail, remove all previous ones */
for (j = 0; j < i; j++)
edac_device_delete_block(edac_dev,
&instance->blocks[j]);
goto err_release_instance_kobj;
}
}
kobject_uevent(&instance->kobj, KOBJ_ADD);
edac_dbg(4, "Registered instance %d '%s' kobject\n",
idx, instance->name);
return 0;
/* error unwind stack */
err_release_instance_kobj:
kobject_put(&instance->kobj);
err_out:
return err;
}
/*
* edac_device_remove_instance
* remove an edac_device instance
*/
static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
int idx)
{
struct edac_device_instance *instance;
int i;
instance = &edac_dev->instances[idx];
/* unregister all blocks in this instance */
for (i = 0; i < instance->nr_blocks; i++)
edac_device_delete_block(edac_dev, &instance->blocks[i]);
/* unregister this instance's kobject, SEE:
* edac_device_ctrl_instance_release() for callback operation
*/
kobject_put(&instance->kobj);
}
/*
* edac_device_create_instances
* create the first level of 'instances' for this device
 *	(i.e. 'cache' might have 'cache0', 'cache1', 'cache2', etc.)
*/
static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
{
int i, j;
int err;
edac_dbg(0, "\n");
/* iterate over creation of the instances */
for (i = 0; i < edac_dev->nr_instances; i++) {
err = edac_device_create_instance(edac_dev, i);
if (err) {
/* unwind previous instances on error */
for (j = 0; j < i; j++)
edac_device_delete_instance(edac_dev, j);
return err;
}
}
return 0;
}
/*
* edac_device_delete_instances(edac_dev);
* unregister all the kobjects of the instances
*/
static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev)
{
int i;
	/* iterate over the instances and remove each one */
for (i = 0; i < edac_dev->nr_instances; i++)
edac_device_delete_instance(edac_dev, i);
}
/* edac_dev sysfs ctor/dtor code */
/*
* edac_device_add_main_sysfs_attributes
* add some attributes to this instance's main kobject
*/
static int edac_device_add_main_sysfs_attributes(
struct edac_device_ctl_info *edac_dev)
{
struct edac_dev_sysfs_attribute *sysfs_attrib;
int err = 0;
sysfs_attrib = edac_dev->sysfs_attributes;
if (sysfs_attrib) {
/* iterate over the array and create an attribute for each
* entry in the list
*/
while (sysfs_attrib->attr.name != NULL) {
err = sysfs_create_file(&edac_dev->kobj,
(struct attribute*) sysfs_attrib);
if (err)
goto err_out;
sysfs_attrib++;
}
}
err_out:
return err;
}
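/*
 * The attributes come from the low-level driver via
 * edac_dev->sysfs_attributes and must be a NULL-name-terminated array.
 * A hypothetical driver-side definition might look like this (sketch
 * only, the names are made up):
 *
 *	static struct edac_dev_sysfs_attribute my_dev_attrs[] = {
 *		{
 *			.attr = { .name = "foo", .mode = 0644 },
 *			.show = foo_show,
 *			.store = foo_store,
 *		},
 *		{ }	<- .attr.name == NULL terminates the walk above
 *	};
 */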
/*
* edac_device_remove_main_sysfs_attributes
* remove any attributes to this instance's main kobject
*/
static void edac_device_remove_main_sysfs_attributes(
struct edac_device_ctl_info *edac_dev)
{
struct edac_dev_sysfs_attribute *sysfs_attrib;
	/* if there are main attributes defined, remove them: point to the
	 * start of the array and iterate over it, removing each listed
	 * attribute from this device's main kobject
*/
sysfs_attrib = edac_dev->sysfs_attributes;
if (sysfs_attrib) {
while (sysfs_attrib->attr.name != NULL) {
sysfs_remove_file(&edac_dev->kobj,
(struct attribute *) sysfs_attrib);
sysfs_attrib++;
}
}
}
/*
* edac_device_create_sysfs() Constructor
*
* accept a created edac_device control structure
* and 'export' it to sysfs. The 'main' kobj should already have been
* created. 'instance' and 'block' kobjects should be registered
 * along with any 'block' attributes from the low-level driver. In addition,
* the main attributes (if any) are connected to the main kobject of
* the control structure.
*
* Return:
* 0 Success
* !0 Failure
*/
int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
{
int err;
struct kobject *edac_kobj = &edac_dev->kobj;
edac_dbg(0, "idx=%d\n", edac_dev->dev_idx);
	/* go create any main attributes the caller wants */
err = edac_device_add_main_sysfs_attributes(edac_dev);
if (err) {
edac_dbg(0, "failed to add sysfs attribs\n");
goto err_out;
}
/* create a symlink from the edac device
* to the platform 'device' being used for this
*/
err = sysfs_create_link(edac_kobj,
&edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
if (err) {
edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
goto err_remove_main_attribs;
}
/* Create the first level instance directories
* In turn, the nested blocks beneath the instances will
* be registered as well
*/
err = edac_device_create_instances(edac_dev);
if (err) {
edac_dbg(0, "edac_device_create_instances() returned err= %d\n",
err);
goto err_remove_link;
}
edac_dbg(4, "create-instances done, idx=%d\n", edac_dev->dev_idx);
return 0;
/* Error unwind stack */
err_remove_link:
/* remove the sym link */
sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
err_remove_main_attribs:
edac_device_remove_main_sysfs_attributes(edac_dev);
err_out:
return err;
}
/*
* edac_device_remove_sysfs() destructor
*
* given an edac_device struct, tear down the kobject resources
*/
void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
{
edac_dbg(0, "\n");
/* remove any main attributes for this device */
edac_device_remove_main_sysfs_attributes(edac_dev);
/* remove the device sym link */
sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
/* walk the instance/block kobject tree, deconstructing it */
edac_device_delete_instances(edac_dev);
}
| linux-master | drivers/edac/edac_device_sysfs.c |
/*
* Cavium ThunderX memory controller kernel module
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright Cavium, Inc. (C) 2015-2017. All rights reserved.
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/stop_machine.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <asm/page.h>
#include "edac_module.h"
#define phys_to_pfn(phys) (PFN_DOWN(phys))
#define THUNDERX_NODE GENMASK(45, 44)
enum {
ERR_CORRECTED = 1,
ERR_UNCORRECTED = 2,
ERR_UNKNOWN = 3,
};
#define MAX_SYNDROME_REGS 4
struct error_syndrome {
u64 reg[MAX_SYNDROME_REGS];
};
struct error_descr {
int type;
u64 mask;
char *descr;
};
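/*
 * Decode @reg against the @descr table and append one line of text to
 * @str for every error bit that is set.
 */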
static void decode_register(char *str, size_t size,
const struct error_descr *descr,
const uint64_t reg)
{
int ret = 0;
while (descr->type && descr->mask && descr->descr) {
if (reg & descr->mask) {
ret = snprintf(str, size, "\n\t%s, %s",
descr->type == ERR_CORRECTED ?
"Corrected" : "Uncorrected",
descr->descr);
str += ret;
size -= ret;
}
descr++;
}
}
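/* Extract a @width-bit wide field starting at bit position @pos. */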
static unsigned long get_bits(unsigned long data, int pos, int width)
{
return (data >> pos) & ((1 << width) - 1);
}
#define L2C_CTL 0x87E080800000
#define L2C_CTL_DISIDXALIAS BIT(0)
#define PCI_DEVICE_ID_THUNDER_LMC 0xa022
#define LMC_FADR 0x20
#define LMC_FADR_FDIMM(x) ((x >> 37) & 0x1)
#define LMC_FADR_FBUNK(x) ((x >> 36) & 0x1)
#define LMC_FADR_FBANK(x) ((x >> 32) & 0xf)
#define LMC_FADR_FROW(x) ((x >> 14) & 0xffff)
#define LMC_FADR_FCOL(x) ((x >> 0) & 0x1fff)
#define LMC_NXM_FADR 0x28
#define LMC_ECC_SYND 0x38
#define LMC_ECC_PARITY_TEST 0x108
#define LMC_INT_W1S 0x150
#define LMC_INT_ENA_W1C 0x158
#define LMC_INT_ENA_W1S 0x160
#define LMC_CONFIG 0x188
#define LMC_CONFIG_BG2 BIT(62)
#define LMC_CONFIG_RANK_ENA BIT(42)
#define LMC_CONFIG_PBANK_LSB(x) (((x) >> 5) & 0xF)
#define LMC_CONFIG_ROW_LSB(x) (((x) >> 2) & 0x7)
#define LMC_CONTROL 0x190
#define LMC_CONTROL_XOR_BANK BIT(16)
#define LMC_INT 0x1F0
#define LMC_INT_DDR_ERR BIT(11)
#define LMC_INT_DED_ERR (0xFUL << 5)
#define LMC_INT_SEC_ERR (0xFUL << 1)
#define LMC_INT_NXM_WR_MASK BIT(0)
#define LMC_DDR_PLL_CTL 0x258
#define LMC_DDR_PLL_CTL_DDR4 BIT(29)
#define LMC_FADR_SCRAMBLED 0x330
#define LMC_INT_UE (LMC_INT_DDR_ERR | LMC_INT_DED_ERR | \
LMC_INT_NXM_WR_MASK)
#define LMC_INT_CE (LMC_INT_SEC_ERR)
static const struct error_descr lmc_errors[] = {
{
.type = ERR_CORRECTED,
.mask = LMC_INT_SEC_ERR,
.descr = "Single-bit ECC error",
},
{
.type = ERR_UNCORRECTED,
.mask = LMC_INT_DDR_ERR,
.descr = "DDR chip error",
},
{
.type = ERR_UNCORRECTED,
.mask = LMC_INT_DED_ERR,
.descr = "Double-bit ECC error",
},
{
.type = ERR_UNCORRECTED,
.mask = LMC_INT_NXM_WR_MASK,
.descr = "Non-existent memory write",
},
{0, 0, NULL},
};
#define LMC_INT_EN_DDR_ERROR_ALERT_ENA BIT(5)
#define LMC_INT_EN_DLCRAM_DED_ERR BIT(4)
#define LMC_INT_EN_DLCRAM_SEC_ERR BIT(3)
#define LMC_INT_INTR_DED_ENA BIT(2)
#define LMC_INT_INTR_SEC_ENA BIT(1)
#define LMC_INT_INTR_NXM_WR_ENA BIT(0)
#define LMC_INT_ENA_ALL GENMASK(5, 0)
#define LMC_DDR_PLL_CTL 0x258
#define LMC_DDR_PLL_CTL_DDR4 BIT(29)
#define LMC_CONTROL 0x190
#define LMC_CONTROL_RDIMM BIT(0)
#define LMC_SCRAM_FADR 0x330
#define LMC_CHAR_MASK0 0x228
#define LMC_CHAR_MASK2 0x238
#define RING_ENTRIES 8
struct debugfs_entry {
const char *name;
umode_t mode;
const struct file_operations fops;
};
struct lmc_err_ctx {
u64 reg_int;
u64 reg_fadr;
u64 reg_nxm_fadr;
u64 reg_scram_fadr;
u64 reg_ecc_synd;
};
struct thunderx_lmc {
void __iomem *regs;
struct pci_dev *pdev;
struct msix_entry msix_ent;
atomic_t ecc_int;
u64 mask0;
u64 mask2;
u64 parity_test;
u64 node;
int xbits;
int bank_width;
int pbank_lsb;
int dimm_lsb;
int rank_lsb;
int bank_lsb;
int row_lsb;
int col_hi_lsb;
int xor_bank;
int l2c_alias;
struct page *mem;
struct lmc_err_ctx err_ctx[RING_ENTRIES];
unsigned long ring_head;
unsigned long ring_tail;
};
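/*
 * err_ctx[] is used as a simple power-of-two ring buffer: the hard IRQ
 * handler fills the slot at ring_head and the threaded handler drains
 * from ring_tail; ring_pos() masks an index down to the array bounds.
 */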
#define ring_pos(pos, size) ((pos) & ((size) - 1))
#define DEBUGFS_STRUCT(_name, _mode, _write, _read) \
static struct debugfs_entry debugfs_##_name = { \
.name = __stringify(_name), \
.mode = VERIFY_OCTAL_PERMISSIONS(_mode), \
.fops = { \
.open = simple_open, \
.write = _write, \
.read = _read, \
.llseek = generic_file_llseek, \
}, \
}
#define DEBUGFS_FIELD_ATTR(_type, _field) \
static ssize_t thunderx_##_type##_##_field##_read(struct file *file, \
char __user *data, \
size_t count, loff_t *ppos) \
{ \
struct thunderx_##_type *pdata = file->private_data; \
char buf[20]; \
\
	snprintf(buf, sizeof(buf), "0x%016llx", pdata->_field);	    \
return simple_read_from_buffer(data, count, ppos, \
buf, sizeof(buf)); \
} \
\
static ssize_t thunderx_##_type##_##_field##_write(struct file *file, \
const char __user *data, \
size_t count, loff_t *ppos) \
{ \
struct thunderx_##_type *pdata = file->private_data; \
int res; \
\
res = kstrtoull_from_user(data, count, 0, &pdata->_field); \
\
return res ? res : count; \
} \
\
DEBUGFS_STRUCT(_field, 0600, \
thunderx_##_type##_##_field##_write, \
thunderx_##_type##_##_field##_read) \
#define DEBUGFS_REG_ATTR(_type, _name, _reg) \
static ssize_t thunderx_##_type##_##_name##_read(struct file *file, \
char __user *data, \
size_t count, loff_t *ppos) \
{ \
struct thunderx_##_type *pdata = file->private_data; \
char buf[20]; \
\
sprintf(buf, "0x%016llx", readq(pdata->regs + _reg)); \
return simple_read_from_buffer(data, count, ppos, \
buf, sizeof(buf)); \
} \
\
static ssize_t thunderx_##_type##_##_name##_write(struct file *file, \
const char __user *data, \
size_t count, loff_t *ppos) \
{ \
struct thunderx_##_type *pdata = file->private_data; \
u64 val; \
int res; \
\
res = kstrtoull_from_user(data, count, 0, &val); \
\
if (!res) { \
writeq(val, pdata->regs + _reg); \
res = count; \
} \
\
return res; \
} \
\
DEBUGFS_STRUCT(_name, 0600, \
thunderx_##_type##_##_name##_write, \
thunderx_##_type##_##_name##_read)
#define LMC_DEBUGFS_ENT(_field) DEBUGFS_FIELD_ATTR(lmc, _field)
/*
* To get an ECC error injected, the following steps are needed:
* - Setup the ECC injection by writing the appropriate parameters:
* echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask0
* echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask2
* echo 0x802 > /sys/kernel/debug/<device number>/ecc_parity_test
* - Do the actual injection:
* echo 1 > /sys/kernel/debug/<device number>/inject_ecc
*/
static ssize_t thunderx_lmc_inject_int_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct thunderx_lmc *lmc = file->private_data;
u64 val;
int res;
res = kstrtoull_from_user(data, count, 0, &val);
if (!res) {
/* Trigger the interrupt */
writeq(val, lmc->regs + LMC_INT_W1S);
res = count;
}
return res;
}
static ssize_t thunderx_lmc_int_read(struct file *file,
char __user *data,
size_t count, loff_t *ppos)
{
struct thunderx_lmc *lmc = file->private_data;
char buf[20];
u64 lmc_int = readq(lmc->regs + LMC_INT);
snprintf(buf, sizeof(buf), "0x%016llx", lmc_int);
return simple_read_from_buffer(data, count, ppos, buf, sizeof(buf));
}
#define TEST_PATTERN 0xa5
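/*
 * Runs under stop_machine(): program the ECC corruption masks, then push
 * the test page out through L1 and L2 to DRAM and invalidate it again so
 * that the rigged check bits actually land in, and are re-read from, memory.
 */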
static int inject_ecc_fn(void *arg)
{
struct thunderx_lmc *lmc = arg;
uintptr_t addr, phys;
unsigned int cline_size = cache_line_size();
const unsigned int lines = PAGE_SIZE / cline_size;
unsigned int i, cl_idx;
addr = (uintptr_t)page_address(lmc->mem);
phys = (uintptr_t)page_to_phys(lmc->mem);
cl_idx = (phys & 0x7f) >> 4;
lmc->parity_test &= ~(7ULL << 8);
lmc->parity_test |= (cl_idx << 8);
writeq(lmc->mask0, lmc->regs + LMC_CHAR_MASK0);
writeq(lmc->mask2, lmc->regs + LMC_CHAR_MASK2);
writeq(lmc->parity_test, lmc->regs + LMC_ECC_PARITY_TEST);
readq(lmc->regs + LMC_CHAR_MASK0);
readq(lmc->regs + LMC_CHAR_MASK2);
readq(lmc->regs + LMC_ECC_PARITY_TEST);
for (i = 0; i < lines; i++) {
memset((void *)addr, TEST_PATTERN, cline_size);
barrier();
/*
* Flush L1 cachelines to the PoC (L2).
* This will cause cacheline eviction to the L2.
*/
asm volatile("dc civac, %0\n"
"dsb sy\n"
: : "r"(addr + i * cline_size));
}
for (i = 0; i < lines; i++) {
/*
* Flush L2 cachelines to the DRAM.
* This will cause cacheline eviction to the DRAM
* and ECC corruption according to the masks set.
*/
__asm__ volatile("sys #0,c11,C1,#2, %0\n"
: : "r"(phys + i * cline_size));
}
for (i = 0; i < lines; i++) {
/*
* Invalidate L2 cachelines.
* The subsequent load will cause cacheline fetch
* from the DRAM and an error interrupt
*/
__asm__ volatile("sys #0,c11,C1,#1, %0"
: : "r"(phys + i * cline_size));
}
for (i = 0; i < lines; i++) {
/*
* Invalidate L1 cachelines.
* The subsequent load will cause cacheline fetch
* from the L2 and/or DRAM
*/
asm volatile("dc ivac, %0\n"
"dsb sy\n"
: : "r"(addr + i * cline_size));
}
return 0;
}
static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct thunderx_lmc *lmc = file->private_data;
unsigned int cline_size = cache_line_size();
u8 *tmp;
void __iomem *addr;
unsigned int offs, timeout = 100000;
atomic_set(&lmc->ecc_int, 0);
lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
if (!lmc->mem)
return -ENOMEM;
tmp = kmalloc(cline_size, GFP_KERNEL);
if (!tmp) {
__free_pages(lmc->mem, 0);
return -ENOMEM;
}
addr = page_address(lmc->mem);
while (!atomic_read(&lmc->ecc_int) && timeout--) {
stop_machine(inject_ecc_fn, lmc, NULL);
for (offs = 0; offs < PAGE_SIZE; offs += cline_size) {
/*
* Do a load from the previously rigged location
* This should generate an error interrupt.
*/
memcpy(tmp, addr + offs, cline_size);
asm volatile("dsb ld\n");
}
}
kfree(tmp);
__free_pages(lmc->mem, 0);
return count;
}
LMC_DEBUGFS_ENT(mask0);
LMC_DEBUGFS_ENT(mask2);
LMC_DEBUGFS_ENT(parity_test);
DEBUGFS_STRUCT(inject_int, 0200, thunderx_lmc_inject_int_write, NULL);
DEBUGFS_STRUCT(inject_ecc, 0200, thunderx_lmc_inject_ecc_write, NULL);
DEBUGFS_STRUCT(int_w1c, 0400, NULL, thunderx_lmc_int_read);
static struct debugfs_entry *lmc_dfs_ents[] = {
&debugfs_mask0,
&debugfs_mask2,
&debugfs_parity_test,
&debugfs_inject_ecc,
&debugfs_inject_int,
&debugfs_int_w1c,
};
static int thunderx_create_debugfs_nodes(struct dentry *parent,
struct debugfs_entry *attrs[],
void *data,
size_t num)
{
int i;
struct dentry *ent;
if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
return 0;
if (!parent)
return -ENOENT;
for (i = 0; i < num; i++) {
ent = edac_debugfs_create_file(attrs[i]->name, attrs[i]->mode,
parent, data, &attrs[i]->fops);
if (IS_ERR(ent))
break;
}
return i;
}
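/*
 * Reassemble a physical address from the fields of an LMC fault-address
 * register, taking the bank XOR and L2C index aliasing into account when
 * they are enabled.
 */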
static phys_addr_t thunderx_faddr_to_phys(u64 faddr, struct thunderx_lmc *lmc)
{
phys_addr_t addr = 0;
int bank, xbits;
addr |= lmc->node << 40;
addr |= LMC_FADR_FDIMM(faddr) << lmc->dimm_lsb;
addr |= LMC_FADR_FBUNK(faddr) << lmc->rank_lsb;
addr |= LMC_FADR_FROW(faddr) << lmc->row_lsb;
addr |= (LMC_FADR_FCOL(faddr) >> 4) << lmc->col_hi_lsb;
bank = LMC_FADR_FBANK(faddr) << lmc->bank_lsb;
if (lmc->xor_bank)
bank ^= get_bits(addr, 12 + lmc->xbits, lmc->bank_width);
addr |= bank << lmc->bank_lsb;
xbits = PCI_FUNC(lmc->pdev->devfn);
if (lmc->l2c_alias)
xbits ^= get_bits(addr, 20, lmc->xbits) ^
get_bits(addr, 12, lmc->xbits);
addr |= xbits << 7;
return addr;
}
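/* Count the LMC PCI devices on the given node (all of them on !CONFIG_NUMA). */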
static unsigned int thunderx_get_num_lmcs(unsigned int node)
{
unsigned int number = 0;
struct pci_dev *pdev = NULL;
do {
pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_LMC,
pdev);
if (pdev) {
#ifdef CONFIG_NUMA
if (pdev->dev.numa_node == node)
number++;
#else
number++;
#endif
}
} while (pdev);
return number;
}
#define LMC_MESSAGE_SIZE 120
#define LMC_OTHER_SIZE (50 * ARRAY_SIZE(lmc_errors))
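/*
 * Hard IRQ half: clear the error-injection masks, snapshot the error
 * registers into the ring and ack the interrupt; decoding and reporting
 * happen in the threaded handler below.
 */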
static irqreturn_t thunderx_lmc_err_isr(int irq, void *dev_id)
{
struct mem_ctl_info *mci = dev_id;
struct thunderx_lmc *lmc = mci->pvt_info;
unsigned long head = ring_pos(lmc->ring_head, ARRAY_SIZE(lmc->err_ctx));
struct lmc_err_ctx *ctx = &lmc->err_ctx[head];
writeq(0, lmc->regs + LMC_CHAR_MASK0);
writeq(0, lmc->regs + LMC_CHAR_MASK2);
writeq(0x2, lmc->regs + LMC_ECC_PARITY_TEST);
ctx->reg_int = readq(lmc->regs + LMC_INT);
ctx->reg_fadr = readq(lmc->regs + LMC_FADR);
ctx->reg_nxm_fadr = readq(lmc->regs + LMC_NXM_FADR);
ctx->reg_scram_fadr = readq(lmc->regs + LMC_SCRAM_FADR);
ctx->reg_ecc_synd = readq(lmc->regs + LMC_ECC_SYND);
lmc->ring_head++;
atomic_set(&lmc->ecc_int, 1);
/* Clear the interrupt */
writeq(ctx->reg_int, lmc->regs + LMC_INT);
return IRQ_WAKE_THREAD;
}
static irqreturn_t thunderx_lmc_threaded_isr(int irq, void *dev_id)
{
struct mem_ctl_info *mci = dev_id;
struct thunderx_lmc *lmc = mci->pvt_info;
phys_addr_t phys_addr;
unsigned long tail;
struct lmc_err_ctx *ctx;
irqreturn_t ret = IRQ_NONE;
char *msg;
char *other;
msg = kmalloc(LMC_MESSAGE_SIZE, GFP_KERNEL);
other = kmalloc(LMC_OTHER_SIZE, GFP_KERNEL);
if (!msg || !other)
goto err_free;
while (CIRC_CNT(lmc->ring_head, lmc->ring_tail,
ARRAY_SIZE(lmc->err_ctx))) {
tail = ring_pos(lmc->ring_tail, ARRAY_SIZE(lmc->err_ctx));
ctx = &lmc->err_ctx[tail];
dev_dbg(&lmc->pdev->dev, "LMC_INT: %016llx\n",
ctx->reg_int);
dev_dbg(&lmc->pdev->dev, "LMC_FADR: %016llx\n",
ctx->reg_fadr);
dev_dbg(&lmc->pdev->dev, "LMC_NXM_FADR: %016llx\n",
ctx->reg_nxm_fadr);
dev_dbg(&lmc->pdev->dev, "LMC_SCRAM_FADR: %016llx\n",
ctx->reg_scram_fadr);
dev_dbg(&lmc->pdev->dev, "LMC_ECC_SYND: %016llx\n",
ctx->reg_ecc_synd);
snprintf(msg, LMC_MESSAGE_SIZE,
"DIMM %lld rank %lld bank %lld row %lld col %lld",
LMC_FADR_FDIMM(ctx->reg_scram_fadr),
LMC_FADR_FBUNK(ctx->reg_scram_fadr),
LMC_FADR_FBANK(ctx->reg_scram_fadr),
LMC_FADR_FROW(ctx->reg_scram_fadr),
LMC_FADR_FCOL(ctx->reg_scram_fadr));
decode_register(other, LMC_OTHER_SIZE, lmc_errors,
ctx->reg_int);
phys_addr = thunderx_faddr_to_phys(ctx->reg_fadr, lmc);
if (ctx->reg_int & LMC_INT_UE)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
phys_to_pfn(phys_addr),
offset_in_page(phys_addr),
0, -1, -1, -1, msg, other);
else if (ctx->reg_int & LMC_INT_CE)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
phys_to_pfn(phys_addr),
offset_in_page(phys_addr),
0, -1, -1, -1, msg, other);
lmc->ring_tail++;
}
ret = IRQ_HANDLED;
err_free:
kfree(msg);
kfree(other);
return ret;
}
static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
{ 0, },
};
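/* Build a unique MC index from the NUMA node and the LMC's PCI function. */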
static inline int pci_dev_to_mc_idx(struct pci_dev *pdev)
{
int node = dev_to_node(&pdev->dev);
int ret = PCI_FUNC(pdev->devfn);
ret += max(node, 0) << 3;
return ret;
}
static int thunderx_lmc_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct thunderx_lmc *lmc;
struct edac_mc_layer layer;
struct mem_ctl_info *mci;
u64 lmc_control, lmc_ddr_pll_ctl, lmc_config;
int ret;
u64 lmc_int;
	void __iomem *l2c_ioaddr;
layer.type = EDAC_MC_LAYER_SLOT;
layer.size = 2;
layer.is_virt_csrow = false;
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_lmc");
if (ret) {
dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
return ret;
}
mci = edac_mc_alloc(pci_dev_to_mc_idx(pdev), 1, &layer,
sizeof(struct thunderx_lmc));
if (!mci)
return -ENOMEM;
mci->pdev = &pdev->dev;
lmc = mci->pvt_info;
pci_set_drvdata(pdev, mci);
lmc->regs = pcim_iomap_table(pdev)[0];
lmc_control = readq(lmc->regs + LMC_CONTROL);
lmc_ddr_pll_ctl = readq(lmc->regs + LMC_DDR_PLL_CTL);
lmc_config = readq(lmc->regs + LMC_CONFIG);
if (lmc_control & LMC_CONTROL_RDIMM) {
mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
lmc_ddr_pll_ctl) ?
MEM_RDDR4 : MEM_RDDR3;
} else {
mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
lmc_ddr_pll_ctl) ?
MEM_DDR4 : MEM_DDR3;
}
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = "thunderx-lmc";
mci->ctl_name = "thunderx-lmc";
mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_NONE;
lmc->pdev = pdev;
lmc->msix_ent.entry = 0;
lmc->ring_head = 0;
lmc->ring_tail = 0;
ret = pci_enable_msix_exact(pdev, &lmc->msix_ent, 1);
if (ret) {
dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
goto err_free;
}
ret = devm_request_threaded_irq(&pdev->dev, lmc->msix_ent.vector,
thunderx_lmc_err_isr,
thunderx_lmc_threaded_isr, 0,
"[EDAC] ThunderX LMC", mci);
if (ret) {
dev_err(&pdev->dev, "Cannot set ISR: %d\n", ret);
goto err_free;
}
lmc->node = FIELD_GET(THUNDERX_NODE, pci_resource_start(pdev, 0));
lmc->xbits = thunderx_get_num_lmcs(lmc->node) >> 1;
lmc->bank_width = (FIELD_GET(LMC_DDR_PLL_CTL_DDR4, lmc_ddr_pll_ctl) &&
FIELD_GET(LMC_CONFIG_BG2, lmc_config)) ? 4 : 3;
lmc->pbank_lsb = (lmc_config >> 5) & 0xf;
lmc->dimm_lsb = 28 + lmc->pbank_lsb + lmc->xbits;
lmc->rank_lsb = lmc->dimm_lsb;
lmc->rank_lsb -= FIELD_GET(LMC_CONFIG_RANK_ENA, lmc_config) ? 1 : 0;
lmc->bank_lsb = 7 + lmc->xbits;
lmc->row_lsb = 14 + LMC_CONFIG_ROW_LSB(lmc_config) + lmc->xbits;
lmc->col_hi_lsb = lmc->bank_lsb + lmc->bank_width;
lmc->xor_bank = lmc_control & LMC_CONTROL_XOR_BANK;
l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node), PAGE_SIZE);
if (!l2c_ioaddr) {
dev_err(&pdev->dev, "Cannot map L2C_CTL\n");
ret = -ENOMEM;
goto err_free;
}
lmc->l2c_alias = !(readq(l2c_ioaddr) & L2C_CTL_DISIDXALIAS);
iounmap(l2c_ioaddr);
ret = edac_mc_add_mc(mci);
if (ret) {
dev_err(&pdev->dev, "Cannot add the MC: %d\n", ret);
goto err_free;
}
lmc_int = readq(lmc->regs + LMC_INT);
writeq(lmc_int, lmc->regs + LMC_INT);
writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1S);
if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
ret = thunderx_create_debugfs_nodes(mci->debugfs,
lmc_dfs_ents,
lmc,
ARRAY_SIZE(lmc_dfs_ents));
if (ret != ARRAY_SIZE(lmc_dfs_ents)) {
dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
ret, ret >= 0 ? " created" : "");
}
}
return 0;
err_free:
pci_set_drvdata(pdev, NULL);
edac_mc_free(mci);
return ret;
}
static void thunderx_lmc_remove(struct pci_dev *pdev)
{
struct mem_ctl_info *mci = pci_get_drvdata(pdev);
struct thunderx_lmc *lmc = mci->pvt_info;
writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1C);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
}
MODULE_DEVICE_TABLE(pci, thunderx_lmc_pci_tbl);
static struct pci_driver thunderx_lmc_driver = {
.name = "thunderx_lmc_edac",
.probe = thunderx_lmc_probe,
.remove = thunderx_lmc_remove,
.id_table = thunderx_lmc_pci_tbl,
};
/*---------------------- OCX driver ---------------------------------*/
#define PCI_DEVICE_ID_THUNDER_OCX 0xa013
#define OCX_LINK_INTS 3
#define OCX_INTS (OCX_LINK_INTS + 1)
#define OCX_RX_LANES 24
#define OCX_RX_LANE_STATS 15
#define OCX_COM_INT 0x100
#define OCX_COM_INT_W1S 0x108
#define OCX_COM_INT_ENA_W1S 0x110
#define OCX_COM_INT_ENA_W1C 0x118
#define OCX_COM_IO_BADID BIT(54)
#define OCX_COM_MEM_BADID BIT(53)
#define OCX_COM_COPR_BADID BIT(52)
#define OCX_COM_WIN_REQ_BADID BIT(51)
#define OCX_COM_WIN_REQ_TOUT BIT(50)
#define OCX_COM_RX_LANE GENMASK(23, 0)
#define OCX_COM_INT_CE (OCX_COM_IO_BADID | \
OCX_COM_MEM_BADID | \
OCX_COM_COPR_BADID | \
OCX_COM_WIN_REQ_BADID | \
OCX_COM_WIN_REQ_TOUT)
static const struct error_descr ocx_com_errors[] = {
{
.type = ERR_CORRECTED,
.mask = OCX_COM_IO_BADID,
.descr = "Invalid IO transaction node ID",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_MEM_BADID,
.descr = "Invalid memory transaction node ID",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_COPR_BADID,
.descr = "Invalid coprocessor transaction node ID",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_WIN_REQ_BADID,
.descr = "Invalid SLI transaction node ID",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_WIN_REQ_TOUT,
.descr = "Window/core request timeout",
},
{0, 0, NULL},
};
#define OCX_COM_LINKX_INT(x) (0x120 + (x) * 8)
#define OCX_COM_LINKX_INT_W1S(x) (0x140 + (x) * 8)
#define OCX_COM_LINKX_INT_ENA_W1S(x) (0x160 + (x) * 8)
#define OCX_COM_LINKX_INT_ENA_W1C(x) (0x180 + (x) * 8)
#define OCX_COM_LINK_BAD_WORD BIT(13)
#define OCX_COM_LINK_ALIGN_FAIL BIT(12)
#define OCX_COM_LINK_ALIGN_DONE BIT(11)
#define OCX_COM_LINK_UP BIT(10)
#define OCX_COM_LINK_STOP BIT(9)
#define OCX_COM_LINK_BLK_ERR BIT(8)
#define OCX_COM_LINK_REINIT BIT(7)
#define OCX_COM_LINK_LNK_DATA BIT(6)
#define OCX_COM_LINK_RXFIFO_DBE BIT(5)
#define OCX_COM_LINK_RXFIFO_SBE BIT(4)
#define OCX_COM_LINK_TXFIFO_DBE BIT(3)
#define OCX_COM_LINK_TXFIFO_SBE BIT(2)
#define OCX_COM_LINK_REPLAY_DBE BIT(1)
#define OCX_COM_LINK_REPLAY_SBE BIT(0)
static const struct error_descr ocx_com_link_errors[] = {
{
.type = ERR_CORRECTED,
.mask = OCX_COM_LINK_REPLAY_SBE,
.descr = "Replay buffer single-bit error",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_LINK_TXFIFO_SBE,
.descr = "TX FIFO single-bit error",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_LINK_RXFIFO_SBE,
.descr = "RX FIFO single-bit error",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_LINK_BLK_ERR,
.descr = "Block code error",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_LINK_ALIGN_FAIL,
.descr = "Link alignment failure",
},
{
.type = ERR_CORRECTED,
.mask = OCX_COM_LINK_BAD_WORD,
.descr = "Bad code word",
},
{
.type = ERR_UNCORRECTED,
.mask = OCX_COM_LINK_REPLAY_DBE,
.descr = "Replay buffer double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = OCX_COM_LINK_TXFIFO_DBE,
.descr = "TX FIFO double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = OCX_COM_LINK_RXFIFO_DBE,
.descr = "RX FIFO double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = OCX_COM_LINK_STOP,
.descr = "Link stopped",
},
{0, 0, NULL},
};
#define OCX_COM_LINK_INT_UE (OCX_COM_LINK_REPLAY_DBE | \
OCX_COM_LINK_TXFIFO_DBE | \
OCX_COM_LINK_RXFIFO_DBE | \
OCX_COM_LINK_STOP)
#define OCX_COM_LINK_INT_CE (OCX_COM_LINK_REPLAY_SBE | \
OCX_COM_LINK_TXFIFO_SBE | \
OCX_COM_LINK_RXFIFO_SBE | \
OCX_COM_LINK_BLK_ERR | \
OCX_COM_LINK_ALIGN_FAIL | \
OCX_COM_LINK_BAD_WORD)
#define OCX_LNE_INT(x) (0x8018 + (x) * 0x100)
#define OCX_LNE_INT_EN(x) (0x8020 + (x) * 0x100)
#define OCX_LNE_BAD_CNT(x) (0x8028 + (x) * 0x100)
#define OCX_LNE_CFG(x) (0x8000 + (x) * 0x100)
#define OCX_LNE_STAT(x, y) (0x8040 + (x) * 0x100 + (y) * 8)
#define OCX_LNE_CFG_RX_BDRY_LOCK_DIS BIT(8)
#define OCX_LNE_CFG_RX_STAT_WRAP_DIS BIT(2)
#define OCX_LNE_CFG_RX_STAT_RDCLR BIT(1)
#define OCX_LNE_CFG_RX_STAT_ENA BIT(0)
#define OCX_LANE_BAD_64B67B BIT(8)
#define OCX_LANE_DSKEW_FIFO_OVFL BIT(5)
#define OCX_LANE_SCRM_SYNC_LOSS BIT(4)
#define OCX_LANE_UKWN_CNTL_WORD BIT(3)
#define OCX_LANE_CRC32_ERR BIT(2)
#define OCX_LANE_BDRY_SYNC_LOSS BIT(1)
#define OCX_LANE_SERDES_LOCK_LOSS BIT(0)
#define OCX_COM_LANE_INT_UE (0)
#define OCX_COM_LANE_INT_CE (OCX_LANE_SERDES_LOCK_LOSS | \
OCX_LANE_BDRY_SYNC_LOSS | \
OCX_LANE_CRC32_ERR | \
OCX_LANE_UKWN_CNTL_WORD | \
OCX_LANE_SCRM_SYNC_LOSS | \
OCX_LANE_DSKEW_FIFO_OVFL | \
OCX_LANE_BAD_64B67B)
static const struct error_descr ocx_lane_errors[] = {
{
.type = ERR_CORRECTED,
.mask = OCX_LANE_SERDES_LOCK_LOSS,
.descr = "RX SerDes lock lost",
},
{
.type = ERR_CORRECTED,
.mask = OCX_LANE_BDRY_SYNC_LOSS,
.descr = "RX word boundary lost",
},
{
.type = ERR_CORRECTED,
.mask = OCX_LANE_CRC32_ERR,
.descr = "CRC32 error",
},
{
.type = ERR_CORRECTED,
.mask = OCX_LANE_UKWN_CNTL_WORD,
.descr = "Unknown control word",
},
{
.type = ERR_CORRECTED,
.mask = OCX_LANE_SCRM_SYNC_LOSS,
.descr = "Scrambler synchronization lost",
},
{
.type = ERR_CORRECTED,
.mask = OCX_LANE_DSKEW_FIFO_OVFL,
.descr = "RX deskew FIFO overflow",
},
{
.type = ERR_CORRECTED,
.mask = OCX_LANE_BAD_64B67B,
.descr = "Bad 64B/67B codeword",
},
{0, 0, NULL},
};
#define OCX_LNE_INT_ENA_ALL (GENMASK(9, 8) | GENMASK(6, 0))
#define OCX_COM_INT_ENA_ALL (GENMASK(54, 50) | GENMASK(23, 0))
#define OCX_COM_LINKX_INT_ENA_ALL (GENMASK(13, 12) | \
GENMASK(9, 7) | GENMASK(5, 0))
#define OCX_TLKX_ECC_CTL(x) (0x10018 + (x) * 0x2000)
#define OCX_RLKX_ECC_CTL(x) (0x18018 + (x) * 0x2000)
struct ocx_com_err_ctx {
u64 reg_com_int;
u64 reg_lane_int[OCX_RX_LANES];
u64 reg_lane_stat11[OCX_RX_LANES];
};
struct ocx_link_err_ctx {
u64 reg_com_link_int;
int link;
};
struct thunderx_ocx {
void __iomem *regs;
int com_link;
struct pci_dev *pdev;
struct edac_device_ctl_info *edac_dev;
struct dentry *debugfs;
struct msix_entry msix_ent[OCX_INTS];
struct ocx_com_err_ctx com_err_ctx[RING_ENTRIES];
struct ocx_link_err_ctx link_err_ctx[RING_ENTRIES];
unsigned long com_ring_head;
unsigned long com_ring_tail;
unsigned long link_ring_head;
unsigned long link_ring_tail;
};
#define OCX_MESSAGE_SIZE SZ_1K
#define OCX_OTHER_SIZE (50 * ARRAY_SIZE(ocx_com_link_errors))
/* The OCX interrupts are requested as threaded IRQs: the hard handlers
 * below only snapshot the registers, the *_threaded_isr counterparts
 * decode and report the errors.
 */
static irqreturn_t thunderx_ocx_com_isr(int irq, void *irq_id)
{
struct msix_entry *msix = irq_id;
struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
msix_ent[msix->entry]);
int lane;
unsigned long head = ring_pos(ocx->com_ring_head,
ARRAY_SIZE(ocx->com_err_ctx));
struct ocx_com_err_ctx *ctx = &ocx->com_err_ctx[head];
ctx->reg_com_int = readq(ocx->regs + OCX_COM_INT);
for (lane = 0; lane < OCX_RX_LANES; lane++) {
ctx->reg_lane_int[lane] =
readq(ocx->regs + OCX_LNE_INT(lane));
ctx->reg_lane_stat11[lane] =
readq(ocx->regs + OCX_LNE_STAT(lane, 11));
writeq(ctx->reg_lane_int[lane], ocx->regs + OCX_LNE_INT(lane));
}
writeq(ctx->reg_com_int, ocx->regs + OCX_COM_INT);
ocx->com_ring_head++;
return IRQ_WAKE_THREAD;
}
static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
{
struct msix_entry *msix = irq_id;
struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
msix_ent[msix->entry]);
irqreturn_t ret = IRQ_NONE;
unsigned long tail;
struct ocx_com_err_ctx *ctx;
int lane;
char *msg;
char *other;
msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
if (!msg || !other)
goto err_free;
while (CIRC_CNT(ocx->com_ring_head, ocx->com_ring_tail,
ARRAY_SIZE(ocx->com_err_ctx))) {
tail = ring_pos(ocx->com_ring_tail,
ARRAY_SIZE(ocx->com_err_ctx));
ctx = &ocx->com_err_ctx[tail];
snprintf(msg, OCX_MESSAGE_SIZE, "%s: OCX_COM_INT: %016llx",
ocx->edac_dev->ctl_name, ctx->reg_com_int);
decode_register(other, OCX_OTHER_SIZE,
ocx_com_errors, ctx->reg_com_int);
		strlcat(msg, other, OCX_MESSAGE_SIZE);
for (lane = 0; lane < OCX_RX_LANES; lane++)
if (ctx->reg_com_int & BIT(lane)) {
snprintf(other, OCX_OTHER_SIZE,
"\n\tOCX_LNE_INT[%02d]: %016llx OCX_LNE_STAT11[%02d]: %016llx",
lane, ctx->reg_lane_int[lane],
lane, ctx->reg_lane_stat11[lane]);
				strlcat(msg, other, OCX_MESSAGE_SIZE);
decode_register(other, OCX_OTHER_SIZE,
ocx_lane_errors,
ctx->reg_lane_int[lane]);
				strlcat(msg, other, OCX_MESSAGE_SIZE);
}
if (ctx->reg_com_int & OCX_COM_INT_CE)
edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
ocx->com_ring_tail++;
}
ret = IRQ_HANDLED;
err_free:
kfree(other);
kfree(msg);
return ret;
}
static irqreturn_t thunderx_ocx_lnk_isr(int irq, void *irq_id)
{
struct msix_entry *msix = irq_id;
struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
msix_ent[msix->entry]);
unsigned long head = ring_pos(ocx->link_ring_head,
ARRAY_SIZE(ocx->link_err_ctx));
struct ocx_link_err_ctx *ctx = &ocx->link_err_ctx[head];
ctx->link = msix->entry;
ctx->reg_com_link_int = readq(ocx->regs + OCX_COM_LINKX_INT(ctx->link));
writeq(ctx->reg_com_link_int, ocx->regs + OCX_COM_LINKX_INT(ctx->link));
ocx->link_ring_head++;
return IRQ_WAKE_THREAD;
}
static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
{
struct msix_entry *msix = irq_id;
struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
msix_ent[msix->entry]);
irqreturn_t ret = IRQ_NONE;
unsigned long tail;
struct ocx_link_err_ctx *ctx;
char *msg;
char *other;
msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
if (!msg || !other)
goto err_free;
while (CIRC_CNT(ocx->link_ring_head, ocx->link_ring_tail,
ARRAY_SIZE(ocx->link_err_ctx))) {
		tail = ring_pos(ocx->link_ring_tail,
ARRAY_SIZE(ocx->link_err_ctx));
ctx = &ocx->link_err_ctx[tail];
snprintf(msg, OCX_MESSAGE_SIZE,
"%s: OCX_COM_LINK_INT[%d]: %016llx",
ocx->edac_dev->ctl_name,
ctx->link, ctx->reg_com_link_int);
decode_register(other, OCX_OTHER_SIZE,
ocx_com_link_errors, ctx->reg_com_link_int);
		strlcat(msg, other, OCX_MESSAGE_SIZE);
if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
else if (ctx->reg_com_link_int & OCX_COM_LINK_INT_CE)
edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
ocx->link_ring_tail++;
}
ret = IRQ_HANDLED;
err_free:
kfree(other);
kfree(msg);
return ret;
}
#define OCX_DEBUGFS_ATTR(_name, _reg) DEBUGFS_REG_ATTR(ocx, _name, _reg)
OCX_DEBUGFS_ATTR(tlk0_ecc_ctl, OCX_TLKX_ECC_CTL(0));
OCX_DEBUGFS_ATTR(tlk1_ecc_ctl, OCX_TLKX_ECC_CTL(1));
OCX_DEBUGFS_ATTR(tlk2_ecc_ctl, OCX_TLKX_ECC_CTL(2));
OCX_DEBUGFS_ATTR(rlk0_ecc_ctl, OCX_RLKX_ECC_CTL(0));
OCX_DEBUGFS_ATTR(rlk1_ecc_ctl, OCX_RLKX_ECC_CTL(1));
OCX_DEBUGFS_ATTR(rlk2_ecc_ctl, OCX_RLKX_ECC_CTL(2));
OCX_DEBUGFS_ATTR(com_link0_int, OCX_COM_LINKX_INT_W1S(0));
OCX_DEBUGFS_ATTR(com_link1_int, OCX_COM_LINKX_INT_W1S(1));
OCX_DEBUGFS_ATTR(com_link2_int, OCX_COM_LINKX_INT_W1S(2));
OCX_DEBUGFS_ATTR(lne00_badcnt, OCX_LNE_BAD_CNT(0));
OCX_DEBUGFS_ATTR(lne01_badcnt, OCX_LNE_BAD_CNT(1));
OCX_DEBUGFS_ATTR(lne02_badcnt, OCX_LNE_BAD_CNT(2));
OCX_DEBUGFS_ATTR(lne03_badcnt, OCX_LNE_BAD_CNT(3));
OCX_DEBUGFS_ATTR(lne04_badcnt, OCX_LNE_BAD_CNT(4));
OCX_DEBUGFS_ATTR(lne05_badcnt, OCX_LNE_BAD_CNT(5));
OCX_DEBUGFS_ATTR(lne06_badcnt, OCX_LNE_BAD_CNT(6));
OCX_DEBUGFS_ATTR(lne07_badcnt, OCX_LNE_BAD_CNT(7));
OCX_DEBUGFS_ATTR(lne08_badcnt, OCX_LNE_BAD_CNT(8));
OCX_DEBUGFS_ATTR(lne09_badcnt, OCX_LNE_BAD_CNT(9));
OCX_DEBUGFS_ATTR(lne10_badcnt, OCX_LNE_BAD_CNT(10));
OCX_DEBUGFS_ATTR(lne11_badcnt, OCX_LNE_BAD_CNT(11));
OCX_DEBUGFS_ATTR(lne12_badcnt, OCX_LNE_BAD_CNT(12));
OCX_DEBUGFS_ATTR(lne13_badcnt, OCX_LNE_BAD_CNT(13));
OCX_DEBUGFS_ATTR(lne14_badcnt, OCX_LNE_BAD_CNT(14));
OCX_DEBUGFS_ATTR(lne15_badcnt, OCX_LNE_BAD_CNT(15));
OCX_DEBUGFS_ATTR(lne16_badcnt, OCX_LNE_BAD_CNT(16));
OCX_DEBUGFS_ATTR(lne17_badcnt, OCX_LNE_BAD_CNT(17));
OCX_DEBUGFS_ATTR(lne18_badcnt, OCX_LNE_BAD_CNT(18));
OCX_DEBUGFS_ATTR(lne19_badcnt, OCX_LNE_BAD_CNT(19));
OCX_DEBUGFS_ATTR(lne20_badcnt, OCX_LNE_BAD_CNT(20));
OCX_DEBUGFS_ATTR(lne21_badcnt, OCX_LNE_BAD_CNT(21));
OCX_DEBUGFS_ATTR(lne22_badcnt, OCX_LNE_BAD_CNT(22));
OCX_DEBUGFS_ATTR(lne23_badcnt, OCX_LNE_BAD_CNT(23));
OCX_DEBUGFS_ATTR(com_int, OCX_COM_INT_W1S);
static struct debugfs_entry *ocx_dfs_ents[] = {
&debugfs_tlk0_ecc_ctl,
&debugfs_tlk1_ecc_ctl,
&debugfs_tlk2_ecc_ctl,
&debugfs_rlk0_ecc_ctl,
&debugfs_rlk1_ecc_ctl,
&debugfs_rlk2_ecc_ctl,
&debugfs_com_link0_int,
&debugfs_com_link1_int,
&debugfs_com_link2_int,
&debugfs_lne00_badcnt,
&debugfs_lne01_badcnt,
&debugfs_lne02_badcnt,
&debugfs_lne03_badcnt,
&debugfs_lne04_badcnt,
&debugfs_lne05_badcnt,
&debugfs_lne06_badcnt,
&debugfs_lne07_badcnt,
&debugfs_lne08_badcnt,
&debugfs_lne09_badcnt,
&debugfs_lne10_badcnt,
&debugfs_lne11_badcnt,
&debugfs_lne12_badcnt,
&debugfs_lne13_badcnt,
&debugfs_lne14_badcnt,
&debugfs_lne15_badcnt,
&debugfs_lne16_badcnt,
&debugfs_lne17_badcnt,
&debugfs_lne18_badcnt,
&debugfs_lne19_badcnt,
&debugfs_lne20_badcnt,
&debugfs_lne21_badcnt,
&debugfs_lne22_badcnt,
&debugfs_lne23_badcnt,
&debugfs_com_int,
};
static const struct pci_device_id thunderx_ocx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_OCX) },
{ 0, },
};
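/*
 * Put the per-lane RX statistics counters into read-to-clear mode, stop
 * further accumulation and read them all once so stale counts are discarded.
 */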
static void thunderx_ocx_clearstats(struct thunderx_ocx *ocx)
{
int lane, stat, cfg;
for (lane = 0; lane < OCX_RX_LANES; lane++) {
cfg = readq(ocx->regs + OCX_LNE_CFG(lane));
cfg |= OCX_LNE_CFG_RX_STAT_RDCLR;
cfg &= ~OCX_LNE_CFG_RX_STAT_ENA;
writeq(cfg, ocx->regs + OCX_LNE_CFG(lane));
for (stat = 0; stat < OCX_RX_LANE_STATS; stat++)
readq(ocx->regs + OCX_LNE_STAT(lane, stat));
}
}
static int thunderx_ocx_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct thunderx_ocx *ocx;
struct edac_device_ctl_info *edac_dev;
char name[32];
int idx;
int i;
int ret;
u64 reg;
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_ocx");
if (ret) {
dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
return ret;
}
idx = edac_device_alloc_index();
snprintf(name, sizeof(name), "OCX%d", idx);
edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_ocx),
name, 1, "CCPI", 1,
0, NULL, 0, idx);
if (!edac_dev) {
dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
return -ENOMEM;
}
ocx = edac_dev->pvt_info;
ocx->edac_dev = edac_dev;
ocx->com_ring_head = 0;
ocx->com_ring_tail = 0;
ocx->link_ring_head = 0;
ocx->link_ring_tail = 0;
ocx->regs = pcim_iomap_table(pdev)[0];
if (!ocx->regs) {
dev_err(&pdev->dev, "Cannot map PCI resources\n");
ret = -ENODEV;
goto err_free;
}
ocx->pdev = pdev;
for (i = 0; i < OCX_INTS; i++) {
ocx->msix_ent[i].entry = i;
ocx->msix_ent[i].vector = 0;
}
ret = pci_enable_msix_exact(pdev, ocx->msix_ent, OCX_INTS);
if (ret) {
dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
goto err_free;
}
for (i = 0; i < OCX_INTS; i++) {
ret = devm_request_threaded_irq(&pdev->dev,
ocx->msix_ent[i].vector,
(i == 3) ?
thunderx_ocx_com_isr :
thunderx_ocx_lnk_isr,
(i == 3) ?
thunderx_ocx_com_threaded_isr :
thunderx_ocx_lnk_threaded_isr,
0, "[EDAC] ThunderX OCX",
&ocx->msix_ent[i]);
if (ret)
goto err_free;
}
edac_dev->dev = &pdev->dev;
edac_dev->dev_name = dev_name(&pdev->dev);
edac_dev->mod_name = "thunderx-ocx";
edac_dev->ctl_name = "thunderx-ocx";
ret = edac_device_add_device(edac_dev);
if (ret) {
dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
goto err_free;
}
if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
ocx->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
ret = thunderx_create_debugfs_nodes(ocx->debugfs,
ocx_dfs_ents,
ocx,
ARRAY_SIZE(ocx_dfs_ents));
if (ret != ARRAY_SIZE(ocx_dfs_ents)) {
dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
ret, ret >= 0 ? " created" : "");
}
}
pci_set_drvdata(pdev, edac_dev);
thunderx_ocx_clearstats(ocx);
for (i = 0; i < OCX_RX_LANES; i++) {
writeq(OCX_LNE_INT_ENA_ALL,
ocx->regs + OCX_LNE_INT_EN(i));
reg = readq(ocx->regs + OCX_LNE_INT(i));
writeq(reg, ocx->regs + OCX_LNE_INT(i));
}
for (i = 0; i < OCX_LINK_INTS; i++) {
reg = readq(ocx->regs + OCX_COM_LINKX_INT(i));
writeq(reg, ocx->regs + OCX_COM_LINKX_INT(i));
writeq(OCX_COM_LINKX_INT_ENA_ALL,
ocx->regs + OCX_COM_LINKX_INT_ENA_W1S(i));
}
reg = readq(ocx->regs + OCX_COM_INT);
writeq(reg, ocx->regs + OCX_COM_INT);
writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1S);
return 0;
err_free:
edac_device_free_ctl_info(edac_dev);
return ret;
}
static void thunderx_ocx_remove(struct pci_dev *pdev)
{
struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
struct thunderx_ocx *ocx = edac_dev->pvt_info;
int i;
writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1C);
for (i = 0; i < OCX_INTS; i++) {
writeq(OCX_COM_LINKX_INT_ENA_ALL,
ocx->regs + OCX_COM_LINKX_INT_ENA_W1C(i));
}
edac_debugfs_remove_recursive(ocx->debugfs);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(edac_dev);
}
MODULE_DEVICE_TABLE(pci, thunderx_ocx_pci_tbl);
static struct pci_driver thunderx_ocx_driver = {
.name = "thunderx_ocx_edac",
.probe = thunderx_ocx_probe,
.remove = thunderx_ocx_remove,
.id_table = thunderx_ocx_pci_tbl,
};
/*---------------------- L2C driver ---------------------------------*/
#define PCI_DEVICE_ID_THUNDER_L2C_TAD 0xa02e
#define PCI_DEVICE_ID_THUNDER_L2C_CBC 0xa02f
#define PCI_DEVICE_ID_THUNDER_L2C_MCI 0xa030
#define L2C_TAD_INT_W1C 0x40000
#define L2C_TAD_INT_W1S 0x40008
#define L2C_TAD_INT_ENA_W1C 0x40020
#define L2C_TAD_INT_ENA_W1S 0x40028
#define L2C_TAD_INT_L2DDBE BIT(1)
#define L2C_TAD_INT_SBFSBE BIT(2)
#define L2C_TAD_INT_SBFDBE BIT(3)
#define L2C_TAD_INT_FBFSBE BIT(4)
#define L2C_TAD_INT_FBFDBE BIT(5)
#define L2C_TAD_INT_TAGDBE BIT(9)
#define L2C_TAD_INT_RDDISLMC BIT(15)
#define L2C_TAD_INT_WRDISLMC BIT(16)
#define L2C_TAD_INT_LFBTO BIT(17)
#define L2C_TAD_INT_GSYNCTO BIT(18)
#define L2C_TAD_INT_RTGSBE BIT(32)
#define L2C_TAD_INT_RTGDBE BIT(33)
#define L2C_TAD_INT_RDDISOCI BIT(34)
#define L2C_TAD_INT_WRDISOCI BIT(35)
#define L2C_TAD_INT_ECC (L2C_TAD_INT_L2DDBE | \
L2C_TAD_INT_SBFSBE | L2C_TAD_INT_SBFDBE | \
L2C_TAD_INT_FBFSBE | L2C_TAD_INT_FBFDBE)
#define L2C_TAD_INT_CE (L2C_TAD_INT_SBFSBE | \
L2C_TAD_INT_FBFSBE)
#define L2C_TAD_INT_UE (L2C_TAD_INT_L2DDBE | \
L2C_TAD_INT_SBFDBE | \
L2C_TAD_INT_FBFDBE | \
L2C_TAD_INT_TAGDBE | \
L2C_TAD_INT_RTGDBE | \
L2C_TAD_INT_WRDISOCI | \
L2C_TAD_INT_RDDISOCI | \
L2C_TAD_INT_WRDISLMC | \
L2C_TAD_INT_RDDISLMC | \
L2C_TAD_INT_LFBTO | \
L2C_TAD_INT_GSYNCTO)
static const struct error_descr l2_tad_errors[] = {
{
.type = ERR_CORRECTED,
.mask = L2C_TAD_INT_SBFSBE,
.descr = "SBF single-bit error",
},
{
.type = ERR_CORRECTED,
.mask = L2C_TAD_INT_FBFSBE,
.descr = "FBF single-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_L2DDBE,
.descr = "L2D double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_SBFDBE,
.descr = "SBF double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_FBFDBE,
.descr = "FBF double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_TAGDBE,
.descr = "TAG double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_RTGDBE,
.descr = "RTG double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_WRDISOCI,
.descr = "Write to a disabled CCPI",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_RDDISOCI,
.descr = "Read from a disabled CCPI",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_WRDISLMC,
.descr = "Write to a disabled LMC",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_RDDISLMC,
.descr = "Read from a disabled LMC",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_LFBTO,
.descr = "LFB entry timeout",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_TAD_INT_GSYNCTO,
.descr = "Global sync CCPI timeout",
},
{0, 0, NULL},
};
#define L2C_TAD_INT_TAG (L2C_TAD_INT_TAGDBE)
#define L2C_TAD_INT_RTG (L2C_TAD_INT_RTGDBE)
#define L2C_TAD_INT_DISLMC (L2C_TAD_INT_WRDISLMC | L2C_TAD_INT_RDDISLMC)
#define L2C_TAD_INT_DISOCI (L2C_TAD_INT_WRDISOCI | L2C_TAD_INT_RDDISOCI)
#define L2C_TAD_INT_ENA_ALL (L2C_TAD_INT_ECC | L2C_TAD_INT_TAG | \
L2C_TAD_INT_RTG | \
L2C_TAD_INT_DISLMC | L2C_TAD_INT_DISOCI | \
L2C_TAD_INT_LFBTO)
#define L2C_TAD_TIMETWO 0x50000
#define L2C_TAD_TIMEOUT 0x50100
#define L2C_TAD_ERR 0x60000
#define L2C_TAD_TQD_ERR 0x60100
#define L2C_TAD_TTG_ERR 0x60200
#define L2C_CBC_INT_W1C 0x60000
#define L2C_CBC_INT_RSDSBE BIT(0)
#define L2C_CBC_INT_RSDDBE BIT(1)
#define L2C_CBC_INT_RSD (L2C_CBC_INT_RSDSBE | L2C_CBC_INT_RSDDBE)
#define L2C_CBC_INT_MIBSBE BIT(4)
#define L2C_CBC_INT_MIBDBE BIT(5)
#define L2C_CBC_INT_MIB (L2C_CBC_INT_MIBSBE | L2C_CBC_INT_MIBDBE)
#define L2C_CBC_INT_IORDDISOCI BIT(6)
#define L2C_CBC_INT_IOWRDISOCI BIT(7)
#define L2C_CBC_INT_IODISOCI (L2C_CBC_INT_IORDDISOCI | \
L2C_CBC_INT_IOWRDISOCI)
#define L2C_CBC_INT_CE (L2C_CBC_INT_RSDSBE | L2C_CBC_INT_MIBSBE)
#define L2C_CBC_INT_UE (L2C_CBC_INT_RSDDBE | L2C_CBC_INT_MIBDBE)
static const struct error_descr l2_cbc_errors[] = {
{
.type = ERR_CORRECTED,
.mask = L2C_CBC_INT_RSDSBE,
.descr = "RSD single-bit error",
},
{
.type = ERR_CORRECTED,
.mask = L2C_CBC_INT_MIBSBE,
.descr = "MIB single-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_CBC_INT_RSDDBE,
.descr = "RSD double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_CBC_INT_MIBDBE,
.descr = "MIB double-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_CBC_INT_IORDDISOCI,
.descr = "Read from a disabled CCPI",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_CBC_INT_IOWRDISOCI,
.descr = "Write to a disabled CCPI",
},
{0, 0, NULL},
};
#define L2C_CBC_INT_W1S 0x60008
#define L2C_CBC_INT_ENA_W1C 0x60020
#define L2C_CBC_INT_ENA_ALL (L2C_CBC_INT_RSD | L2C_CBC_INT_MIB | \
L2C_CBC_INT_IODISOCI)
#define L2C_CBC_INT_ENA_W1S 0x60028
#define L2C_CBC_IODISOCIERR 0x80008
#define L2C_CBC_IOCERR 0x80010
#define L2C_CBC_RSDERR 0x80018
#define L2C_CBC_MIBERR 0x80020
#define L2C_MCI_INT_W1C 0x0
#define L2C_MCI_INT_VBFSBE BIT(0)
#define L2C_MCI_INT_VBFDBE BIT(1)
static const struct error_descr l2_mci_errors[] = {
{
.type = ERR_CORRECTED,
.mask = L2C_MCI_INT_VBFSBE,
.descr = "VBF single-bit error",
},
{
.type = ERR_UNCORRECTED,
.mask = L2C_MCI_INT_VBFDBE,
.descr = "VBF double-bit error",
},
{0, 0, NULL},
};
#define L2C_MCI_INT_W1S 0x8
#define L2C_MCI_INT_ENA_W1C 0x20
#define L2C_MCI_INT_ENA_ALL (L2C_MCI_INT_VBFSBE | L2C_MCI_INT_VBFDBE)
#define L2C_MCI_INT_ENA_W1S 0x28
#define L2C_MCI_ERR 0x10000
#define L2C_MESSAGE_SIZE SZ_1K
#define L2C_OTHER_SIZE (50 * ARRAY_SIZE(l2_tad_errors))
struct l2c_err_ctx {
char *reg_ext_name;
u64 reg_int;
u64 reg_ext;
};
struct thunderx_l2c {
void __iomem *regs;
struct pci_dev *pdev;
struct edac_device_ctl_info *edac_dev;
struct dentry *debugfs;
int index;
struct msix_entry msix_ent;
struct l2c_err_ctx err_ctx[RING_ENTRIES];
unsigned long ring_head;
unsigned long ring_tail;
};
static irqreturn_t thunderx_l2c_tad_isr(int irq, void *irq_id)
{
struct msix_entry *msix = irq_id;
struct thunderx_l2c *tad = container_of(msix, struct thunderx_l2c,
msix_ent);
unsigned long head = ring_pos(tad->ring_head, ARRAY_SIZE(tad->err_ctx));
struct l2c_err_ctx *ctx = &tad->err_ctx[head];
ctx->reg_int = readq(tad->regs + L2C_TAD_INT_W1C);
if (ctx->reg_int & L2C_TAD_INT_ECC) {
ctx->reg_ext_name = "TQD_ERR";
ctx->reg_ext = readq(tad->regs + L2C_TAD_TQD_ERR);
} else if (ctx->reg_int & L2C_TAD_INT_TAG) {
ctx->reg_ext_name = "TTG_ERR";
ctx->reg_ext = readq(tad->regs + L2C_TAD_TTG_ERR);
} else if (ctx->reg_int & L2C_TAD_INT_LFBTO) {
ctx->reg_ext_name = "TIMEOUT";
ctx->reg_ext = readq(tad->regs + L2C_TAD_TIMEOUT);
} else if (ctx->reg_int & L2C_TAD_INT_DISOCI) {
ctx->reg_ext_name = "ERR";
ctx->reg_ext = readq(tad->regs + L2C_TAD_ERR);
}
writeq(ctx->reg_int, tad->regs + L2C_TAD_INT_W1C);
tad->ring_head++;
return IRQ_WAKE_THREAD;
}
static irqreturn_t thunderx_l2c_cbc_isr(int irq, void *irq_id)
{
struct msix_entry *msix = irq_id;
struct thunderx_l2c *cbc = container_of(msix, struct thunderx_l2c,
msix_ent);
unsigned long head = ring_pos(cbc->ring_head, ARRAY_SIZE(cbc->err_ctx));
struct l2c_err_ctx *ctx = &cbc->err_ctx[head];
ctx->reg_int = readq(cbc->regs + L2C_CBC_INT_W1C);
if (ctx->reg_int & L2C_CBC_INT_RSD) {
ctx->reg_ext_name = "RSDERR";
ctx->reg_ext = readq(cbc->regs + L2C_CBC_RSDERR);
} else if (ctx->reg_int & L2C_CBC_INT_MIB) {
ctx->reg_ext_name = "MIBERR";
ctx->reg_ext = readq(cbc->regs + L2C_CBC_MIBERR);
} else if (ctx->reg_int & L2C_CBC_INT_IODISOCI) {
ctx->reg_ext_name = "IODISOCIERR";
ctx->reg_ext = readq(cbc->regs + L2C_CBC_IODISOCIERR);
}
writeq(ctx->reg_int, cbc->regs + L2C_CBC_INT_W1C);
cbc->ring_head++;
return IRQ_WAKE_THREAD;
}
static irqreturn_t thunderx_l2c_mci_isr(int irq, void *irq_id)
{
struct msix_entry *msix = irq_id;
struct thunderx_l2c *mci = container_of(msix, struct thunderx_l2c,
msix_ent);
unsigned long head = ring_pos(mci->ring_head, ARRAY_SIZE(mci->err_ctx));
struct l2c_err_ctx *ctx = &mci->err_ctx[head];
ctx->reg_int = readq(mci->regs + L2C_MCI_INT_W1C);
ctx->reg_ext = readq(mci->regs + L2C_MCI_ERR);
writeq(ctx->reg_int, mci->regs + L2C_MCI_INT_W1C);
ctx->reg_ext_name = "ERR";
mci->ring_head++;
return IRQ_WAKE_THREAD;
}
static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
{
struct msix_entry *msix = irq_id;
struct thunderx_l2c *l2c = container_of(msix, struct thunderx_l2c,
msix_ent);
unsigned long tail = ring_pos(l2c->ring_tail, ARRAY_SIZE(l2c->err_ctx));
struct l2c_err_ctx *ctx = &l2c->err_ctx[tail];
irqreturn_t ret = IRQ_NONE;
u64 mask_ue, mask_ce;
const struct error_descr *l2_errors;
char *reg_int_name;
char *msg;
char *other;
	msg = kmalloc(L2C_MESSAGE_SIZE, GFP_KERNEL);
	other = kmalloc(L2C_OTHER_SIZE, GFP_KERNEL);
if (!msg || !other)
goto err_free;
switch (l2c->pdev->device) {
case PCI_DEVICE_ID_THUNDER_L2C_TAD:
reg_int_name = "L2C_TAD_INT";
mask_ue = L2C_TAD_INT_UE;
mask_ce = L2C_TAD_INT_CE;
l2_errors = l2_tad_errors;
break;
case PCI_DEVICE_ID_THUNDER_L2C_CBC:
reg_int_name = "L2C_CBC_INT";
mask_ue = L2C_CBC_INT_UE;
mask_ce = L2C_CBC_INT_CE;
l2_errors = l2_cbc_errors;
break;
case PCI_DEVICE_ID_THUNDER_L2C_MCI:
reg_int_name = "L2C_MCI_INT";
mask_ue = L2C_MCI_INT_VBFDBE;
mask_ce = L2C_MCI_INT_VBFSBE;
l2_errors = l2_mci_errors;
break;
default:
dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
l2c->pdev->device);
goto err_free;
}
while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
ARRAY_SIZE(l2c->err_ctx))) {
snprintf(msg, L2C_MESSAGE_SIZE,
"%s: %s: %016llx, %s: %016llx",
l2c->edac_dev->ctl_name, reg_int_name, ctx->reg_int,
ctx->reg_ext_name, ctx->reg_ext);
decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
strncat(msg, other, L2C_MESSAGE_SIZE);
if (ctx->reg_int & mask_ue)
edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
else if (ctx->reg_int & mask_ce)
edac_device_handle_ce(l2c->edac_dev, 0, 0, msg);
l2c->ring_tail++;
}
ret = IRQ_HANDLED;
err_free:
kfree(other);
kfree(msg);
return ret;
}
#define L2C_DEBUGFS_ATTR(_name, _reg) DEBUGFS_REG_ATTR(l2c, _name, _reg)
L2C_DEBUGFS_ATTR(tad_int, L2C_TAD_INT_W1S);
static struct debugfs_entry *l2c_tad_dfs_ents[] = {
&debugfs_tad_int,
};
L2C_DEBUGFS_ATTR(cbc_int, L2C_CBC_INT_W1S);
static struct debugfs_entry *l2c_cbc_dfs_ents[] = {
&debugfs_cbc_int,
};
L2C_DEBUGFS_ATTR(mci_int, L2C_MCI_INT_W1S);
static struct debugfs_entry *l2c_mci_dfs_ents[] = {
&debugfs_mci_int,
};
static const struct pci_device_id thunderx_l2c_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_TAD), },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_CBC), },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_MCI), },
{ 0, },
};
static int thunderx_l2c_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct thunderx_l2c *l2c;
struct edac_device_ctl_info *edac_dev;
struct debugfs_entry **l2c_devattr;
size_t dfs_entries;
irqreturn_t (*thunderx_l2c_isr)(int, void *) = NULL;
char name[32];
const char *fmt;
u64 reg_en_offs, reg_en_mask;
int idx;
int ret;
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_l2c");
if (ret) {
dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
return ret;
}
switch (pdev->device) {
case PCI_DEVICE_ID_THUNDER_L2C_TAD:
thunderx_l2c_isr = thunderx_l2c_tad_isr;
l2c_devattr = l2c_tad_dfs_ents;
dfs_entries = ARRAY_SIZE(l2c_tad_dfs_ents);
fmt = "L2C-TAD%d";
reg_en_offs = L2C_TAD_INT_ENA_W1S;
reg_en_mask = L2C_TAD_INT_ENA_ALL;
break;
case PCI_DEVICE_ID_THUNDER_L2C_CBC:
thunderx_l2c_isr = thunderx_l2c_cbc_isr;
l2c_devattr = l2c_cbc_dfs_ents;
dfs_entries = ARRAY_SIZE(l2c_cbc_dfs_ents);
fmt = "L2C-CBC%d";
reg_en_offs = L2C_CBC_INT_ENA_W1S;
reg_en_mask = L2C_CBC_INT_ENA_ALL;
break;
case PCI_DEVICE_ID_THUNDER_L2C_MCI:
thunderx_l2c_isr = thunderx_l2c_mci_isr;
l2c_devattr = l2c_mci_dfs_ents;
dfs_entries = ARRAY_SIZE(l2c_mci_dfs_ents);
fmt = "L2C-MCI%d";
reg_en_offs = L2C_MCI_INT_ENA_W1S;
reg_en_mask = L2C_MCI_INT_ENA_ALL;
break;
default:
		/* Should never get here */
dev_err(&pdev->dev, "Unsupported PCI device: %04x\n",
pdev->device);
return -EINVAL;
}
idx = edac_device_alloc_index();
snprintf(name, sizeof(name), fmt, idx);
edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_l2c),
name, 1, "L2C", 1, 0,
NULL, 0, idx);
if (!edac_dev) {
dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
return -ENOMEM;
}
l2c = edac_dev->pvt_info;
l2c->edac_dev = edac_dev;
l2c->regs = pcim_iomap_table(pdev)[0];
if (!l2c->regs) {
dev_err(&pdev->dev, "Cannot map PCI resources\n");
ret = -ENODEV;
goto err_free;
}
l2c->pdev = pdev;
l2c->ring_head = 0;
l2c->ring_tail = 0;
l2c->msix_ent.entry = 0;
l2c->msix_ent.vector = 0;
ret = pci_enable_msix_exact(pdev, &l2c->msix_ent, 1);
if (ret) {
dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
goto err_free;
}
ret = devm_request_threaded_irq(&pdev->dev, l2c->msix_ent.vector,
thunderx_l2c_isr,
thunderx_l2c_threaded_isr,
0, "[EDAC] ThunderX L2C",
&l2c->msix_ent);
if (ret)
goto err_free;
edac_dev->dev = &pdev->dev;
edac_dev->dev_name = dev_name(&pdev->dev);
edac_dev->mod_name = "thunderx-l2c";
edac_dev->ctl_name = "thunderx-l2c";
ret = edac_device_add_device(edac_dev);
if (ret) {
dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
goto err_free;
}
if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
ret = thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
l2c, dfs_entries);
if (ret != dfs_entries) {
dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
ret, ret >= 0 ? " created" : "");
}
}
pci_set_drvdata(pdev, edac_dev);
writeq(reg_en_mask, l2c->regs + reg_en_offs);
return 0;
err_free:
edac_device_free_ctl_info(edac_dev);
return ret;
}
static void thunderx_l2c_remove(struct pci_dev *pdev)
{
struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
struct thunderx_l2c *l2c = edac_dev->pvt_info;
switch (pdev->device) {
case PCI_DEVICE_ID_THUNDER_L2C_TAD:
writeq(L2C_TAD_INT_ENA_ALL, l2c->regs + L2C_TAD_INT_ENA_W1C);
break;
case PCI_DEVICE_ID_THUNDER_L2C_CBC:
writeq(L2C_CBC_INT_ENA_ALL, l2c->regs + L2C_CBC_INT_ENA_W1C);
break;
case PCI_DEVICE_ID_THUNDER_L2C_MCI:
writeq(L2C_MCI_INT_ENA_ALL, l2c->regs + L2C_MCI_INT_ENA_W1C);
break;
}
edac_debugfs_remove_recursive(l2c->debugfs);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(edac_dev);
}
MODULE_DEVICE_TABLE(pci, thunderx_l2c_pci_tbl);
static struct pci_driver thunderx_l2c_driver = {
.name = "thunderx_l2c_edac",
.probe = thunderx_l2c_probe,
.remove = thunderx_l2c_remove,
.id_table = thunderx_l2c_pci_tbl,
};
static int __init thunderx_edac_init(void)
{
int rc = 0;
if (ghes_get_devices())
return -EBUSY;
rc = pci_register_driver(&thunderx_lmc_driver);
if (rc)
return rc;
rc = pci_register_driver(&thunderx_ocx_driver);
if (rc)
goto err_lmc;
rc = pci_register_driver(&thunderx_l2c_driver);
if (rc)
goto err_ocx;
return rc;
err_ocx:
pci_unregister_driver(&thunderx_ocx_driver);
err_lmc:
pci_unregister_driver(&thunderx_lmc_driver);
return rc;
}
static void __exit thunderx_edac_exit(void)
{
pci_unregister_driver(&thunderx_l2c_driver);
pci_unregister_driver(&thunderx_ocx_driver);
pci_unregister_driver(&thunderx_lmc_driver);
}
module_init(thunderx_edac_init);
module_exit(thunderx_edac_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Cavium, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Cavium ThunderX");
| linux-master | drivers/edac/thunderx_edac.c |
/*
* Intel D82875P Memory Controller kernel module
* (C) 2003 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Thayne Harbaugh
* Contributors:
* Wang Zhenyu at intel.com
*
* $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
*
* Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"
#define EDAC_MOD_STR "i82875p_edac"
#define i82875p_printk(level, fmt, arg...) \
edac_printk(level, "i82875p", fmt, ##arg)
#define i82875p_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_82875_0
#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
#endif /* PCI_DEVICE_ID_INTEL_82875_0 */
#ifndef PCI_DEVICE_ID_INTEL_82875_6
#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
/* four csrows in dual channel, eight in single channel */
#define I82875P_NR_DIMMS 8
#define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans))
/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
*
* 31:12 block address
* 11:0 reserved
*/
#define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
*
* 7:0 DRAM ECC Syndrome
*/
#define I82875P_DES 0x5d /* DRAM Error Status (8b)
*
* 7:1 reserved
* 0 Error channel 0/1
*/
#define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
*
* 15:10 reserved
* 9 non-DRAM lock error (ndlock)
* 8 Sftwr Generated SMI
* 7 ECC UE
* 6 reserved
* 5 MCH detects unimplemented cycle
* 4 AGP access outside GA
* 3 Invalid AGP access
* 2 Invalid GA translation table
* 1 Unsupported AGP command
* 0 ECC CE
*/
#define I82875P_ERRCMD 0xca /* Error Command (16b)
*
* 15:10 reserved
* 9 SERR on non-DRAM lock
* 8 SERR on ECC UE
* 7 SERR on ECC CE
* 6 target abort on high exception
* 5 detect unimplemented cyc
* 4 AGP access outside of GA
* 3 SERR on invalid AGP access
* 2 invalid translation table
* 1 SERR on unsupported AGP command
* 0 reserved
*/
/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
*
* 15:10 reserved
* 9 fast back-to-back - ro 0
* 8 SERR enable - ro 0
* 7 addr/data stepping - ro 0
* 6 parity err enable - ro 0
* 5 VGA palette snoop - ro 0
* 4 mem wr & invalidate - ro 0
* 3 special cycle - ro 0
* 2 bus master - ro 0
* 1 mem access dev6 - 0(dis),1(en)
* 0 IO access dev3 - 0(dis),1(en)
*/
#define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
*
* 31:12 mem base addr [31:12]
* 11:4 address mask - ro 0
* 3 prefetchable - ro 0(non),1(pre)
* 2:1 mem type - ro 0
* 0 mem space - ro 0
*/
/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
#define I82875P_DRB_SHIFT 26 /* 64MiB grain */
#define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
*
* 7 reserved
* 6:0 64MiB row boundary addr
*/
#define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
*
* 7 reserved
* 6:4 row attr row 1
* 3 reserved
* 2:0 row attr row 0
*
* 000 = 4KiB
* 001 = 8KiB
* 010 = 16KiB
* 011 = 32KiB
*/
#define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
*
* 31:30 reserved
* 29 init complete
* 28:23 reserved
* 22:21 nr chan 00=1,01=2
* 20 reserved
* 19:18 Data Integ Mode 00=none,01=ecc
* 17:11 reserved
* 10:8 refresh mode
* 7 reserved
* 6:4 mode select
* 3:2 reserved
* 1:0 DRAM type 01=DDR
*/
enum i82875p_chips {
I82875P = 0,
};
struct i82875p_pvt {
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window;
};
struct i82875p_dev_info {
const char *ctl_name;
};
struct i82875p_error_info {
u16 errsts;
u32 eap;
u8 des;
u8 derrsyn;
u16 errsts2;
};
static const struct i82875p_dev_info i82875p_devs[] = {
[I82875P] = {
.ctl_name = "i82875p"},
};
static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
* already registered driver
*/
static struct edac_pci_ctl_info *i82875p_pci;
static void i82875p_get_error_info(struct mem_ctl_info *mci,
struct i82875p_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
* registers at once and the registers can transition from CE being
* overwritten by UE.
*/
pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);
if (!(info->errsts & 0x0081))
return;
pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
pci_read_config_byte(pdev, I82875P_DES, &info->des);
pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);
/*
	 * If the error is the same for both reads then the first set of
	 * reads is valid.  If there is a change then there was a CE with
	 * no info and the second set of reads is valid and should be the
	 * UE info.
*/
if ((info->errsts ^ info->errsts2) & 0x0081) {
pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
pci_read_config_byte(pdev, I82875P_DES, &info->des);
pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
}
pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
}
static int i82875p_process_error_info(struct mem_ctl_info *mci,
struct i82875p_error_info *info,
int handle_errors)
{
int row, multi_chan;
multi_chan = mci->csrows[0]->nr_channels - 1;
if (!(info->errsts & 0x0081))
return 0;
if (!handle_errors)
return 1;
if ((info->errsts ^ info->errsts2) & 0x0081) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1,
"UE overwrote CE", "");
info->errsts = info->errsts2;
}
info->eap >>= PAGE_SHIFT;
row = edac_mc_find_csrow_by_page(mci, info->eap);
if (info->errsts & 0x0080)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
info->eap, 0, 0,
row, -1, -1,
"i82875p UE", "");
else
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
info->eap, 0, info->derrsyn,
row, multi_chan ? (info->des & 0x1) : 0,
-1, "i82875p CE", "");
return 1;
}
static void i82875p_check(struct mem_ctl_info *mci)
{
struct i82875p_error_info info;
i82875p_get_error_info(mci, &info);
i82875p_process_error_info(mci, &info, 1);
}
/* Return 0 on success or 1 on failure. */
static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
struct pci_dev **ovrfl_pdev,
void __iomem **ovrfl_window)
{
struct pci_dev *dev;
void __iomem *window;
*ovrfl_pdev = NULL;
*ovrfl_window = NULL;
dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
if (dev == NULL) {
/* Intel tells BIOS developers to hide device 6 which
* configures the overflow device access containing
* the DRBs - this is where we expose device 6.
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
*/
pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
if (dev == NULL)
return 1;
pci_bus_assign_resources(dev->bus);
pci_bus_add_device(dev);
}
*ovrfl_pdev = dev;
if (pci_enable_device(dev)) {
i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
"device\n", __func__);
return 1;
}
if (pci_request_regions(dev, pci_name(dev))) {
#ifdef CORRECT_BIOS
goto fail0;
#endif
}
/* cache is irrelevant for PCI bus reads/writes */
window = pci_ioremap_bar(dev, 0);
if (window == NULL) {
i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
__func__);
goto fail1;
}
*ovrfl_window = window;
return 0;
fail1:
pci_release_regions(dev);
#ifdef CORRECT_BIOS
fail0:
pci_disable_device(dev);
#endif
/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
return 1;
}
/* Return 1 if dual channel mode is active. Else return 0. */
static inline int dual_channel_active(u32 drc)
{
return (drc >> 21) & 0x1;
}
static void i82875p_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev,
void __iomem * ovrfl_window, u32 drc)
{
struct csrow_info *csrow;
struct dimm_info *dimm;
unsigned nr_chans = dual_channel_active(drc) + 1;
unsigned long last_cumul_size;
u8 value;
	u32 drc_ddim;		/* DRAM Data Integrity Mode 0=none, 1=ECC */
u32 cumul_size, nr_pages;
int index, j;
drc_ddim = (drc >> 18) & 0x1;
last_cumul_size = 0;
/* The dram row boundary (DRB) reg values are boundary address
* for each DRAM row with a granularity of 32 or 64MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
* contain the total memory contained in all eight rows.
*/
for (index = 0; index < mci->nr_csrows; index++) {
csrow = mci->csrows[index];
value = readb(ovrfl_window + I82875P_DRB + index);
cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
for (j = 0; j < nr_chans; j++) {
dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / nr_chans;
			dimm->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
dimm->mtype = MEM_DDR;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
}
}
}
static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct i82875p_pvt *pvt;
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window;
u32 drc;
u32 nr_chans;
struct i82875p_error_info discard;
edac_dbg(0, "\n");
if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
return -ENODEV;
drc = readl(ovrfl_window + I82875P_DRC);
nr_chans = dual_channel_active(drc) + 1;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = I82875P_NR_CSROWS(nr_chans);
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_chans;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail0;
}
edac_dbg(3, "init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_UNKNOWN;
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82875p_check;
mci->ctl_page_to_phys = NULL;
edac_dbg(3, "init pvt\n");
pvt = (struct i82875p_pvt *)mci->pvt_info;
pvt->ovrfl_pdev = ovrfl_pdev;
pvt->ovrfl_window = ovrfl_window;
i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
i82875p_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail1;
}
/* allocating generic PCI control info */
i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i82875p_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
/* get this far and it's successful */
edac_dbg(3, "success\n");
return 0;
fail1:
edac_mc_free(mci);
fail0:
iounmap(ovrfl_window);
pci_release_regions(ovrfl_pdev);
pci_disable_device(ovrfl_pdev);
/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
return rc;
}
/* returns count (>= 0), or negative on error */
static int i82875p_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc;
edac_dbg(0, "\n");
i82875p_printk(KERN_INFO, "i82875p init one\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = i82875p_probe1(pdev, ent->driver_data);
if (mci_pdev == NULL)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void i82875p_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i82875p_pvt *pvt = NULL;
edac_dbg(0, "\n");
if (i82875p_pci)
edac_pci_release_generic_ctl(i82875p_pci);
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
pvt = (struct i82875p_pvt *)mci->pvt_info;
if (pvt->ovrfl_window)
iounmap(pvt->ovrfl_window);
if (pvt->ovrfl_pdev) {
#ifdef CORRECT_BIOS
pci_release_regions(pvt->ovrfl_pdev);
#endif /*CORRECT_BIOS */
pci_disable_device(pvt->ovrfl_pdev);
pci_dev_put(pvt->ovrfl_pdev);
}
edac_mc_free(mci);
}
static const struct pci_device_id i82875p_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82875P},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
static struct pci_driver i82875p_driver = {
.name = EDAC_MOD_STR,
.probe = i82875p_init_one,
.remove = i82875p_remove_one,
.id_table = i82875p_pci_tbl,
};
static int __init i82875p_init(void)
{
int pci_rc;
edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i82875p_driver);
if (pci_rc < 0)
goto fail0;
if (mci_pdev == NULL) {
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82875_0, NULL);
if (!mci_pdev) {
edac_dbg(0, "875p pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
if (pci_rc < 0) {
edac_dbg(0, "875p init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&i82875p_driver);
fail0:
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit i82875p_exit(void)
{
edac_dbg(3, "\n");
i82875p_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
pci_unregister_driver(&i82875p_driver);
}
module_init(i82875p_init);
module_exit(i82875p_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| linux-master | drivers/edac/i82875p_edac.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Cavium, Inc.
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/octeon.h>
#include "edac_module.h"
static void octeon_pci_poll(struct edac_pci_ctl_info *pci)
{
union cvmx_pci_cfg01 cfg01;
cfg01.u32 = octeon_npi_read32(CVMX_NPI_PCI_CFG01);
if (cfg01.s.dpe) { /* Detected parity error */
edac_pci_handle_pe(pci, pci->ctl_name);
cfg01.s.dpe = 1; /* Reset */
octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
}
if (cfg01.s.sse) {
edac_pci_handle_npe(pci, "Signaled System Error");
cfg01.s.sse = 1; /* Reset */
octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
}
if (cfg01.s.rma) {
edac_pci_handle_npe(pci, "Received Master Abort");
cfg01.s.rma = 1; /* Reset */
octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
}
if (cfg01.s.rta) {
edac_pci_handle_npe(pci, "Received Target Abort");
cfg01.s.rta = 1; /* Reset */
octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
}
if (cfg01.s.sta) {
edac_pci_handle_npe(pci, "Signaled Target Abort");
cfg01.s.sta = 1; /* Reset */
octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
}
if (cfg01.s.mdpe) {
edac_pci_handle_npe(pci, "Master Data Parity Error");
cfg01.s.mdpe = 1; /* Reset */
octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
}
}
static int octeon_pci_probe(struct platform_device *pdev)
{
struct edac_pci_ctl_info *pci;
int res = 0;
pci = edac_pci_alloc_ctl_info(0, "octeon_pci_err");
if (!pci)
return -ENOMEM;
pci->dev = &pdev->dev;
platform_set_drvdata(pdev, pci);
pci->dev_name = dev_name(&pdev->dev);
pci->mod_name = "octeon-pci";
pci->ctl_name = "octeon_pci_err";
pci->edac_check = octeon_pci_poll;
if (edac_pci_add_device(pci, 0) > 0) {
pr_err("%s: edac_pci_add_device() failed\n", __func__);
goto err;
}
return 0;
err:
edac_pci_free_ctl_info(pci);
return res;
}
static int octeon_pci_remove(struct platform_device *pdev)
{
struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
edac_pci_del_device(&pdev->dev);
edac_pci_free_ctl_info(pci);
return 0;
}
static struct platform_driver octeon_pci_driver = {
.probe = octeon_pci_probe,
.remove = octeon_pci_remove,
.driver = {
.name = "octeon_pci_edac",
}
};
module_platform_driver(octeon_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <[email protected]>");
| linux-master | drivers/edac/octeon_edac-pci.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_buf.c
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <[email protected]>
* Copyright (C) 2002 Frank Mori Hess <[email protected]>
*/
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
#endif
static void comedi_buf_map_kref_release(struct kref *kref)
{
struct comedi_buf_map *bm =
container_of(kref, struct comedi_buf_map, refcount);
struct comedi_buf_page *buf;
unsigned int i;
if (bm->page_list) {
if (bm->dma_dir != DMA_NONE) {
/*
* DMA buffer was allocated as a single block.
* Address is in page_list[0].
*/
buf = &bm->page_list[0];
dma_free_coherent(bm->dma_hw_dev,
PAGE_SIZE * bm->n_pages,
buf->virt_addr, buf->dma_addr);
} else {
for (i = 0; i < bm->n_pages; i++) {
buf = &bm->page_list[i];
ClearPageReserved(virt_to_page(buf->virt_addr));
free_page((unsigned long)buf->virt_addr);
}
}
vfree(bm->page_list);
}
if (bm->dma_dir != DMA_NONE)
put_device(bm->dma_hw_dev);
kfree(bm);
}
static void __comedi_buf_free(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
struct comedi_buf_map *bm;
unsigned long flags;
if (async->prealloc_buf) {
if (s->async_dma_dir == DMA_NONE)
vunmap(async->prealloc_buf);
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
}
spin_lock_irqsave(&s->spin_lock, flags);
bm = async->buf_map;
async->buf_map = NULL;
spin_unlock_irqrestore(&s->spin_lock, flags);
comedi_buf_map_put(bm);
}
static struct comedi_buf_map *
comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
unsigned int n_pages)
{
struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned int i;
bm = kzalloc(sizeof(*bm), GFP_KERNEL);
if (!bm)
return NULL;
kref_init(&bm->refcount);
bm->dma_dir = dma_dir;
if (bm->dma_dir != DMA_NONE) {
/* Need ref to hardware device to free buffer later. */
bm->dma_hw_dev = get_device(dev->hw_dev);
}
bm->page_list = vzalloc(sizeof(*buf) * n_pages);
if (!bm->page_list)
goto err;
if (bm->dma_dir != DMA_NONE) {
void *virt_addr;
dma_addr_t dma_addr;
/*
* Currently, the DMA buffer needs to be allocated as a
* single block so that it can be mmap()'ed.
*/
virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
PAGE_SIZE * n_pages, &dma_addr,
GFP_KERNEL);
if (!virt_addr)
goto err;
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
buf->virt_addr = virt_addr + (i << PAGE_SHIFT);
buf->dma_addr = dma_addr + (i << PAGE_SHIFT);
}
bm->n_pages = i;
} else {
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
if (!buf->virt_addr)
break;
SetPageReserved(virt_to_page(buf->virt_addr));
}
bm->n_pages = i;
if (i < n_pages)
goto err;
}
return bm;
err:
comedi_buf_map_put(bm);
return NULL;
}
static void __comedi_buf_alloc(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int n_pages)
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned long flags;
unsigned int i;
if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
dev_err(dev->class_dev,
"dma buffer allocation not supported\n");
return;
}
bm = comedi_buf_map_alloc(dev, s->async_dma_dir, n_pages);
if (!bm)
return;
spin_lock_irqsave(&s->spin_lock, flags);
async->buf_map = bm;
spin_unlock_irqrestore(&s->spin_lock, flags);
if (bm->dma_dir != DMA_NONE) {
/*
* DMA buffer was allocated as a single block.
* Address is in page_list[0].
*/
buf = &bm->page_list[0];
async->prealloc_buf = buf->virt_addr;
} else {
pages = vmalloc(sizeof(struct page *) * n_pages);
if (!pages)
return;
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
pages[i] = virt_to_page(buf->virt_addr);
}
/* vmap the pages to prealloc_buf */
async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
COMEDI_PAGE_PROTECTION);
vfree(pages);
}
}
void comedi_buf_map_get(struct comedi_buf_map *bm)
{
if (bm)
kref_get(&bm->refcount);
}
int comedi_buf_map_put(struct comedi_buf_map *bm)
{
if (bm)
return kref_put(&bm->refcount, comedi_buf_map_kref_release);
return 1;
}
/* helper for "access" vm operation */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
void *buf, int len, int write)
{
unsigned int pgoff = offset_in_page(offset);
unsigned long pg = offset >> PAGE_SHIFT;
int done = 0;
while (done < len && pg < bm->n_pages) {
int l = min_t(int, len - done, PAGE_SIZE - pgoff);
void *b = bm->page_list[pg].virt_addr + pgoff;
if (write)
memcpy(b, buf, l);
else
memcpy(buf, b, l);
buf += l;
done += l;
pg++;
pgoff = 0;
}
return done;
}
/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
struct comedi_buf_map *bm = NULL;
unsigned long flags;
if (!async)
return NULL;
spin_lock_irqsave(&s->spin_lock, flags);
bm = async->buf_map;
/* only want it if buffer pages allocated */
if (bm && bm->n_pages)
comedi_buf_map_get(bm);
else
bm = NULL;
spin_unlock_irqrestore(&s->spin_lock, flags);
return bm;
}
bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
struct comedi_buf_map *bm = s->async->buf_map;
return bm && (kref_read(&bm->refcount) > 1);
}
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size)
{
struct comedi_async *async = s->async;
lockdep_assert_held(&dev->mutex);
/* Round up new_size to multiple of PAGE_SIZE */
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
/* if no change is required, do nothing */
if (async->prealloc_buf && async->prealloc_bufsz == new_size)
return 0;
/* deallocate old buffer */
__comedi_buf_free(dev, s);
/* allocate new buffer */
if (new_size) {
unsigned int n_pages = new_size >> PAGE_SHIFT;
__comedi_buf_alloc(dev, s, n_pages);
if (!async->prealloc_buf) {
/* allocation failed */
__comedi_buf_free(dev, s);
return -ENOMEM;
}
}
async->prealloc_bufsz = new_size;
return 0;
}
void comedi_buf_reset(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
async->buf_write_alloc_count = 0;
async->buf_write_count = 0;
async->buf_read_alloc_count = 0;
async->buf_read_count = 0;
async->buf_write_ptr = 0;
async->buf_read_ptr = 0;
async->cur_chan = 0;
async->scans_done = 0;
async->scan_progress = 0;
async->munge_chan = 0;
async->munge_count = 0;
async->munge_ptr = 0;
async->events = 0;
}
static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
return free_end - async->buf_write_alloc_count;
}
unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
return free_end - async->buf_write_count;
}
/**
* comedi_buf_write_alloc() - Reserve buffer space for writing
* @s: COMEDI subdevice.
* @nbytes: Maximum space to reserve in bytes.
*
* Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
* data buffer associated with the subdevice. The amount reserved is limited
* by the space available.
*
* Return: The amount of space reserved in bytes.
*/
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int unalloc = comedi_buf_write_n_unalloc(s);
if (nbytes > unalloc)
nbytes = unalloc;
async->buf_write_alloc_count += nbytes;
/*
* ensure the async buffer 'counts' are read and updated
* before we write data to the write-alloc'ed buffer space
*/
smp_mb();
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
/*
* munging is applied to data by core as it passes between user
* and kernel space
*/
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int num_bytes)
{
struct comedi_async *async = s->async;
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
return num_bytes;
}
/* don't munge partial samples */
num_bytes -= num_bytes % num_sample_bytes;
while (count < num_bytes) {
int block_size = num_bytes - count;
unsigned int buf_end;
buf_end = async->prealloc_bufsz - async->munge_ptr;
if (block_size > buf_end)
block_size = buf_end;
s->munge(s->device, s,
async->prealloc_buf + async->munge_ptr,
block_size, async->munge_chan);
/*
* ensure data is munged in buffer before the
* async buffer munge_count is incremented
*/
smp_wmb();
async->munge_chan += block_size / num_sample_bytes;
async->munge_chan %= async->cmd.chanlist_len;
async->munge_count += block_size;
async->munge_ptr += block_size;
async->munge_ptr %= async->prealloc_bufsz;
count += block_size;
}
return count;
}
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
return async->buf_write_alloc_count - async->buf_write_count;
}
/**
* comedi_buf_write_free() - Free buffer space after it is written
* @s: COMEDI subdevice.
* @nbytes: Maximum space to free in bytes.
*
* Free up to @nbytes bytes of space previously reserved for writing in the
* COMEDI acquisition data buffer associated with the subdevice. The amount of
* space freed is limited to the amount that was reserved. The freed space is
* assumed to have been filled with sample data by the writer.
*
* If the samples in the freed space need to be "munged", do so here. The
* freed space becomes available for allocation by the reader.
*
* Return: The amount of space freed in bytes.
*/
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int allocated = comedi_buf_write_n_allocated(s);
if (nbytes > allocated)
nbytes = allocated;
async->buf_write_count += nbytes;
async->buf_write_ptr += nbytes;
comedi_buf_munge(s, async->buf_write_count - async->munge_count);
if (async->buf_write_ptr >= async->prealloc_bufsz)
async->buf_write_ptr %= async->prealloc_bufsz;
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);
/**
* comedi_buf_read_n_available() - Determine amount of readable buffer space
* @s: COMEDI subdevice.
*
* Determine the amount of readable buffer space in the COMEDI acquisition data
* buffer associated with the subdevice. The readable buffer space is that
* which has been freed by the writer and "munged" to the sample data format
* expected by COMEDI if necessary.
*
* Return: The amount of readable buffer space.
*/
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
unsigned int num_bytes;
if (!async)
return 0;
num_bytes = async->munge_count - async->buf_read_count;
/*
* ensure the async buffer 'counts' are read before we
* attempt to read data from the buffer
*/
smp_rmb();
return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
/**
* comedi_buf_read_alloc() - Reserve buffer space for reading
* @s: COMEDI subdevice.
* @nbytes: Maximum space to reserve in bytes.
*
* Reserve up to @nbytes bytes of previously written and "munged" buffer space
* for reading in the COMEDI acquisition data buffer associated with the
* subdevice. The amount reserved is limited to the space available. The
* reader can read from the reserved space and then free it. A reader is also
* allowed to read from the space before reserving it as long as it determines
* the amount of readable data available, but the space needs to be marked as
* reserved before it can be freed.
*
* Return: The amount of space reserved in bytes.
*/
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int available;
available = async->munge_count - async->buf_read_alloc_count;
if (nbytes > available)
nbytes = available;
async->buf_read_alloc_count += nbytes;
/*
* ensure the async buffer 'counts' are read before we
* attempt to read data from the read-alloc'ed buffer space
*/
smp_rmb();
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
return async->buf_read_alloc_count - async->buf_read_count;
}
/**
* comedi_buf_read_free() - Free buffer space after it has been read
* @s: COMEDI subdevice.
* @nbytes: Maximum space to free in bytes.
*
* Free up to @nbytes bytes of buffer space previously reserved for reading in
* the COMEDI acquisition data buffer associated with the subdevice. The
* amount of space freed is limited to the amount that was reserved.
*
* The freed space becomes available for allocation by the writer.
*
* Return: The amount of space freed in bytes.
*/
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int allocated;
/*
* ensure data has been read out of buffer before
* the async read count is incremented
*/
smp_mb();
allocated = comedi_buf_read_n_allocated(async);
if (nbytes > allocated)
nbytes = allocated;
async->buf_read_count += nbytes;
async->buf_read_ptr += nbytes;
async->buf_read_ptr %= async->prealloc_bufsz;
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
const void *data, unsigned int num_bytes)
{
struct comedi_async *async = s->async;
unsigned int write_ptr = async->buf_write_ptr;
while (num_bytes) {
unsigned int block_size;
if (write_ptr + num_bytes > async->prealloc_bufsz)
block_size = async->prealloc_bufsz - write_ptr;
else
block_size = num_bytes;
memcpy(async->prealloc_buf + write_ptr, data, block_size);
data += block_size;
num_bytes -= block_size;
write_ptr = 0;
}
}
static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
void *dest, unsigned int nbytes)
{
void *src;
struct comedi_async *async = s->async;
unsigned int read_ptr = async->buf_read_ptr;
while (nbytes) {
unsigned int block_size;
src = async->prealloc_buf + read_ptr;
if (nbytes >= async->prealloc_bufsz - read_ptr)
block_size = async->prealloc_bufsz - read_ptr;
else
block_size = nbytes;
memcpy(dest, src, block_size);
nbytes -= block_size;
dest += block_size;
read_ptr = 0;
}
}
/**
* comedi_buf_write_samples() - Write sample data to COMEDI buffer
* @s: COMEDI subdevice.
* @data: Pointer to source samples.
* @nsamples: Number of samples to write.
*
* Write up to @nsamples samples to the COMEDI acquisition data buffer
* associated with the subdevice, mark it as written and update the
* acquisition scan progress. If there is not enough room for the specified
* number of samples, the number of samples written is limited to the number
* that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
* acquisition to terminate with an overrun error. Set the %COMEDI_CB_BLOCK
* event flag if any samples are written to cause waiting tasks to be woken
* when the event flags are processed.
*
* Return: The amount of data written in bytes.
*/
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
const void *data, unsigned int nsamples)
{
unsigned int max_samples;
unsigned int nbytes;
/*
* Make sure there is enough room in the buffer for all the samples.
* If not, clamp the nsamples to the number that will fit, flag the
* buffer overrun and add the samples that fit.
*/
max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
if (nsamples > max_samples) {
dev_warn(s->device->class_dev, "buffer overrun\n");
s->async->events |= COMEDI_CB_OVERFLOW;
nsamples = max_samples;
}
if (nsamples == 0)
return 0;
nbytes = comedi_buf_write_alloc(s,
comedi_samples_to_bytes(s, nsamples));
comedi_buf_memcpy_to(s, data, nbytes);
comedi_buf_write_free(s, nbytes);
comedi_inc_scan_progress(s, nbytes);
s->async->events |= COMEDI_CB_BLOCK;
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
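/*
 * Illustrative sketch (not part of this file): a low-level driver's
 * interrupt handler typically drains its hardware FIFO into the COMEDI
 * buffer with comedi_buf_write_samples() and then lets the core process
 * the event flags.  The MYDRV_* register offset and the single-sample
 * FIFO read below are hypothetical placeholders.
 *
 *	static irqreturn_t mydrv_ai_interrupt(int irq, void *d)
 *	{
 *		struct comedi_device *dev = d;
 *		struct comedi_subdevice *s = dev->read_subdev;
 *		unsigned short sample;
 *
 *		sample = inw(dev->iobase + MYDRV_AI_FIFO_REG);
 *		comedi_buf_write_samples(s, &sample, 1);
 *
 *		comedi_handle_events(dev, s);
 *		return IRQ_HANDLED;
 *	}
 */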
/**
* comedi_buf_read_samples() - Read sample data from COMEDI buffer
* @s: COMEDI subdevice.
* @data: Pointer to destination.
* @nsamples: Maximum number of samples to read.
*
* Read up to @nsamples samples from the COMEDI acquisition data buffer
* associated with the subdevice, mark it as read and update the acquisition
* scan progress. Limit the number of samples read to the number available.
* Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
* tasks to be woken when the event flags are processed.
*
* Return: The amount of data read in bytes.
*/
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
void *data, unsigned int nsamples)
{
unsigned int max_samples;
unsigned int nbytes;
/* clamp nsamples to the number of full samples available */
max_samples = comedi_bytes_to_samples(s,
comedi_buf_read_n_available(s));
if (nsamples > max_samples)
nsamples = max_samples;
if (nsamples == 0)
return 0;
nbytes = comedi_buf_read_alloc(s,
comedi_samples_to_bytes(s, nsamples));
comedi_buf_memcpy_from(s, data, nbytes);
comedi_buf_read_free(s, nbytes);
comedi_inc_scan_progress(s, nbytes);
s->async->events |= COMEDI_CB_BLOCK;
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
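/*
 * Illustrative sketch (not part of this file): the mirror-image pattern
 * for an analog output subdevice pulls the next sample out of the COMEDI
 * buffer with comedi_buf_read_samples() and writes it to the hardware.
 * The MYDRV_AO_FIFO_REG offset is a hypothetical placeholder.
 *
 *	static irqreturn_t mydrv_ao_interrupt(int irq, void *d)
 *	{
 *		struct comedi_device *dev = d;
 *		struct comedi_subdevice *s = dev->write_subdev;
 *		unsigned short sample;
 *
 *		if (comedi_buf_read_samples(s, &sample, 1))
 *			outw(sample, dev->iobase + MYDRV_AO_FIFO_REG);
 *
 *		comedi_handle_events(dev, s);
 *		return IRQ_HANDLED;
 *	}
 */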
| linux-master | drivers/comedi/comedi_buf.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* module/drivers.c
* functions for manipulating drivers
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <[email protected]>
* Copyright (C) 2002 Frank Mori Hess <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
struct comedi_driver *comedi_drivers;
/* protects access to comedi_drivers */
DEFINE_MUTEX(comedi_drivers_list_lock);
/**
* comedi_set_hw_dev() - Set hardware device associated with COMEDI device
* @dev: COMEDI device.
* @hw_dev: Hardware device.
*
* For automatically configured COMEDI devices (resulting from a call to
* comedi_auto_config() or one of its wrappers from the low-level COMEDI
* driver), comedi_set_hw_dev() is called automatically by the COMEDI core
* to associate the COMEDI device with the hardware device. It can also be
* called directly by "legacy" low-level COMEDI drivers that rely on the
* %COMEDI_DEVCONFIG ioctl to configure the hardware as long as the hardware
* has a &struct device.
*
* If @dev->hw_dev is NULL, it gets a reference to @hw_dev and sets
* @dev->hw_dev, otherwise, it does nothing. Calling it multiple times
* with the same hardware device is not considered an error. If it gets
* a reference to the hardware device, it will be automatically 'put' when
* the device is detached from COMEDI.
*
* Returns 0 if @dev->hw_dev was NULL or the same as @hw_dev, otherwise
* returns -EEXIST.
*/
int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev)
{
if (hw_dev == dev->hw_dev)
return 0;
if (dev->hw_dev)
return -EEXIST;
dev->hw_dev = get_device(hw_dev);
return 0;
}
EXPORT_SYMBOL_GPL(comedi_set_hw_dev);
static void comedi_clear_hw_dev(struct comedi_device *dev)
{
put_device(dev->hw_dev);
dev->hw_dev = NULL;
}
/**
* comedi_alloc_devpriv() - Allocate memory for the device private data
* @dev: COMEDI device.
* @size: Size of the memory to allocate.
*
* The allocated memory is zero-filled. @dev->private points to it on
* return. The memory will be automatically freed when the COMEDI device is
* "detached".
*
* Returns a pointer to the allocated memory, or NULL on failure.
*/
void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size)
{
dev->private = kzalloc(size, GFP_KERNEL);
return dev->private;
}
EXPORT_SYMBOL_GPL(comedi_alloc_devpriv);
/**
* comedi_alloc_subdevices() - Allocate subdevices for COMEDI device
* @dev: COMEDI device.
* @num_subdevices: Number of subdevices to allocate.
*
* Allocates and initializes an array of &struct comedi_subdevice for the
* COMEDI device. If successful, sets @dev->subdevices to point to the
* first one and @dev->n_subdevices to the number.
*
 * Returns 0 on success, -EINVAL if @num_subdevices is < 1, or -ENOMEM if
 * the memory could not be allocated.
*/
int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
{
struct comedi_subdevice *s;
int i;
if (num_subdevices < 1)
return -EINVAL;
s = kcalloc(num_subdevices, sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
dev->subdevices = s;
dev->n_subdevices = num_subdevices;
for (i = 0; i < num_subdevices; ++i) {
s = &dev->subdevices[i];
s->device = dev;
s->index = i;
s->async_dma_dir = DMA_NONE;
spin_lock_init(&s->spin_lock);
s->minor = -1;
}
return 0;
}
EXPORT_SYMBOL_GPL(comedi_alloc_subdevices);
/**
* comedi_alloc_subdev_readback() - Allocate memory for the subdevice readback
* @s: COMEDI subdevice.
*
* This is called by low-level COMEDI drivers to allocate an array to record
* the last values written to a subdevice's analog output channels (at least
* by the %INSN_WRITE instruction), to allow them to be read back by an
* %INSN_READ instruction. It also provides a default handler for the
* %INSN_READ instruction unless one has already been set.
*
* On success, @s->readback points to the first element of the array, which
* is zero-filled. The low-level driver is responsible for updating its
* contents. @s->insn_read will be set to comedi_readback_insn_read()
* unless it is already non-NULL.
*
* Returns 0 on success, -EINVAL if the subdevice has no channels, or
* -ENOMEM on allocation failure.
*/
int comedi_alloc_subdev_readback(struct comedi_subdevice *s)
{
if (!s->n_chan)
return -EINVAL;
s->readback = kcalloc(s->n_chan, sizeof(*s->readback), GFP_KERNEL);
if (!s->readback)
return -ENOMEM;
if (!s->insn_read)
s->insn_read = comedi_readback_insn_read;
return 0;
}
EXPORT_SYMBOL_GPL(comedi_alloc_subdev_readback);
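/*
 * Illustrative sketch (not part of this file): a minimal attach handler
 * usually pairs comedi_alloc_subdevices() with
 * comedi_alloc_subdev_readback() when setting up an analog output
 * subdevice.  The channel count, resolution and mydrv_ao_insn_write()
 * handler below are hypothetical.
 *
 *	static int mydrv_attach(struct comedi_device *dev,
 *				struct comedi_devconfig *it)
 *	{
 *		struct comedi_subdevice *s;
 *		int ret;
 *
 *		ret = comedi_alloc_subdevices(dev, 1);
 *		if (ret)
 *			return ret;
 *
 *		s = &dev->subdevices[0];
 *		s->type		= COMEDI_SUBD_AO;
 *		s->subdev_flags	= SDF_WRITABLE;
 *		s->n_chan	= 2;
 *		s->maxdata	= 0x0fff;
 *		s->range_table	= &range_bipolar10;
 *		s->insn_write	= mydrv_ao_insn_write;
 *
 *		return comedi_alloc_subdev_readback(s);
 *	}
 */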
static void comedi_device_detach_cleanup(struct comedi_device *dev)
{
int i;
struct comedi_subdevice *s;
lockdep_assert_held(&dev->attach_lock);
lockdep_assert_held(&dev->mutex);
if (dev->subdevices) {
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
if (comedi_can_auto_free_spriv(s))
kfree(s->private);
comedi_free_subdevice_minor(s);
if (s->async) {
comedi_buf_alloc(dev, s, 0);
kfree(s->async);
}
kfree(s->readback);
}
kfree(dev->subdevices);
dev->subdevices = NULL;
dev->n_subdevices = 0;
}
kfree(dev->private);
kfree(dev->pacer);
dev->private = NULL;
dev->pacer = NULL;
dev->driver = NULL;
dev->board_name = NULL;
dev->board_ptr = NULL;
dev->mmio = NULL;
dev->iobase = 0;
dev->iolen = 0;
dev->ioenabled = false;
dev->irq = 0;
dev->read_subdev = NULL;
dev->write_subdev = NULL;
dev->open = NULL;
dev->close = NULL;
comedi_clear_hw_dev(dev);
}
void comedi_device_detach(struct comedi_device *dev)
{
lockdep_assert_held(&dev->mutex);
comedi_device_cancel_all(dev);
down_write(&dev->attach_lock);
dev->attached = false;
dev->detach_count++;
if (dev->driver)
dev->driver->detach(dev);
comedi_device_detach_cleanup(dev);
up_write(&dev->attach_lock);
}
static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s)
{
return -EINVAL;
}
static int insn_device_inval(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
return -EINVAL;
}
static unsigned int get_zero_valid_routes(struct comedi_device *dev,
unsigned int n_pairs,
unsigned int *pair_data)
{
return 0;
}
int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
return -EINVAL;
}
/**
* comedi_readback_insn_read() - A generic (*insn_read) for subdevice readback.
* @dev: COMEDI device.
* @s: COMEDI subdevice.
* @insn: COMEDI instruction.
* @data: Pointer to return the readback data.
*
* Handles the %INSN_READ instruction for subdevices that use the readback
* array allocated by comedi_alloc_subdev_readback(). It may be used
* directly as the subdevice's handler (@s->insn_read) or called via a
* wrapper.
*
* @insn->n is normally 1, which will read a single value. If higher, the
* same element of the readback array will be read multiple times.
*
* Returns @insn->n on success, or -EINVAL if @s->readback is NULL.
*/
int comedi_readback_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
int i;
if (!s->readback)
return -EINVAL;
for (i = 0; i < insn->n; i++)
data[i] = s->readback[chan];
return insn->n;
}
EXPORT_SYMBOL_GPL(comedi_readback_insn_read);
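/*
 * Illustrative sketch (not part of this file): an analog output
 * (*insn_write) handler that works with comedi_readback_insn_read() only
 * has to keep s->readback[] in step with the value it last wrote to the
 * hardware.  MYDRV_AO_REG() is a hypothetical register accessor.
 *
 *	static int mydrv_ao_insn_write(struct comedi_device *dev,
 *				       struct comedi_subdevice *s,
 *				       struct comedi_insn *insn,
 *				       unsigned int *data)
 *	{
 *		unsigned int chan = CR_CHAN(insn->chanspec);
 *		unsigned int val = s->readback[chan];
 *		int i;
 *
 *		for (i = 0; i < insn->n; i++) {
 *			val = data[i];
 *			outw(val, dev->iobase + MYDRV_AO_REG(chan));
 *		}
 *		s->readback[chan] = val;
 *
 *		return insn->n;
 *	}
 */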
/**
* comedi_timeout() - Busy-wait for a driver condition to occur
* @dev: COMEDI device.
* @s: COMEDI subdevice.
* @insn: COMEDI instruction.
* @cb: Callback to check for the condition.
* @context: Private context from the driver.
*
* Busy-waits for up to a second (%COMEDI_TIMEOUT_MS) for the condition or
* some error (other than -EBUSY) to occur. The parameters @dev, @s, @insn,
* and @context are passed to the callback function, which returns -EBUSY to
* continue waiting or some other value to stop waiting (generally 0 if the
* condition occurred, or some error value).
*
* Returns -ETIMEDOUT if timed out, otherwise the return value from the
* callback function.
*/
int comedi_timeout(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
int (*cb)(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context),
unsigned long context)
{
unsigned long timeout = jiffies + msecs_to_jiffies(COMEDI_TIMEOUT_MS);
int ret;
while (time_before(jiffies, timeout)) {
ret = cb(dev, s, insn, context);
if (ret != -EBUSY)
return ret; /* success (0) or non EBUSY errno */
cpu_relax();
}
return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(comedi_timeout);
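/*
 * Illustrative sketch (not part of this file): the usual comedi_timeout()
 * callback simply polls a status register for an end-of-conversion bit.
 * MYDRV_STATUS_REG and MYDRV_STATUS_EOC are hypothetical.
 *
 *	static int mydrv_ai_eoc(struct comedi_device *dev,
 *				struct comedi_subdevice *s,
 *				struct comedi_insn *insn,
 *				unsigned long context)
 *	{
 *		unsigned int status;
 *
 *		status = inb(dev->iobase + MYDRV_STATUS_REG);
 *		if (status & MYDRV_STATUS_EOC)
 *			return 0;
 *		return -EBUSY;
 *	}
 *
 * and the (*insn_read) handler would then wait with:
 *
 *	ret = comedi_timeout(dev, s, insn, mydrv_ai_eoc, 0);
 *	if (ret)
 *		return ret;
 */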
/**
* comedi_dio_insn_config() - Boilerplate (*insn_config) for DIO subdevices
* @dev: COMEDI device.
* @s: COMEDI subdevice.
* @insn: COMEDI instruction.
* @data: Instruction parameters and return data.
* @mask: io_bits mask for grouped channels, or 0 for single channel.
*
* If @mask is 0, it is replaced with a single-bit mask corresponding to the
* channel number specified by @insn->chanspec. Otherwise, @mask
* corresponds to a group of channels (which should include the specified
* channel) that are always configured together as inputs or outputs.
*
 * Partially handles the %INSN_CONFIG_DIO_INPUT, %INSN_CONFIG_DIO_OUTPUT,
* and %INSN_CONFIG_DIO_QUERY instructions. The first two update
* @s->io_bits to record the directions of the masked channels. The last
* one sets @data[1] to the current direction of the group of channels
 * (%COMEDI_INPUT or %COMEDI_OUTPUT) as recorded in @s->io_bits.
*
* The caller is responsible for updating the DIO direction in the hardware
* registers if this function returns 0.
*
* Returns 0 for a %INSN_CONFIG_DIO_INPUT or %INSN_CONFIG_DIO_OUTPUT
* instruction, @insn->n (> 0) for a %INSN_CONFIG_DIO_QUERY instruction, or
* -EINVAL for some other instruction.
*/
int comedi_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data,
unsigned int mask)
{
unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
if (!mask)
mask = chan_mask;
switch (data[0]) {
case INSN_CONFIG_DIO_INPUT:
s->io_bits &= ~mask;
break;
case INSN_CONFIG_DIO_OUTPUT:
s->io_bits |= mask;
break;
case INSN_CONFIG_DIO_QUERY:
data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
return insn->n;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
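/*
 * Illustrative sketch (not part of this file): a driver's (*insn_config)
 * handler normally lets comedi_dio_insn_config() maintain s->io_bits and
 * then mirrors the result into its direction register.  MYDRV_DIR_REG is
 * a hypothetical register offset.
 *
 *	static int mydrv_dio_insn_config(struct comedi_device *dev,
 *					 struct comedi_subdevice *s,
 *					 struct comedi_insn *insn,
 *					 unsigned int *data)
 *	{
 *		int ret;
 *
 *		ret = comedi_dio_insn_config(dev, s, insn, data, 0);
 *		if (ret)
 *			return ret;
 *
 *		outb(s->io_bits & 0xff, dev->iobase + MYDRV_DIR_REG);
 *
 *		return insn->n;
 *	}
 */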
/**
* comedi_dio_update_state() - Update the internal state of DIO subdevices
* @s: COMEDI subdevice.
* @data: The channel mask and bits to update.
*
* Updates @s->state which holds the internal state of the outputs for DIO
* or DO subdevices (up to 32 channels). @data[0] contains a bit-mask of
* the channels to be updated. @data[1] contains a bit-mask of those
* channels to be set to '1'. The caller is responsible for updating the
* outputs in hardware according to @s->state. As a minimum, the channels
* in the returned bit-mask need to be updated.
*
* Returns @mask with non-existent channels removed.
*/
unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
unsigned int *data)
{
unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
: 0xffffffff;
unsigned int mask = data[0] & chanmask;
unsigned int bits = data[1];
if (mask) {
s->state &= ~mask;
s->state |= (bits & mask);
}
return mask;
}
EXPORT_SYMBOL_GPL(comedi_dio_update_state);
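/*
 * Illustrative sketch (not part of this file): a digital output
 * (*insn_bits) handler built on comedi_dio_update_state() only touches
 * the hardware when the mask selects at least one channel, and always
 * returns the current state in data[1].  MYDRV_DO_REG is a hypothetical
 * register offset.
 *
 *	static int mydrv_do_insn_bits(struct comedi_device *dev,
 *				      struct comedi_subdevice *s,
 *				      struct comedi_insn *insn,
 *				      unsigned int *data)
 *	{
 *		if (comedi_dio_update_state(s, data))
 *			outw(s->state, dev->iobase + MYDRV_DO_REG);
 *
 *		data[1] = s->state;
 *
 *		return insn->n;
 *	}
 */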
/**
* comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
* bytes
* @s: COMEDI subdevice.
* @cmd: COMEDI command.
*
* Determines the overall scan length according to the subdevice type and the
* number of channels in the scan for the specified command.
*
* For digital input, output or input/output subdevices, samples for
* multiple channels are assumed to be packed into one or more unsigned
* short or unsigned int values according to the subdevice's %SDF_LSAMPL
* flag. For other types of subdevice, samples are assumed to occupy a
* whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
*
* Returns the overall scan length in bytes.
*/
unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
unsigned int num_samples;
unsigned int bits_per_sample;
switch (s->type) {
case COMEDI_SUBD_DI:
case COMEDI_SUBD_DO:
case COMEDI_SUBD_DIO:
bits_per_sample = 8 * comedi_bytes_per_sample(s);
num_samples = DIV_ROUND_UP(cmd->scan_end_arg, bits_per_sample);
break;
default:
num_samples = cmd->scan_end_arg;
break;
}
return comedi_samples_to_bytes(s, num_samples);
}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
/**
* comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
* @s: COMEDI subdevice.
*
* Determines the overall scan length according to the subdevice type and the
* number of channels in the scan for the current command.
*
* For digital input, output or input/output subdevices, samples for
* multiple channels are assumed to be packed into one or more unsigned
* short or unsigned int values according to the subdevice's %SDF_LSAMPL
* flag. For other types of subdevice, samples are assumed to occupy a
* whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
*
* Returns the overall scan length in bytes.
*/
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
return comedi_bytes_per_scan_cmd(s, cmd);
}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
unsigned int nscans)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
if (cmd->stop_src == TRIG_COUNT) {
unsigned int scans_left = 0;
if (async->scans_done < cmd->stop_arg)
scans_left = cmd->stop_arg - async->scans_done;
if (nscans > scans_left)
nscans = scans_left;
}
return nscans;
}
/**
* comedi_nscans_left() - Return the number of scans left in the command
* @s: COMEDI subdevice.
* @nscans: The expected number of scans or 0 for all available scans.
*
* If @nscans is 0, it is set to the number of scans available in the
* async buffer.
*
* If the async command has a stop_src of %TRIG_COUNT, the @nscans will be
* checked against the number of scans remaining to complete the command.
*
* The return value will then be either the expected number of scans or the
* number of scans remaining to complete the command, whichever is fewer.
*/
unsigned int comedi_nscans_left(struct comedi_subdevice *s,
unsigned int nscans)
{
if (nscans == 0) {
unsigned int nbytes = comedi_buf_read_n_available(s);
nscans = nbytes / comedi_bytes_per_scan(s);
}
return __comedi_nscans_left(s, nscans);
}
EXPORT_SYMBOL_GPL(comedi_nscans_left);
/**
* comedi_nsamples_left() - Return the number of samples left in the command
* @s: COMEDI subdevice.
* @nsamples: The expected number of samples.
*
* Returns the number of samples remaining to complete the command, or the
* specified expected number of samples (@nsamples), whichever is fewer.
*/
unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
unsigned int nsamples)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned long long scans_left;
unsigned long long samples_left;
if (cmd->stop_src != TRIG_COUNT)
return nsamples;
scans_left = __comedi_nscans_left(s, cmd->stop_arg);
if (!scans_left)
return 0;
samples_left = scans_left * cmd->scan_end_arg -
comedi_bytes_to_samples(s, async->scan_progress);
if (samples_left < nsamples)
return samples_left;
return nsamples;
}
EXPORT_SYMBOL_GPL(comedi_nsamples_left);
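/*
 * Example (editor's illustrative sketch): an interrupt handler draining a
 * hardware FIFO typically clamps the number of samples to read so that a
 * TRIG_COUNT command stops exactly at the requested point. MYDRV_FIFO_DEPTH
 * and mydrv_read_fifo() are hypothetical:
 *
 *	unsigned int nsamples;
 *
 *	nsamples = comedi_nsamples_left(s, MYDRV_FIFO_DEPTH);
 *	if (nsamples)
 *		mydrv_read_fifo(dev, s, nsamples);
 */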
/**
* comedi_inc_scan_progress() - Update scan progress in asynchronous command
* @s: COMEDI subdevice.
* @num_bytes: Amount of data in bytes to increment scan progress.
*
* Increments the scan progress by the number of bytes specified by @num_bytes.
* If the scan progress reaches or exceeds the scan length in bytes, reduce
* it modulo the scan length in bytes and set the "end of scan" asynchronous
* event flag (%COMEDI_CB_EOS) to be processed later.
*/
void comedi_inc_scan_progress(struct comedi_subdevice *s,
unsigned int num_bytes)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int scan_length = comedi_bytes_per_scan(s);
/* track the 'cur_chan' for non-SDF_PACKED subdevices */
if (!(s->subdev_flags & SDF_PACKED)) {
async->cur_chan += comedi_bytes_to_samples(s, num_bytes);
async->cur_chan %= cmd->chanlist_len;
}
async->scan_progress += num_bytes;
if (async->scan_progress >= scan_length) {
unsigned int nscans = async->scan_progress / scan_length;
if (async->scans_done < (UINT_MAX - nscans))
async->scans_done += nscans;
else
async->scans_done = UINT_MAX;
async->scan_progress %= scan_length;
async->events |= COMEDI_CB_EOS;
}
}
EXPORT_SYMBOL_GPL(comedi_inc_scan_progress);
/**
* comedi_handle_events() - Handle events and possibly stop acquisition
* @dev: COMEDI device.
* @s: COMEDI subdevice.
*
* Handles outstanding asynchronous acquisition event flags associated
* with the subdevice. Call the subdevice's @s->cancel() handler if the
* "end of acquisition", "error" or "overflow" event flags are set in order
* to stop the acquisition at the driver level.
*
* Calls comedi_event() to further process the event flags, which may mark
* the asynchronous command as no longer running, possibly terminated with
* an error, and may wake up tasks.
*
* Return a bit-mask of the handled events.
*/
unsigned int comedi_handle_events(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned int events = s->async->events;
if (events == 0)
return events;
if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
s->cancel(dev, s);
comedi_event(dev, s);
return events;
}
EXPORT_SYMBOL_GPL(comedi_handle_events);
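/*
 * Example (editor's illustrative sketch): the usual shape of a command
 * interrupt handler is to push samples into the async buffer, set any
 * termination flags, and finish with comedi_handle_events(), which invokes
 * (*cancel) if needed. The register name is hypothetical:
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *d)
 *	{
 *		struct comedi_device *dev = d;
 *		struct comedi_subdevice *s = dev->read_subdev;
 *		struct comedi_cmd *cmd = &s->async->cmd;
 *		unsigned short sample;
 *
 *		sample = inw(dev->iobase + MYDRV_AI_DATA_REG);
 *		comedi_buf_write_samples(s, &sample, 1);
 *
 *		if (cmd->stop_src == TRIG_COUNT &&
 *		    s->async->scans_done >= cmd->stop_arg)
 *			s->async->events |= COMEDI_CB_EOA;
 *
 *		comedi_handle_events(dev, s);
 *
 *		return IRQ_HANDLED;
 *	}
 */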
static int insn_rw_emulate_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct comedi_insn _insn;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int base_chan = (chan < 32) ? 0 : chan;
unsigned int _data[2];
int ret;
memset(_data, 0, sizeof(_data));
memset(&_insn, 0, sizeof(_insn));
_insn.insn = INSN_BITS;
_insn.chanspec = base_chan;
_insn.n = 2;
_insn.subdev = insn->subdev;
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
_data[0] = 1 << (chan - base_chan); /* mask */
_data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
}
ret = s->insn_bits(dev, s, &_insn, _data);
if (ret < 0)
return ret;
if (insn->insn == INSN_READ)
data[0] = (_data[1] >> (chan - base_chan)) & 1;
return 1;
}
static int __comedi_device_postconfig_async(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async;
unsigned int buf_size;
int ret;
lockdep_assert_held(&dev->mutex);
if ((s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) == 0) {
dev_warn(dev->class_dev,
"async subdevices must support SDF_CMD_READ or SDF_CMD_WRITE\n");
return -EINVAL;
}
if (!s->do_cmdtest) {
dev_warn(dev->class_dev,
"async subdevices must have a do_cmdtest() function\n");
return -EINVAL;
}
if (!s->cancel)
dev_warn(dev->class_dev,
"async subdevices should have a cancel() function\n");
async = kzalloc(sizeof(*async), GFP_KERNEL);
if (!async)
return -ENOMEM;
init_waitqueue_head(&async->wait_head);
s->async = async;
async->max_bufsize = comedi_default_buf_maxsize_kb * 1024;
buf_size = comedi_default_buf_size_kb * 1024;
if (buf_size > async->max_bufsize)
buf_size = async->max_bufsize;
if (comedi_buf_alloc(dev, s, buf_size) < 0) {
dev_warn(dev->class_dev, "Buffer allocation failed\n");
return -ENOMEM;
}
if (s->buf_change) {
ret = s->buf_change(dev, s);
if (ret < 0)
return ret;
}
comedi_alloc_subdevice_minor(s);
return 0;
}
static int __comedi_device_postconfig(struct comedi_device *dev)
{
struct comedi_subdevice *s;
int ret;
int i;
lockdep_assert_held(&dev->mutex);
if (!dev->insn_device_config)
dev->insn_device_config = insn_device_inval;
if (!dev->get_valid_routes)
dev->get_valid_routes = get_zero_valid_routes;
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
if (s->type == COMEDI_SUBD_UNUSED)
continue;
if (s->type == COMEDI_SUBD_DO) {
if (s->n_chan < 32)
s->io_bits = (1 << s->n_chan) - 1;
else
s->io_bits = 0xffffffff;
}
if (s->len_chanlist == 0)
s->len_chanlist = 1;
if (s->do_cmd) {
ret = __comedi_device_postconfig_async(dev, s);
if (ret)
return ret;
}
if (!s->range_table && !s->range_table_list)
s->range_table = &range_unknown;
if (!s->insn_read && s->insn_bits)
s->insn_read = insn_rw_emulate_bits;
if (!s->insn_write && s->insn_bits)
s->insn_write = insn_rw_emulate_bits;
if (!s->insn_read)
s->insn_read = insn_inval;
if (!s->insn_write)
s->insn_write = insn_inval;
if (!s->insn_bits)
s->insn_bits = insn_inval;
if (!s->insn_config)
s->insn_config = insn_inval;
if (!s->poll)
s->poll = poll_invalid;
}
return 0;
}
/* do a little post-config cleanup */
static int comedi_device_postconfig(struct comedi_device *dev)
{
int ret;
lockdep_assert_held(&dev->mutex);
ret = __comedi_device_postconfig(dev);
if (ret < 0)
return ret;
down_write(&dev->attach_lock);
dev->attached = true;
up_write(&dev->attach_lock);
return 0;
}
/*
* Generic recognize function for drivers that register their supported
* board names.
*
* 'driv->board_name' points to a 'const char *' member within the
* zeroth element of an array of some private board information
* structure, say 'struct foo_board' containing a member 'const char
* *board_name' that is initialized to point to a board name string that
* is one of the candidates matched against this function's 'name'
* parameter.
*
* 'driv->offset' is the size of the private board information
* structure, say 'sizeof(struct foo_board)', and 'driv->num_names' is
* the length of the array of private board information structures.
*
* If one of the board names in the array of private board information
* structures matches the name supplied to this function, the function
* returns a pointer to the pointer to the board name, otherwise it
* returns NULL. The return value ends up in the 'board_ptr' member of
* a 'struct comedi_device' that the low-level comedi driver's
 * 'attach()' hook can convert to a pointer to a particular element of its
* array of private board information structures by subtracting the
* offset of the member that points to the board name. (No subtraction
* is required if the board name pointer is the first member of the
* private board information structure, which is generally the case.)
*/
static void *comedi_recognize(struct comedi_driver *driv, const char *name)
{
char **name_ptr = (char **)driv->board_name;
int i;
for (i = 0; i < driv->num_names; i++) {
if (strcmp(*name_ptr, name) == 0)
return name_ptr;
name_ptr = (void *)name_ptr + driv->offset;
}
return NULL;
}
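/*
 * Example (editor's illustrative sketch): the board-name matching above
 * assumes a driver layout along these lines; the "foo" names are
 * hypothetical:
 *
 *	struct foo_board {
 *		const char *name;
 *		int ai_chans;
 *	};
 *
 *	static const struct foo_board foo_boards[] = {
 *		{ .name = "foo-100", .ai_chans = 8 },
 *		{ .name = "foo-200", .ai_chans = 16 },
 *	};
 *
 *	static struct comedi_driver foo_driver = {
 *		.driver_name	= "foo",
 *		.module		= THIS_MODULE,
 *		.attach		= foo_attach,
 *		.detach		= foo_detach,
 *		.board_name	= &foo_boards[0].name,
 *		.offset		= sizeof(struct foo_board),
 *		.num_names	= ARRAY_SIZE(foo_boards),
 *	};
 *
 * In foo_attach(), dev->board_ptr can then be converted back with a simple
 * cast, since 'name' is the first member of struct foo_board.
 */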
static void comedi_report_boards(struct comedi_driver *driv)
{
unsigned int i;
const char *const *name_ptr;
pr_info("comedi: valid board names for %s driver are:\n",
driv->driver_name);
name_ptr = driv->board_name;
for (i = 0; i < driv->num_names; i++) {
pr_info(" %s\n", *name_ptr);
name_ptr = (const char **)((char *)name_ptr + driv->offset);
}
if (driv->num_names == 0)
pr_info(" %s\n", driv->driver_name);
}
/**
* comedi_load_firmware() - Request and load firmware for a device
* @dev: COMEDI device.
* @device: Hardware device.
* @name: The name of the firmware image.
 * @cb: Callback to upload the firmware image.
* @context: Private context from the driver.
*
* Sends a firmware request for the hardware device and waits for it. Calls
 * the callback function to upload the firmware to the device, then releases
* the firmware.
*
* Returns 0 on success, -EINVAL if @cb is NULL, or a negative error number
* from the firmware request or the callback function.
*/
int comedi_load_firmware(struct comedi_device *dev,
struct device *device,
const char *name,
int (*cb)(struct comedi_device *dev,
const u8 *data, size_t size,
unsigned long context),
unsigned long context)
{
const struct firmware *fw;
int ret;
if (!cb)
return -EINVAL;
ret = request_firmware(&fw, name, device);
if (ret == 0) {
ret = cb(dev, fw->data, fw->size, context);
release_firmware(fw);
}
return min(ret, 0);
}
EXPORT_SYMBOL_GPL(comedi_load_firmware);
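/*
 * Example (editor's illustrative sketch): a driver's auto_attach handler
 * might use this as follows; the firmware name and upload callback are
 * hypothetical:
 *
 *	static int mydrv_upload_firmware(struct comedi_device *dev,
 *					 const u8 *data, size_t size,
 *					 unsigned long context)
 *	{
 *		// ...write 'data' to the device...
 *		return 0;
 *	}
 *
 *	ret = comedi_load_firmware(dev, dev->hw_dev, "mydrv_fpga.bin",
 *				   mydrv_upload_firmware, 0);
 *	if (ret)
 *		return ret;
 */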
/**
* __comedi_request_region() - Request an I/O region for a legacy driver
* @dev: COMEDI device.
* @start: Base address of the I/O region.
* @len: Length of the I/O region.
*
* Requests the specified I/O port region which must start at a non-zero
* address.
*
* Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request
* fails.
*/
int __comedi_request_region(struct comedi_device *dev,
unsigned long start, unsigned long len)
{
if (!start) {
dev_warn(dev->class_dev,
"%s: a I/O base address must be specified\n",
dev->board_name);
return -EINVAL;
}
if (!request_region(start, len, dev->board_name)) {
dev_warn(dev->class_dev, "%s: I/O port conflict (%#lx,%lu)\n",
dev->board_name, start, len);
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(__comedi_request_region);
/**
* comedi_request_region() - Request an I/O region for a legacy driver
* @dev: COMEDI device.
* @start: Base address of the I/O region.
* @len: Length of the I/O region.
*
* Requests the specified I/O port region which must start at a non-zero
* address.
*
* On success, @dev->iobase is set to the base address of the region and
* @dev->iolen is set to its length.
*
* Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request
* fails.
*/
int comedi_request_region(struct comedi_device *dev,
unsigned long start, unsigned long len)
{
int ret;
ret = __comedi_request_region(dev, start, len);
if (ret == 0) {
dev->iobase = start;
dev->iolen = len;
}
return ret;
}
EXPORT_SYMBOL_GPL(comedi_request_region);
/**
* comedi_legacy_detach() - A generic (*detach) function for legacy drivers
* @dev: COMEDI device.
*
* This is a simple, generic 'detach' handler for legacy COMEDI devices that
* just use a single I/O port region and possibly an IRQ and that don't need
* any special clean-up for their private device or subdevice storage. It
* can also be called by a driver-specific 'detach' handler.
*
* If @dev->irq is non-zero, the IRQ will be freed. If @dev->iobase and
* @dev->iolen are both non-zero, the I/O port region will be released.
*/
void comedi_legacy_detach(struct comedi_device *dev)
{
if (dev->irq) {
free_irq(dev->irq, dev);
dev->irq = 0;
}
if (dev->iobase && dev->iolen) {
release_region(dev->iobase, dev->iolen);
dev->iobase = 0;
dev->iolen = 0;
}
}
EXPORT_SYMBOL_GPL(comedi_legacy_detach);
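/*
 * Example (editor's illustrative sketch): a legacy driver typically pairs
 * comedi_request_region() in its attach handler with comedi_legacy_detach()
 * as its detach handler. The I/O length and option indices are hypothetical:
 *
 *	static int mydrv_attach(struct comedi_device *dev,
 *				struct comedi_devconfig *it)
 *	{
 *		int ret;
 *
 *		ret = comedi_request_region(dev, it->options[0], 0x10);
 *		if (ret)
 *			return ret;
 *
 *		if (it->options[1]) {
 *			ret = request_irq(it->options[1], mydrv_interrupt, 0,
 *					  dev->board_name, dev);
 *			if (ret == 0)
 *				dev->irq = it->options[1];
 *		}
 *
 *		// ...allocate and set up subdevices...
 *		return 0;
 *	}
 *
 *	static struct comedi_driver mydrv_driver = {
 *		.driver_name	= "mydrv",
 *		.module		= THIS_MODULE,
 *		.attach		= mydrv_attach,
 *		.detach		= comedi_legacy_detach,
 *	};
 */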
int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_driver *driv;
int ret;
lockdep_assert_held(&dev->mutex);
if (dev->attached)
return -EBUSY;
mutex_lock(&comedi_drivers_list_lock);
for (driv = comedi_drivers; driv; driv = driv->next) {
if (!try_module_get(driv->module))
continue;
if (driv->num_names) {
dev->board_ptr = comedi_recognize(driv, it->board_name);
if (dev->board_ptr)
break;
} else if (strcmp(driv->driver_name, it->board_name) == 0) {
break;
}
module_put(driv->module);
}
if (!driv) {
/* recognize has failed if we get here */
/* report valid board names before returning error */
for (driv = comedi_drivers; driv; driv = driv->next) {
if (!try_module_get(driv->module))
continue;
comedi_report_boards(driv);
module_put(driv->module);
}
ret = -EIO;
goto out;
}
if (!driv->attach) {
/* driver does not support manual configuration */
dev_warn(dev->class_dev,
"driver '%s' does not support attach using comedi_config\n",
driv->driver_name);
module_put(driv->module);
ret = -EIO;
goto out;
}
dev->driver = driv;
dev->board_name = dev->board_ptr ? *(const char **)dev->board_ptr
: dev->driver->driver_name;
ret = driv->attach(dev, it);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
if (ret < 0) {
comedi_device_detach(dev);
module_put(driv->module);
}
/* On success, the driver module count has been incremented. */
out:
mutex_unlock(&comedi_drivers_list_lock);
return ret;
}
/**
* comedi_auto_config() - Create a COMEDI device for a hardware device
* @hardware_device: Hardware device.
* @driver: COMEDI low-level driver for the hardware device.
* @context: Driver context for the auto_attach handler.
*
* Allocates a new COMEDI device for the hardware device and calls the
 * low-level driver's 'auto_attach' handler to set up the hardware and
* allocate the COMEDI subdevices. Additional "post-configuration" setting
* up is performed on successful return from the 'auto_attach' handler.
* If the 'auto_attach' handler fails, the low-level driver's 'detach'
* handler will be called as part of the clean-up.
*
* This is usually called from a wrapper function in a bus-specific COMEDI
* module, which in turn is usually called from a bus device 'probe'
* function in the low-level driver.
*
* Returns 0 on success, -EINVAL if the parameters are invalid or the
* post-configuration determines the driver has set the COMEDI device up
 * incorrectly, -ENOMEM if it failed to allocate memory, -EBUSY if it ran out
 * of COMEDI minor device numbers, or some negative error number returned by
* the driver's 'auto_attach' handler.
*/
int comedi_auto_config(struct device *hardware_device,
struct comedi_driver *driver, unsigned long context)
{
struct comedi_device *dev;
int ret;
if (!hardware_device) {
pr_warn("BUG! %s called with NULL hardware_device\n", __func__);
return -EINVAL;
}
if (!driver) {
dev_warn(hardware_device,
"BUG! %s called with NULL comedi driver\n", __func__);
return -EINVAL;
}
if (!driver->auto_attach) {
dev_warn(hardware_device,
"BUG! comedi driver '%s' has no auto_attach handler\n",
driver->driver_name);
return -EINVAL;
}
dev = comedi_alloc_board_minor(hardware_device);
if (IS_ERR(dev)) {
dev_warn(hardware_device,
"driver '%s' could not create device.\n",
driver->driver_name);
return PTR_ERR(dev);
}
/* Note: comedi_alloc_board_minor() locked dev->mutex. */
lockdep_assert_held(&dev->mutex);
dev->driver = driver;
dev->board_name = dev->driver->driver_name;
ret = driver->auto_attach(dev, context);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
if (ret < 0) {
dev_warn(hardware_device,
"driver '%s' failed to auto-configure device.\n",
driver->driver_name);
mutex_unlock(&dev->mutex);
comedi_release_hardware_device(hardware_device);
} else {
/*
* class_dev should be set properly here
* after a successful auto config
*/
dev_info(dev->class_dev,
"driver '%s' has successfully auto-configured '%s'.\n",
driver->driver_name, dev->board_name);
mutex_unlock(&dev->mutex);
}
return ret;
}
EXPORT_SYMBOL_GPL(comedi_auto_config);
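/*
 * Example (editor's illustrative sketch): bus-specific wrappers in the
 * COMEDI core call this function; a PCI low-level driver would normally
 * reach it indirectly via comedi_pci_auto_config() from its probe routine,
 * roughly like this ("mydrv" is hypothetical):
 *
 *	static int mydrv_pci_probe(struct pci_dev *pcidev,
 *				   const struct pci_device_id *id)
 *	{
 *		return comedi_pci_auto_config(pcidev, &mydrv_driver,
 *					      id->driver_data);
 *	}
 */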
/**
* comedi_auto_unconfig() - Unconfigure auto-allocated COMEDI device
* @hardware_device: Hardware device previously passed to
* comedi_auto_config().
*
* Cleans up and eventually destroys the COMEDI device allocated by
* comedi_auto_config() for the same hardware device. As part of this
* clean-up, the low-level COMEDI driver's 'detach' handler will be called.
* (The COMEDI device itself will persist in an unattached state if it is
* still open, until it is released, and any mmapped buffers will persist
* until they are munmapped.)
*
 * This is usually called from a wrapper function in a bus-specific COMEDI
* module, which in turn is usually set as the bus device 'remove' function
* in the low-level COMEDI driver.
*/
void comedi_auto_unconfig(struct device *hardware_device)
{
if (!hardware_device)
return;
comedi_release_hardware_device(hardware_device);
}
EXPORT_SYMBOL_GPL(comedi_auto_unconfig);
/**
* comedi_driver_register() - Register a low-level COMEDI driver
* @driver: Low-level COMEDI driver.
*
* The low-level COMEDI driver is added to the list of registered COMEDI
* drivers. This is used by the handler for the "/proc/comedi" file and is
* also used by the handler for the %COMEDI_DEVCONFIG ioctl to configure
* "legacy" COMEDI devices (for those low-level drivers that support it).
*
* Returns 0.
*/
int comedi_driver_register(struct comedi_driver *driver)
{
mutex_lock(&comedi_drivers_list_lock);
driver->next = comedi_drivers;
comedi_drivers = driver;
mutex_unlock(&comedi_drivers_list_lock);
return 0;
}
EXPORT_SYMBOL_GPL(comedi_driver_register);
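/*
 * Example (editor's illustrative sketch): most drivers do not call
 * comedi_driver_register()/comedi_driver_unregister() directly but use the
 * module_comedi_driver() helper macro, which generates the module init/exit
 * boilerplate:
 *
 *	module_comedi_driver(mydrv_driver);
 */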
/**
* comedi_driver_unregister() - Unregister a low-level COMEDI driver
* @driver: Low-level COMEDI driver.
*
* The low-level COMEDI driver is removed from the list of registered COMEDI
* drivers. Detaches any COMEDI devices attached to the driver, which will
* result in the low-level driver's 'detach' handler being called for those
* devices before this function returns.
*/
void comedi_driver_unregister(struct comedi_driver *driver)
{
struct comedi_driver *prev;
int i;
/* unlink the driver */
mutex_lock(&comedi_drivers_list_lock);
if (comedi_drivers == driver) {
comedi_drivers = driver->next;
} else {
for (prev = comedi_drivers; prev->next; prev = prev->next) {
if (prev->next == driver) {
prev->next = driver->next;
break;
}
}
}
mutex_unlock(&comedi_drivers_list_lock);
/* check for devices using this driver */
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device *dev = comedi_dev_get_from_minor(i);
if (!dev)
continue;
mutex_lock(&dev->mutex);
if (dev->attached && dev->driver == driver) {
if (dev->use_count)
dev_warn(dev->class_dev,
"BUG! detaching device with use_count=%d\n",
dev->use_count);
comedi_device_detach(dev);
}
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
}
}
EXPORT_SYMBOL_GPL(comedi_driver_unregister);
| linux-master | drivers/comedi/drivers.c |