python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0-only /* * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions. * * Copyright (C) 2015 - 2018 Linaro Ltd. * Copyright (C) 2023 Google LLC. */ #include <asm/hwcap.h> #include <asm/neon.h> #include <asm/simd.h> #include <asm/unaligned.h> #include <crypto/aes.h> #include <crypto/gcm.h> #include <crypto/b128ops.h> #include <crypto/cryptd.h> #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/gf128mul.h> #include <crypto/scatterwalk.h> #include <linux/cpufeature.h> #include <linux/crypto.h> #include <linux/jump_label.h> #include <linux/module.h> MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("ghash"); MODULE_ALIAS_CRYPTO("gcm(aes)"); MODULE_ALIAS_CRYPTO("rfc4106(gcm(aes))"); #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 #define RFC4106_NONCE_SIZE 4 struct ghash_key { be128 k; u64 h[][2]; }; struct gcm_key { u64 h[4][2]; u32 rk[AES_MAX_KEYLENGTH_U32]; int rounds; u8 nonce[]; // for RFC4106 nonce }; struct ghash_desc_ctx { u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)]; u8 buf[GHASH_BLOCK_SIZE]; u32 count; }; struct ghash_async_ctx { struct cryptd_ahash *cryptd_tfm; }; asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, u64 const h[][2], const char *head); asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, u64 const h[][2], const char *head); static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64); static int ghash_init(struct shash_desc *desc) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); *ctx = (struct ghash_desc_ctx){}; return 0; } static void ghash_do_update(int blocks, u64 dg[], const char *src, struct ghash_key *key, const char *head) { if (likely(crypto_simd_usable())) { kernel_neon_begin(); if (static_branch_likely(&use_p64)) 
pmull_ghash_update_p64(blocks, dg, src, key->h, head); else pmull_ghash_update_p8(blocks, dg, src, key->h, head); kernel_neon_end(); } else { be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; do { const u8 *in = src; if (head) { in = head; blocks++; head = NULL; } else { src += GHASH_BLOCK_SIZE; } crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE); gf128mul_lle(&dst, &key->k); } while (--blocks); dg[0] = be64_to_cpu(dst.b); dg[1] = be64_to_cpu(dst.a); } } static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int len) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; ctx->count += len; if ((partial + len) >= GHASH_BLOCK_SIZE) { struct ghash_key *key = crypto_shash_ctx(desc->tfm); int blocks; if (partial) { int p = GHASH_BLOCK_SIZE - partial; memcpy(ctx->buf + partial, src, p); src += p; len -= p; } blocks = len / GHASH_BLOCK_SIZE; len %= GHASH_BLOCK_SIZE; ghash_do_update(blocks, ctx->digest, src, key, partial ? ctx->buf : NULL); src += blocks * GHASH_BLOCK_SIZE; partial = 0; } if (len) memcpy(ctx->buf + partial, src, len); return 0; } static int ghash_final(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; if (partial) { struct ghash_key *key = crypto_shash_ctx(desc->tfm); memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); ghash_do_update(1, ctx->digest, ctx->buf, key, NULL); } put_unaligned_be64(ctx->digest[1], dst); put_unaligned_be64(ctx->digest[0], dst + 8); *ctx = (struct ghash_desc_ctx){}; return 0; } static void ghash_reflect(u64 h[], const be128 *k) { u64 carry = be64_to_cpu(k->a) >> 63; h[0] = (be64_to_cpu(k->b) << 1) | carry; h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63); if (carry) h[1] ^= 0xc200000000000000UL; } static int ghash_setkey(struct crypto_shash *tfm, const u8 *inkey, unsigned int keylen) { struct ghash_key *key = crypto_shash_ctx(tfm); if (keylen != 
GHASH_BLOCK_SIZE) return -EINVAL; /* needed for the fallback */ memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); ghash_reflect(key->h[0], &key->k); if (static_branch_likely(&use_p64)) { be128 h = key->k; gf128mul_lle(&h, &key->k); ghash_reflect(key->h[1], &h); gf128mul_lle(&h, &key->k); ghash_reflect(key->h[2], &h); gf128mul_lle(&h, &key->k); ghash_reflect(key->h[3], &h); } return 0; } static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, .final = ghash_final, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base.cra_name = "ghash", .base.cra_driver_name = "ghash-ce-sync", .base.cra_priority = 300 - 1, .base.cra_blocksize = GHASH_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]), .base.cra_module = THIS_MODULE, }; static int ghash_async_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); desc->tfm = child; return crypto_shash_init(desc); } static int ghash_async_update(struct ahash_request *req) { struct ahash_request *cryptd_req = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_update(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); return shash_ahash_update(req, desc); } } static int ghash_async_final(struct ahash_request *req) { struct ahash_request *cryptd_req = ahash_request_ctx(req); struct crypto_ahash 
*tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_final(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); return crypto_shash_final(desc, req->result); } } static int ghash_async_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_digest(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); desc->tfm = child; return shash_ahash_digest(req, desc); } } static int ghash_async_import(struct ahash_request *req, const void *in) { struct ahash_request *cryptd_req = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct shash_desc *desc = cryptd_shash_desc(cryptd_req); desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm); return crypto_shash_import(desc, in); } static int ghash_async_export(struct ahash_request *req, void *out) { struct ahash_request *cryptd_req = ahash_request_ctx(req); struct shash_desc *desc = cryptd_shash_desc(cryptd_req); return crypto_shash_export(desc, out); } static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_ahash *child = &ctx->cryptd_tfm->base; crypto_ahash_clear_flags(child, 
CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); return crypto_ahash_setkey(child, key, keylen); } static int ghash_async_init_tfm(struct crypto_tfm *tfm) { struct cryptd_ahash *cryptd_tfm; struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); cryptd_tfm = cryptd_alloc_ahash("ghash-ce-sync", 0, 0); if (IS_ERR(cryptd_tfm)) return PTR_ERR(cryptd_tfm); ctx->cryptd_tfm = cryptd_tfm; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct ahash_request) + crypto_ahash_reqsize(&cryptd_tfm->base)); return 0; } static void ghash_async_exit_tfm(struct crypto_tfm *tfm) { struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); cryptd_free_ahash(ctx->cryptd_tfm); } static struct ahash_alg ghash_async_alg = { .init = ghash_async_init, .update = ghash_async_update, .final = ghash_async_final, .setkey = ghash_async_setkey, .digest = ghash_async_digest, .import = ghash_async_import, .export = ghash_async_export, .halg.digestsize = GHASH_DIGEST_SIZE, .halg.statesize = sizeof(struct ghash_desc_ctx), .halg.base = { .cra_name = "ghash", .cra_driver_name = "ghash-ce", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ghash_async_ctx), .cra_module = THIS_MODULE, .cra_init = ghash_async_init_tfm, .cra_exit = ghash_async_exit_tfm, }, }; void pmull_gcm_encrypt(int blocks, u64 dg[], const char *src, struct gcm_key const *k, char *dst, const char *iv, int rounds, u32 counter); void pmull_gcm_enc_final(int blocks, u64 dg[], char *tag, struct gcm_key const *k, char *head, const char *iv, int rounds, u32 counter); void pmull_gcm_decrypt(int bytes, u64 dg[], const char *src, struct gcm_key const *k, char *dst, const char *iv, int rounds, u32 counter); int pmull_gcm_dec_final(int bytes, u64 dg[], char *tag, struct gcm_key const *k, char *head, const char *iv, int rounds, u32 counter, const char *otag, int authsize); static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 
*inkey, unsigned int keylen) { struct gcm_key *ctx = crypto_aead_ctx(tfm); struct crypto_aes_ctx aes_ctx; be128 h, k; int ret; ret = aes_expandkey(&aes_ctx, inkey, keylen); if (ret) return -EINVAL; aes_encrypt(&aes_ctx, (u8 *)&k, (u8[AES_BLOCK_SIZE]){}); memcpy(ctx->rk, aes_ctx.key_enc, sizeof(ctx->rk)); ctx->rounds = 6 + keylen / 4; memzero_explicit(&aes_ctx, sizeof(aes_ctx)); ghash_reflect(ctx->h[0], &k); h = k; gf128mul_lle(&h, &k); ghash_reflect(ctx->h[1], &h); gf128mul_lle(&h, &k); ghash_reflect(ctx->h[2], &h); gf128mul_lle(&h, &k); ghash_reflect(ctx->h[3], &h); return 0; } static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { return crypto_gcm_check_authsize(authsize); } static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[], int *buf_count, struct gcm_key *ctx) { if (*buf_count > 0) { int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count); memcpy(&buf[*buf_count], src, buf_added); *buf_count += buf_added; src += buf_added; count -= buf_added; } if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) { int blocks = count / GHASH_BLOCK_SIZE; pmull_ghash_update_p64(blocks, dg, src, ctx->h, *buf_count ? 
buf : NULL); src += blocks * GHASH_BLOCK_SIZE; count %= GHASH_BLOCK_SIZE; *buf_count = 0; } if (count > 0) { memcpy(buf, src, count); *buf_count = count; } } static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct gcm_key *ctx = crypto_aead_ctx(aead); u8 buf[GHASH_BLOCK_SIZE]; struct scatter_walk walk; int buf_count = 0; scatterwalk_start(&walk, req->src); do { u32 n = scatterwalk_clamp(&walk, len); u8 *p; if (!n) { scatterwalk_start(&walk, sg_next(walk.sg)); n = scatterwalk_clamp(&walk, len); } p = scatterwalk_map(&walk); gcm_update_mac(dg, p, n, buf, &buf_count, ctx); scatterwalk_unmap(p); if (unlikely(len / SZ_4K > (len - n) / SZ_4K)) { kernel_neon_end(); kernel_neon_begin(); } len -= n; scatterwalk_advance(&walk, n); scatterwalk_done(&walk, 0, len); } while (len); if (buf_count) { memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count); pmull_ghash_update_p64(1, dg, buf, ctx->h, NULL); } } static int gcm_encrypt(struct aead_request *req, const u8 *iv, u32 assoclen) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct gcm_key *ctx = crypto_aead_ctx(aead); struct skcipher_walk walk; u8 buf[AES_BLOCK_SIZE]; u32 counter = 2; u64 dg[2] = {}; be128 lengths; const u8 *src; u8 *tag, *dst; int tail, err; if (WARN_ON_ONCE(!may_use_simd())) return -EBUSY; err = skcipher_walk_aead_encrypt(&walk, req, false); kernel_neon_begin(); if (assoclen) gcm_calculate_auth_mac(req, dg, assoclen); src = walk.src.virt.addr; dst = walk.dst.virt.addr; while (walk.nbytes >= AES_BLOCK_SIZE) { int nblocks = walk.nbytes / AES_BLOCK_SIZE; pmull_gcm_encrypt(nblocks, dg, src, ctx, dst, iv, ctx->rounds, counter); counter += nblocks; if (walk.nbytes == walk.total) { src += nblocks * AES_BLOCK_SIZE; dst += nblocks * AES_BLOCK_SIZE; break; } kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); if (err) return err; src = walk.src.virt.addr; dst = walk.dst.virt.addr; 
kernel_neon_begin(); } lengths.a = cpu_to_be64(assoclen * 8); lengths.b = cpu_to_be64(req->cryptlen * 8); tag = (u8 *)&lengths; tail = walk.nbytes % AES_BLOCK_SIZE; /* * Bounce via a buffer unless we are encrypting in place and src/dst * are not pointing to the start of the walk buffer. In that case, we * can do a NEON load/xor/store sequence in place as long as we move * the plain/ciphertext and keystream to the start of the register. If * not, do a memcpy() to the end of the buffer so we can reuse the same * logic. */ if (unlikely(tail && (tail == walk.nbytes || src != dst))) src = memcpy(buf + sizeof(buf) - tail, src, tail); pmull_gcm_enc_final(tail, dg, tag, ctx, (u8 *)src, iv, ctx->rounds, counter); kernel_neon_end(); if (unlikely(tail && src != dst)) memcpy(dst, src, tail); if (walk.nbytes) { err = skcipher_walk_done(&walk, 0); if (err) return err; } /* copy authtag to end of dst */ scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen, crypto_aead_authsize(aead), 1); return 0; } static int gcm_decrypt(struct aead_request *req, const u8 *iv, u32 assoclen) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct gcm_key *ctx = crypto_aead_ctx(aead); int authsize = crypto_aead_authsize(aead); struct skcipher_walk walk; u8 otag[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 counter = 2; u64 dg[2] = {}; be128 lengths; const u8 *src; u8 *tag, *dst; int tail, err, ret; if (WARN_ON_ONCE(!may_use_simd())) return -EBUSY; scatterwalk_map_and_copy(otag, req->src, req->assoclen + req->cryptlen - authsize, authsize, 0); err = skcipher_walk_aead_decrypt(&walk, req, false); kernel_neon_begin(); if (assoclen) gcm_calculate_auth_mac(req, dg, assoclen); src = walk.src.virt.addr; dst = walk.dst.virt.addr; while (walk.nbytes >= AES_BLOCK_SIZE) { int nblocks = walk.nbytes / AES_BLOCK_SIZE; pmull_gcm_decrypt(nblocks, dg, src, ctx, dst, iv, ctx->rounds, counter); counter += nblocks; if (walk.nbytes == walk.total) { src += nblocks * AES_BLOCK_SIZE; dst += nblocks * 
AES_BLOCK_SIZE; break; } kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); if (err) return err; src = walk.src.virt.addr; dst = walk.dst.virt.addr; kernel_neon_begin(); } lengths.a = cpu_to_be64(assoclen * 8); lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8); tag = (u8 *)&lengths; tail = walk.nbytes % AES_BLOCK_SIZE; if (unlikely(tail && (tail == walk.nbytes || src != dst))) src = memcpy(buf + sizeof(buf) - tail, src, tail); ret = pmull_gcm_dec_final(tail, dg, tag, ctx, (u8 *)src, iv, ctx->rounds, counter, otag, authsize); kernel_neon_end(); if (unlikely(tail && src != dst)) memcpy(dst, src, tail); if (walk.nbytes) { err = skcipher_walk_done(&walk, 0); if (err) return err; } return ret ? -EBADMSG : 0; } static int gcm_aes_encrypt(struct aead_request *req) { return gcm_encrypt(req, req->iv, req->assoclen); } static int gcm_aes_decrypt(struct aead_request *req) { return gcm_decrypt(req, req->iv, req->assoclen); } static int rfc4106_setkey(struct crypto_aead *tfm, const u8 *inkey, unsigned int keylen) { struct gcm_key *ctx = crypto_aead_ctx(tfm); int err; keylen -= RFC4106_NONCE_SIZE; err = gcm_aes_setkey(tfm, inkey, keylen); if (err) return err; memcpy(ctx->nonce, inkey + keylen, RFC4106_NONCE_SIZE); return 0; } static int rfc4106_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { return crypto_rfc4106_check_authsize(authsize); } static int rfc4106_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct gcm_key *ctx = crypto_aead_ctx(aead); u8 iv[GCM_AES_IV_SIZE]; memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE); memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE); return crypto_ipsec_check_assoclen(req->assoclen) ?: gcm_encrypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE); } static int rfc4106_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct gcm_key *ctx = crypto_aead_ctx(aead); u8 iv[GCM_AES_IV_SIZE]; memcpy(iv, ctx->nonce, 
RFC4106_NONCE_SIZE); memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE); return crypto_ipsec_check_assoclen(req->assoclen) ?: gcm_decrypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE); } static struct aead_alg gcm_aes_algs[] = {{ .ivsize = GCM_AES_IV_SIZE, .chunksize = AES_BLOCK_SIZE, .maxauthsize = AES_BLOCK_SIZE, .setkey = gcm_aes_setkey, .setauthsize = gcm_aes_setauthsize, .encrypt = gcm_aes_encrypt, .decrypt = gcm_aes_decrypt, .base.cra_name = "gcm(aes)", .base.cra_driver_name = "gcm-aes-ce", .base.cra_priority = 400, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct gcm_key), .base.cra_module = THIS_MODULE, }, { .ivsize = GCM_RFC4106_IV_SIZE, .chunksize = AES_BLOCK_SIZE, .maxauthsize = AES_BLOCK_SIZE, .setkey = rfc4106_setkey, .setauthsize = rfc4106_setauthsize, .encrypt = rfc4106_encrypt, .decrypt = rfc4106_decrypt, .base.cra_name = "rfc4106(gcm(aes))", .base.cra_driver_name = "rfc4106-gcm-aes-ce", .base.cra_priority = 400, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct gcm_key) + RFC4106_NONCE_SIZE, .base.cra_module = THIS_MODULE, }}; static int __init ghash_ce_mod_init(void) { int err; if (!(elf_hwcap & HWCAP_NEON)) return -ENODEV; if (elf_hwcap2 & HWCAP2_PMULL) { err = crypto_register_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs)); if (err) return err; ghash_alg.base.cra_ctxsize += 3 * sizeof(u64[2]); static_branch_enable(&use_p64); } err = crypto_register_shash(&ghash_alg); if (err) goto err_aead; err = crypto_register_ahash(&ghash_async_alg); if (err) goto err_shash; return 0; err_shash: crypto_unregister_shash(&ghash_alg); err_aead: if (elf_hwcap2 & HWCAP2_PMULL) crypto_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs)); return err; } static void __exit ghash_ce_mod_exit(void) { crypto_unregister_ahash(&ghash_async_alg); crypto_unregister_shash(&ghash_alg); if (elf_hwcap2 & HWCAP2_PMULL) crypto_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs)); } module_init(ghash_ce_mod_init); module_exit(ghash_ce_mod_exit);
linux-master
arch/arm/crypto/ghash-ce-glue.c
// SPDX-License-Identifier: GPL-2.0-only /* * aes-ce-glue.c - wrapper code for ARMv8 AES * * Copyright (C) 2015 Linaro Ltd <[email protected]> */ #include <asm/hwcap.h> #include <asm/neon.h> #include <asm/simd.h> #include <asm/unaligned.h> #include <crypto/aes.h> #include <crypto/ctr.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/cpufeature.h> #include <linux/module.h> #include <crypto/xts.h> MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <[email protected]>"); MODULE_LICENSE("GPL v2"); /* defined in aes-ce-core.S */ asmlinkage u32 ce_aes_sub(u32 input); asmlinkage void ce_aes_invert(void *dst, void *src); asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks); asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks); asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks, u8 iv[]); asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks, u8 iv[]); asmlinkage void ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int bytes, u8 const iv[]); asmlinkage void ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int bytes, u8 const iv[]); asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks, u8 ctr[]); asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds, int bytes, u8 iv[], u32 const rk2[], int first); asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds, int bytes, u8 iv[], u32 const rk2[], int first); struct aes_block { u8 b[AES_BLOCK_SIZE]; }; static int num_rounds(struct crypto_aes_ctx *ctx) { /* * # of rounds specified by AES: * 128 bit key 10 rounds * 192 bit key 12 rounds * 256 bit key 14 rounds * 
=> n byte key => 6 + (n/4) rounds */ return 6 + ctx->key_length / 4; } static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) { /* * The AES key schedule round constants */ static u8 const rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, }; u32 kwords = key_len / sizeof(u32); struct aes_block *key_enc, *key_dec; int i, j; if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && key_len != AES_KEYSIZE_256) return -EINVAL; ctx->key_length = key_len; for (i = 0; i < kwords; i++) ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32)); kernel_neon_begin(); for (i = 0; i < sizeof(rcon); i++) { u32 *rki = ctx->key_enc + (i * kwords); u32 *rko = rki + kwords; rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8); rko[0] = rko[0] ^ rki[0] ^ rcon[i]; rko[1] = rko[0] ^ rki[1]; rko[2] = rko[1] ^ rki[2]; rko[3] = rko[2] ^ rki[3]; if (key_len == AES_KEYSIZE_192) { if (i >= 7) break; rko[4] = rko[3] ^ rki[4]; rko[5] = rko[4] ^ rki[5]; } else if (key_len == AES_KEYSIZE_256) { if (i >= 6) break; rko[4] = ce_aes_sub(rko[3]) ^ rki[4]; rko[5] = rko[4] ^ rki[5]; rko[6] = rko[5] ^ rki[6]; rko[7] = rko[6] ^ rki[7]; } } /* * Generate the decryption keys for the Equivalent Inverse Cipher. * This involves reversing the order of the round keys, and applying * the Inverse Mix Columns transformation on all but the first and * the last one. 
*/ key_enc = (struct aes_block *)ctx->key_enc; key_dec = (struct aes_block *)ctx->key_dec; j = num_rounds(ctx); key_dec[0] = key_enc[j]; for (i = 1, j--; j > 0; i++, j--) ce_aes_invert(key_dec + i, key_enc + j); key_dec[i] = key_enc[0]; kernel_neon_end(); return 0; } static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); return ce_aes_expandkey(ctx, in_key, key_len); } struct crypto_aes_xts_ctx { struct crypto_aes_ctx key1; struct crypto_aes_ctx __aligned(8) key2; }; static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; ret = xts_verify_key(tfm, in_key, key_len); if (ret) return ret; ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2); if (!ret) ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2], key_len / 2); return ret; } static int ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int blocks; int err; err = skcipher_walk_virt(&walk, req, false); while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_enc, num_rounds(ctx), blocks); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } return err; } static int ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int blocks; int err; err = skcipher_walk_virt(&walk, req, false); while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_dec, num_rounds(ctx), blocks); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % 
AES_BLOCK_SIZE); } return err; } static int cbc_encrypt_walk(struct skcipher_request *req, struct skcipher_walk *walk) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); unsigned int blocks; int err = 0; while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); ce_aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr, ctx->key_enc, num_rounds(ctx), blocks, walk->iv); kernel_neon_end(); err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE); } return err; } static int cbc_encrypt(struct skcipher_request *req) { struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); if (err) return err; return cbc_encrypt_walk(req, &walk); } static int cbc_decrypt_walk(struct skcipher_request *req, struct skcipher_walk *walk) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); unsigned int blocks; int err = 0; while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); ce_aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr, ctx->key_dec, num_rounds(ctx), blocks, walk->iv); kernel_neon_end(); err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE); } return err; } static int cbc_decrypt(struct skcipher_request *req) { struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); if (err) return err; return cbc_decrypt_walk(req, &walk); } static int cts_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; struct scatterlist *src = req->src, *dst = req->dst; struct scatterlist sg_src[2], sg_dst[2]; struct skcipher_request subreq; struct skcipher_walk walk; int err; skcipher_request_set_tfm(&subreq, tfm); skcipher_request_set_callback(&subreq, skcipher_request_flags(req), NULL, NULL); if 
(req->cryptlen <= AES_BLOCK_SIZE) { if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; cbc_blocks = 1; } if (cbc_blocks > 0) { skcipher_request_set_crypt(&subreq, req->src, req->dst, cbc_blocks * AES_BLOCK_SIZE, req->iv); err = skcipher_walk_virt(&walk, &subreq, false) ?: cbc_encrypt_walk(&subreq, &walk); if (err) return err; if (req->cryptlen == AES_BLOCK_SIZE) return 0; dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen); if (req->dst != req->src) dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen); } /* handle ciphertext stealing */ skcipher_request_set_crypt(&subreq, src, dst, req->cryptlen - cbc_blocks * AES_BLOCK_SIZE, req->iv); err = skcipher_walk_virt(&walk, &subreq, false); if (err) return err; kernel_neon_begin(); ce_aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_enc, num_rounds(ctx), walk.nbytes, walk.iv); kernel_neon_end(); return skcipher_walk_done(&walk, 0); } static int cts_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; struct scatterlist *src = req->src, *dst = req->dst; struct scatterlist sg_src[2], sg_dst[2]; struct skcipher_request subreq; struct skcipher_walk walk; int err; skcipher_request_set_tfm(&subreq, tfm); skcipher_request_set_callback(&subreq, skcipher_request_flags(req), NULL, NULL); if (req->cryptlen <= AES_BLOCK_SIZE) { if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; cbc_blocks = 1; } if (cbc_blocks > 0) { skcipher_request_set_crypt(&subreq, req->src, req->dst, cbc_blocks * AES_BLOCK_SIZE, req->iv); err = skcipher_walk_virt(&walk, &subreq, false) ?: cbc_decrypt_walk(&subreq, &walk); if (err) return err; if (req->cryptlen == AES_BLOCK_SIZE) return 0; dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen); if (req->dst != req->src) dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen); } /* handle 
ciphertext stealing */ skcipher_request_set_crypt(&subreq, src, dst, req->cryptlen - cbc_blocks * AES_BLOCK_SIZE, req->iv); err = skcipher_walk_virt(&walk, &subreq, false); if (err) return err; kernel_neon_begin(); ce_aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_dec, num_rounds(ctx), walk.nbytes, walk.iv); kernel_neon_end(); return skcipher_walk_done(&walk, 0); } static int ctr_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; int err, blocks; err = skcipher_walk_virt(&walk, req, false); while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_enc, num_rounds(ctx), blocks, walk.iv); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } if (walk.nbytes) { u8 __aligned(8) tail[AES_BLOCK_SIZE]; unsigned int nbytes = walk.nbytes; u8 *tdst = walk.dst.virt.addr; u8 *tsrc = walk.src.virt.addr; /* * Tell aes_ctr_encrypt() to process a tail block. */ blocks = -1; kernel_neon_begin(); ce_aes_ctr_encrypt(tail, NULL, ctx->key_enc, num_rounds(ctx), blocks, walk.iv); kernel_neon_end(); crypto_xor_cpy(tdst, tsrc, tail, nbytes); err = skcipher_walk_done(&walk, 0); } return err; } static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) { struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); unsigned long flags; /* * Temporarily disable interrupts to avoid races where * cachelines are evicted when the CPU is interrupted * to do something else. 
*/ local_irq_save(flags); aes_encrypt(ctx, dst, src); local_irq_restore(flags); } static int ctr_encrypt_sync(struct skcipher_request *req) { if (!crypto_simd_usable()) return crypto_ctr_encrypt_walk(req, ctr_encrypt_one); return ctr_encrypt(req); } static int xts_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int err, first, rounds = num_rounds(&ctx->key1); int tail = req->cryptlen % AES_BLOCK_SIZE; struct scatterlist sg_src[2], sg_dst[2]; struct skcipher_request subreq; struct scatterlist *src, *dst; struct skcipher_walk walk; if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; err = skcipher_walk_virt(&walk, req, false); if (unlikely(tail > 0 && walk.nbytes < walk.total)) { int xts_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; skcipher_walk_abort(&walk); skcipher_request_set_tfm(&subreq, tfm); skcipher_request_set_callback(&subreq, skcipher_request_flags(req), NULL, NULL); skcipher_request_set_crypt(&subreq, req->src, req->dst, xts_blocks * AES_BLOCK_SIZE, req->iv); req = &subreq; err = skcipher_walk_virt(&walk, req, false); } else { tail = 0; } for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) { int nbytes = walk.nbytes; if (walk.nbytes < walk.total) nbytes &= ~(AES_BLOCK_SIZE - 1); kernel_neon_begin(); ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key1.key_enc, rounds, nbytes, walk.iv, ctx->key2.key_enc, first); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } if (err || likely(!tail)) return err; dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); if (req->dst != req->src) dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, req->iv); err = skcipher_walk_virt(&walk, req, false); if (err) return err; kernel_neon_begin(); ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key1.key_enc, rounds, 
walk.nbytes, walk.iv, ctx->key2.key_enc, first); kernel_neon_end(); return skcipher_walk_done(&walk, 0); } static int xts_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int err, first, rounds = num_rounds(&ctx->key1); int tail = req->cryptlen % AES_BLOCK_SIZE; struct scatterlist sg_src[2], sg_dst[2]; struct skcipher_request subreq; struct scatterlist *src, *dst; struct skcipher_walk walk; if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; err = skcipher_walk_virt(&walk, req, false); if (unlikely(tail > 0 && walk.nbytes < walk.total)) { int xts_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; skcipher_walk_abort(&walk); skcipher_request_set_tfm(&subreq, tfm); skcipher_request_set_callback(&subreq, skcipher_request_flags(req), NULL, NULL); skcipher_request_set_crypt(&subreq, req->src, req->dst, xts_blocks * AES_BLOCK_SIZE, req->iv); req = &subreq; err = skcipher_walk_virt(&walk, req, false); } else { tail = 0; } for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) { int nbytes = walk.nbytes; if (walk.nbytes < walk.total) nbytes &= ~(AES_BLOCK_SIZE - 1); kernel_neon_begin(); ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key1.key_dec, rounds, nbytes, walk.iv, ctx->key2.key_enc, first); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } if (err || likely(!tail)) return err; dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); if (req->dst != req->src) dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, req->iv); err = skcipher_walk_virt(&walk, req, false); if (err) return err; kernel_neon_begin(); ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key1.key_dec, rounds, walk.nbytes, walk.iv, ctx->key2.key_enc, first); kernel_neon_end(); return skcipher_walk_done(&walk, 0); } static struct skcipher_alg aes_algs[] = 
{ { .base.cra_name = "__ecb(aes)", .base.cra_driver_name = "__ecb-aes-ce", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = ce_aes_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "__cbc(aes)", .base.cra_driver_name = "__cbc-aes-ce", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ce_aes_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, { .base.cra_name = "__cts(cbc(aes))", .base.cra_driver_name = "__cts-cbc-aes-ce", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .walksize = 2 * AES_BLOCK_SIZE, .setkey = ce_aes_setkey, .encrypt = cts_cbc_encrypt, .decrypt = cts_cbc_decrypt, }, { .base.cra_name = "__ctr(aes)", .base.cra_driver_name = "__ctr-aes-ce", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, .setkey = ce_aes_setkey, .encrypt = ctr_encrypt, .decrypt = ctr_encrypt, }, { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "ctr-aes-ce-sync", .base.cra_priority = 300 - 1, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, 
.max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, .setkey = ce_aes_setkey, .encrypt = ctr_encrypt_sync, .decrypt = ctr_encrypt_sync, }, { .base.cra_name = "__xts(aes)", .base.cra_driver_name = "__xts-aes-ce", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx), .base.cra_module = THIS_MODULE, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .walksize = 2 * AES_BLOCK_SIZE, .setkey = xts_set_key, .encrypt = xts_encrypt, .decrypt = xts_decrypt, } }; static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; static void aes_exit(void) { int i; for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++) simd_skcipher_free(aes_simd_algs[i]); crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); } static int __init aes_init(void) { struct simd_skcipher_alg *simd; const char *basename; const char *algname; const char *drvname; int err; int i; err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); if (err) return err; for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL)) continue; algname = aes_algs[i].base.cra_name + 2; drvname = aes_algs[i].base.cra_driver_name + 2; basename = aes_algs[i].base.cra_driver_name; simd = simd_skcipher_create_compat(algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; aes_simd_algs[i] = simd; } return 0; unregister_simds: aes_exit(); return err; } module_cpu_feature_match(AES, aes_init); module_exit(aes_exit);
linux-master
arch/arm/crypto/aes-ce-glue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * sha512-glue.c - accelerated SHA-384/512 for ARM
 *
 * Scalar (non-NEON) assembly implementation of SHA-384/512.  The
 * sha512_arm_update/finup entry points are deliberately non-static: the
 * NEON glue (sha512-neon-glue.c) uses them as its fallback path via
 * "sha512.h".
 *
 * Copyright (C) 2015 Linaro Ltd <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
#include <linux/module.h>

#include <asm/hwcap.h>
#include <asm/neon.h>

#include "sha512.h"

MODULE_DESCRIPTION("Accelerated SHA-384/SHA-512 secure hash for ARM");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");
MODULE_ALIAS_CRYPTO("sha384-arm");
MODULE_ALIAS_CRYPTO("sha512-arm");

/* SHA-512 block transform implemented in assembly (scalar code). */
asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);

/*
 * Hash @len bytes at @data into the partial state held in the shash
 * descriptor, buffering any trailing partial block.
 */
int sha512_arm_update(struct shash_desc *desc, const u8 *data,
		      unsigned int len)
{
	/*
	 * NOTE(review): the cast assumes the u64 state words sit at offset 0
	 * of struct sha512_state so the asm routine can be called through the
	 * generic sha512_block_fn signature - confirm against the struct
	 * layout (sha1_glue.c asserts the analogous property with
	 * BUILD_BUG_ON).
	 */
	return sha512_base_do_update(desc, data, len,
		(sha512_block_fn *)sha512_block_data_order);
}

/* Pad the final block and write the digest to @out. */
static int sha512_arm_final(struct shash_desc *desc, u8 *out)
{
	sha512_base_do_finalize(desc,
		(sha512_block_fn *)sha512_block_data_order);
	return sha512_base_finish(desc, out);
}

/* update + final in one call; also serves as the NEON glue's fallback. */
int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
		     unsigned int len, u8 *out)
{
	sha512_base_do_update(desc, data, len,
		(sha512_block_fn *)sha512_block_data_order);
	return sha512_arm_final(desc, out);
}

/*
 * SHA-384 and SHA-512 share the same block function and state; they differ
 * only in initial values (init fn) and digest size.
 */
static struct shash_alg sha512_arm_algs[] = { {
	.init			= sha384_base_init,
	.update			= sha512_arm_update,
	.final			= sha512_arm_final,
	.finup			= sha512_arm_finup,
	.descsize		= sizeof(struct sha512_state),
	.digestsize		= SHA384_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha384",
		.cra_driver_name	= "sha384-arm",
		.cra_priority		= 250,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
}, {
	.init			= sha512_base_init,
	.update			= sha512_arm_update,
	.final			= sha512_arm_final,
	.finup			= sha512_arm_finup,
	.descsize		= sizeof(struct sha512_state),
	.digestsize		= SHA512_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha512",
		.cra_driver_name	= "sha512-arm",
		.cra_priority		= 250,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
} };

static int __init sha512_arm_mod_init(void)
{
	int err;

	/* Always register the scalar variants ... */
	err = crypto_register_shashes(sha512_arm_algs,
				      ARRAY_SIZE(sha512_arm_algs));
	if (err)
		return err;

	/* ... and additionally the higher-priority NEON variants when the
	 * CPU has NEON and kernel-mode NEON is built in. */
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
		err = crypto_register_shashes(sha512_neon_algs,
					      ARRAY_SIZE(sha512_neon_algs));
		if (err)
			goto err_unregister;
	}
	return 0;

err_unregister:
	/* Roll back the scalar registration on NEON registration failure. */
	crypto_unregister_shashes(sha512_arm_algs,
				  ARRAY_SIZE(sha512_arm_algs));

	return err;
}

static void __exit sha512_arm_mod_fini(void)
{
	crypto_unregister_shashes(sha512_arm_algs,
				  ARRAY_SIZE(sha512_arm_algs));
	/* Mirror the conditional registration done in init. */
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
		crypto_unregister_shashes(sha512_neon_algs,
					  ARRAY_SIZE(sha512_neon_algs));
}

module_init(sha512_arm_mod_init);
module_exit(sha512_arm_mod_fini);
linux-master
arch/arm/crypto/sha512-glue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
 *
 * Thin glue that routes to the CE assembly transform when kernel-mode
 * NEON is usable, falling back to the scalar ARM implementation
 * (sha1_update_arm/sha1_finup_arm from "sha1.h") otherwise.
 *
 * Copyright (C) 2015 Linaro Ltd <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>

#include "sha1.h"

MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");

/* ARMv8 Crypto Extensions SHA-1 block transform (assembly). */
asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
				  int blocks);

static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	/*
	 * Use the scalar fallback when SIMD is not usable in this context,
	 * or when this update cannot complete even one full block (the data
	 * would only be buffered, so NEON setup cost is not worth paying).
	 */
	if (!crypto_simd_usable() ||
	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
		return sha1_update_arm(desc, data, len);

	kernel_neon_begin();
	sha1_base_do_update(desc, data, len, sha1_ce_transform);
	kernel_neon_end();

	return 0;
}

/* Final absorb + padding + digest output, with scalar fallback. */
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	if (!crypto_simd_usable())
		return sha1_finup_arm(desc, data, len, out);

	kernel_neon_begin();
	if (len)
		sha1_base_do_update(desc, data, len, sha1_ce_transform);
	sha1_base_do_finalize(desc, sha1_ce_transform);
	kernel_neon_end();

	return sha1_base_finish(desc, out);
}

/* .final is just .finup with no trailing data. */
static int sha1_ce_final(struct shash_desc *desc, u8 *out)
{
	return sha1_ce_finup(desc, NULL, 0, out);
}

static struct shash_alg alg = {
	.init			= sha1_base_init,
	.update			= sha1_ce_update,
	.final			= sha1_ce_final,
	.finup			= sha1_ce_finup,
	.descsize		= sizeof(struct sha1_state),
	.digestsize		= SHA1_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-ce",
		.cra_priority		= 200,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

static int __init sha1_ce_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit sha1_ce_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

/* Only load on CPUs advertising the SHA1 crypto extension. */
module_cpu_feature_match(SHA1, sha1_ce_mod_init);
module_exit(sha1_ce_mod_fini);
linux-master
arch/arm/crypto/sha1-ce-glue.c
// SPDX-License-Identifier: GPL-2.0 /* * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum * (NEON accelerated version) * * Copyright 2018 Google LLC */ #include <asm/neon.h> #include <asm/simd.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <crypto/nhpoly1305.h> #include <linux/module.h> asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len, __le64 hash[NH_NUM_PASSES]); static int nhpoly1305_neon_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { if (srclen < 64 || !crypto_simd_usable()) return crypto_nhpoly1305_update(desc, src, srclen); do { unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_neon_begin(); crypto_nhpoly1305_update_helper(desc, src, n, nh_neon); kernel_neon_end(); src += n; srclen -= n; } while (srclen); return 0; } static struct shash_alg nhpoly1305_alg = { .base.cra_name = "nhpoly1305", .base.cra_driver_name = "nhpoly1305-neon", .base.cra_priority = 200, .base.cra_ctxsize = sizeof(struct nhpoly1305_key), .base.cra_module = THIS_MODULE, .digestsize = POLY1305_DIGEST_SIZE, .init = crypto_nhpoly1305_init, .update = nhpoly1305_neon_update, .final = crypto_nhpoly1305_final, .setkey = crypto_nhpoly1305_setkey, .descsize = sizeof(struct nhpoly1305_state), }; static int __init nhpoly1305_mod_init(void) { if (!(elf_hwcap & HWCAP_NEON)) return -ENODEV; return crypto_register_shash(&nhpoly1305_alg); } static void __exit nhpoly1305_mod_exit(void) { crypto_unregister_shash(&nhpoly1305_alg); } module_init(nhpoly1305_mod_init); module_exit(nhpoly1305_mod_exit); MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (NEON-accelerated)"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Eric Biggers <[email protected]>"); MODULE_ALIAS_CRYPTO("nhpoly1305"); MODULE_ALIAS_CRYPTO("nhpoly1305-neon");
linux-master
arch/arm/crypto/nhpoly1305-neon-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation
 *
 * Scalar ARM assembly SHA-1.  sha1_update_arm/sha1_finup_arm are exported
 * (via "sha1.h") as the fallback path for the NEON and CE glue modules.
 *
 * This file is based on sha1_generic.c and sha1_ssse3_glue.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <[email protected]>
 * Copyright (c) Jean-Francois Dive <[email protected]>
 * Copyright (c) Mathias Krause <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>

#include "sha1.h"

/* SHA-1 block transform implemented in assembly (scalar code). */
asmlinkage void sha1_block_data_order(struct sha1_state *digest,
		const u8 *data, int rounds);

int sha1_update_arm(struct shash_desc *desc, const u8 *data,
		    unsigned int len)
{
	/* make sure signature matches sha1_block_fn() */
	BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);

	return sha1_base_do_update(desc, data, len, sha1_block_data_order);
}
EXPORT_SYMBOL_GPL(sha1_update_arm);

/* Pad the final block and write the digest to @out. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
	sha1_base_do_finalize(desc, sha1_block_data_order);
	return sha1_base_finish(desc, out);
}

/* update + final in one call; also used by the NEON/CE fallbacks. */
int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
		   unsigned int len, u8 *out)
{
	sha1_base_do_update(desc, data, len, sha1_block_data_order);
	return sha1_final(desc, out);
}
EXPORT_SYMBOL_GPL(sha1_finup_arm);

static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_base_init,
	.update		=	sha1_update_arm,
	.final		=	sha1_final,
	.finup		=	sha1_finup_arm,
	.descsize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	= "sha1",
		.cra_driver_name= "sha1-asm",
		.cra_priority	= 150,
		.cra_blocksize	= SHA1_BLOCK_SIZE,
		.cra_module	= THIS_MODULE,
	}
};

static int __init sha1_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit sha1_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_mod_init);
module_exit(sha1_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
MODULE_ALIAS_CRYPTO("sha1");
MODULE_AUTHOR("David McCullough <[email protected]>");
linux-master
arch/arm/crypto/sha1_glue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * sha512-neon-glue.c - accelerated SHA-384/512 for ARM NEON
 *
 * NEON variants registered by sha512-glue.c; falls back to the scalar
 * sha512_arm_update/finup (from "sha512.h") when SIMD is not usable.
 *
 * Copyright (C) 2015 Linaro Ltd <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
#include <linux/module.h>

#include <asm/simd.h>
#include <asm/neon.h>

#include "sha512.h"

MODULE_ALIAS_CRYPTO("sha384-neon");
MODULE_ALIAS_CRYPTO("sha512-neon");

/* SHA-512 block transform implemented with NEON instructions (assembly). */
asmlinkage void sha512_block_data_order_neon(struct sha512_state *state,
					     const u8 *src, int blocks);

static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);

	/*
	 * Fall back to the scalar code when SIMD is unusable, or when this
	 * update cannot complete a full block (data would only be buffered).
	 */
	if (!crypto_simd_usable() ||
	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
		return sha512_arm_update(desc, data, len);

	kernel_neon_begin();
	sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
	kernel_neon_end();

	return 0;
}

/* Final absorb + padding + digest output, with scalar fallback. */
static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
			     unsigned int len, u8 *out)
{
	if (!crypto_simd_usable())
		return sha512_arm_finup(desc, data, len, out);

	kernel_neon_begin();
	if (len)
		sha512_base_do_update(desc, data, len,
				      sha512_block_data_order_neon);
	sha512_base_do_finalize(desc, sha512_block_data_order_neon);
	kernel_neon_end();

	return sha512_base_finish(desc, out);
}

/* .final is just .finup with no trailing data. */
static int sha512_neon_final(struct shash_desc *desc, u8 *out)
{
	return sha512_neon_finup(desc, NULL, 0, out);
}

/* Non-static: registered/unregistered by sha512-glue.c via "sha512.h". */
struct shash_alg sha512_neon_algs[] = { {
	.init			= sha384_base_init,
	.update			= sha512_neon_update,
	.final			= sha512_neon_final,
	.finup			= sha512_neon_finup,
	.descsize		= sizeof(struct sha512_state),
	.digestsize		= SHA384_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha384",
		.cra_driver_name	= "sha384-neon",
		.cra_priority		= 300,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,

	}
}, {
	.init			= sha512_base_init,
	.update			= sha512_neon_update,
	.final			= sha512_neon_final,
	.finup			= sha512_neon_finup,
	.descsize		= sizeof(struct sha512_state),
	.digestsize		= SHA512_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha512",
		.cra_driver_name	= "sha512-neon",
		.cra_priority		= 300,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
} };
linux-master
arch/arm/crypto/sha512-neon-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
 * using NEON instructions.
 *
 * NEON variants registered elsewhere via "sha256_glue.h"; falls back to the
 * scalar crypto_sha256_arm_update/finup when SIMD is not usable.
 *
 * Copyright © 2015 Google Inc.
 *
 * This file is based on sha512_neon_glue.c:
 * Copyright © 2014 Jussi Kivilinna <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <asm/byteorder.h>
#include <asm/simd.h>
#include <asm/neon.h>

#include "sha256_glue.h"

/* SHA-256 block transform implemented with NEON instructions (assembly). */
asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest,
					     const u8 *data, int num_blks);

static int crypto_sha256_neon_update(struct shash_desc *desc, const u8 *data,
				     unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	/*
	 * Fall back to the scalar code when SIMD is unusable, or when this
	 * update cannot complete a full block (data would only be buffered).
	 */
	if (!crypto_simd_usable() ||
	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
		return crypto_sha256_arm_update(desc, data, len);

	kernel_neon_begin();
	sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
	kernel_neon_end();

	return 0;
}

/* Final absorb + padding + digest output, with scalar fallback. */
static int crypto_sha256_neon_finup(struct shash_desc *desc, const u8 *data,
				    unsigned int len, u8 *out)
{
	if (!crypto_simd_usable())
		return crypto_sha256_arm_finup(desc, data, len, out);

	kernel_neon_begin();
	if (len)
		sha256_base_do_update(desc, data, len,
				      sha256_block_data_order_neon);
	sha256_base_do_finalize(desc, sha256_block_data_order_neon);
	kernel_neon_end();

	return sha256_base_finish(desc, out);
}

/* .final is just .finup with no trailing data. */
static int crypto_sha256_neon_final(struct shash_desc *desc, u8 *out)
{
	return crypto_sha256_neon_finup(desc, NULL, 0, out);
}

/* SHA-256 and SHA-224 share the transform; they differ in init + digest. */
struct shash_alg sha256_neon_algs[] = { {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= sha256_base_init,
	.update		= crypto_sha256_neon_update,
	.final		= crypto_sha256_neon_final,
	.finup		= crypto_sha256_neon_finup,
	.descsize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name	= "sha256",
		.cra_driver_name = "sha256-neon",
		.cra_priority	= 250,
		.cra_blocksize	= SHA256_BLOCK_SIZE,
		.cra_module	= THIS_MODULE,
	}
}, {
	.digestsize	= SHA224_DIGEST_SIZE,
	.init		= sha224_base_init,
	.update		= crypto_sha256_neon_update,
	.final		= crypto_sha256_neon_final,
	.finup		= crypto_sha256_neon_finup,
	.descsize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name	= "sha224",
		.cra_driver_name = "sha224-neon",
		.cra_priority	= 250,
		.cra_blocksize	= SHA224_BLOCK_SIZE,
		.cra_module	= THIS_MODULE,
	}
} };
linux-master
arch/arm/crypto/sha256_neon_glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * ARM NEON instructions.
 *
 * Falls back to the scalar sha1_update_arm/sha1_finup_arm (from "sha1.h")
 * when SIMD is not usable.
 *
 * Copyright © 2014 Jussi Kivilinna <[email protected]>
 *
 * This file is based on sha1_generic.c and sha1_ssse3_glue.c:
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <[email protected]>
 * Copyright (c) Jean-Francois Dive <[email protected]>
 * Copyright (c) Mathias Krause <[email protected]>
 * Copyright (c) Chandramouli Narayanan <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/neon.h>
#include <asm/simd.h>

#include "sha1.h"

/* SHA-1 block transform implemented with NEON instructions (assembly). */
asmlinkage void sha1_transform_neon(struct sha1_state *state_h,
				    const u8 *data, int rounds);

static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	/*
	 * Fall back to the scalar code when SIMD is unusable, or when this
	 * update cannot complete a full block (data would only be buffered).
	 */
	if (!crypto_simd_usable() ||
	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
		return sha1_update_arm(desc, data, len);

	kernel_neon_begin();
	sha1_base_do_update(desc, data, len, sha1_transform_neon);
	kernel_neon_end();

	return 0;
}

/* Final absorb + padding + digest output, with scalar fallback. */
static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
			   unsigned int len, u8 *out)
{
	if (!crypto_simd_usable())
		return sha1_finup_arm(desc, data, len, out);

	kernel_neon_begin();
	if (len)
		sha1_base_do_update(desc, data, len, sha1_transform_neon);
	sha1_base_do_finalize(desc, sha1_transform_neon);
	kernel_neon_end();

	return sha1_base_finish(desc, out);
}

/* .final is just .finup with no trailing data. */
static int sha1_neon_final(struct shash_desc *desc, u8 *out)
{
	return sha1_neon_finup(desc, NULL, 0, out);
}

static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_base_init,
	.update		=	sha1_neon_update,
	.final		=	sha1_neon_final,
	.finup		=	sha1_neon_finup,
	.descsize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-neon",
		.cra_priority		= 250,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

static int __init sha1_neon_mod_init(void)
{
	/* Refuse to load on CPUs without NEON. */
	if (!cpu_has_neon())
		return -ENODEV;

	return crypto_register_shash(&alg);
}

static void __exit sha1_neon_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_neon_mod_init);
module_exit(sha1_neon_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
MODULE_ALIAS_CRYPTO("sha1");
linux-master
arch/arm/crypto/sha1_neon_glue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
 *
 * Routes to the CE assembly transform when kernel-mode NEON is usable;
 * falls back to the scalar crypto_sha256_arm_update/finup (from
 * "sha256_glue.h") otherwise.
 *
 * Copyright (C) 2015 Linaro Ltd <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

#include <asm/hwcap.h>
#include <asm/simd.h>
#include <asm/neon.h>
#include <asm/unaligned.h>

#include "sha256_glue.h"

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");

/* ARMv8 Crypto Extensions SHA-256 block transform (assembly). */
asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
				  int blocks);

static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	/*
	 * Fall back to the scalar code when SIMD is unusable, or when this
	 * update cannot complete a full block (data would only be buffered).
	 */
	if (!crypto_simd_usable() ||
	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
		return crypto_sha256_arm_update(desc, data, len);

	kernel_neon_begin();
	sha256_base_do_update(desc, data, len,
			      (sha256_block_fn *)sha2_ce_transform);
	kernel_neon_end();

	return 0;
}

/* Final absorb + padding + digest output, with scalar fallback. */
static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	if (!crypto_simd_usable())
		return crypto_sha256_arm_finup(desc, data, len, out);

	kernel_neon_begin();
	if (len)
		sha256_base_do_update(desc, data, len,
				      (sha256_block_fn *)sha2_ce_transform);
	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
	kernel_neon_end();

	return sha256_base_finish(desc, out);
}

/* .final is just .finup with no trailing data. */
static int sha2_ce_final(struct shash_desc *desc, u8 *out)
{
	return sha2_ce_finup(desc, NULL, 0, out);
}

/* SHA-224 and SHA-256 share the transform; they differ in init + digest. */
static struct shash_alg algs[] = { {
	.init			= sha224_base_init,
	.update			= sha2_ce_update,
	.final			= sha2_ce_final,
	.finup			= sha2_ce_finup,
	.descsize		= sizeof(struct sha256_state),
	.digestsize		= SHA224_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha224",
		.cra_driver_name	= "sha224-ce",
		.cra_priority		= 300,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
}, {
	.init			= sha256_base_init,
	.update			= sha2_ce_update,
	.final			= sha2_ce_final,
	.finup			= sha2_ce_finup,
	.descsize		= sizeof(struct sha256_state),
	.digestsize		= SHA256_DIGEST_SIZE,
	.base			= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-ce",
		.cra_priority		= 300,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
} };

static int __init sha2_ce_mod_init(void)
{
	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}

static void __exit sha2_ce_mod_fini(void)
{
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

/* Only load on CPUs advertising the SHA2 crypto extension. */
module_cpu_feature_match(SHA2, sha2_ce_mod_init);
module_exit(sha2_ce_mod_fini);
linux-master
arch/arm/crypto/sha2-ce-glue.c
// SPDX-License-Identifier: GPL-2.0
/*
 * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
 *
 * Provides both the crypto_shash interface ("poly1305-arm" and, with
 * kernel-mode NEON, "poly1305-neon") and the library interface
 * (poly1305_init_arch/update_arch/final_arch).
 *
 * Copyright (C) 2019 Linaro Ltd. <[email protected]>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/jump_label.h>
#include <linux/module.h>

/* Assembly routines (OpenSSL/Cryptogams). */
void poly1305_init_arm(void *state, const u8 *key);
void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit);
void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce);

/*
 * Weak stub so the module links when the NEON implementation is not built;
 * the have_neon static key keeps it from ever being called in that case.
 */
void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
{
}

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);

/*
 * Library API: initialise @dctx from a 32-byte key. The first 16 key bytes
 * seed the 'r' part via the asm init; the last 16 bytes become the nonce 's'.
 */
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
	poly1305_init_arm(&dctx->h, key);
	dctx->s[0] = get_unaligned_le32(key + 16);
	dctx->s[1] = get_unaligned_le32(key + 20);
	dctx->s[2] = get_unaligned_le32(key + 24);
	dctx->s[3] = get_unaligned_le32(key + 28);
	dctx->buflen = 0;
}
EXPORT_SYMBOL(poly1305_init_arch);

/* shash API: the key arrives through the data stream, so start unkeyed. */
static int arm_poly1305_init(struct shash_desc *desc)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->buflen = 0;
	dctx->rset = 0;
	dctx->sset = false;

	return 0;
}

/*
 * Process full blocks from @src.  For the shash interface the first two
 * blocks are consumed as key material: block one sets 'r' (rset), block two
 * sets the nonce 's' (sset); only then does real hashing start.
 */
static void arm_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
				u32 len, u32 hibit, bool do_neon)
{
	if (unlikely(!dctx->sset)) {
		if (!dctx->rset) {
			poly1305_init_arm(&dctx->h, src);
			src += POLY1305_BLOCK_SIZE;
			len -= POLY1305_BLOCK_SIZE;
			dctx->rset = 1;
		}
		if (len >= POLY1305_BLOCK_SIZE) {
			dctx->s[0] = get_unaligned_le32(src + 0);
			dctx->s[1] = get_unaligned_le32(src + 4);
			dctx->s[2] = get_unaligned_le32(src + 8);
			dctx->s[3] = get_unaligned_le32(src + 12);
			src += POLY1305_BLOCK_SIZE;
			len -= POLY1305_BLOCK_SIZE;
			dctx->sset = true;
		}
		if (len < POLY1305_BLOCK_SIZE)
			return;
	}

	/* Only whole blocks; any tail is buffered by the caller. */
	len &= ~(POLY1305_BLOCK_SIZE - 1);

	if (static_branch_likely(&have_neon) && likely(do_neon))
		poly1305_blocks_neon(&dctx->h, src, len, hibit);
	else
		poly1305_blocks_arm(&dctx->h, src, len, hibit);
}

/*
 * Buffered update: drain any partial block in dctx->buf first, then hash
 * whole blocks directly from @src, then stash the remainder in the buffer.
 */
static void arm_poly1305_do_update(struct poly1305_desc_ctx *dctx,
				   const u8 *src, u32 len, bool do_neon)
{
	if (unlikely(dctx->buflen)) {
		u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);

		memcpy(dctx->buf + dctx->buflen, src, bytes);
		src += bytes;
		len -= bytes;
		dctx->buflen += bytes;

		if (dctx->buflen == POLY1305_BLOCK_SIZE) {
			/* Single buffered block: not worth the NEON setup. */
			arm_poly1305_blocks(dctx, dctx->buf,
					    POLY1305_BLOCK_SIZE, 1, false);
			dctx->buflen = 0;
		}
	}

	if (likely(len >= POLY1305_BLOCK_SIZE)) {
		arm_poly1305_blocks(dctx, src, len, 1, do_neon);
		src += round_down(len, POLY1305_BLOCK_SIZE);
		len %= POLY1305_BLOCK_SIZE;
	}

	if (unlikely(len)) {
		dctx->buflen = len;
		memcpy(dctx->buf, src, len);
	}
}

/* shash .update for the scalar-only "poly1305-arm" instance. */
static int arm_poly1305_update(struct shash_desc *desc,
			       const u8 *src, unsigned int srclen)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);

	arm_poly1305_do_update(dctx, src, srclen, false);
	return 0;
}

/* shash .update for "poly1305-neon"; NEON only pays off above ~128 bytes. */
static int __maybe_unused arm_poly1305_update_neon(struct shash_desc *desc,
						   const u8 *src,
						   unsigned int srclen)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
	bool do_neon = crypto_simd_usable() && srclen > 128;

	if (static_branch_likely(&have_neon) && do_neon)
		kernel_neon_begin();
	arm_poly1305_do_update(dctx, src, srclen, do_neon);
	if (static_branch_likely(&have_neon) && do_neon)
		kernel_neon_end();
	return 0;
}

/*
 * Library API update.  Unlike the shash path the key is already set, so the
 * buffered block goes straight to the scalar asm; bulk data is chunked into
 * 4 KiB kernel_neon_begin/end sections to bound preemption latency.
 */
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
			  unsigned int nbytes)
{
	bool do_neon = IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
		       crypto_simd_usable();

	if (unlikely(dctx->buflen)) {
		u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);

		memcpy(dctx->buf + dctx->buflen, src, bytes);
		src += bytes;
		nbytes -= bytes;
		dctx->buflen += bytes;

		if (dctx->buflen == POLY1305_BLOCK_SIZE) {
			poly1305_blocks_arm(&dctx->h, dctx->buf,
					    POLY1305_BLOCK_SIZE, 1);
			dctx->buflen = 0;
		}
	}

	if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
		unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);

		if (static_branch_likely(&have_neon) && do_neon) {
			do {
				unsigned int todo = min_t(unsigned int, len, SZ_4K);

				kernel_neon_begin();
				poly1305_blocks_neon(&dctx->h, src, todo, 1);
				kernel_neon_end();

				len -= todo;
				src += todo;
			} while (len);
		} else {
			poly1305_blocks_arm(&dctx->h, src, len, 1);
			src += len;
		}
		nbytes %= POLY1305_BLOCK_SIZE;
	}

	if (unlikely(nbytes)) {
		dctx->buflen = nbytes;
		memcpy(dctx->buf, src, nbytes);
	}
}
EXPORT_SYMBOL(poly1305_update_arch);

/*
 * Library API final: pad and hash the remaining partial block (hibit = 0 for
 * the padded final block), emit the tag, and wipe the context.
 */
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
{
	if (unlikely(dctx->buflen)) {
		dctx->buf[dctx->buflen++] = 1;
		memset(dctx->buf + dctx->buflen, 0,
		       POLY1305_BLOCK_SIZE - dctx->buflen);
		poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
	}

	poly1305_emit_arm(&dctx->h, dst, dctx->s);
	/* Zero the whole descriptor so no key/state material lingers. */
	*dctx = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL(poly1305_final_arch);

static int arm_poly1305_final(struct shash_desc *desc, u8 *dst)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);

	/* The key (r and s) must have arrived via the data stream first. */
	if (unlikely(!dctx->sset))
		return -ENOKEY;

	poly1305_final_arch(dctx, dst);
	return 0;
}

static struct shash_alg arm_poly1305_algs[] = {{
	.init			= arm_poly1305_init,
	.update			= arm_poly1305_update,
	.final			= arm_poly1305_final,
	.digestsize		= POLY1305_DIGEST_SIZE,
	.descsize		= sizeof(struct poly1305_desc_ctx),

	.base.cra_name		= "poly1305",
	.base.cra_driver_name	= "poly1305-arm",
	.base.cra_priority	= 150,
	.base.cra_blocksize	= POLY1305_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
#ifdef CONFIG_KERNEL_MODE_NEON
}, {
	.init			= arm_poly1305_init,
	.update			= arm_poly1305_update_neon,
	.final			= arm_poly1305_final,
	.digestsize		= POLY1305_DIGEST_SIZE,
	.descsize		= sizeof(struct poly1305_desc_ctx),

	.base.cra_name		= "poly1305",
	.base.cra_driver_name	= "poly1305-neon",
	.base.cra_priority	= 200,
	.base.cra_blocksize	= POLY1305_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
#endif
}};

static int __init arm_poly1305_mod_init(void)
{
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
	    (elf_hwcap & HWCAP_NEON))
		static_branch_enable(&have_neon);
	else if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
		/* register only the first entry */
		return crypto_register_shash(&arm_poly1305_algs[0]);

	return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
		crypto_register_shashes(arm_poly1305_algs,
					ARRAY_SIZE(arm_poly1305_algs)) : 0;
}

static void __exit arm_poly1305_mod_exit(void)
{
	if (!IS_REACHABLE(CONFIG_CRYPTO_HASH))
		return;
	if (!static_branch_likely(&have_neon)) {
		/* Mirror init: only the scalar entry was registered. */
		crypto_unregister_shash(&arm_poly1305_algs[0]);
		return;
	}
	crypto_unregister_shashes(arm_poly1305_algs,
				  ARRAY_SIZE(arm_poly1305_algs));
}

module_init(arm_poly1305_mod_init);
module_exit(arm_poly1305_mod_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-arm");
MODULE_ALIAS_CRYPTO("poly1305-neon");
linux-master
arch/arm/crypto/poly1305-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later #include <crypto/internal/blake2s.h> #include <linux/module.h> /* defined in blake2s-core.S */ EXPORT_SYMBOL(blake2s_compress);
linux-master
arch/arm/crypto/blake2s-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BLAKE2b digest algorithm, NEON accelerated
 *
 * Copyright 2020 Google LLC
 */

#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <asm/neon.h>
#include <asm/simd.h>

/* Assembly routine in blake2b-neon-core.S */
asmlinkage void blake2b_compress_neon(struct blake2b_state *state,
				      const u8 *block, size_t nblocks,
				      u32 inc);

/*
 * Compress 'nblocks' message blocks, using NEON when the context allows
 * it and falling back to the generic C implementation otherwise.
 *
 * The NEON work is chunked to at most SZ_4K bytes per
 * kernel_neon_begin()/kernel_neon_end() section so that preemption is
 * not disabled for overly long stretches.
 */
static void blake2b_compress_arch(struct blake2b_state *state,
				  const u8 *block, size_t nblocks, u32 inc)
{
	if (!crypto_simd_usable()) {
		blake2b_compress_generic(state, block, nblocks, inc);
		return;
	}

	do {
		const size_t blocks = min_t(size_t, nblocks,
					    SZ_4K / BLAKE2B_BLOCK_SIZE);

		kernel_neon_begin();
		blake2b_compress_neon(state, block, blocks, inc);
		kernel_neon_end();

		nblocks -= blocks;
		block += blocks * BLAKE2B_BLOCK_SIZE;
	} while (nblocks);
}

/* shash .update hook: delegate to the generic helper with our compressor */
static int crypto_blake2b_update_neon(struct shash_desc *desc,
				      const u8 *in, unsigned int inlen)
{
	return crypto_blake2b_update(desc, in, inlen, blake2b_compress_arch);
}

/* shash .final hook: delegate to the generic helper with our compressor */
static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
{
	return crypto_blake2b_final(desc, out, blake2b_compress_arch);
}

/* Template for the four digest-size variants registered below */
#define BLAKE2B_ALG(name, driver_name, digest_size)			\
	{								\
		.base.cra_name		= name,				\
		.base.cra_driver_name	= driver_name,			\
		.base.cra_priority	= 200,				\
		.base.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,	\
		.base.cra_blocksize	= BLAKE2B_BLOCK_SIZE,		\
		.base.cra_ctxsize	= sizeof(struct blake2b_tfm_ctx), \
		.base.cra_module	= THIS_MODULE,			\
		.digestsize		= digest_size,			\
		.setkey			= crypto_blake2b_setkey,	\
		.init			= crypto_blake2b_init,		\
		.update			= crypto_blake2b_update_neon,	\
		.final			= crypto_blake2b_final_neon,	\
		.descsize		= sizeof(struct blake2b_state),	\
	}

static struct shash_alg blake2b_neon_algs[] = {
	BLAKE2B_ALG("blake2b-160", "blake2b-160-neon", BLAKE2B_160_HASH_SIZE),
	BLAKE2B_ALG("blake2b-256", "blake2b-256-neon", BLAKE2B_256_HASH_SIZE),
	BLAKE2B_ALG("blake2b-384", "blake2b-384-neon", BLAKE2B_384_HASH_SIZE),
	BLAKE2B_ALG("blake2b-512", "blake2b-512-neon", BLAKE2B_512_HASH_SIZE),
};

/* Register the NEON algorithms only on CPUs that actually have NEON */
static int __init blake2b_neon_mod_init(void)
{
	if (!(elf_hwcap & HWCAP_NEON))
		return -ENODEV;

	return crypto_register_shashes(blake2b_neon_algs,
				       ARRAY_SIZE(blake2b_neon_algs));
}

static void __exit blake2b_neon_mod_exit(void)
{
	crypto_unregister_shashes(blake2b_neon_algs,
				  ARRAY_SIZE(blake2b_neon_algs));
}

module_init(blake2b_neon_mod_init);
module_exit(blake2b_neon_mod_exit);

MODULE_DESCRIPTION("BLAKE2b digest algorithm, NEON accelerated");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <[email protected]>");
MODULE_ALIAS_CRYPTO("blake2b-160");
MODULE_ALIAS_CRYPTO("blake2b-160-neon");
MODULE_ALIAS_CRYPTO("blake2b-256");
MODULE_ALIAS_CRYPTO("blake2b-256-neon");
MODULE_ALIAS_CRYPTO("blake2b-384");
MODULE_ALIAS_CRYPTO("blake2b-384-neon");
MODULE_ALIAS_CRYPTO("blake2b-512");
MODULE_ALIAS_CRYPTO("blake2b-512-neon");
linux-master
arch/arm/crypto/blake2b-neon-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2012 Linaro Ltd. */ #include <linux/clk.h> #include <linux/clk/mxs.h> #include <linux/clkdev.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/reboot.h> #include <linux/micrel_phy.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/phy.h> #include <linux/sys_soc.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <asm/system_info.h> #include <asm/system_misc.h> #include "pm.h" /* MXS DIGCTL SAIF CLKMUX */ #define MXS_DIGCTL_SAIF_CLKMUX_DIRECT 0x0 #define MXS_DIGCTL_SAIF_CLKMUX_CROSSINPUT 0x1 #define MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0 0x2 #define MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR1 0x3 #define HW_DIGCTL_CHIPID 0x310 #define HW_DIGCTL_CHIPID_MASK (0xffff << 16) #define HW_DIGCTL_REV_MASK 0xff #define HW_DIGCTL_CHIPID_MX23 (0x3780 << 16) #define HW_DIGCTL_CHIPID_MX28 (0x2800 << 16) #define MXS_CHIP_REVISION_1_0 0x10 #define MXS_CHIP_REVISION_1_1 0x11 #define MXS_CHIP_REVISION_1_2 0x12 #define MXS_CHIP_REVISION_1_3 0x13 #define MXS_CHIP_REVISION_1_4 0x14 #define MXS_CHIP_REV_UNKNOWN 0xff #define MXS_GPIO_NR(bank, nr) ((bank) * 32 + (nr)) #define MXS_SET_ADDR 0x4 #define MXS_CLR_ADDR 0x8 #define MXS_TOG_ADDR 0xc #define HW_OCOTP_OPS2 19 /* offset 0x150 */ #define HW_OCOTP_OPS3 20 /* offset 0x160 */ static u32 chipid; static u32 socid; static void __iomem *reset_addr; static inline void __mxs_setl(u32 mask, void __iomem *reg) { __raw_writel(mask, reg + MXS_SET_ADDR); } static inline void __mxs_clrl(u32 mask, void __iomem *reg) { __raw_writel(mask, reg + MXS_CLR_ADDR); } static inline void __mxs_togl(u32 mask, void __iomem *reg) { __raw_writel(mask, reg + MXS_TOG_ADDR); } #define OCOTP_WORD_OFFSET 0x20 #define OCOTP_WORD_COUNT 0x20 #define BM_OCOTP_CTRL_BUSY (1 << 8) #define BM_OCOTP_CTRL_ERROR (1 << 9) #define BM_OCOTP_CTRL_RD_BANK_OPEN (1 << 
12) static DEFINE_MUTEX(ocotp_mutex); static u32 ocotp_words[OCOTP_WORD_COUNT]; static const u32 *mxs_get_ocotp(void) { struct device_node *np; void __iomem *ocotp_base; int timeout = 0x400; size_t i; static int once; if (once) return ocotp_words; np = of_find_compatible_node(NULL, NULL, "fsl,ocotp"); ocotp_base = of_iomap(np, 0); WARN_ON(!ocotp_base); mutex_lock(&ocotp_mutex); /* * clk_enable(hbus_clk) for ocotp can be skipped * as it must be on when system is running. */ /* try to clear ERROR bit */ __mxs_clrl(BM_OCOTP_CTRL_ERROR, ocotp_base); /* check both BUSY and ERROR cleared */ while ((__raw_readl(ocotp_base) & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR)) && --timeout) cpu_relax(); if (unlikely(!timeout)) goto error_unlock; /* open OCOTP banks for read */ __mxs_setl(BM_OCOTP_CTRL_RD_BANK_OPEN, ocotp_base); /* approximately wait 32 hclk cycles */ udelay(1); /* poll BUSY bit becoming cleared */ timeout = 0x400; while ((__raw_readl(ocotp_base) & BM_OCOTP_CTRL_BUSY) && --timeout) cpu_relax(); if (unlikely(!timeout)) goto error_unlock; for (i = 0; i < OCOTP_WORD_COUNT; i++) ocotp_words[i] = __raw_readl(ocotp_base + OCOTP_WORD_OFFSET + i * 0x10); /* close banks for power saving */ __mxs_clrl(BM_OCOTP_CTRL_RD_BANK_OPEN, ocotp_base); once = 1; mutex_unlock(&ocotp_mutex); return ocotp_words; error_unlock: mutex_unlock(&ocotp_mutex); pr_err("%s: timeout in reading OCOTP\n", __func__); return NULL; } enum mac_oui { OUI_FSL, OUI_DENX, OUI_CRYSTALFONTZ, OUI_I2SE, OUI_ARMADEUS, }; static void __init update_fec_mac_prop(enum mac_oui oui) { struct device_node *np, *from = NULL; struct property *newmac; const u32 *ocotp = mxs_get_ocotp(); u8 *macaddr; u32 val; int i; for (i = 0; i < 2; i++) { np = of_find_compatible_node(from, NULL, "fsl,imx28-fec"); if (!np) return; from = np; if (of_property_present(np, "local-mac-address")) continue; newmac = kzalloc(sizeof(*newmac) + 6, GFP_KERNEL); if (!newmac) return; newmac->value = newmac + 1; newmac->length = 6; newmac->name = 
kstrdup("local-mac-address", GFP_KERNEL); if (!newmac->name) { kfree(newmac); return; } /* * OCOTP only stores the last 4 octets for each mac address, * so hard-code OUI here. */ macaddr = newmac->value; switch (oui) { case OUI_FSL: macaddr[0] = 0x00; macaddr[1] = 0x04; macaddr[2] = 0x9f; break; case OUI_DENX: macaddr[0] = 0xc0; macaddr[1] = 0xe5; macaddr[2] = 0x4e; break; case OUI_CRYSTALFONTZ: macaddr[0] = 0x58; macaddr[1] = 0xb9; macaddr[2] = 0xe1; break; case OUI_I2SE: macaddr[0] = 0x00; macaddr[1] = 0x01; macaddr[2] = 0x87; break; case OUI_ARMADEUS: macaddr[0] = 0x00; macaddr[1] = 0x1e; macaddr[2] = 0xac; break; } val = ocotp[i]; macaddr[3] = (val >> 16) & 0xff; macaddr[4] = (val >> 8) & 0xff; macaddr[5] = (val >> 0) & 0xff; of_update_property(np, newmac); } } static inline void enable_clk_enet_out(void) { struct clk *clk = clk_get_sys("enet_out", NULL); if (!IS_ERR(clk)) clk_prepare_enable(clk); } static void __init imx28_evk_init(void) { update_fec_mac_prop(OUI_FSL); mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); } static void __init imx28_apf28_init(void) { update_fec_mac_prop(OUI_ARMADEUS); } static int apx4devkit_phy_fixup(struct phy_device *phy) { phy->dev_flags |= MICREL_PHY_50MHZ_CLK; return 0; } static void __init apx4devkit_init(void) { enable_clk_enet_out(); if (IS_BUILTIN(CONFIG_PHYLIB)) phy_register_fixup_for_uid(PHY_ID_KSZ8051, MICREL_PHY_ID_MASK, apx4devkit_phy_fixup); } static void __init crystalfontz_init(void) { update_fec_mac_prop(OUI_CRYSTALFONTZ); } static void __init duckbill_init(void) { update_fec_mac_prop(OUI_I2SE); } static void __init m28cu3_init(void) { update_fec_mac_prop(OUI_DENX); } static const char __init *mxs_get_soc_id(void) { struct device_node *np; void __iomem *digctl_base; np = of_find_compatible_node(NULL, NULL, "fsl,imx23-digctl"); digctl_base = of_iomap(np, 0); WARN_ON(!digctl_base); chipid = readl(digctl_base + HW_DIGCTL_CHIPID); socid = chipid & HW_DIGCTL_CHIPID_MASK; iounmap(digctl_base); of_node_put(np); 
switch (socid) { case HW_DIGCTL_CHIPID_MX23: return "i.MX23"; case HW_DIGCTL_CHIPID_MX28: return "i.MX28"; default: return "Unknown"; } } static u32 __init mxs_get_cpu_rev(void) { u32 rev = chipid & HW_DIGCTL_REV_MASK; switch (socid) { case HW_DIGCTL_CHIPID_MX23: switch (rev) { case 0x0: return MXS_CHIP_REVISION_1_0; case 0x1: return MXS_CHIP_REVISION_1_1; case 0x2: return MXS_CHIP_REVISION_1_2; case 0x3: return MXS_CHIP_REVISION_1_3; case 0x4: return MXS_CHIP_REVISION_1_4; default: return MXS_CHIP_REV_UNKNOWN; } case HW_DIGCTL_CHIPID_MX28: switch (rev) { case 0x0: return MXS_CHIP_REVISION_1_1; case 0x1: return MXS_CHIP_REVISION_1_2; default: return MXS_CHIP_REV_UNKNOWN; } default: return MXS_CHIP_REV_UNKNOWN; } } static const char __init *mxs_get_revision(void) { u32 rev = mxs_get_cpu_rev(); if (rev != MXS_CHIP_REV_UNKNOWN) return kasprintf(GFP_KERNEL, "%d.%d", (rev >> 4) & 0xf, rev & 0xf); else return kasprintf(GFP_KERNEL, "%s", "Unknown"); } #define MX23_CLKCTRL_RESET_OFFSET 0x120 #define MX28_CLKCTRL_RESET_OFFSET 0x1e0 static int __init mxs_restart_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,clkctrl"); reset_addr = of_iomap(np, 0); if (!reset_addr) return -ENODEV; if (of_device_is_compatible(np, "fsl,imx23-clkctrl")) reset_addr += MX23_CLKCTRL_RESET_OFFSET; else reset_addr += MX28_CLKCTRL_RESET_OFFSET; of_node_put(np); return 0; } static void __init eukrea_mbmx283lc_init(void) { mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); } static void __init mxs_machine_init(void) { struct device_node *root; struct device *parent; struct soc_device *soc_dev; struct soc_device_attribute *soc_dev_attr; u64 soc_uid = 0; const u32 *ocotp = mxs_get_ocotp(); int ret; soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return; root = of_find_node_by_path("/"); ret = of_property_read_string(root, "model", &soc_dev_attr->machine); if (ret) { kfree(soc_dev_attr); return; } soc_dev_attr->family = "Freescale MXS 
Family"; soc_dev_attr->soc_id = mxs_get_soc_id(); soc_dev_attr->revision = mxs_get_revision(); if (socid == HW_DIGCTL_CHIPID_MX23) { soc_uid = system_serial_low = ocotp[HW_OCOTP_OPS3]; } else if (socid == HW_DIGCTL_CHIPID_MX28) { soc_uid = system_serial_high = ocotp[HW_OCOTP_OPS2]; soc_uid <<= 32; system_serial_low = ocotp[HW_OCOTP_OPS3]; soc_uid |= system_serial_low; } if (soc_uid) soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid); soc_dev = soc_device_register(soc_dev_attr); if (IS_ERR(soc_dev)) { kfree(soc_dev_attr->serial_number); kfree(soc_dev_attr->revision); kfree(soc_dev_attr); return; } parent = soc_device_to_device(soc_dev); if (of_machine_is_compatible("fsl,imx28-evk")) imx28_evk_init(); if (of_machine_is_compatible("armadeus,imx28-apf28")) imx28_apf28_init(); else if (of_machine_is_compatible("bluegiga,apx4devkit")) apx4devkit_init(); else if (of_machine_is_compatible("crystalfontz,cfa10036")) crystalfontz_init(); else if (of_machine_is_compatible("eukrea,mbmx283lc")) eukrea_mbmx283lc_init(); else if (of_machine_is_compatible("i2se,duckbill") || of_machine_is_compatible("i2se,duckbill-2")) duckbill_init(); else if (of_machine_is_compatible("msr,m28cu3")) m28cu3_init(); of_platform_default_populate(NULL, NULL, parent); mxs_restart_init(); } #define MXS_CLKCTRL_RESET_CHIP (1 << 1) /* * Reset the system. It is called by machine_restart(). 
*/ static void mxs_restart(enum reboot_mode mode, const char *cmd) { if (reset_addr) { /* reset the chip */ __mxs_setl(MXS_CLKCTRL_RESET_CHIP, reset_addr); pr_err("Failed to assert the chip reset\n"); /* Delay to allow the serial port to show the message */ mdelay(50); } /* We'll take a jump through zero as a poor second */ soft_restart(0); } static const char *const mxs_dt_compat[] __initconst = { "fsl,imx28", "fsl,imx23", NULL, }; DT_MACHINE_START(MXS, "Freescale MXS (Device Tree)") .init_machine = mxs_machine_init, .init_late = mxs_pm_init, .dt_compat = mxs_dt_compat, .restart = mxs_restart, MACHINE_END
linux-master
arch/arm/mach-mxs/mach-mxs.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2010 Freescale Semiconductor, Inc. */ #include <linux/kernel.h> #include <linux/suspend.h> #include <linux/io.h> #include "pm.h" static int mxs_suspend_enter(suspend_state_t state) { switch (state) { case PM_SUSPEND_MEM: cpu_do_idle(); break; default: return -EINVAL; } return 0; } static const struct platform_suspend_ops mxs_suspend_ops = { .enter = mxs_suspend_enter, .valid = suspend_valid_only_mem, }; void __init mxs_pm_init(void) { suspend_set_ops(&mxs_suspend_ops); }
linux-master
arch/arm/mach-mxs/pm.c
// SPDX-License-Identifier: GPL-2.0-only /* * * OMAP SRAM detection and management * * Copyright (C) 2005 Nokia Corporation * Written by Tony Lindgren <[email protected]> * * Copyright (C) 2009-2012 Texas Instruments * Added OMAP4/5 support - Santosh Shilimkar <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/set_memory.h> #include <asm/fncpy.h> #include <asm/tlb.h> #include <asm/cacheflush.h> #include <asm/mach/map.h> #include "soc.h" #include "iomap.h" #include "prm2xxx_3xxx.h" #include "sdrc.h" #include "sram.h" #define OMAP2_SRAM_PUB_PA (OMAP2_SRAM_PA + 0xf800) #define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000) #define SRAM_BOOTLOADER_SZ 0x00 #define OMAP24XX_VA_REQINFOPERM0 OMAP2_L3_IO_ADDRESS(0x68005048) #define OMAP24XX_VA_READPERM0 OMAP2_L3_IO_ADDRESS(0x68005050) #define OMAP24XX_VA_WRITEPERM0 OMAP2_L3_IO_ADDRESS(0x68005058) #define OMAP34XX_VA_REQINFOPERM0 OMAP2_L3_IO_ADDRESS(0x68012848) #define OMAP34XX_VA_READPERM0 OMAP2_L3_IO_ADDRESS(0x68012850) #define OMAP34XX_VA_WRITEPERM0 OMAP2_L3_IO_ADDRESS(0x68012858) #define OMAP34XX_VA_ADDR_MATCH2 OMAP2_L3_IO_ADDRESS(0x68012880) #define OMAP34XX_VA_SMS_RG_ATT0 OMAP2_L3_IO_ADDRESS(0x6C000048) #define GP_DEVICE 0x300 #define ROUND_DOWN(value, boundary) ((value) & (~((boundary) - 1))) static unsigned long omap_sram_start; static unsigned long omap_sram_size; static void __iomem *omap_sram_base; static unsigned long omap_sram_skip; static void __iomem *omap_sram_ceil; /* * Memory allocator for SRAM: calculates the new ceiling address * for pushing a function using the fncpy API. * * Note that fncpy requires the returned address to be aligned * to an 8-byte boundary. 
*/ static void *omap_sram_push_address(unsigned long size) { unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; available = omap_sram_ceil - (omap_sram_base + omap_sram_skip); if (size > available) { pr_err("Not enough space in SRAM\n"); return NULL; } new_ceil -= size; new_ceil = ROUND_DOWN(new_ceil, FNCPY_ALIGN); omap_sram_ceil = IOMEM(new_ceil); return (void __force *)omap_sram_ceil; } void *omap_sram_push(void *funcp, unsigned long size) { void *sram; unsigned long base; int pages; void *dst = NULL; sram = omap_sram_push_address(size); if (!sram) return NULL; base = (unsigned long)sram & PAGE_MASK; pages = PAGE_ALIGN(size) / PAGE_SIZE; set_memory_rw(base, pages); dst = fncpy(sram, funcp, size); set_memory_rox(base, pages); return dst; } /* * The SRAM context is lost during off-idle and stack * needs to be reset. */ static void omap_sram_reset(void) { omap_sram_ceil = omap_sram_base + omap_sram_size; } /* * Depending on the target RAMFS firewall setup, the public usable amount of * SRAM varies. The default accessible size for all device types is 2k. A GP * device allows ARM11 but not other initiators for full size. This * functionality seems ok until some nice security API happens. 
*/ static int is_sram_locked(void) { if (omap_type() == OMAP2_DEVICE_TYPE_GP) { /* RAMFW: R/W access to all initiators for all qualifier sets */ if (cpu_is_omap242x()) { writel_relaxed(0xFF, OMAP24XX_VA_REQINFOPERM0); /* all q-vects */ writel_relaxed(0xCFDE, OMAP24XX_VA_READPERM0); /* all i-read */ writel_relaxed(0xCFDE, OMAP24XX_VA_WRITEPERM0); /* all i-write */ } if (cpu_is_omap34xx()) { writel_relaxed(0xFFFF, OMAP34XX_VA_REQINFOPERM0); /* all q-vects */ writel_relaxed(0xFFFF, OMAP34XX_VA_READPERM0); /* all i-read */ writel_relaxed(0xFFFF, OMAP34XX_VA_WRITEPERM0); /* all i-write */ writel_relaxed(0x0, OMAP34XX_VA_ADDR_MATCH2); writel_relaxed(0xFFFFFFFF, OMAP34XX_VA_SMS_RG_ATT0); } return 0; } else return 1; /* assume locked with no PPA or security driver */ } /* * The amount of SRAM depends on the core type. * Note that we cannot try to test for SRAM here because writes * to secure SRAM will hang the system. Also the SRAM is not * yet mapped at this point. */ static void __init omap_detect_sram(void) { omap_sram_skip = SRAM_BOOTLOADER_SZ; if (is_sram_locked()) { if (cpu_is_omap34xx()) { omap_sram_start = OMAP3_SRAM_PUB_PA; if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) || (omap_type() == OMAP2_DEVICE_TYPE_SEC)) { omap_sram_size = 0x7000; /* 28K */ omap_sram_skip += SZ_16K; } else { omap_sram_size = 0x8000; /* 32K */ } } else { omap_sram_start = OMAP2_SRAM_PUB_PA; omap_sram_size = 0x800; /* 2K */ } } else { if (cpu_is_omap34xx()) { omap_sram_start = OMAP3_SRAM_PA; omap_sram_size = 0x10000; /* 64K */ } else { omap_sram_start = OMAP2_SRAM_PA; if (cpu_is_omap242x()) omap_sram_size = 0xa0000; /* 640K */ else if (cpu_is_omap243x()) omap_sram_size = 0x10000; /* 64K */ } } } /* * Note that we cannot use ioremap for SRAM, as clock init needs SRAM early. 
*/ static void __init omap2_map_sram(void) { unsigned long base; int pages; int cached = 1; if (cpu_is_omap34xx()) { /* * SRAM must be marked as non-cached on OMAP3 since the * CORE DPLL M2 divider change code (in SRAM) runs with the * SDRAM controller disabled, and if it is marked cached, * the ARM may attempt to write cache lines back to SDRAM * which will cause the system to hang. */ cached = 0; } if (omap_sram_size == 0) return; omap_sram_start = ROUND_DOWN(omap_sram_start, PAGE_SIZE); omap_sram_base = __arm_ioremap_exec(omap_sram_start, omap_sram_size, cached); if (!omap_sram_base) { pr_err("SRAM: Could not map\n"); return; } omap_sram_reset(); /* * Looks like we need to preserve some bootloader code at the * beginning of SRAM for jumping to flash for reboot to work... */ memset_io(omap_sram_base + omap_sram_skip, 0, omap_sram_size - omap_sram_skip); base = (unsigned long)omap_sram_base; pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE; set_memory_rox(base, pages); } static void (*_omap2_sram_ddr_init)(u32 *slow_dll_ctrl, u32 fast_dll_ctrl, u32 base_cs, u32 force_unlock); void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl, u32 base_cs, u32 force_unlock) { BUG_ON(!_omap2_sram_ddr_init); _omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl, base_cs, force_unlock); } static void (*_omap2_sram_reprogram_sdrc)(u32 perf_level, u32 dll_val, u32 mem_type); void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type) { BUG_ON(!_omap2_sram_reprogram_sdrc); _omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type); } static u32 (*_omap2_set_prcm)(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass); u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass) { BUG_ON(!_omap2_set_prcm); return _omap2_set_prcm(dpll_ctrl_val, sdrc_rfr_val, bypass); } #ifdef CONFIG_SOC_OMAP2420 static int __init omap242x_sram_init(void) { _omap2_sram_ddr_init = omap_sram_push(omap242x_sram_ddr_init, omap242x_sram_ddr_init_sz); _omap2_sram_reprogram_sdrc = 
omap_sram_push(omap242x_sram_reprogram_sdrc, omap242x_sram_reprogram_sdrc_sz); _omap2_set_prcm = omap_sram_push(omap242x_sram_set_prcm, omap242x_sram_set_prcm_sz); return 0; } #else static inline int omap242x_sram_init(void) { return 0; } #endif #ifdef CONFIG_SOC_OMAP2430 static int __init omap243x_sram_init(void) { _omap2_sram_ddr_init = omap_sram_push(omap243x_sram_ddr_init, omap243x_sram_ddr_init_sz); _omap2_sram_reprogram_sdrc = omap_sram_push(omap243x_sram_reprogram_sdrc, omap243x_sram_reprogram_sdrc_sz); _omap2_set_prcm = omap_sram_push(omap243x_sram_set_prcm, omap243x_sram_set_prcm_sz); return 0; } #else static inline int omap243x_sram_init(void) { return 0; } #endif #ifdef CONFIG_ARCH_OMAP3 void omap3_sram_restore_context(void) { omap_sram_reset(); omap_push_sram_idle(); } static inline int omap34xx_sram_init(void) { omap3_sram_restore_context(); return 0; } #else static inline int omap34xx_sram_init(void) { return 0; } #endif /* CONFIG_ARCH_OMAP3 */ int __init omap_sram_init(void) { omap_detect_sram(); omap2_map_sram(); if (cpu_is_omap242x()) omap242x_sram_init(); else if (cpu_is_omap2430()) omap243x_sram_init(); else if (cpu_is_omap34xx()) omap34xx_sram_init(); return 0; }
linux-master
arch/arm/mach-omap2/sram.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP4 OPP table definitions. * * Copyright (C) 2010-2012 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon * Kevin Hilman * Thara Gopinath * Copyright (C) 2010-2011 Nokia Corporation. * Eduardo Valentin * Paul Walmsley */ #include <linux/module.h> #include "soc.h" #include "control.h" #include "omap_opp_data.h" #include "pm.h" /* * Structures containing OMAP4430 voltage supported and various * voltage dependent data for each VDD. */ #define OMAP4430_VDD_MPU_OPP50_UV 1025000 #define OMAP4430_VDD_MPU_OPP100_UV 1200000 #define OMAP4430_VDD_MPU_OPPTURBO_UV 1325000 #define OMAP4430_VDD_MPU_OPPNITRO_UV 1388000 #define OMAP4430_VDD_MPU_OPPNITROSB_UV 1398000 struct omap_volt_data omap443x_vdd_mpu_volt_data[] = { VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP50_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP50, 0xf4, 0x0c), VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP100_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP100, 0xf9, 0x16), VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO, 0xfa, 0x23), VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPNITRO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO, 0xfa, 0x27), VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPNITROSB_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPNITROSB, 0xfa, 0x27), VOLT_DATA_DEFINE(0, 0, 0, 0), }; #define OMAP4430_VDD_IVA_OPP50_UV 950000 #define OMAP4430_VDD_IVA_OPP100_UV 1114000 #define OMAP4430_VDD_IVA_OPPTURBO_UV 1291000 struct omap_volt_data omap443x_vdd_iva_volt_data[] = { VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP50_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP50, 0xf4, 0x0c), VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP100_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP100, 0xf9, 0x16), VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO, 0xfa, 0x23), VOLT_DATA_DEFINE(0, 0, 0, 0), }; #define OMAP4430_VDD_CORE_OPP50_UV 962000 #define OMAP4430_VDD_CORE_OPP100_UV 1127000 struct omap_volt_data omap443x_vdd_core_volt_data[] = { VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP50_UV, 
OMAP44XX_CONTROL_FUSE_CORE_OPP50, 0xf4, 0x0c), VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP100_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100, 0xf9, 0x16), VOLT_DATA_DEFINE(0, 0, 0, 0), }; #define OMAP4460_VDD_MPU_OPP50_UV 1025000 #define OMAP4460_VDD_MPU_OPP100_UV 1200000 #define OMAP4460_VDD_MPU_OPPTURBO_UV 1313000 #define OMAP4460_VDD_MPU_OPPNITRO_UV 1375000 struct omap_volt_data omap446x_vdd_mpu_volt_data[] = { VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPP50_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP50, 0xf4, 0x0c), VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPP100_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP100, 0xf9, 0x16), VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO, 0xfa, 0x23), VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPPNITRO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO, 0xfa, 0x27), VOLT_DATA_DEFINE(0, 0, 0, 0), }; #define OMAP4460_VDD_IVA_OPP50_UV 1025000 #define OMAP4460_VDD_IVA_OPP100_UV 1200000 #define OMAP4460_VDD_IVA_OPPTURBO_UV 1313000 #define OMAP4460_VDD_IVA_OPPNITRO_UV 1375000 struct omap_volt_data omap446x_vdd_iva_volt_data[] = { VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPP50_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP50, 0xf4, 0x0c), VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPP100_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP100, 0xf9, 0x16), VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO, 0xfa, 0x23), VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPPNITRO_UV, OMAP44XX_CONTROL_FUSE_IVA_OPPNITRO, 0xfa, 0x23), VOLT_DATA_DEFINE(0, 0, 0, 0), }; #define OMAP4460_VDD_CORE_OPP50_UV 1025000 #define OMAP4460_VDD_CORE_OPP100_UV 1200000 #define OMAP4460_VDD_CORE_OPP100_OV_UV 1250000 struct omap_volt_data omap446x_vdd_core_volt_data[] = { VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP50_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP50, 0xf4, 0x0c), VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP100_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100, 0xf9, 0x16), VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP100_OV_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100OV, 0xf9, 0x16), VOLT_DATA_DEFINE(0, 0, 0, 0), };
linux-master
arch/arm/mach-omap2/opp4xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * SDRAM timing related functions for OMAP2xxx * * Copyright (C) 2005, 2008 Texas Instruments Inc. * Copyright (C) 2005, 2008 Nokia Corporation * * Tony Lindgren <[email protected]> * Paul Walmsley * Richard Woodruff <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include "soc.h" #include "iomap.h" #include "common.h" #include "prm2xxx.h" #include "clock.h" #include "sdrc.h" #include "sram.h" /* Memory timing, DLL mode flags */ #define M_DDR 1 #define M_LOCK_CTRL (1 << 2) #define M_UNLOCK 0 #define M_LOCK 1 static struct memory_timings mem_timings; static u32 curr_perf_level = CORE_CLK_SRC_DPLL_X2; static u32 omap2xxx_sdrc_get_slow_dll_ctrl(void) { return mem_timings.slow_dll_ctrl; } static u32 omap2xxx_sdrc_get_fast_dll_ctrl(void) { return mem_timings.fast_dll_ctrl; } static u32 omap2xxx_sdrc_get_type(void) { return mem_timings.m_type; } /* * Check the DLL lock state, and return tue if running in unlock mode. * This is needed to compensate for the shifted DLL value in unlock mode. */ u32 omap2xxx_sdrc_dll_is_unlocked(void) { /* dlla and dllb are a set */ u32 dll_state = sdrc_read_reg(SDRC_DLLA_CTRL); if ((dll_state & (1 << 2)) == (1 << 2)) return 1; else return 0; } /* * 'level' is the value to store to CM_CLKSEL2_PLL.CORE_CLK_SRC. 
* Practical values are CORE_CLK_SRC_DPLL (for CORE_CLK = DPLL_CLK) or * CORE_CLK_SRC_DPLL_X2 (for CORE_CLK = * DPLL_CLK * 2) * * Used by the clock framework during CORE DPLL changes */ u32 omap2xxx_sdrc_reprogram(u32 level, u32 force) { u32 dll_ctrl, m_type; u32 prev = curr_perf_level; unsigned long flags; if ((curr_perf_level == level) && !force) return prev; if (level == CORE_CLK_SRC_DPLL) dll_ctrl = omap2xxx_sdrc_get_slow_dll_ctrl(); else if (level == CORE_CLK_SRC_DPLL_X2) dll_ctrl = omap2xxx_sdrc_get_fast_dll_ctrl(); else return prev; m_type = omap2xxx_sdrc_get_type(); local_irq_save(flags); /* * XXX These calls should be abstracted out through a * prm2xxx.c function */ if (cpu_is_omap2420()) writel_relaxed(0xffff, OMAP2420_PRCM_VOLTSETUP); else writel_relaxed(0xffff, OMAP2430_PRCM_VOLTSETUP); omap2_sram_reprogram_sdrc(level, dll_ctrl, m_type); curr_perf_level = level; local_irq_restore(flags); return prev; } /* Used by the clock framework during CORE DPLL changes */ void omap2xxx_sdrc_init_params(u32 force_lock_to_unlock_mode) { unsigned long dll_cnt; u32 fast_dll = 0; /* DDR = 1, SDR = 0 */ mem_timings.m_type = !((sdrc_read_reg(SDRC_MR_0) & 0x3) == 0x1); /* 2422 es2.05 and beyond has a single SIP DDR instead of 2 like others. * In the case of 2422, its ok to use CS1 instead of CS0. 
*/ if (cpu_is_omap2422()) mem_timings.base_cs = 1; else mem_timings.base_cs = 0; if (mem_timings.m_type != M_DDR) return; /* With DDR we need to determine the low frequency DLL value */ if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL)) mem_timings.dll_mode = M_UNLOCK; else mem_timings.dll_mode = M_LOCK; if (mem_timings.base_cs == 0) { fast_dll = sdrc_read_reg(SDRC_DLLA_CTRL); dll_cnt = sdrc_read_reg(SDRC_DLLA_STATUS) & 0xff00; } else { fast_dll = sdrc_read_reg(SDRC_DLLB_CTRL); dll_cnt = sdrc_read_reg(SDRC_DLLB_STATUS) & 0xff00; } if (force_lock_to_unlock_mode) { fast_dll &= ~0xff00; fast_dll |= dll_cnt; /* Current lock mode */ } /* set fast timings with DLL filter disabled */ mem_timings.fast_dll_ctrl = (fast_dll | (3 << 8)); /* No disruptions, DDR will be offline & C-ABI not followed */ omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl, mem_timings.fast_dll_ctrl, mem_timings.base_cs, force_lock_to_unlock_mode); mem_timings.slow_dll_ctrl &= 0xff00; /* Keep lock value */ /* Turn status into unlock ctrl */ mem_timings.slow_dll_ctrl |= ((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2)); /* 90 degree phase for anything below 133MHz + disable DLL filter */ mem_timings.slow_dll_ctrl |= ((1 << 1) | (3 << 8)); }
linux-master
arch/arm/mach-omap2/sdrc2xxx.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP WakeupGen Source file * * OMAP WakeupGen is the interrupt controller extension used along * with ARM GIC to wake the CPU out from low power states on * external interrupts. It is responsible for generating wakeup * event from the incoming interrupts and enable bits. It is * implemented in MPU always ON power domain. During normal operation, * WakeupGen delivers external interrupts directly to the GIC. * * Copyright (C) 2011 Texas Instruments, Inc. * Santosh Shilimkar <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/cpu_pm.h> #include "omap-wakeupgen.h" #include "omap-secure.h" #include "soc.h" #include "omap4-sar-layout.h" #include "common.h" #include "pm.h" #define AM43XX_NR_REG_BANKS 7 #define AM43XX_IRQS 224 #define MAX_NR_REG_BANKS AM43XX_NR_REG_BANKS #define MAX_IRQS AM43XX_IRQS #define DEFAULT_NR_REG_BANKS 5 #define DEFAULT_IRQS 160 #define WKG_MASK_ALL 0x00000000 #define WKG_UNMASK_ALL 0xffffffff #define CPU_ENA_OFFSET 0x400 #define CPU0_ID 0x0 #define CPU1_ID 0x1 #define OMAP4_NR_BANKS 4 #define OMAP4_NR_IRQS 128 #define SYS_NIRQ1_EXT_SYS_IRQ_1 7 #define SYS_NIRQ2_EXT_SYS_IRQ_2 119 static void __iomem *wakeupgen_base; static void __iomem *sar_base; static DEFINE_RAW_SPINLOCK(wakeupgen_lock); static unsigned int irq_target_cpu[MAX_IRQS]; static unsigned int irq_banks = DEFAULT_NR_REG_BANKS; static unsigned int max_irqs = DEFAULT_IRQS; static unsigned int omap_secure_apis; #ifdef CONFIG_CPU_PM static unsigned int wakeupgen_context[MAX_NR_REG_BANKS]; #endif struct omap_wakeupgen_ops { void (*save_context)(void); void (*restore_context)(void); }; static struct omap_wakeupgen_ops *wakeupgen_ops; /* * Static helper functions. 
 */

/* Read one 32-bit enable bank for the given CPU's register file */
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

/* Write one 32-bit enable bank for the given CPU's register file */
static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

/* Write one word of a banked save area in SAR RAM */
static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}

/* Map a hwirq to its enable-register bank index and bit position */
static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupt.
	 * i.e. 1 bit per SPI IRQ
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq %= 32;

	return 0;
}

/* Clear (disable wakeup for) one IRQ bit; caller holds wakeupgen_lock */
static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/* Set (enable wakeup for) one IRQ bit; caller holds wakeupgen_lock */
static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 *
 * Clears the WakeupGen enable bit, then forwards the mask to the
 * parent (GIC) chip so both levels stay consistent.
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 *
 * Sets the WakeupGen enable bit, then forwards the unmask to the
 * parent (GIC) chip so both levels stay consistent.
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}

/*
 * The sys_nirq pins bypass peripheral modules and are wired directly
 * to MPUSS wakeupgen. They get automatically inverted for GIC.
 */
static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
{
	bool inverted = false;

	/* Convert active-low types to the active-high form the GIC sees */
	switch (type) {
	case IRQ_TYPE_LEVEL_LOW:
		type &= ~IRQ_TYPE_LEVEL_MASK;
		type |= IRQ_TYPE_LEVEL_HIGH;
		inverted = true;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		type &= ~IRQ_TYPE_EDGE_BOTH;
		type |= IRQ_TYPE_EDGE_RISING;
		inverted = true;
		break;
	default:
		break;
	}

	/*
	 * Only the two sys_nirq pins are hardware-inverted; warn if the
	 * DT requested inversion for any other line.
	 */
	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
			d->hwirq);

	return irq_chip_set_type_parent(d, type);
}

#ifdef CONFIG_HOTPLUG_CPU
/* Per-CPU snapshot of enable banks, preserved across hotplug mask-all */
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

/* Snapshot all enable banks of 'cpu' into its per-CPU save area */
static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

/* Write the previously saved enable banks back to 'cpu' */
static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

/* Fill every enable bank of 'cpu' with the same pattern (all 0s or 1s) */
static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	0 = Mask all interrupts on the 'cpu'
 *	1 = Unmask all interrupts on the 'cpu'
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
*/ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set) { unsigned long flags; raw_spin_lock_irqsave(&wakeupgen_lock, flags); if (set) { _wakeupgen_save_masks(cpu); _wakeupgen_set_all(cpu, WKG_MASK_ALL); } else { _wakeupgen_set_all(cpu, WKG_UNMASK_ALL); _wakeupgen_restore_masks(cpu); } raw_spin_unlock_irqrestore(&wakeupgen_lock, flags); } #endif #ifdef CONFIG_CPU_PM static inline void omap4_irq_save_context(void) { u32 i, val; if (omap_rev() == OMAP4430_REV_ES1_0) return; for (i = 0; i < irq_banks; i++) { /* Save the CPUx interrupt mask for IRQ 0 to 127 */ val = wakeupgen_readl(i, 0); sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i); val = wakeupgen_readl(i, 1); sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i); /* * Disable the secure interrupts for CPUx. The restore * code blindly restores secure and non-secure interrupt * masks from SAR RAM. Secure interrupts are not suppose * to be enabled from HLOS. So overwrite the SAR location * so that the secure interrupt remains disabled. 
*/ sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i); sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i); } /* Save AuxBoot* registers */ val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET); val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1); writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET); /* Save SyncReq generation logic */ val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK); writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET); val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN); writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET); /* Set the Backup Bit Mask status */ val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET); val |= SAR_BACKUP_STATUS_WAKEUPGEN; writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET); } static inline void omap5_irq_save_context(void) { u32 i, val; for (i = 0; i < irq_banks; i++) { /* Save the CPUx interrupt mask for IRQ 0 to 159 */ val = wakeupgen_readl(i, 0); sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i); val = wakeupgen_readl(i, 1); sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i); sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i); sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i); } /* Save AuxBoot* registers */ val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET); val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET); /* Set the Backup Bit Mask status */ val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET); val |= SAR_BACKUP_STATUS_WAKEUPGEN; writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET); } static inline void am43xx_irq_save_context(void) { u32 i; for (i = 0; i < irq_banks; i++) { wakeupgen_context[i] = wakeupgen_readl(i, 0); wakeupgen_writel(0, i, CPU0_ID); } } /* * Save WakeupGen interrupt context in SAR BANK3. Restore is done by * ROM code. 
 * WakeupGen IP is integrated along with GIC to manage the
 * interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of Shared peripheral interrupts(SPI). So the
 * interrupt enable/disable control should be in sync and consistent
 * at WakeupGen and GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	/* Delegate to the per-SoC hook selected at init time */
	if (wakeupgen_ops && wakeupgen_ops->save_context)
		wakeupgen_ops->save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 *
 * Marks the saved context invalid so ROM code will not restore a
 * stale backup on the next low-power exit.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	/* OMAP5 keeps the status word at a different SAR offset */
	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}

/* Write the DDR-saved CPU0 masks back to the AM43xx WakeupGen banks */
static void am43xx_irq_restore_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
}

/* Dispatch to the per-SoC restore hook, if one was registered */
static void irq_restore_context(void)
{
	if (wakeupgen_ops && wakeupgen_ops->restore_context)
		wakeupgen_ops->restore_context();
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	/* On HS/EMU parts only the secure monitor may touch GIC state */
	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				     FLAG_START_CRITICAL,
				     0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}

/* Define ops for context save and restore for each SoC */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
	.save_context	 = omap4_irq_save_context,
	.restore_context = irq_sar_clear,	/* ROM restores; just clear flag */
};

static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
	.save_context	 = omap5_irq_save_context,
	.restore_context = irq_sar_clear,	/* ROM restores; just clear flag */
};

static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
	.save_context	 = am43xx_irq_save_context,
	.restore_context = am43xx_irq_restore_context,
};
#else
/* No CPU_PM: context hooks become empty and are never invoked */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* Coming online: reopen all wakeups and reinstate the saved masks */
static int omap_wakeupgen_cpu_online(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 0);
	return 0;
}

/* Going down: save the masks and gate every wakeup off */
static int omap_wakeupgen_cpu_dead(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 1);
	return 0;
}

static void __init irq_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
				  omap_wakeupgen_cpu_online, NULL);
	cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
				  "arm/omap-wake:dead", NULL,
				  omap_wakeupgen_cpu_dead);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
/*
 * Cluster PM notifier: save interrupt context on cluster entry
 * (via the secure API on non-GP devices) and restore it on exit.
 */
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
			irq_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

/* Accessor for other mach-omap2 code that needs the mapped base */
void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

/* Non-zero once init determined the secure dispatcher is available */
int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= wakeupgen_irq_set_type,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

/* Decode a 3-cell GIC-style DT specifier (SPI only) into hwirq/type */
static int wakeupgen_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

/* Allocate hierarchy entries here, then forward the request to the GIC */
static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

/*
 * Initialise the wakeupgen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	/* The WakeupGen only makes sense stacked on a parent (GIC) domain */
	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}
	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	/* Select per-SoC geometry and context save/restore hooks */
	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
		wakeupgen_ops = &omap4_wakeupgen_ops;
	} else if (soc_is_omap54xx()) {
		wakeupgen_ops = &omap5_wakeupgen_ops;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
		wakeupgen_ops = &am43xx_wakeupgen_ops;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at wakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		/* AM43xx is single-core: there is no CPU1 register file */
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enables OMAP5 ES2 PM Mode using ES2_PM_MODE in AMBA_IF_MODE
	 * 0x0: ES1 behavior, CPU cores would enter and exit OFF mode together.
	 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 * independently.
	 * This needs to be set one time thanks to always ON domain.
	 *
	 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
	 * ES2.0, and the same is applicable for DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		/* Register is secure-only; write through the monitor */
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();
	sar_base = omap4_get_sar_ram_base();

	return 0;
}
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);
linux-master
arch/arm/mach-omap2/omap-wakeupgen.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common powerdomain framework functions
 *
 * Copyright (C) 2010-2011 Texas Instruments, Inc.
 * Copyright (C) 2010 Nokia Corporation
 *
 * Derived from mach-omap2/powerdomain.c written by Paul Walmsley
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include "pm.h"
#include "cm.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm-regbits-44xx.h"

/*
 * OMAP3 and OMAP4 specific register bit initialisations
 * Notice that the names here are not according to each power
 * domain but the bit mapping used applies to all of them
 */
/* OMAP3 and OMAP4 Memory Onstate Masks (common across all power domains) */
#define OMAP_MEM0_ONSTATE_MASK OMAP3430_SHAREDL1CACHEFLATONSTATE_MASK
#define OMAP_MEM1_ONSTATE_MASK OMAP3430_L1FLATMEMONSTATE_MASK
#define OMAP_MEM2_ONSTATE_MASK OMAP3430_SHAREDL2CACHEFLATONSTATE_MASK
#define OMAP_MEM3_ONSTATE_MASK OMAP3430_L2FLATMEMONSTATE_MASK
#define OMAP_MEM4_ONSTATE_MASK OMAP4430_OCP_NRET_BANK_ONSTATE_MASK

/* OMAP3 and OMAP4 Memory Retstate Masks (common across all power domains) */
#define OMAP_MEM0_RETSTATE_MASK OMAP3430_SHAREDL1CACHEFLATRETSTATE_MASK
#define OMAP_MEM1_RETSTATE_MASK OMAP3430_L1FLATMEMRETSTATE_MASK
#define OMAP_MEM2_RETSTATE_MASK OMAP3430_SHAREDL2CACHEFLATRETSTATE_MASK
#define OMAP_MEM3_RETSTATE_MASK OMAP3430_L2FLATMEMRETSTATE_MASK
#define OMAP_MEM4_RETSTATE_MASK OMAP4430_OCP_NRET_BANK_RETSTATE_MASK

/* OMAP3 and OMAP4 Memory Status bits */
#define OMAP_MEM0_STATEST_MASK OMAP3430_SHAREDL1CACHEFLATSTATEST_MASK
#define OMAP_MEM1_STATEST_MASK OMAP3430_L1FLATMEMSTATEST_MASK
#define OMAP_MEM2_STATEST_MASK OMAP3430_SHAREDL2CACHEFLATSTATEST_MASK
#define OMAP_MEM3_STATEST_MASK OMAP3430_L2FLATMEMSTATEST_MASK
#define OMAP_MEM4_STATEST_MASK OMAP4430_OCP_NRET_BANK_STATEST_MASK

/* Common Internal functions used across OMAP rev's*/

/*
 * Return the ONSTATE register mask for memory bank 'bank' (0-4).
 * NOTE(review): on an invalid bank this warns and returns -EEXIST
 * coerced into the u32 return type; the trailing 'return 0' is
 * unreachable — both presumably historical, confirm before changing.
 */
u32 omap2_pwrdm_get_mem_bank_onstate_mask(u8 bank)
{
	switch (bank) {
	case 0:
		return OMAP_MEM0_ONSTATE_MASK;
	case 1:
		return OMAP_MEM1_ONSTATE_MASK;
	case 2:
		return OMAP_MEM2_ONSTATE_MASK;
	case 3:
		return OMAP_MEM3_ONSTATE_MASK;
	case 4:
		return OMAP_MEM4_ONSTATE_MASK;
	default:
		WARN_ON(1); /* should never happen */
		return -EEXIST;
	}
	return 0;
}

/* Return the RETSTATE register mask for memory bank 'bank' (0-4) */
u32 omap2_pwrdm_get_mem_bank_retst_mask(u8 bank)
{
	switch (bank) {
	case 0:
		return OMAP_MEM0_RETSTATE_MASK;
	case 1:
		return OMAP_MEM1_RETSTATE_MASK;
	case 2:
		return OMAP_MEM2_RETSTATE_MASK;
	case 3:
		return OMAP_MEM3_RETSTATE_MASK;
	case 4:
		return OMAP_MEM4_RETSTATE_MASK;
	default:
		WARN_ON(1); /* should never happen */
		return -EEXIST;
	}
	return 0;
}

/* Return the STATEST (status) register mask for memory bank 'bank' (0-4) */
u32 omap2_pwrdm_get_mem_bank_stst_mask(u8 bank)
{
	switch (bank) {
	case 0:
		return OMAP_MEM0_STATEST_MASK;
	case 1:
		return OMAP_MEM1_STATEST_MASK;
	case 2:
		return OMAP_MEM2_STATEST_MASK;
	case 3:
		return OMAP_MEM3_STATEST_MASK;
	case 4:
		return OMAP_MEM4_STATEST_MASK;
	default:
		WARN_ON(1); /* should never happen */
		return -EEXIST;
	}
	return 0;
}
linux-master
arch/arm/mach-omap2/powerdomain-common.c
// SPDX-License-Identifier: GPL-2.0 /* * OMAP2/3 clockdomain common data * * Copyright (C) 2008-2011 Texas Instruments, Inc. * Copyright (C) 2008-2010 Nokia Corporation * * Paul Walmsley, Jouni Högander * * This file contains clockdomains and clockdomain wakeup/sleep * dependencies for the OMAP2/3 chips. Some notes: * * A useful validation rule for struct clockdomain: Any clockdomain * referenced by a wkdep_srcs or sleepdep_srcs array must have a * dep_bit assigned. So wkdep_srcs/sleepdep_srcs are really just * software-controllable dependencies. Non-software-controllable * dependencies do exist, but they are not encoded below (yet). * * 24xx does not support programmable sleep dependencies (SLEEPDEP) * * The overly-specific dep_bit names are due to a bit name collision * with CM_FCLKEN_{DSP,IVA2}. The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift * value are the same for all powerdomains: 2 * * XXX should dep_bit be a mask, so we can test to see if it is 0 as a * sanity check? * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE */ /* * To-Do List * -> Port the Sleep/Wakeup dependencies for the domains * from the Power domain framework */ #include <linux/kernel.h> #include <linux/io.h> #include "clockdomain.h" #include "prm2xxx_3xxx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" #include "cm-regbits-34xx.h" #include "cm-regbits-44xx.h" #include "prm-regbits-24xx.h" #include "prm-regbits-34xx.h" /* * Clockdomain dependencies for wkdeps/sleepdeps * * XXX Hardware dependencies (e.g., dependencies that cannot be * changed in software) are not included here yet, but should be. 
 */

/* Wakeup dependency source arrays */

/* 2xxx-specific possible dependencies */

/* 2xxx PM_WKDEP_GFX: CORE, MPU, WKUP */
struct clkdm_dep gfx_24xx_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },	/* sentinel terminator */
};

/* 2xxx PM_WKDEP_DSP: CORE, MPU, WKUP */
struct clkdm_dep dsp_24xx_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },	/* sentinel terminator */
};

/*
 * OMAP2/3-common clockdomains
 *
 * Even though the 2420 has a single PRCM module from the
 * interconnect's perspective, internally it does appear to have
 * separate PRM and CM clockdomains. The usual test case is
 * sys_clkout/sys_clkout2.
 */

/* This is an implicit clockdomain - it is never defined as such in TRM */
struct clockdomain wkup_common_clkdm = {
	.name		= "wkup_clkdm",
	.pwrdm		= { .name = "wkup_pwrdm" },
	.dep_bit	= OMAP_EN_WKUP_SHIFT,
	.flags		= CLKDM_ACTIVE_WITH_MPU,
};
linux-master
arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-omap2/common.c * * Code common to all OMAP2+ machines. * * Copyright (C) 2009 Texas Instruments * Copyright (C) 2010 Nokia Corporation * Tony Lindgren <[email protected]> * Added OMAP4 support - Santosh Shilimkar <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include "common.h" #include "omap-secure.h" /* * Stub function for OMAP2 so that common files * continue to build when custom builds are used */ int __weak omap_secure_ram_reserve_memblock(void) { return 0; } void __init omap_reserve(void) { omap_secure_ram_reserve_memblock(); omap_barrier_reserve_memblock(); }
linux-master
arch/arm/mach-omap2/common.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP54XX Power domains framework
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Abhijit Pagare ([email protected])
 * Benoit Cousson ([email protected])
 * Paul Walmsley ([email protected])
 *
 * This file is automatically generated from the OMAP hardware databases.
 * We respectfully ask that any modifications to this file be coordinated
 * with the public [email protected] mailing list and the
 * authors above to ensure that the autogeneration scripts are kept
 * up-to-date with the file contents.
 */

#include <linux/kernel.h>
#include <linux/init.h>

#include "powerdomain.h"

#include "prcm-common.h"
#include "prcm44xx.h"
#include "prm54xx.h"
#include "prcm_mpu54xx.h"

/*
 * Static power-domain descriptors below: pwrsts_mem_ret/pwrsts_mem_on
 * list the allowed memory-bank states in retention and active modes,
 * indexed per bank as commented on each entry.
 */

/* core_54xx_pwrdm: CORE power domain */
static struct powerdomain core_54xx_pwrdm = {
	.name		  = "core_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = OMAP54XX_PRM_CORE_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 5,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* core_nret_bank */
		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
		[2] = PWRSTS_OFF_RET,	/* core_other_bank */
		[3] = PWRSTS_OFF_RET,	/* ipu_l2ram */
		[4] = PWRSTS_OFF_RET,	/* ipu_unicache */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* core_nret_bank */
		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
		[2] = PWRSTS_OFF_RET,	/* core_other_bank */
		[3] = PWRSTS_OFF_RET,	/* ipu_l2ram */
		[4] = PWRSTS_OFF_RET,	/* ipu_unicache */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* abe_54xx_pwrdm: Audio back end power domain */
static struct powerdomain abe_54xx_pwrdm = {
	.name		  = "abe_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = OMAP54XX_PRM_ABE_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF,
	.banks		  = 2,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* aessmem */
		[1] = PWRSTS_OFF_RET,	/* periphmem */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* aessmem */
		[1] = PWRSTS_OFF_RET,	/* periphmem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* coreaon_54xx_pwrdm: Always ON logic that sits in VDD_CORE voltage domain */
static struct powerdomain coreaon_54xx_pwrdm = {
	.name		  = "coreaon_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = OMAP54XX_PRM_COREAON_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
};

/* dss_54xx_pwrdm: Display subsystem power domain */
static struct powerdomain dss_54xx_pwrdm = {
	.name		  = "dss_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = OMAP54XX_PRM_DSS_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF,
	.banks		  = 1,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* dss_mem */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* dss_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* cpu0_54xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
static struct powerdomain cpu0_54xx_pwrdm = {
	.name		  = "cpu0_pwrdm",
	.voltdm		  = { .name = "mpu" },
	.prcm_offs	  = OMAP54XX_PRCM_MPU_PRM_C0_INST,
	.prcm_partition	  = OMAP54XX_PRCM_MPU_PARTITION,
	.pwrsts		  = PWRSTS_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* cpu0_l1 */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* cpu0_l1 */
	},
};

/* cpu1_54xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
static struct powerdomain cpu1_54xx_pwrdm = {
	.name		  = "cpu1_pwrdm",
	.voltdm		  = { .name = "mpu" },
	.prcm_offs	  = OMAP54XX_PRCM_MPU_PRM_C1_INST,
	.prcm_partition	  = OMAP54XX_PRCM_MPU_PARTITION,
	.pwrsts		  = PWRSTS_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* cpu1_l1 */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* cpu1_l1 */
	},
};

/* emu_54xx_pwrdm: Emulation power domain */
static struct powerdomain emu_54xx_pwrdm = {
	.name		  = "emu_pwrdm",
	.voltdm		  = { .name = "wkup" },
	.prcm_offs	  = OMAP54XX_PRM_EMU_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* emu_bank */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* emu_bank */
	},
};

/* mpu_54xx_pwrdm: Modena processor and the Neon coprocessor power domain */
static struct powerdomain mpu_54xx_pwrdm = {
	.name		  = "mpu_pwrdm",
	.voltdm		  = { .name = "mpu" },
	.prcm_offs	  = OMAP54XX_PRM_MPU_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 2,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* mpu_l2 */
		[1] = PWRSTS_RET,	/* mpu_ram */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* mpu_l2 */
		[1] = PWRSTS_OFF_RET,	/* mpu_ram */
	},
};

/* custefuse_54xx_pwrdm: Customer efuse controller power domain */
static struct powerdomain custefuse_54xx_pwrdm = {
	.name		  = "custefuse_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = OMAP54XX_PRM_CUSTEFUSE_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* dsp_54xx_pwrdm: Tesla processor power domain */
static struct powerdomain dsp_54xx_pwrdm = {
	.name		  = "dsp_pwrdm",
	.voltdm		  = { .name = "mm" },
	.prcm_offs	  = OMAP54XX_PRM_DSP_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 3,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* dsp_edma */
		[1] = PWRSTS_OFF_RET,	/* dsp_l1 */
		[2] = PWRSTS_OFF_RET,	/* dsp_l2 */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* dsp_edma */
		[1] = PWRSTS_OFF_RET,	/* dsp_l1 */
		[2] = PWRSTS_OFF_RET,	/* dsp_l2 */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* cam_54xx_pwrdm: Camera subsystem power domain */
static struct powerdomain cam_54xx_pwrdm = {
	.name		  = "cam_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = OMAP54XX_PRM_CAM_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* cam_mem */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* cam_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* l3init_54xx_pwrdm: L3 initators pheripherals power domain */
static struct powerdomain l3init_54xx_pwrdm = {
	.name		  = "l3init_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = OMAP54XX_PRM_L3INIT_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 2,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* l3init_bank1 */
		[1] = PWRSTS_OFF_RET,	/* l3init_bank2 */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* l3init_bank1 */
		[1] = PWRSTS_OFF_RET,	/* l3init_bank2 */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* gpu_54xx_pwrdm: 3D accelerator power domain */
static struct powerdomain gpu_54xx_pwrdm = {
	.name		  = "gpu_pwrdm",
	.voltdm		  = { .name = "mm" },
	.prcm_offs	  = OMAP54XX_PRM_GPU_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* gpu_mem */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* gpu_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* wkupaon_54xx_pwrdm: Wake-up power domain */
static struct powerdomain wkupaon_54xx_pwrdm = {
	.name		  = "wkupaon_pwrdm",
	.voltdm		  = { .name = "wkup" },
	.prcm_offs	  = OMAP54XX_PRM_WKUPAON_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
	.banks		  = 1,
	/*
	 * NOTE(review): pwrsts_mem_ret is deliberately empty here even
	 * though banks = 1 — presumably wkup_bank has no retention
	 * state since the domain is always ON; confirm against the TRM.
	 */
	.pwrsts_mem_ret	= {
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* wkup_bank */
	},
};

/* iva_54xx_pwrdm: IVA-HD power domain */
static struct powerdomain iva_54xx_pwrdm = {
	.name		  = "iva_pwrdm",
	.voltdm		  = { .name = "mm" },
	.prcm_offs	  = OMAP54XX_PRM_IVA_INST,
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF,
	.banks		  = 4,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* hwa_mem */
		[1] = PWRSTS_OFF_RET,	/* sl2_mem */
		[2] = PWRSTS_OFF_RET,	/* tcm1_mem */
		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_OFF_RET,	/* hwa_mem */
		[1] = PWRSTS_OFF_RET,	/* sl2_mem */
		[2] = PWRSTS_OFF_RET,	/* tcm1_mem */
		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/*
 * The following power domains are not under SW control
 *
 * mpuaon
 * mmaon
 */

/* As powerdomains are added or removed above, this list must also be changed */
static struct powerdomain *powerdomains_omap54xx[] __initdata = {
	&core_54xx_pwrdm,
	&abe_54xx_pwrdm,
	&coreaon_54xx_pwrdm,
	&dss_54xx_pwrdm,
	&cpu0_54xx_pwrdm,
	&cpu1_54xx_pwrdm,
	&emu_54xx_pwrdm,
	&mpu_54xx_pwrdm,
	&custefuse_54xx_pwrdm,
	&dsp_54xx_pwrdm,
	&cam_54xx_pwrdm,
	&l3init_54xx_pwrdm,
	&gpu_54xx_pwrdm,
	&wkupaon_54xx_pwrdm,
	&iva_54xx_pwrdm,
	NULL
};

/* Register all OMAP54xx powerdomains with the generic framework */
void __init omap54xx_powerdomains_init(void)
{
	pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
	pwrdm_register_pwrdms(powerdomains_omap54xx);
	pwrdm_complete_init();
}
linux-master
arch/arm/mach-omap2/powerdomains54xx_data.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Framebuffer device registration for TI OMAP platforms
 *
 * Copyright (C) 2006 Nokia Corporation
 * Author: Imre Deak <[email protected]>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <linux/omapfb.h>
#include <linux/dma-mapping.h>

#include <asm/mach/map.h>

#include "soc.h"
#include "display.h"

#ifdef CONFIG_OMAP2_VRFB

/*
 * The first memory resource is the register region for VRFB,
 * the rest are VRFB virtual memory areas for each VRFB context.
 */

static const struct resource omap2_vrfb_resources[] = {
	DEFINE_RES_MEM_NAMED(0x68008000u, 0x40, "vrfb-regs"),
	DEFINE_RES_MEM_NAMED(0x70000000u, 0x4000000, "vrfb-area-0"),
	DEFINE_RES_MEM_NAMED(0x74000000u, 0x4000000, "vrfb-area-1"),
	DEFINE_RES_MEM_NAMED(0x78000000u, 0x4000000, "vrfb-area-2"),
	DEFINE_RES_MEM_NAMED(0x7c000000u, 0x4000000, "vrfb-area-3"),
};

/* OMAP3 has a different register base and 12 VRFB contexts vs. 4 on OMAP2 */
static const struct resource omap3_vrfb_resources[] = {
	DEFINE_RES_MEM_NAMED(0x6C000180u, 0xc0, "vrfb-regs"),
	DEFINE_RES_MEM_NAMED(0x70000000u, 0x4000000, "vrfb-area-0"),
	DEFINE_RES_MEM_NAMED(0x74000000u, 0x4000000, "vrfb-area-1"),
	DEFINE_RES_MEM_NAMED(0x78000000u, 0x4000000, "vrfb-area-2"),
	DEFINE_RES_MEM_NAMED(0x7c000000u, 0x4000000, "vrfb-area-3"),
	DEFINE_RES_MEM_NAMED(0xe0000000u, 0x4000000, "vrfb-area-4"),
	DEFINE_RES_MEM_NAMED(0xe4000000u, 0x4000000, "vrfb-area-5"),
	DEFINE_RES_MEM_NAMED(0xe8000000u, 0x4000000, "vrfb-area-6"),
	DEFINE_RES_MEM_NAMED(0xec000000u, 0x4000000, "vrfb-area-7"),
	DEFINE_RES_MEM_NAMED(0xf0000000u, 0x4000000, "vrfb-area-8"),
	DEFINE_RES_MEM_NAMED(0xf4000000u, 0x4000000, "vrfb-area-9"),
	DEFINE_RES_MEM_NAMED(0xf8000000u, 0x4000000, "vrfb-area-10"),
	DEFINE_RES_MEM_NAMED(0xfc000000u, 0x4000000, "vrfb-area-11"),
};

/*
 * omap_init_vrfb - register the "omapvrfb" platform device
 *
 * Picks the SoC-appropriate resource table and registers the VRFB
 * rotation engine device.  Returns 0 on SoCs without VRFB, 0 on
 * successful registration, or a negative error code.
 */
int __init omap_init_vrfb(void)
{
	struct platform_device *pdev;
	const struct resource *res;
	unsigned int num_res;

	if (cpu_is_omap24xx()) {
		res = omap2_vrfb_resources;
		num_res = ARRAY_SIZE(omap2_vrfb_resources);
	} else if (cpu_is_omap34xx()) {
		res = omap3_vrfb_resources;
		num_res = ARRAY_SIZE(omap3_vrfb_resources);
	} else {
		return 0;	/* no VRFB on this SoC */
	}

	pdev = platform_device_register_resndata(NULL, "omapvrfb", -1,
						 res, num_res, NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}
#else
int __init omap_init_vrfb(void) { return 0; }
#endif

#if IS_ENABLED(CONFIG_FB_OMAP2)

static u64 omap_fb_dma_mask = ~(u32)0;
static struct omapfb_platform_data omapfb_config;

static struct platform_device omap_fb_device = {
	.name		= "omapfb",
	.id		= -1,
	.dev = {
		.dma_mask		= &omap_fb_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &omapfb_config,
	},
	.num_resources = 0,
};

/*
 * omap_init_fb - register the "omapfb" framebuffer platform device
 *
 * Returns the platform_device_register() result, or 0 when
 * CONFIG_FB_OMAP2 is disabled.
 */
int __init omap_init_fb(void)
{
	return platform_device_register(&omap_fb_device);
}
#else
int __init omap_init_fb(void) { return 0; }
#endif
linux-master
arch/arm/mach-omap2/fb.c
// SPDX-License-Identifier: GPL-2.0-only /* * omap_hwmod_2xxx_3xxx_ipblock_data.c - common IP block data for OMAP2/3 * * Copyright (C) 2011 Nokia Corporation * Copyright (C) 2012 Texas Instruments, Inc. * Paul Walmsley */ #include <linux/dmaengine.h> #include <linux/omap-dma.h> #include "omap_hwmod.h" #include "hdq1w.h" #include "omap_hwmod_common_data.h" /* UART */ static struct omap_hwmod_class_sysconfig omap2_uart_sysc = { .rev_offs = 0x50, .sysc_offs = 0x54, .syss_offs = 0x58, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; struct omap_hwmod_class omap2_uart_class = { .name = "uart", .sysc = &omap2_uart_sysc, }; /* * 'venc' class * video encoder */ struct omap_hwmod_class omap2_venc_hwmod_class = { .name = "venc", }; /* * omap_hwmod class data */ struct omap_hwmod_class l3_hwmod_class = { .name = "l3", }; struct omap_hwmod_class l4_hwmod_class = { .name = "l4", }; struct omap_hwmod_class mpu_hwmod_class = { .name = "mpu", }; struct omap_hwmod_class iva_hwmod_class = { .name = "iva", }; static struct omap_hwmod_class_sysconfig omap2_hdq1w_sysc = { .rev_offs = 0x0, .sysc_offs = 0x14, .syss_offs = 0x18, .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .sysc_fields = &omap_hwmod_sysc_type1, }; struct omap_hwmod_class omap2_hdq1w_class = { .name = "hdq1w", .sysc = &omap2_hdq1w_sysc, .reset = &omap_hdq1w_reset, };
linux-master
arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * omap_hwmod implementation for OMAP2/3/4 * * Copyright (C) 2009-2011 Nokia Corporation * Copyright (C) 2011-2012 Texas Instruments, Inc. * * Paul Walmsley, Benoît Cousson, Kevin Hilman * * Created in collaboration with (alphabetical order): Thara Gopinath, * Tony Lindgren, Rajendra Nayak, Vikram Pandita, Sakari Poussa, Anand * Sawant, Santosh Shilimkar, Richard Woodruff * * Introduction * ------------ * One way to view an OMAP SoC is as a collection of largely unrelated * IP blocks connected by interconnects. The IP blocks include * devices such as ARM processors, audio serial interfaces, UARTs, * etc. Some of these devices, like the DSP, are created by TI; * others, like the SGX, largely originate from external vendors. In * TI's documentation, on-chip devices are referred to as "OMAP * modules." Some of these IP blocks are identical across several * OMAP versions. Others are revised frequently. * * These OMAP modules are tied together by various interconnects. * Most of the address and data flow between modules is via OCP-based * interconnects such as the L3 and L4 buses; but there are other * interconnects that distribute the hardware clock tree, handle idle * and reset signaling, supply power, and connect the modules to * various pads or balls on the OMAP package. * * OMAP hwmod provides a consistent way to describe the on-chip * hardware blocks and their integration into the rest of the chip. * This description can be automatically generated from the TI * hardware database. OMAP hwmod provides a standard, consistent API * to reset, enable, idle, and disable these hardware blocks. And * hwmod provides a way for other core code, such as the Linux device * code or the OMAP power management and address space mapping code, * to query the hardware database. * * Using hwmod * ----------- * Drivers won't call hwmod functions directly. 
That is done by the * omap_device code, and in rare occasions, by custom integration code * in arch/arm/ *omap*. The omap_device code includes functions to * build a struct platform_device using omap_hwmod data, and that is * currently how hwmod data is communicated to drivers and to the * Linux driver model. Most drivers will call omap_hwmod functions only * indirectly, via pm_runtime*() functions. * * From a layering perspective, here is where the OMAP hwmod code * fits into the kernel software stack: * * +-------------------------------+ * | Device driver code | * | (e.g., drivers/) | * +-------------------------------+ * | Linux driver model | * | (platform_device / | * | platform_driver data/code) | * +-------------------------------+ * | OMAP core-driver integration | * |(arch/arm/mach-omap2/devices.c)| * +-------------------------------+ * | omap_device code | * | (../plat-omap/omap_device.c) | * +-------------------------------+ * ----> | omap_hwmod code/data | <----- * | (../mach-omap2/omap_hwmod*) | * +-------------------------------+ * | OMAP clock/PRCM/register fns | * | ({read,write}l_relaxed, clk*) | * +-------------------------------+ * * Device drivers should not contain any OMAP-specific code or data in * them. They should only contain code to operate the IP block that * the driver is responsible for. This is because these IP blocks can * also appear in other SoCs, either from TI (such as DaVinci) or from * other manufacturers; and drivers should be reusable across other * platforms. * * The OMAP hwmod code also will attempt to reset and idle all on-chip * devices upon boot. The goal here is for the kernel to be * completely self-reliant and independent from bootloaders. This is * to ensure a repeatable configuration, both to ensure consistent * runtime behavior, and to make it easier for others to reproduce * bugs. 
* * OMAP module activity states * --------------------------- * The hwmod code considers modules to be in one of several activity * states. IP blocks start out in an UNKNOWN state, then once they * are registered via the hwmod code, proceed to the REGISTERED state. * Once their clock names are resolved to clock pointers, the module * enters the CLKS_INITED state; and finally, once the module has been * reset and the integration registers programmed, the INITIALIZED state * is entered. The hwmod code will then place the module into either * the IDLE state to save power, or in the case of a critical system * module, the ENABLED state. * * OMAP core integration code can then call omap_hwmod*() functions * directly to move the module between the IDLE, ENABLED, and DISABLED * states, as needed. This is done during both the PM idle loop, and * in the OMAP core integration code's implementation of the PM runtime * functions. * * References * ---------- * This is a partial list. * - OMAP2420 Multimedia Processor Silicon Revision 2.1.1, 2.2 (SWPU064) * - OMAP2430 Multimedia Device POP Silicon Revision 2.1 (SWPU090) * - OMAP34xx Multimedia Device Silicon Revision 3.1 (SWPU108) * - OMAP4430 Multimedia Device Silicon Revision 1.0 (SWPU140) * - Open Core Protocol Specification 2.2 * * To do: * - handle IO mapping * - bus throughput & module latency measurement code * * XXX add tests at the beginning of each function to ensure the hwmod is * in the appropriate state * XXX error return values should be checked to ensure that they are * appropriate */ #undef DEBUG #include <linux/kernel.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/memblock.h> #include <linux/platform_data/ti-sysc.h> 
#include <dt-bindings/bus/ti-sysc.h>

#include <asm/system_misc.h>

#include "clock.h"
#include "omap_hwmod.h"
#include "soc.h"
#include "common.h"
#include "clockdomain.h"
#include "hdq1w.h"
#include "mmc.h"
#include "powerdomain.h"
#include "cm2xxx.h"
#include "cm3xxx.h"
#include "cm33xx.h"
#include "prm.h"
#include "prm3xxx.h"
#include "prm44xx.h"
#include "prm33xx.h"
#include "prminst44xx.h"
#include "pm.h"
#include "wd_timer.h"

/* Name of the OMAP hwmod for the MPU */
#define MPU_INITIATOR_NAME		"mpu"

/*
 * Number of struct omap_hwmod_link records per struct
 * omap_hwmod_ocp_if record (master->slave and slave->master)
 */
#define LINKS_PER_OCP_IF		2

/*
 * Address offset (in bytes) between the reset control and the reset
 * status registers: 4 bytes on OMAP4
 */
#define OMAP4_RST_CTRL_ST_OFFSET	4

/*
 * Maximum length for module clock handle names
 */
#define MOD_CLK_MAX_NAME_LEN		32

/**
 * struct clkctrl_provider - clkctrl provider mapping data
 * @num_addrs: number of base address ranges for the provider
 * @addr: base address(es) for the provider
 * @size: size(s) of the provider address space(s)
 * @node: device node associated with the provider
 * @link: list link
 */
struct clkctrl_provider {
	int			num_addrs;
	u32			*addr;
	u32			*size;
	struct device_node	*node;
	struct list_head	link;
};

/* All registered clkctrl providers; filled by _setup_clkctrl_provider() */
static LIST_HEAD(clkctrl_providers);

/**
 * struct omap_hwmod_reset - IP specific reset functions
 * @match: string to match against the module name
 * @len: number of characters to match
 * @reset: IP specific reset function
 *
 * Used only in cases where struct omap_hwmod is dynamically allocated.
 */
struct omap_hwmod_reset {
	const char *match;
	int len;
	int (*reset)(struct omap_hwmod *oh);
};

/**
 * struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
 * @enable_module: function to enable a module (via MODULEMODE)
 * @disable_module: function to disable a module (via MODULEMODE)
 *
 * XXX Eventually this functionality will be hidden inside the PRM/CM
 * device drivers.
Until then, this should avoid huge blocks of cpu_is_*()
 * conditionals in this code.
 */
struct omap_hwmod_soc_ops {
	void (*enable_module)(struct omap_hwmod *oh);
	int (*disable_module)(struct omap_hwmod *oh);
	int (*wait_target_ready)(struct omap_hwmod *oh);
	int (*assert_hardreset)(struct omap_hwmod *oh,
				struct omap_hwmod_rst_info *ohri);
	int (*deassert_hardreset)(struct omap_hwmod *oh,
				  struct omap_hwmod_rst_info *ohri);
	int (*is_hardreset_asserted)(struct omap_hwmod *oh,
				     struct omap_hwmod_rst_info *ohri);
	int (*init_clkdm)(struct omap_hwmod *oh);
	void (*update_context_lost)(struct omap_hwmod *oh);
	int (*get_context_lost)(struct omap_hwmod *oh);
	int (*disable_direct_prcm)(struct omap_hwmod *oh);
	u32 (*xlate_clkctrl)(struct omap_hwmod *oh);
};

/* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
static struct omap_hwmod_soc_ops soc_ops;

/* omap_hwmod_list contains all registered struct omap_hwmods */
static LIST_HEAD(omap_hwmod_list);

/* list_lock: serializes additions to omap_hwmod_list */
static DEFINE_MUTEX(list_lock);

/* mpu_oh: used to add/remove MPU initiator from sleepdep list */
static struct omap_hwmod *mpu_oh;

/* inited: set to true once the hwmod code is initialized */
static bool inited;

/* Private functions */

/**
 * _update_sysc_cache - return the module OCP_SYSCONFIG register, keep copy
 * @oh: struct omap_hwmod *
 *
 * Load the current value of the hwmod OCP_SYSCONFIG register into the
 * struct omap_hwmod for later use.  Returns -EINVAL if the hwmod has no
 * OCP_SYSCONFIG register or 0 upon success.
*/
static int _update_sysc_cache(struct omap_hwmod *oh)
{
	if (!oh->class->sysc) {
		WARN(1, "omap_hwmod: %s: cannot read OCP_SYSCONFIG: not defined on hwmod's class\n", oh->name);
		return -EINVAL;
	}

	/* XXX ensure module interface clock is up */

	oh->_sysc_cache = omap_hwmod_read(oh, oh->class->sysc->sysc_offs);

	/* SYSC_NO_CACHE classes must re-read the register on every access */
	if (!(oh->class->sysc->sysc_flags & SYSC_NO_CACHE))
		oh->_int_flags |= _HWMOD_SYSCONFIG_LOADED;

	return 0;
}

/**
 * _write_sysconfig - write a value to the module's OCP_SYSCONFIG register
 * @v: OCP_SYSCONFIG value to write
 * @oh: struct omap_hwmod *
 *
 * Write @v into the module class' OCP_SYSCONFIG register, if it has
 * one.  No return value.
 */
static void _write_sysconfig(u32 v, struct omap_hwmod *oh)
{
	if (!oh->class->sysc) {
		WARN(1, "omap_hwmod: %s: cannot write OCP_SYSCONFIG: not defined on hwmod's class\n", oh->name);
		return;
	}

	/* XXX ensure module interface clock is up */

	/* Module might have lost context, always update cache and register */
	oh->_sysc_cache = v;

	/*
	 * Some IP blocks (such as RTC) require unlocking of IP before
	 * accessing its registers. If a function pointer is present
	 * to unlock, then call it before accessing sysconfig and
	 * call lock after writing sysconfig.
	 */
	if (oh->class->unlock)
		oh->class->unlock(oh);

	omap_hwmod_write(v, oh, oh->class->sysc->sysc_offs);

	if (oh->class->lock)
		oh->class->lock(oh);
}

/**
 * _set_master_standbymode: set the OCP_SYSCONFIG MIDLEMODE field in @v
 * @oh: struct omap_hwmod *
 * @standbymode: MIDLEMODE field bits
 * @v: pointer to register contents to modify
 *
 * Update the master standby mode bits in @v to be @standbymode for
 * the @oh hwmod.  Does not write to the hardware.  Returns -EINVAL
 * upon error or 0 upon success.
*/
static int _set_master_standbymode(struct omap_hwmod *oh, u8 standbymode,
				   u32 *v)
{
	struct omap_hwmod_class_sysconfig *sysc = oh->class->sysc;
	u8 shift;

	/* Field must exist on this IP block class */
	if (!sysc || !(sysc->sysc_flags & SYSC_HAS_MIDLEMODE))
		return -EINVAL;

	if (!sysc->sysc_fields) {
		WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name);
		return -EINVAL;
	}

	shift = sysc->sysc_fields->midle_shift;

	/* Replace the two MIDLEMODE bits with the encoded @standbymode */
	*v &= ~(0x3 << shift);
	*v |= __ffs(standbymode) << shift;

	return 0;
}

/**
 * _set_slave_idlemode: set the OCP_SYSCONFIG SIDLEMODE field in @v
 * @oh: struct omap_hwmod *
 * @idlemode: SIDLEMODE field bits
 * @v: pointer to register contents to modify
 *
 * Update the slave idle mode bits in @v to be @idlemode for the @oh
 * hwmod.  Does not write to the hardware.  Returns -EINVAL upon error
 * or 0 upon success.
 */
static int _set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode, u32 *v)
{
	struct omap_hwmod_class_sysconfig *sysc = oh->class->sysc;
	u8 shift;

	/* Field must exist on this IP block class */
	if (!sysc || !(sysc->sysc_flags & SYSC_HAS_SIDLEMODE))
		return -EINVAL;

	if (!sysc->sysc_fields) {
		WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name);
		return -EINVAL;
	}

	shift = sysc->sysc_fields->sidle_shift;

	/* Replace the two SIDLEMODE bits with the encoded @idlemode */
	*v &= ~(0x3 << shift);
	*v |= __ffs(idlemode) << shift;

	return 0;
}

/**
 * _set_clockactivity: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v
 * @oh: struct omap_hwmod *
 * @clockact: CLOCKACTIVITY field bits
 * @v: pointer to register contents to modify
 *
 * Update the clockactivity mode bits in @v to be @clockact for the
 * @oh hwmod.  Used for additional powersaving on some modules.  Does
 * not write to the hardware.  Returns -EINVAL upon error or 0 upon
 * success.
*/
static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
{
	u32 clkact_mask;
	u8 clkact_shift;

	if (!oh->class->sysc ||
	    !(oh->class->sysc->sysc_flags & SYSC_HAS_CLOCKACTIVITY))
		return -EINVAL;

	if (!oh->class->sysc->sysc_fields) {
		WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name);
		return -EINVAL;
	}

	clkact_shift = oh->class->sysc->sysc_fields->clkact_shift;
	clkact_mask = (0x3 << clkact_shift);	/* CLOCKACTIVITY is a 2-bit field */

	*v &= ~clkact_mask;
	*v |= clockact << clkact_shift;

	return 0;
}

/**
 * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
 * @oh: struct omap_hwmod *
 * @v: pointer to register contents to modify
 *
 * Set the SOFTRESET bit in @v for hwmod @oh.  Returns -EINVAL upon
 * error or 0 upon success.
 */
static int _set_softreset(struct omap_hwmod *oh, u32 *v)
{
	u32 softrst_mask;

	if (!oh->class->sysc ||
	    !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
		return -EINVAL;

	if (!oh->class->sysc->sysc_fields) {
		WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name);
		return -EINVAL;
	}

	softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);

	/* Only modifies *v; caller must write it out to start the reset */
	*v |= softrst_mask;

	return 0;
}

/**
 * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
 * @oh: struct omap_hwmod *
 * @v: pointer to register contents to modify
 *
 * Clear the SOFTRESET bit in @v for hwmod @oh.  Returns -EINVAL upon
 * error or 0 upon success.
*/
static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
{
	u32 softrst_mask;

	if (!oh->class->sysc ||
	    !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
		return -EINVAL;

	if (!oh->class->sysc->sysc_fields) {
		WARN(1,
		     "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
		     oh->name);
		return -EINVAL;
	}

	softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);

	*v &= ~softrst_mask;

	return 0;
}

/**
 * _wait_softreset_complete - wait for an OCP softreset to complete
 * @oh: struct omap_hwmod * to wait on
 *
 * Wait until the IP block represented by @oh reports that its OCP
 * softreset is complete.  This can be triggered by software (see
 * _ocp_softreset()) or by hardware upon returning from off-mode (one
 * example is HSMMC).  Waits for up to MAX_MODULE_SOFTRESET_WAIT
 * microseconds.  Returns the number of microseconds waited.
 */
static int _wait_softreset_complete(struct omap_hwmod *oh)
{
	struct omap_hwmod_class_sysconfig *sysc;
	u32 softrst_mask;
	int c = 0;

	sysc = oh->class->sysc;

	/* Older IPs report reset status in SYSSTATUS ... */
	if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS && sysc->syss_offs > 0)
		omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs)
				   & SYSS_RESETDONE_MASK),
				  MAX_MODULE_SOFTRESET_WAIT, c);
	/* ... newer ones auto-clear the SOFTRESET bit in SYSCONFIG */
	else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
		softrst_mask = (0x1 << sysc->sysc_fields->srst_shift);
		omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs)
				    & softrst_mask),
				  MAX_MODULE_SOFTRESET_WAIT, c);
	}

	return c;
}

/**
 * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v
 * @oh: struct omap_hwmod *
 *
 * The DMADISABLE bit is a semi-automatic bit present in sysconfig register
 * of some modules. When the DMA must perform read/write accesses, the
 * DMADISABLE bit is cleared by the hardware. But when the DMA must stop
 * for power management, software must set the DMADISABLE bit back to 1.
 *
 * Set the DMADISABLE bit in @v for hwmod @oh.  Returns -EINVAL upon
 * error or 0 upon success.
*/
static int _set_dmadisable(struct omap_hwmod *oh)
{
	u32 v;
	u32 dmadisable_mask;

	if (!oh->class->sysc ||
	    !(oh->class->sysc->sysc_flags & SYSC_HAS_DMADISABLE))
		return -EINVAL;

	if (!oh->class->sysc->sysc_fields) {
		WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name);
		return -EINVAL;
	}

	/* clocks must be on for this operation */
	if (oh->_state != _HWMOD_STATE_ENABLED) {
		pr_warn("omap_hwmod: %s: dma can be disabled only from enabled state\n", oh->name);
		return -EINVAL;
	}

	pr_debug("omap_hwmod: %s: setting DMADISABLE\n", oh->name);

	v = oh->_sysc_cache;
	dmadisable_mask =
		(0x1 << oh->class->sysc->sysc_fields->dmadisable_shift);
	v |= dmadisable_mask;
	/* unlike the other _set_* helpers, this one writes the register */
	_write_sysconfig(v, oh);

	return 0;
}

/**
 * _set_module_autoidle: set the OCP_SYSCONFIG AUTOIDLE field in @v
 * @oh: struct omap_hwmod *
 * @autoidle: desired AUTOIDLE bitfield value (0 or 1)
 * @v: pointer to register contents to modify
 *
 * Update the module autoidle bit in @v to be @autoidle for the @oh
 * hwmod.  The autoidle bit controls whether the module can gate
 * internal clocks automatically when it isn't doing anything; the
 * exact function of this bit varies on a per-module basis.  This
 * function does not write to the hardware.  Returns -EINVAL upon
 * error or 0 upon success.
 */
static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle,
				u32 *v)
{
	u32 autoidle_mask;
	u8 autoidle_shift;

	if (!oh->class->sysc ||
	    !(oh->class->sysc->sysc_flags & SYSC_HAS_AUTOIDLE))
		return -EINVAL;

	if (!oh->class->sysc->sysc_fields) {
		WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name);
		return -EINVAL;
	}

	autoidle_shift = oh->class->sysc->sysc_fields->autoidle_shift;
	autoidle_mask = (0x1 << autoidle_shift);	/* single-bit field */

	*v &= ~autoidle_mask;
	*v |= autoidle << autoidle_shift;

	return 0;
}

/**
 * _enable_wakeup: set OCP_SYSCONFIG.ENAWAKEUP bit in the hardware
 * @oh: struct omap_hwmod *
 * @v: pointer to register contents to modify
 *
 * Allow the hardware module @oh to send wakeups.  Returns -EINVAL
 * upon error or 0 upon success.
*/
static int _enable_wakeup(struct omap_hwmod *oh, u32 *v)
{
	if (!oh->class->sysc ||
	    !((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
	      (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ||
	      (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)))
		return -EINVAL;

	if (!oh->class->sysc->sysc_fields) {
		WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name);
		return -EINVAL;
	}

	if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)
		*v |= 0x1 << oh->class->sysc->sysc_fields->enwkup_shift;

	/* smart-wakeup idle modes also imply wakeup capability */
	if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
		_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
	if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
		_set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);

	/* XXX test pwrdm_get_wken for this hwmod's subsystem */

	return 0;
}

/*
 * _get_clkdm - return the clockdomain associated with @oh, if any
 *
 * Prefers the hwmod's explicit clockdomain; otherwise derives it from
 * the main functional clock when that clock is an OMAP hw clock.
 * Returns NULL when no clockdomain can be determined.
 */
static struct clockdomain *_get_clkdm(struct omap_hwmod *oh)
{
	struct clk_hw_omap *clk;

	if (!oh)
		return NULL;

	if (oh->clkdm) {
		return oh->clkdm;
	} else if (oh->_clk) {
		if (!omap2_clk_is_hw_omap(__clk_get_hw(oh->_clk)))
			return NULL;
		clk = to_clk_hw_omap(__clk_get_hw(oh->_clk));
		return clk->clkdm;
	}
	return NULL;
}

/**
 * _add_initiator_dep: prevent @oh from smart-idling while @init_oh is active
 * @oh: struct omap_hwmod *
 *
 * Prevent the hardware module @oh from entering idle while the
 * hardware module initiator @init_oh is active.  Useful when a module
 * will be accessed by a particular initiator (e.g., if a module will
 * be accessed by the IVA, there should be a sleepdep between the IVA
 * initiator and the module).  Only applies to modules in smart-idle
 * mode.  If the clockdomain is marked as not needing autodeps, return
 * 0 without doing anything.  Otherwise, returns -EINVAL upon error or
 * passes along clkdm_add_sleepdep() value upon success.
*/ static int _add_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh) { struct clockdomain *clkdm, *init_clkdm; clkdm = _get_clkdm(oh); init_clkdm = _get_clkdm(init_oh); if (!clkdm || !init_clkdm) return -EINVAL; if (clkdm && clkdm->flags & CLKDM_NO_AUTODEPS) return 0; return clkdm_add_sleepdep(clkdm, init_clkdm); } /** * _del_initiator_dep: allow @oh to smart-idle even if @init_oh is active * @oh: struct omap_hwmod * * * Allow the hardware module @oh to enter idle while the hardare * module initiator @init_oh is active. Useful when a module will not * be accessed by a particular initiator (e.g., if a module will not * be accessed by the IVA, there should be no sleepdep between the IVA * initiator and the module). Only applies to modules in smart-idle * mode. If the clockdomain is marked as not needing autodeps, return * 0 without doing anything. Returns -EINVAL upon error or passes * along clkdm_del_sleepdep() value upon success. */ static int _del_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh) { struct clockdomain *clkdm, *init_clkdm; clkdm = _get_clkdm(oh); init_clkdm = _get_clkdm(init_oh); if (!clkdm || !init_clkdm) return -EINVAL; if (clkdm && clkdm->flags & CLKDM_NO_AUTODEPS) return 0; return clkdm_del_sleepdep(clkdm, init_clkdm); } static const struct of_device_id ti_clkctrl_match_table[] __initconst = { { .compatible = "ti,clkctrl" }, { } }; static int __init _setup_clkctrl_provider(struct device_node *np) { struct clkctrl_provider *provider; int i; provider = memblock_alloc(sizeof(*provider), SMP_CACHE_BYTES); if (!provider) return -ENOMEM; provider->node = np; provider->num_addrs = of_address_count(np); provider->addr = memblock_alloc(sizeof(void *) * provider->num_addrs, SMP_CACHE_BYTES); if (!provider->addr) return -ENOMEM; provider->size = memblock_alloc(sizeof(u32) * provider->num_addrs, SMP_CACHE_BYTES); if (!provider->size) return -ENOMEM; for (i = 0; i < provider->num_addrs; i++) { struct resource res; 
		of_address_to_resource(np, i, &res);
		provider->addr[i] = res.start;
		provider->size[i] = resource_size(&res);
		pr_debug("%s: %pOF: %pR\n", __func__, np, &res);
	}

	list_add(&provider->link, &clkctrl_providers);

	return 0;
}

/*
 * _init_clkctrl_providers - register every "ti,clkctrl" node found in DT
 *
 * Stops at the first failing node.  Returns 0 on success or the error
 * from _setup_clkctrl_provider().
 */
static int __init _init_clkctrl_providers(void)
{
	struct device_node *np;
	int ret = 0;

	for_each_matching_node(np, ti_clkctrl_match_table) {
		ret = _setup_clkctrl_provider(np);
		if (ret) {
			of_node_put(np);
			break;
		}
	}

	return ret;
}

/*
 * _omap4_xlate_clkctrl - translate @oh's PRCM data to a clkctrl address
 *
 * Returns 0 when the hwmod has no modulemode (no clkctrl clock).
 */
static u32 _omap4_xlate_clkctrl(struct omap_hwmod *oh)
{
	if (!oh->prcm.omap4.modulemode)
		return 0;

	return omap_cm_xlate_clkctrl(oh->clkdm->prcm_partition,
				     oh->clkdm->cm_inst,
				     oh->prcm.omap4.clkctrl_offs);
}

/*
 * _lookup_clkctrl_clk - map @oh's clkctrl address to a struct clk
 *
 * Walks the registered clkctrl providers looking for the range that
 * contains the hwmod's clkctrl register address, then asks the clock
 * framework for the matching clock.  Returns NULL when no provider
 * matches or clkctrl translation is unavailable on this SoC.
 */
static struct clk *_lookup_clkctrl_clk(struct omap_hwmod *oh)
{
	struct clkctrl_provider *provider;
	struct clk *clk;
	u32 addr;

	if (!soc_ops.xlate_clkctrl)
		return NULL;

	addr = soc_ops.xlate_clkctrl(oh);
	if (!addr)
		return NULL;

	pr_debug("%s: %s: addr=%x\n", __func__, oh->name, addr);

	list_for_each_entry(provider, &clkctrl_providers, link) {
		int i;

		for (i = 0; i < provider->num_addrs; i++) {
			if (provider->addr[i] <= addr &&
			    provider->addr[i] + provider->size[i] > addr) {
				struct of_phandle_args clkspec;

				clkspec.np = provider->node;
				clkspec.args_count = 2;
				/*
				 * NOTE(review): offset is computed from the
				 * provider's FIRST base even when the match
				 * is in a later range — confirm this matches
				 * the provider's phandle-arg convention.
				 */
				clkspec.args[0] = addr - provider->addr[0];
				clkspec.args[1] = 0;

				clk = of_clk_get_from_provider(&clkspec);

				pr_debug("%s: %s got %p (offset=%x, provider=%pOF)\n",
					 __func__, oh->name, clk,
					 clkspec.args[0], provider->node);

				return clk;
			}
		}
	}

	return NULL;
}

/**
 * _init_main_clk - get a struct clk * for the hwmod's main functional clk
 * @oh: struct omap_hwmod *
 *
 * Called from _init_clocks().  Populates the @oh _clk (main
 * functional clock pointer) if a clock matching the hwmod name is found,
 * or a main_clk is present.  Returns 0 on success or -EINVAL on error.
*/
static int _init_main_clk(struct omap_hwmod *oh)
{
	int ret = 0;
	struct clk *clk = NULL;

	clk = _lookup_clkctrl_clk(oh);

	if (!IS_ERR_OR_NULL(clk)) {
		pr_debug("%s: mapped main_clk %s for %s\n", __func__,
			 __clk_get_name(clk), oh->name);
		oh->main_clk = __clk_get_name(clk);
		oh->_clk = clk;
		/* clkctrl clock found: hand PRCM control to the clk fwk */
		soc_ops.disable_direct_prcm(oh);
	} else {
		if (!oh->main_clk)
			return 0;	/* no main clock: nothing to do */

		oh->_clk = clk_get(NULL, oh->main_clk);
	}

	if (IS_ERR(oh->_clk)) {
		pr_warn("omap_hwmod: %s: cannot clk_get main_clk %s\n",
			oh->name, oh->main_clk);
		return -EINVAL;
	}
	/*
	 * HACK: This needs a re-visit once clk_prepare() is implemented
	 * to do something meaningful. Today its just a no-op.
	 * If clk_prepare() is used at some point to do things like
	 * voltage scaling etc, then this would have to be moved to
	 * some point where subsystems like i2c and pmic become
	 * available.
	 */
	clk_prepare(oh->_clk);

	if (!_get_clkdm(oh))
		pr_debug("omap_hwmod: %s: missing clockdomain for %s.\n",
			 oh->name, oh->main_clk);

	return ret;
}

/**
 * _init_interface_clks - get a struct clk * for the hwmod's interface clks
 * @oh: struct omap_hwmod *
 *
 * Called from _init_clocks().  Populates the @oh OCP slave interface
 * clock pointers.  Returns 0 on success or -EINVAL on error.
 */
static int _init_interface_clks(struct omap_hwmod *oh)
{
	struct omap_hwmod_ocp_if *os;
	struct clk *c;
	int ret = 0;

	list_for_each_entry(os, &oh->slave_ports, node) {
		if (!os->clk)
			continue;	/* interface has no named clock */

		c = clk_get(NULL, os->clk);
		if (IS_ERR(c)) {
			pr_warn("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
				oh->name, os->clk);
			ret = -EINVAL;
			continue;	/* keep going; report failure at end */
		}
		os->_clk = c;
		/*
		 * HACK: This needs a re-visit once clk_prepare() is implemented
		 * to do something meaningful. Today its just a no-op.
		 * If clk_prepare() is used at some point to do things like
		 * voltage scaling etc, then this would have to be moved to
		 * some point where subsystems like i2c and pmic become
		 * available.
		 */
		clk_prepare(os->_clk);
	}

	return ret;
}

/**
 * _init_opt_clks - get a struct clk * for the hwmod's optional clocks
 * @oh: struct omap_hwmod *
 *
 * Called from _init_clocks().  Populates the @oh omap_hwmod_opt_clk
 * clock pointers.  Returns 0 on success or -EINVAL on error.
 */
static int _init_opt_clks(struct omap_hwmod *oh)
{
	struct omap_hwmod_opt_clk *oc;
	struct clk *c;
	int i;
	int ret = 0;

	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) {
		c = clk_get(NULL, oc->clk);
		if (IS_ERR(c)) {
			pr_warn("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
				oh->name, oc->clk);
			ret = -EINVAL;
			continue;	/* keep going; report failure at end */
		}
		oc->_clk = c;
		/*
		 * HACK: This needs a re-visit once clk_prepare() is implemented
		 * to do something meaningful. Today its just a no-op.
		 * If clk_prepare() is used at some point to do things like
		 * voltage scaling etc, then this would have to be moved to
		 * some point where subsystems like i2c and pmic become
		 * available.
		 */
		clk_prepare(oc->_clk);
	}

	return ret;
}

/* Enable every successfully-acquired optional clock of @oh */
static void _enable_optional_clocks(struct omap_hwmod *oh)
{
	struct omap_hwmod_opt_clk *oc;
	int i;

	pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);

	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
		if (oc->_clk) {
			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
				 __clk_get_name(oc->_clk));
			clk_enable(oc->_clk);
		}
}

/* Disable every successfully-acquired optional clock of @oh */
static void _disable_optional_clocks(struct omap_hwmod *oh)
{
	struct omap_hwmod_opt_clk *oc;
	int i;

	pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);

	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
		if (oc->_clk) {
			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
				 __clk_get_name(oc->_clk));
			clk_disable(oc->_clk);
		}
}

/**
 * _enable_clocks - enable hwmod main clock and interface clocks
 * @oh: struct omap_hwmod *
 *
 * Enables all clocks necessary for register reads and writes to succeed
 * on the hwmod @oh.  Returns 0.
*/
static int _enable_clocks(struct omap_hwmod *oh)
{
	struct omap_hwmod_ocp_if *os;

	pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name);

	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
		_enable_optional_clocks(oh);

	if (oh->_clk)
		clk_enable(oh->_clk);

	list_for_each_entry(os, &oh->slave_ports, node) {
		if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
			/* keep the iclk from auto-idling while in use */
			omap2_clk_deny_idle(os->_clk);
			clk_enable(os->_clk);
		}
	}

	/* The opt clocks are controlled by the device driver. */

	return 0;
}

/**
 * _omap4_clkctrl_managed_by_clkfwk - true if clkctrl managed by clock framework
 * @oh: struct omap_hwmod *
 */
static bool _omap4_clkctrl_managed_by_clkfwk(struct omap_hwmod *oh)
{
	if (oh->prcm.omap4.flags & HWMOD_OMAP4_CLKFWK_CLKCTR_CLOCK)
		return true;

	return false;
}

/**
 * _omap4_has_clkctrl_clock - returns true if a module has clkctrl clock
 * @oh: struct omap_hwmod *
 */
static bool _omap4_has_clkctrl_clock(struct omap_hwmod *oh)
{
	if (oh->prcm.omap4.clkctrl_offs)
		return true;

	/* offset 0 is valid only when explicitly flagged as such */
	if (!oh->prcm.omap4.clkctrl_offs &&
	    oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET)
		return true;

	return false;
}

/**
 * _disable_clocks - disable hwmod main clock and interface clocks
 * @oh: struct omap_hwmod *
 *
 * Disables the hwmod @oh main functional and interface clocks.  Returns 0.
 */
static int _disable_clocks(struct omap_hwmod *oh)
{
	struct omap_hwmod_ocp_if *os;

	pr_debug("omap_hwmod: %s: disabling clocks\n", oh->name);

	if (oh->_clk)
		clk_disable(oh->_clk);

	list_for_each_entry(os, &oh->slave_ports, node) {
		if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
			clk_disable(os->_clk);
			/* re-allow iclk auto-idle now that we're done */
			omap2_clk_allow_idle(os->_clk);
		}
	}

	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
		_disable_optional_clocks(oh);

	/* The opt clocks are controlled by the device driver. */

	return 0;
}

/**
 * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
 * @oh: struct omap_hwmod *
 *
 * Enables the PRCM module mode related to the hwmod @oh.
 * No return value.
 */
static void _omap4_enable_module(struct omap_hwmod *oh)
{
	/* nothing to do if there is no modulemode or the clock framework
	 * already owns this module's clkctrl register */
	if (!oh->clkdm || !oh->prcm.omap4.modulemode ||
	    _omap4_clkctrl_managed_by_clkfwk(oh))
		return;

	pr_debug("omap_hwmod: %s: %s: %d\n",
		 oh->name, __func__, oh->prcm.omap4.modulemode);

	omap_cm_module_enable(oh->prcm.omap4.modulemode,
			      oh->clkdm->prcm_partition,
			      oh->clkdm->cm_inst, oh->prcm.omap4.clkctrl_offs);
}

/**
 * _omap4_wait_target_disable - wait for a module to be disabled on OMAP4
 * @oh: struct omap_hwmod *
 *
 * Wait for a module @oh to enter slave idle.  Returns 0 if the module
 * does not have an IDLEST bit or if the module successfully enters
 * slave idle; otherwise, pass along the return value of the
 * appropriate *_cm*_wait_module_idle() function.
 */
static int _omap4_wait_target_disable(struct omap_hwmod *oh)
{
	if (!oh)
		return -EINVAL;

	if (oh->_int_flags & _HWMOD_NO_MPU_PORT || !oh->clkdm)
		return 0;

	if (oh->flags & HWMOD_NO_IDLEST)
		return 0;

	if (_omap4_clkctrl_managed_by_clkfwk(oh))
		return 0;

	if (!_omap4_has_clkctrl_clock(oh))
		return 0;

	return omap_cm_wait_module_idle(oh->clkdm->prcm_partition,
					oh->clkdm->cm_inst,
					oh->prcm.omap4.clkctrl_offs, 0);
}

/**
 * _save_mpu_port_index - find and save the index to @oh's MPU port
 * @oh: struct omap_hwmod *
 *
 * Determines the array index of the OCP slave port that the MPU uses
 * to address the device, and saves it into the struct omap_hwmod.
 * Intended to be called during hwmod registration only. No return
 * value.
 */
static void __init _save_mpu_port_index(struct omap_hwmod *oh)
{
	struct omap_hwmod_ocp_if *os = NULL;

	if (!oh)
		return;

	/* assume no MPU port until one is found below */
	oh->_int_flags |= _HWMOD_NO_MPU_PORT;

	list_for_each_entry(os, &oh->slave_ports, node) {
		if (os->user & OCP_USER_MPU) {
			oh->_mpu_port = os;
			oh->_int_flags &= ~_HWMOD_NO_MPU_PORT;
			break;
		}
	}

	return;
}

/**
 * _find_mpu_rt_port - return omap_hwmod_ocp_if accessible by the MPU
 * @oh: struct omap_hwmod *
 *
 * Given a pointer to a struct omap_hwmod record @oh, return a pointer
 * to the struct omap_hwmod_ocp_if record that is used by the MPU to
 * communicate with the IP block.  This interface need not be directly
 * connected to the MPU (and almost certainly is not), but is directly
 * connected to the IP block represented by @oh.  Returns a pointer
 * to the struct omap_hwmod_ocp_if * upon success, or returns NULL upon
 * error or if there does not appear to be a path from the MPU to this
 * IP block.
 */
static struct omap_hwmod_ocp_if *_find_mpu_rt_port(struct omap_hwmod *oh)
{
	if (!oh || oh->_int_flags & _HWMOD_NO_MPU_PORT || oh->slaves_cnt == 0)
		return NULL;

	return oh->_mpu_port;
}; /* NOTE(review): stray ';' after function body - harmless, could be dropped */

/**
 * _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
 * @oh: struct omap_hwmod *
 *
 * Ensure that the OCP_SYSCONFIG register for the IP block represented
 * by @oh is set to indicate to the PRCM that the IP block is active.
 * Usually this means placing the module into smart-idle mode and
 * smart-standby, but if there is a bug in the automatic idle handling
 * for the IP block, it may need to be placed into the force-idle or
 * no-idle variants of these modes.  No return value.
 */
static void _enable_sysc(struct omap_hwmod *oh)
{
	u8 idlemode, sf;
	u32 v;
	bool clkdm_act;
	struct clockdomain *clkdm;

	if (!oh->class->sysc)
		return;

	/*
	 * Wait until reset has completed, this is needed as the IP
	 * block is reset automatically by hardware in some cases
	 * (off-mode for example), and the drivers require the
	 * IP to be ready when they access it
	 */
	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
		_enable_optional_clocks(oh);
	_wait_softreset_complete(oh);
	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
		_disable_optional_clocks(oh);

	v = oh->_sysc_cache;
	sf = oh->class->sysc->sysc_flags;
	clkdm = _get_clkdm(oh);
	if (sf & SYSC_HAS_SIDLEMODE) {
		if (oh->flags & HWMOD_SWSUP_SIDLE ||
		    oh->flags & HWMOD_SWSUP_SIDLE_ACT) {
			idlemode = HWMOD_IDLEMODE_NO;
		} else {
			if (sf & SYSC_HAS_ENAWAKEUP)
				_enable_wakeup(oh, &v);
			if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
				idlemode = HWMOD_IDLEMODE_SMART_WKUP;
			else
				idlemode = HWMOD_IDLEMODE_SMART;
		}

		/*
		 * This is special handling for some IPs like
		 * 32k sync timer. Force them to idle!
		 */
		clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU);
		if (clkdm_act && !(oh->class->sysc->idlemodes &
				   (SIDLE_SMART | SIDLE_SMART_WKUP)))
			idlemode = HWMOD_IDLEMODE_FORCE;

		_set_slave_idlemode(oh, idlemode, &v);
	}

	if (sf & SYSC_HAS_MIDLEMODE) {
		if (oh->flags & HWMOD_FORCE_MSTANDBY) {
			idlemode = HWMOD_IDLEMODE_FORCE;
		} else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
			idlemode = HWMOD_IDLEMODE_NO;
		} else {
			if (sf & SYSC_HAS_ENAWAKEUP)
				_enable_wakeup(oh, &v);
			if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
				idlemode = HWMOD_IDLEMODE_SMART_WKUP;
			else
				idlemode = HWMOD_IDLEMODE_SMART;
		}
		_set_master_standbymode(oh, idlemode, &v);
	}

	/*
	 * XXX The clock framework should handle this, by
	 * calling into this code.  But this must wait until the
	 * clock structures are tagged with omap_hwmod entries
	 */
	if ((oh->flags & HWMOD_SET_DEFAULT_CLOCKACT) &&
	    (sf & SYSC_HAS_CLOCKACTIVITY))
		_set_clockactivity(oh, CLOCKACT_TEST_ICLK, &v);

	_write_sysconfig(v, oh);

	/*
	 * Set the autoidle bit only after setting the smartidle bit
	 * Setting this will not have any impact on the other modules.
	 */
	if (sf & SYSC_HAS_AUTOIDLE) {
		idlemode = (oh->flags & HWMOD_NO_OCP_AUTOIDLE) ?
			0 : 1;
		_set_module_autoidle(oh, idlemode, &v);
		_write_sysconfig(v, oh);
	}
}

/**
 * _idle_sysc - try to put a module into idle via OCP_SYSCONFIG
 * @oh: struct omap_hwmod *
 *
 * If module is marked as SWSUP_SIDLE, force the module into slave
 * idle; otherwise, configure it for smart-idle.  If module is marked
 * as SWSUP_MSUSPEND, force the module into master standby; otherwise,
 * configure it for smart-standby.  No return value.
 */
static void _idle_sysc(struct omap_hwmod *oh)
{
	u8 idlemode, sf;
	u32 v;

	if (!oh->class->sysc)
		return;

	v = oh->_sysc_cache;
	sf = oh->class->sysc->sysc_flags;

	if (sf & SYSC_HAS_SIDLEMODE) {
		if (oh->flags & HWMOD_SWSUP_SIDLE) {
			idlemode = HWMOD_IDLEMODE_FORCE;
		} else {
			if (sf & SYSC_HAS_ENAWAKEUP)
				_enable_wakeup(oh, &v);
			if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
				idlemode = HWMOD_IDLEMODE_SMART_WKUP;
			else
				idlemode = HWMOD_IDLEMODE_SMART;
		}
		_set_slave_idlemode(oh, idlemode, &v);
	}

	if (sf & SYSC_HAS_MIDLEMODE) {
		if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
		    (oh->flags & HWMOD_FORCE_MSTANDBY)) {
			idlemode = HWMOD_IDLEMODE_FORCE;
		} else {
			if (sf & SYSC_HAS_ENAWAKEUP)
				_enable_wakeup(oh, &v);
			if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
				idlemode = HWMOD_IDLEMODE_SMART_WKUP;
			else
				idlemode = HWMOD_IDLEMODE_SMART;
		}
		_set_master_standbymode(oh, idlemode, &v);
	}

	/* If the cached value is the same as the new value, skip the write */
	if (oh->_sysc_cache != v)
		_write_sysconfig(v, oh);
}

/**
 * _shutdown_sysc - force a module into idle via OCP_SYSCONFIG
 * @oh: struct omap_hwmod *
 *
 * Force the module into slave
idle and master suspend. No return
 * value.
 */
static void _shutdown_sysc(struct omap_hwmod *oh)
{
	u32 v;
	u8 sf;

	if (!oh->class->sysc)
		return;

	v = oh->_sysc_cache;
	sf = oh->class->sysc->sysc_flags;

	if (sf & SYSC_HAS_SIDLEMODE)
		_set_slave_idlemode(oh, HWMOD_IDLEMODE_FORCE, &v);

	if (sf & SYSC_HAS_MIDLEMODE)
		_set_master_standbymode(oh, HWMOD_IDLEMODE_FORCE, &v);

	if (sf & SYSC_HAS_AUTOIDLE)
		_set_module_autoidle(oh, 1, &v);

	_write_sysconfig(v, oh);
}

/**
 * _lookup - find an omap_hwmod by name
 * @name: find an omap_hwmod by name
 *
 * Return a pointer to an omap_hwmod by name, or NULL if not found.
 */
static struct omap_hwmod *_lookup(const char *name)
{
	struct omap_hwmod *oh, *temp_oh;

	oh = NULL;

	list_for_each_entry(temp_oh, &omap_hwmod_list, node) {
		if (!strcmp(name, temp_oh->name)) {
			oh = temp_oh;
			break;
		}
	}

	return oh;
}

/**
 * _init_clkdm - look up a clockdomain name, store pointer in omap_hwmod
 * @oh: struct omap_hwmod *
 *
 * Convert a clockdomain name stored in a struct omap_hwmod into a
 * clockdomain pointer, and save it into the struct omap_hwmod.
 * Return -EINVAL if the clkdm_name lookup failed.
 */
static int _init_clkdm(struct omap_hwmod *oh)
{
	/* a missing clockdomain is not fatal; some hwmods have none */
	if (!oh->clkdm_name) {
		pr_debug("omap_hwmod: %s: missing clockdomain\n", oh->name);
		return 0;
	}

	oh->clkdm = clkdm_lookup(oh->clkdm_name);
	if (!oh->clkdm) {
		pr_warn("omap_hwmod: %s: could not associate to clkdm %s\n",
			oh->name, oh->clkdm_name);
		return 0;
	}

	pr_debug("omap_hwmod: %s: associated to clkdm %s\n",
		 oh->name, oh->clkdm_name);

	return 0;
}

/**
 * _init_clocks - clk_get() all clocks associated with this hwmod. Retrieve as
 * well the clockdomain.
 * @oh: struct omap_hwmod *
 * @np: device_node mapped to this hwmod
 *
 * Called by omap_hwmod_setup_*() (after omap2_clk_init()).
 * Resolves all clock names embedded in the hwmod.  Returns 0 on
 * success, or a negative error code on failure.
 */
static int _init_clocks(struct omap_hwmod *oh, struct device_node *np)
{
	int ret = 0;

	if (oh->_state != _HWMOD_STATE_REGISTERED)
		return 0;

	pr_debug("omap_hwmod: %s: looking up clocks\n", oh->name);

	if (soc_ops.init_clkdm)
		ret |= soc_ops.init_clkdm(oh);

	/* OR the results so every lookup runs even after a failure */
	ret |= _init_main_clk(oh);
	ret |= _init_interface_clks(oh);
	ret |= _init_opt_clks(oh);

	if (!ret)
		oh->_state = _HWMOD_STATE_CLKS_INITED;
	else
		pr_warn("omap_hwmod: %s: cannot _init_clocks\n", oh->name);

	return ret;
}

/**
 * _lookup_hardreset - fill register bit info for this hwmod/reset line
 * @oh: struct omap_hwmod *
 * @name: name of the reset line in the context of this hwmod
 * @ohri: struct omap_hwmod_rst_info * that this function will fill in
 *
 * Return the bit position of the reset line that match the
 * input name.  Return -ENOENT if not found.
 */
static int _lookup_hardreset(struct omap_hwmod *oh, const char *name,
			     struct omap_hwmod_rst_info *ohri)
{
	int i;

	for (i = 0; i < oh->rst_lines_cnt; i++) {
		const char *rst_line = oh->rst_lines[i].name;

		if (!strcmp(rst_line, name)) {
			ohri->rst_shift = oh->rst_lines[i].rst_shift;
			ohri->st_shift = oh->rst_lines[i].st_shift;
			pr_debug("omap_hwmod: %s: %s: %s: rst %d st %d\n",
				 oh->name, __func__, rst_line,
				 ohri->rst_shift, ohri->st_shift);

			return 0;
		}
	}

	return -ENOENT;
}

/**
 * _assert_hardreset - assert the HW reset line of submodules
 * contained in the hwmod module.
 * @oh: struct omap_hwmod *
 * @name: name of the reset line to lookup and assert
 *
 * Some IP like dsp, ipu or iva contain processor that require an HW
 * reset line to be assert / deassert in order to enable fully the IP.
 * Returns -EINVAL if @oh is null, -ENOSYS if we have no way of
 * asserting the hardreset line on the currently-booted SoC, or passes
 * along the return value from _lookup_hardreset() or the SoC's
 * assert_hardreset code.
 */
static int _assert_hardreset(struct omap_hwmod *oh, const char *name)
{
	struct omap_hwmod_rst_info ohri;
	int ret = -EINVAL;

	if (!oh)
		return -EINVAL;

	if (!soc_ops.assert_hardreset)
		return -ENOSYS;

	ret = _lookup_hardreset(oh, name, &ohri);
	if (ret < 0)
		return ret;

	ret = soc_ops.assert_hardreset(oh, &ohri);

	return ret;
}

/**
 * _deassert_hardreset - deassert the HW reset line of submodules contained
 * in the hwmod module.
 * @oh: struct omap_hwmod *
 * @name: name of the reset line to look up and deassert
 *
 * Some IP like dsp, ipu or iva contain processor that require an HW
 * reset line to be assert / deassert in order to enable fully the IP.
 * Returns -EINVAL if @oh is null, -ENOSYS if we have no way of
 * deasserting the hardreset line on the currently-booted SoC, or passes
 * along the return value from _lookup_hardreset() or the SoC's
 * deassert_hardreset code.
 */
static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
{
	struct omap_hwmod_rst_info ohri;
	int ret = -EINVAL;

	if (!oh)
		return -EINVAL;

	if (!soc_ops.deassert_hardreset)
		return -ENOSYS;

	ret = _lookup_hardreset(oh, name, &ohri);
	if (ret < 0)
		return ret;

	if (oh->clkdm) {
		/*
		 * A clockdomain must be in SW_SUP otherwise reset
		 * might not be completed. The clockdomain can be set
		 * in HW_AUTO only when the module become ready.
		 */
		clkdm_deny_idle(oh->clkdm);
		ret = clkdm_hwmod_enable(oh->clkdm, oh);
		if (ret) {
			WARN(1, "omap_hwmod: %s: could not enable clockdomain %s: %d\n",
			     oh->name, oh->clkdm->name, ret);
			return ret;
		}
	}

	/* clocks and the module itself must be running while the line
	 * is released so the reset can actually complete */
	_enable_clocks(oh);
	if (soc_ops.enable_module)
		soc_ops.enable_module(oh);

	ret = soc_ops.deassert_hardreset(oh, &ohri);

	if (soc_ops.disable_module)
		soc_ops.disable_module(oh);
	_disable_clocks(oh);

	if (ret == -EBUSY)
		pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name);

	if (oh->clkdm) {
		/*
		 * Set the clockdomain to HW_AUTO, assuming that the
		 * previous state was HW_AUTO.
		 */
		clkdm_allow_idle(oh->clkdm);
		clkdm_hwmod_disable(oh->clkdm, oh);
	}

	return ret;
}

/**
 * _read_hardreset - read the HW reset line state of submodules
 * contained in the hwmod module
 * @oh: struct omap_hwmod *
 * @name: name of the reset line to look up and read
 *
 * Return the state of the reset line.  Returns -EINVAL if @oh is
 * null, -ENOSYS if we have no way of reading the hardreset line
 * status on the currently-booted SoC, or passes along the return
 * value from _lookup_hardreset() or the SoC's is_hardreset_asserted
 * code.
 */
static int _read_hardreset(struct omap_hwmod *oh, const char *name)
{
	struct omap_hwmod_rst_info ohri;
	int ret = -EINVAL;

	if (!oh)
		return -EINVAL;

	if (!soc_ops.is_hardreset_asserted)
		return -ENOSYS;

	ret = _lookup_hardreset(oh, name, &ohri);
	if (ret < 0)
		return ret;

	return soc_ops.is_hardreset_asserted(oh, &ohri);
}

/**
 * _are_all_hardreset_lines_asserted - return true if the @oh is hard-reset
 * @oh: struct omap_hwmod *
 *
 * If all hardreset lines associated with @oh are asserted, then return true.
 * Otherwise, if part of @oh is out hardreset or if no hardreset lines
 * associated with @oh are asserted, then return false.
 * This function is used to avoid executing some parts of the IP block
 * enable/disable sequence if its hardreset line is set.
 */
static bool _are_all_hardreset_lines_asserted(struct omap_hwmod *oh)
{
	int i, rst_cnt = 0;

	if (oh->rst_lines_cnt == 0)
		return false;

	for (i = 0; i < oh->rst_lines_cnt; i++)
		if (_read_hardreset(oh, oh->rst_lines[i].name) > 0)
			rst_cnt++;

	if (oh->rst_lines_cnt == rst_cnt)
		return true;

	return false;
}

/**
 * _are_any_hardreset_lines_asserted - return true if any part of @oh is
 * hard-reset
 * @oh: struct omap_hwmod *
 *
 * If any hardreset lines associated with @oh are asserted, then
 * return true.  Otherwise, if no hardreset lines associated with @oh
 * are asserted, or if @oh has no hardreset lines, then return false.
 * This function is used to avoid executing some parts of the IP block
 * enable/disable sequence if any hardreset line is set.
 */
static bool _are_any_hardreset_lines_asserted(struct omap_hwmod *oh)
{
	int rst_cnt = 0;
	int i;

	/* stop scanning as soon as one asserted line is found */
	for (i = 0; i < oh->rst_lines_cnt && rst_cnt == 0; i++)
		if (_read_hardreset(oh, oh->rst_lines[i].name) > 0)
			rst_cnt++;

	return (rst_cnt) ? true : false;
}

/**
 * _omap4_disable_module - disable CLKCTRL modulemode on OMAP4
 * @oh: struct omap_hwmod *
 *
 * Disable the PRCM module mode related to the hwmod @oh.
 * Return -EINVAL if the modulemode is not supported and 0 in case of success.
 */
static int _omap4_disable_module(struct omap_hwmod *oh)
{
	int v;

	if (!oh->clkdm || !oh->prcm.omap4.modulemode ||
	    _omap4_clkctrl_managed_by_clkfwk(oh))
		return -EINVAL;

	/*
	 * Since integration code might still be doing something, only
	 * disable if all lines are under hardreset.
	 */
	if (_are_any_hardreset_lines_asserted(oh))
		return 0;

	pr_debug("omap_hwmod: %s: %s\n", oh->name, __func__);

	omap_cm_module_disable(oh->clkdm->prcm_partition, oh->clkdm->cm_inst,
			       oh->prcm.omap4.clkctrl_offs);

	v = _omap4_wait_target_disable(oh);
	if (v)
		pr_warn("omap_hwmod: %s: _wait_target_disable failed\n",
			oh->name);

	return 0;
}

/**
 * _ocp_softreset - reset an omap_hwmod via the OCP_SYSCONFIG bit
 * @oh: struct omap_hwmod *
 *
 * Resets an omap_hwmod @oh via the OCP_SYSCONFIG bit.  hwmod must be
 * enabled for this to work.  Returns -ENOENT if the hwmod cannot be
 * reset this way, -EINVAL if the hwmod is in the wrong state,
 * -ETIMEDOUT if the module did not reset in time, or 0 upon success.
 *
 * In OMAP3 a specific SYSSTATUS register is used to get the reset status.
 * Starting in OMAP4, some IPs do not have SYSSTATUS registers and instead
 * use the SYSCONFIG softreset bit to provide the status.
 *
 * Note that some IP like McBSP do have reset control but don't have
 * reset status.
 */
static int _ocp_softreset(struct omap_hwmod *oh)
{
	u32 v;
	int c = 0;
	int ret = 0;

	if (!oh->class->sysc ||
	    !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
		return -ENOENT;

	/* clocks must be on for this operation */
	if (oh->_state != _HWMOD_STATE_ENABLED) {
		pr_warn("omap_hwmod: %s: reset can only be entered from enabled state\n",
			oh->name);
		return -EINVAL;
	}

	/* For some modules, all optional clocks need to be enabled as well */
	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
		_enable_optional_clocks(oh);

	pr_debug("omap_hwmod: %s: resetting via OCP SOFTRESET\n", oh->name);

	v = oh->_sysc_cache;
	ret = _set_softreset(oh, &v);
	if (ret)
		goto dis_opt_clks;

	_write_sysconfig(v, oh);

	/* some IPs need a settle delay before the reset status is valid */
	if (oh->class->sysc->srst_udelay)
		udelay(oh->class->sysc->srst_udelay);

	c = _wait_softreset_complete(oh);
	if (c == MAX_MODULE_SOFTRESET_WAIT) {
		pr_warn("omap_hwmod: %s: softreset failed (waited %d usec)\n",
			oh->name, MAX_MODULE_SOFTRESET_WAIT);
		ret = -ETIMEDOUT;
		goto dis_opt_clks;
	} else {
		pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
	}

	ret = _clear_softreset(oh, &v);
	if (ret)
		goto dis_opt_clks;

	_write_sysconfig(v, oh);

	/*
	 * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
	 * _wait_target_ready() or _reset()
	 */

dis_opt_clks:
	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
		_disable_optional_clocks(oh);

	return ret;
}

/**
 * _reset - reset an omap_hwmod
 * @oh: struct omap_hwmod *
 *
 * Resets an omap_hwmod @oh.  If the module has a custom reset
 * function pointer defined, then call it to reset the IP block, and
 * pass along its return value to the caller.  Otherwise, if the IP
 * block has an OCP_SYSCONFIG register with a SOFTRESET bitfield
 * associated with it, call a function to reset the IP block via that
 * method, and pass along the return value to the caller.  Finally, if
 * the IP block has some hardreset lines associated with it, assert
 * all of those, but do _not_ deassert them.
(This is because driver
 * authors have expressed an apparent requirement to control the
 * deassertion of the hardreset lines themselves.)
 *
 * The default software reset mechanism for most OMAP IP blocks is
 * triggered via the OCP_SYSCONFIG.SOFTRESET bit.  However, some
 * hwmods cannot be reset via this method.  Some are not targets and
 * therefore have no OCP header registers to access.  Others (like the
 * IVA) have idiosyncratic reset sequences.  So for these relatively
 * rare cases, custom reset code can be supplied in the struct
 * omap_hwmod_class .reset function pointer.
 *
 * _set_dmadisable() is called to set the DMADISABLE bit so that it
 * does not prevent idling of the system. This is necessary for cases
 * where ROMCODE/BOOTLOADER uses dma and transfers control to the
 * kernel without disabling dma.
 *
 * Passes along the return value from either _ocp_softreset() or the
 * custom reset function - these must return -EINVAL if the hwmod
 * cannot be reset this way or if the hwmod is in the wrong state,
 * -ETIMEDOUT if the module did not reset in time, or 0 upon success.
 */
static int _reset(struct omap_hwmod *oh)
{
	int i, r;

	pr_debug("omap_hwmod: %s: resetting\n", oh->name);

	if (oh->class->reset) {
		r = oh->class->reset(oh);
	} else {
		if (oh->rst_lines_cnt > 0) {
			/* assert only; deassertion is left to the driver */
			for (i = 0; i < oh->rst_lines_cnt; i++)
				_assert_hardreset(oh, oh->rst_lines[i].name);
			return 0;
		} else {
			r = _ocp_softreset(oh);
			if (r == -ENOENT)
				r = 0;
		}
	}

	_set_dmadisable(oh);

	/*
	 * OCP_SYSCONFIG bits need to be reprogrammed after a
	 * softreset.  The _enable() function should be split to avoid
	 * the rewrite of the OCP_SYSCONFIG register.
	 */
	if (oh->class->sysc) {
		_update_sysc_cache(oh);
		_enable_sysc(oh);
	}

	return r;
}

/**
 * _omap4_update_context_lost - increment hwmod context loss counter if
 * hwmod context was lost, and clear hardware context loss reg
 * @oh: hwmod to check for context loss
 *
 * If the PRCM indicates that the hwmod @oh lost context, increment
 * our in-memory context loss counter, and clear the RM_*_CONTEXT
 * bits. No return value.
 */
static void _omap4_update_context_lost(struct omap_hwmod *oh)
{
	if (oh->prcm.omap4.flags & HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT)
		return;

	if (!prm_was_any_context_lost_old(oh->clkdm->pwrdm.ptr->prcm_partition,
					  oh->clkdm->pwrdm.ptr->prcm_offs,
					  oh->prcm.omap4.context_offs))
		return;

	oh->prcm.omap4.context_lost_counter++;
	prm_clear_context_loss_flags_old(oh->clkdm->pwrdm.ptr->prcm_partition,
					 oh->clkdm->pwrdm.ptr->prcm_offs,
					 oh->prcm.omap4.context_offs);
}

/**
 * _omap4_get_context_lost - get context loss counter for a hwmod
 * @oh: hwmod to get context loss counter for
 *
 * Returns the in-memory context loss counter for a hwmod.
 */
static int _omap4_get_context_lost(struct omap_hwmod *oh)
{
	return oh->prcm.omap4.context_lost_counter;
}

/**
 * _enable - enable an omap_hwmod
 * @oh: struct omap_hwmod *
 *
 * Enables an omap_hwmod @oh such that the MPU can access the hwmod's
 * register target.  Returns -EINVAL if the hwmod is in the wrong
 * state or passes along the return value of _wait_target_ready().
 */
static int _enable(struct omap_hwmod *oh)
{
	int r;

	pr_debug("omap_hwmod: %s: enabling\n", oh->name);

	/*
	 * hwmods with HWMOD_INIT_NO_IDLE flag set are left in enabled
	 * state at init.
	 */
	if (oh->_int_flags & _HWMOD_SKIP_ENABLE) {
		oh->_int_flags &= ~_HWMOD_SKIP_ENABLE;
		return 0;
	}

	if (oh->_state != _HWMOD_STATE_INITIALIZED &&
	    oh->_state != _HWMOD_STATE_IDLE &&
	    oh->_state != _HWMOD_STATE_DISABLED) {
		WARN(1, "omap_hwmod: %s: enabled state can only be entered from initialized, idle, or disabled state\n",
		     oh->name);
		return -EINVAL;
	}

	/*
	 * If an IP block contains HW reset lines and all of them are
	 * asserted, we let integration code associated with that
	 * block handle the enable.  We've received very little
	 * information on what those driver authors need, and until
	 * detailed information is provided and the driver code is
	 * posted to the public lists, this is probably the best we
	 * can do.
	 */
	if (_are_all_hardreset_lines_asserted(oh))
		return 0;

	_add_initiator_dep(oh, mpu_oh);

	if (oh->clkdm) {
		/*
		 * A clockdomain must be in SW_SUP before enabling
		 * completely the module. The clockdomain can be set
		 * in HW_AUTO only when the module become ready.
		 */
		clkdm_deny_idle(oh->clkdm);
		r = clkdm_hwmod_enable(oh->clkdm, oh);
		if (r) {
			WARN(1, "omap_hwmod: %s: could not enable clockdomain %s: %d\n",
			     oh->name, oh->clkdm->name, r);
			return r;
		}
	}

	_enable_clocks(oh);
	if (soc_ops.enable_module)
		soc_ops.enable_module(oh);
	if (oh->flags & HWMOD_BLOCK_WFI)
		cpu_idle_poll_ctrl(true);

	if (soc_ops.update_context_lost)
		soc_ops.update_context_lost(oh);

	r = (soc_ops.wait_target_ready) ? soc_ops.wait_target_ready(oh) :
		-EINVAL;
	if (oh->clkdm && !(oh->flags & HWMOD_CLKDM_NOAUTO))
		clkdm_allow_idle(oh->clkdm);

	if (!r) {
		oh->_state = _HWMOD_STATE_ENABLED;

		/* Access the sysconfig only if the target is ready */
		if (oh->class->sysc) {
			if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
				_update_sysc_cache(oh);
			_enable_sysc(oh);
		}
	} else {
		/* target never became ready: unwind module/clock enables */
		if (soc_ops.disable_module)
			soc_ops.disable_module(oh);
		_disable_clocks(oh);
		pr_err("omap_hwmod: %s: _wait_target_ready failed: %d\n",
		       oh->name, r);

		if (oh->clkdm)
			clkdm_hwmod_disable(oh->clkdm, oh);
	}

	return r;
}

/**
 * _idle - idle an omap_hwmod
 * @oh: struct omap_hwmod *
 *
 * Idles an omap_hwmod @oh.  This should be called once the hwmod has
 * no further work.  Returns -EINVAL if the hwmod is in the wrong
 * state or returns 0.
 */
static int _idle(struct omap_hwmod *oh)
{
	if (oh->flags & HWMOD_NO_IDLE) {
		oh->_int_flags |= _HWMOD_SKIP_ENABLE;
		return 0;
	}

	pr_debug("omap_hwmod: %s: idling\n", oh->name);

	if (_are_all_hardreset_lines_asserted(oh))
		return 0;

	if (oh->_state != _HWMOD_STATE_ENABLED) {
		WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n",
		     oh->name);
		return -EINVAL;
	}

	if (oh->class->sysc)
		_idle_sysc(oh);
	_del_initiator_dep(oh, mpu_oh);

	/*
	 * If HWMOD_CLKDM_NOAUTO is set then we don't
	 * deny idle the clkdm again since idle was already denied
	 * in _enable()
	 */
	if (oh->clkdm && !(oh->flags & HWMOD_CLKDM_NOAUTO))
		clkdm_deny_idle(oh->clkdm);

	if (oh->flags & HWMOD_BLOCK_WFI)
		cpu_idle_poll_ctrl(false);
	if (soc_ops.disable_module)
		soc_ops.disable_module(oh);

	/*
	 * The module must be in idle mode before disabling any parents
	 * clocks. Otherwise, the parent clock might be disabled before
	 * the module transition is done, and thus will prevent the
	 * transition to complete properly.
	 */
	_disable_clocks(oh);
	if (oh->clkdm) {
		clkdm_allow_idle(oh->clkdm);
		clkdm_hwmod_disable(oh->clkdm, oh);
	}

	oh->_state = _HWMOD_STATE_IDLE;

	return 0;
}

/**
 * _shutdown - shutdown an omap_hwmod
 * @oh: struct omap_hwmod *
 *
 * Shut down an omap_hwmod @oh.  This should be called when the driver
 * used for the hwmod is removed or unloaded or if the driver is not
 * used by the system.  Returns -EINVAL if the hwmod is in the wrong
 * state or returns 0.
 */
static int _shutdown(struct omap_hwmod *oh)
{
	int ret, i;
	u8 prev_state;

	if (_are_all_hardreset_lines_asserted(oh))
		return 0;

	if (oh->_state != _HWMOD_STATE_IDLE &&
	    oh->_state != _HWMOD_STATE_ENABLED) {
		WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n",
		     oh->name);
		return -EINVAL;
	}

	pr_debug("omap_hwmod: %s: disabling\n", oh->name);

	if (oh->class->pre_shutdown) {
		/* pre_shutdown hooks need the module enabled to run */
		prev_state = oh->_state;
		if (oh->_state == _HWMOD_STATE_IDLE)
			_enable(oh);
		ret = oh->class->pre_shutdown(oh);
		if (ret) {
			if (prev_state == _HWMOD_STATE_IDLE)
				_idle(oh);
			return ret;
		}
	}

	if (oh->class->sysc) {
		if (oh->_state == _HWMOD_STATE_IDLE)
			_enable(oh);
		_shutdown_sysc(oh);
	}

	/* clocks and deps are already disabled in idle */
	if (oh->_state == _HWMOD_STATE_ENABLED) {
		_del_initiator_dep(oh, mpu_oh);
		/* XXX what about the other system initiators here? dma, dsp */
		if (oh->flags & HWMOD_BLOCK_WFI)
			cpu_idle_poll_ctrl(false);
		if (soc_ops.disable_module)
			soc_ops.disable_module(oh);
		_disable_clocks(oh);
		if (oh->clkdm)
			clkdm_hwmod_disable(oh->clkdm, oh);
	}
	/* XXX Should this code also force-disable the optional clocks?
	 */

	for (i = 0; i < oh->rst_lines_cnt; i++)
		_assert_hardreset(oh, oh->rst_lines[i].name);

	oh->_state = _HWMOD_STATE_DISABLED;

	return 0;
}

/**
 * of_dev_find_hwmod - look for @oh's name in @np's "ti,hwmods" property
 * @np: struct device_node * to scan
 * @oh: struct omap_hwmod * to look for
 *
 * Returns the index of the matching "ti,hwmods" string within @np, or
 * -ENODEV if the property is absent or does not mention @oh.
 */
static int of_dev_find_hwmod(struct device_node *np,
			     struct omap_hwmod *oh)
{
	int count, i, res;
	const char *p;

	count = of_property_count_strings(np, "ti,hwmods");
	if (count < 1)
		return -ENODEV;

	for (i = 0; i < count; i++) {
		res = of_property_read_string_index(np, "ti,hwmods",
						    i, &p);
		if (res)
			continue;
		if (!strcmp(p, oh->name)) {
			pr_debug("omap_hwmod: dt %pOFn[%i] uses hwmod %s\n",
				 np, i, oh->name);
			return i;
		}
	}

	return -ENODEV;
}

/**
 * of_dev_hwmod_lookup - look up needed hwmod from dt blob
 * @np: struct device_node *
 * @oh: struct omap_hwmod *
 * @index: index of the entry found
 * @found: struct device_node * found or NULL
 *
 * Parse the dt blob and find out needed hwmod. Recursive function is
 * implemented to take care hierarchical dt blob parsing.
 * Return: Returns 0 on success, -ENODEV when not found.
 */
static int of_dev_hwmod_lookup(struct device_node *np,
			       struct omap_hwmod *oh,
			       int *index,
			       struct device_node **found)
{
	struct device_node *np0 = NULL;
	int res;

	res = of_dev_find_hwmod(np, oh);
	if (res >= 0) {
		*found = np;
		*index = res;
		return 0;
	}

	for_each_child_of_node(np, np0) {
		struct device_node *fc;
		int i;

		res = of_dev_hwmod_lookup(np0, oh, &i, &fc);
		if (res == 0) {
			*found = fc;
			*index = i;
			/* drop the iterator's reference before early return */
			of_node_put(np0);
			return 0;
		}
	}

	*found = NULL;
	*index = 0;

	return -ENODEV;
}

/**
 * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets
 *
 * @oh: struct omap_hwmod *
 * @np: struct device_node *
 *
 * Fix up module register offsets for modules with mpu_rt_idx.
 * Only needed for cpsw with interconnect target module defined
 * in device tree while still using legacy hwmod platform data
 * for rev, sysc and syss registers.
 *
 * Can be removed when all cpsw hwmod platform data has been
 * dropped.
*/ static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh, struct device_node *np, struct resource *res) { struct device_node *child = NULL; int error; child = of_get_next_child(np, child); if (!child) return; error = of_address_to_resource(child, oh->mpu_rt_idx, res); if (error) pr_err("%s: error mapping mpu_rt_idx: %i\n", __func__, error); } /** * omap_hwmod_parse_module_range - map module IO range from device tree * @oh: struct omap_hwmod * * @np: struct device_node * * * Parse the device tree range an interconnect target module provides * for it's child device IP blocks. This way we can support the old * "ti,hwmods" property with just dts data without a need for platform * data for IO resources. And we don't need all the child IP device * nodes available in the dts. */ int omap_hwmod_parse_module_range(struct omap_hwmod *oh, struct device_node *np, struct resource *res) { struct property *prop; const char *name; int err; of_property_for_each_string(np, "compatible", prop, name) if (!strncmp("ti,sysc-", name, 8)) break; if (!name) return -ENOENT; err = of_range_to_resource(np, 0, res); if (err) return err; pr_debug("omap_hwmod: %s %pOFn at %pR\n", oh->name, np, &res); if (oh && oh->mpu_rt_idx) { omap_hwmod_fix_mpu_rt_idx(oh, np, res); return 0; } return 0; } /** * _init_mpu_rt_base - populate the virtual address for a hwmod * @oh: struct omap_hwmod * to locate the virtual address * @data: (unused, caller should pass NULL) * @index: index of the reg entry iospace in device tree * @np: struct device_node * of the IP block's device node in the DT data * * Cache the virtual address used by the MPU to access this IP block's * registers. This address is needed early so the OCP registers that * are part of the device's address space can be ioremapped properly. * * If SYSC access is not needed, the registers will not be remapped * and non-availability of MPU access is not treated as an error. 
 *
 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
 * -ENXIO on absent or invalid register target address space.
 */
static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
				    int index, struct device_node *np)
{
	void __iomem *va_start = NULL;
	struct resource res;
	int error;

	if (!oh)
		return -EINVAL;

	_save_mpu_port_index(oh);

	/* if we don't need sysc access we don't need to ioremap */
	if (!oh->class->sysc)
		return 0;

	/* we can't continue without MPU PORT if we need sysc access */
	if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
		return -ENXIO;

	if (!np) {
		pr_err("omap_hwmod: %s: no dt node\n", oh->name);
		return -ENXIO;
	}

	/* Do we have a dts range for the interconnect target module? */
	error = omap_hwmod_parse_module_range(oh, np, &res);
	if (!error)
		va_start = ioremap(res.start, resource_size(&res));

	/* No ranges, rely on device reg entry */
	if (!va_start)
		va_start = of_iomap(np, index + oh->mpu_rt_idx);
	if (!va_start) {
		pr_err("omap_hwmod: %s: Missing dt reg%i for %pOF\n",
		       oh->name, index, np);
		return -ENXIO;
	}

	pr_debug("omap_hwmod: %s: MPU register target at va %p\n",
		 oh->name, va_start);

	oh->_mpu_rt_va = va_start;
	return 0;
}

/**
 * parse_module_flags - set hwmod flags from DT boolean properties
 * @oh: struct omap_hwmod *
 * @np: struct device_node * to read "ti,no-*" properties from
 *
 * Translate the "ti,no-reset-on-init", "ti,no-idle-on-init" and
 * "ti,no-idle" DT properties into the corresponding hwmod flags.
 * No return value.
 */
static void __init parse_module_flags(struct omap_hwmod *oh,
				      struct device_node *np)
{
	if (of_property_read_bool(np, "ti,no-reset-on-init"))
		oh->flags |= HWMOD_INIT_NO_RESET;
	if (of_property_read_bool(np, "ti,no-idle-on-init"))
		oh->flags |= HWMOD_INIT_NO_IDLE;
	if (of_property_read_bool(np, "ti,no-idle"))
		oh->flags |= HWMOD_NO_IDLE;
}

/**
 * _init - initialize internal data for the hwmod @oh
 * @oh: struct omap_hwmod *
 * @n: (unused)
 *
 * Look up the clocks and the address space used by the MPU to access
 * registers belonging to the hwmod @oh.  @oh must already be
 * registered at this point.  This is the first of two phases for
 * hwmod initialization.  Code called here does not touch any hardware
 * registers, it simply prepares internal data structures.
Returns 0 * upon success or if the hwmod isn't registered or if the hwmod's * address space is not defined, or -EINVAL upon failure. */ static int __init _init(struct omap_hwmod *oh, void *data) { int r, index; struct device_node *np = NULL; struct device_node *bus; if (oh->_state != _HWMOD_STATE_REGISTERED) return 0; bus = of_find_node_by_name(NULL, "ocp"); if (!bus) return -ENODEV; r = of_dev_hwmod_lookup(bus, oh, &index, &np); if (r) pr_debug("omap_hwmod: %s missing dt data\n", oh->name); else if (np && index) pr_warn("omap_hwmod: %s using broken dt data from %pOFn\n", oh->name, np); r = _init_mpu_rt_base(oh, NULL, index, np); if (r < 0) { WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", oh->name); return 0; } r = _init_clocks(oh, np); if (r < 0) { WARN(1, "omap_hwmod: %s: couldn't init clocks\n", oh->name); return -EINVAL; } if (np) { struct device_node *child; parse_module_flags(oh, np); child = of_get_next_child(np, NULL); if (child) parse_module_flags(oh, child); } oh->_state = _HWMOD_STATE_INITIALIZED; return 0; } /** * _setup_iclk_autoidle - configure an IP block's interface clocks * @oh: struct omap_hwmod * * * Set up the module's interface clocks. XXX This function is still mostly * a stub; implementing this properly requires iclk autoidle usecounting in * the clock code. No return value. */ static void _setup_iclk_autoidle(struct omap_hwmod *oh) { struct omap_hwmod_ocp_if *os; if (oh->_state != _HWMOD_STATE_INITIALIZED) return; list_for_each_entry(os, &oh->slave_ports, node) { if (!os->_clk) continue; if (os->flags & OCPIF_SWSUP_IDLE) { /* * we might have multiple users of one iclk with * different requirements, disable autoidle when * the module is enabled, e.g. 
			 * dss iclk
			 */
		} else {
			/* we are enabling autoidle afterwards anyways */
			clk_enable(os->_clk);
		}
	}

	return;
}

/**
 * _setup_reset - reset an IP block during the setup process
 * @oh: struct omap_hwmod *
 *
 * Reset the IP block corresponding to the hwmod @oh during the setup
 * process. The IP block is first enabled so it can be successfully
 * reset. Returns 0 upon success or a negative error code upon
 * failure.
 */
static int _setup_reset(struct omap_hwmod *oh)
{
	int r = 0;

	if (oh->_state != _HWMOD_STATE_INITIALIZED)
		return -EINVAL;

	/* Modules clocked externally cannot be reset by the hwmod core */
	if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK)
		return -EPERM;

	if (oh->rst_lines_cnt == 0) {
		/* No hardreset lines: enable the module so a softreset can work */
		r = _enable(oh);
		if (r) {
			pr_warn("omap_hwmod: %s: cannot be enabled for reset (%d)\n",
				oh->name, oh->_state);
			return -EINVAL;
		}
	}

	if (!(oh->flags & HWMOD_INIT_NO_RESET))
		r = _reset(oh);

	return r;
}

/**
 * _setup_postsetup - transition to the appropriate state after _setup
 * @oh: struct omap_hwmod *
 *
 * Place an IP block represented by @oh into a "post-setup" state --
 * either IDLE, ENABLED, or DISABLED.  ("post-setup" simply means that
 * this function is called at the end of _setup().)  The postsetup
 * state for an IP block can be changed by calling
 * omap_hwmod_enter_postsetup_state() early in the boot process,
 * before one of the omap_hwmod_setup*() functions are called for the
 * IP block.
 *
 * The IP block stays in this state until a PM runtime-based driver is
 * loaded for that IP block.  A post-setup state of IDLE is
 * appropriate for almost all IP blocks with runtime PM-enabled
 * drivers, since those drivers are able to enable the IP block.  A
 * post-setup state of ENABLED is appropriate for kernels with PM
 * runtime disabled.  The DISABLED state is appropriate for unusual IP
 * blocks such as the MPU WDTIMER on kernels without WDTIMER drivers
 * included, since the WDTIMER starts running on reset and will reset
 * the MPU if left active.
 *
 * This post-setup mechanism is deprecated.  Once all of the OMAP
 * drivers have been converted to use PM runtime, and all of the IP
 * block data and interconnect data is available to the hwmod code, it
 * should be possible to replace this mechanism with a "lazy reset"
 * arrangement.  In a "lazy reset" setup, each IP block is enabled
 * when the driver first probes, then all remaining IP blocks without
 * drivers are either shut down or enabled after the drivers have
 * loaded.  However, this cannot take place until the above
 * preconditions have been met, since otherwise the late reset code
 * has no way of knowing which IP blocks are in use by drivers, and
 * which ones are unused.
 *
 * No return value.
 */
static void _setup_postsetup(struct omap_hwmod *oh)
{
	u8 postsetup_state;

	/* Blocks with hardreset lines are left alone here */
	if (oh->rst_lines_cnt > 0)
		return;

	postsetup_state = oh->_postsetup_state;
	if (postsetup_state == _HWMOD_STATE_UNKNOWN)
		postsetup_state = _HWMOD_STATE_ENABLED;

	/*
	 * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
	 * it should be set by the core code as a runtime flag during startup
	 */
	if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
	    (postsetup_state == _HWMOD_STATE_IDLE)) {
		oh->_int_flags |= _HWMOD_SKIP_ENABLE;
		postsetup_state = _HWMOD_STATE_ENABLED;
	}

	if (postsetup_state == _HWMOD_STATE_IDLE)
		_idle(oh);
	else if (postsetup_state == _HWMOD_STATE_DISABLED)
		_shutdown(oh);
	else if (postsetup_state != _HWMOD_STATE_ENABLED)
		WARN(1, "hwmod: %s: unknown postsetup state %d! defaulting to enabled\n",
		     oh->name, postsetup_state);

	return;
}

/**
 * _setup - prepare IP block hardware for use
 * @oh: struct omap_hwmod *
 * @n: (unused, pass NULL)
 *
 * Configure the IP block represented by @oh.  This may include
 * enabling the IP block, resetting it, and placing it into a
 * post-setup state, depending on the type of IP block and applicable
 * flags.  IP blocks are reset to prevent any previous configuration
 * by the bootloader or previous operating system from interfering
 * with power management or other parts of the system.  The reset can
 * be avoided; see omap_hwmod_no_setup_reset().  This is the second of
 * two phases for hwmod initialization.  Code called here generally
 * affects the IP block hardware, or system integration hardware
 * associated with the IP block.  Returns 0.
 */
static int _setup(struct omap_hwmod *oh, void *data)
{
	if (oh->_state != _HWMOD_STATE_INITIALIZED)
		return 0;

	if (oh->parent_hwmod) {
		int r;

		/* A child block cannot be touched until its parent is enabled */
		r = _enable(oh->parent_hwmod);
		WARN(r, "hwmod: %s: setup: failed to enable parent hwmod %s\n",
		     oh->name, oh->parent_hwmod->name);
	}

	_setup_iclk_autoidle(oh);

	if (!_setup_reset(oh))
		_setup_postsetup(oh);

	if (oh->parent_hwmod) {
		u8 postsetup_state;

		/* Return the parent to its own requested post-setup state */
		postsetup_state = oh->parent_hwmod->_postsetup_state;

		if (postsetup_state == _HWMOD_STATE_IDLE)
			_idle(oh->parent_hwmod);
		else if (postsetup_state == _HWMOD_STATE_DISABLED)
			_shutdown(oh->parent_hwmod);
		else if (postsetup_state != _HWMOD_STATE_ENABLED)
			WARN(1, "hwmod: %s: unknown postsetup state %d! defaulting to enabled\n",
			     oh->parent_hwmod->name, postsetup_state);
	}

	return 0;
}

/**
 * _register - register a struct omap_hwmod
 * @oh: struct omap_hwmod *
 *
 * Registers the omap_hwmod @oh.  Returns -EEXIST if an omap_hwmod
 * already has been registered by the same name; -EINVAL if the
 * omap_hwmod is in the wrong state, if @oh is NULL, if the
 * omap_hwmod's class field is NULL; if the omap_hwmod is missing a
 * name, or if the omap_hwmod's class is missing a name; or 0 upon
 * success.
 *
 * XXX The data should be copied into bootmem, so the original data
 * should be marked __initdata and freed after init.  This would allow
 * unneeded omap_hwmods to be freed on multi-OMAP configurations.  Note
 * that the copy process would be relatively complex due to the large number
 * of substructures.
 */
static int _register(struct omap_hwmod *oh)
{
	if (!oh || !oh->name || !oh->class || !oh->class->name ||
	    (oh->_state != _HWMOD_STATE_UNKNOWN))
		return -EINVAL;

	pr_debug("omap_hwmod: %s: registering\n", oh->name);

	/* A hwmod name may only be registered once */
	if (_lookup(oh->name))
		return -EEXIST;

	list_add_tail(&oh->node, &omap_hwmod_list);

	INIT_LIST_HEAD(&oh->slave_ports);
	spin_lock_init(&oh->_lock);
	/* Per-hwmod lock class so lockdep can tell nested hwmod locks apart */
	lockdep_set_class(&oh->_lock, &oh->hwmod_key);

	oh->_state = _HWMOD_STATE_REGISTERED;

	/*
	 * XXX Rather than doing a strcmp(), this should test a flag
	 * set in the hwmod data, inserted by the autogenerator code.
	 */
	if (!strcmp(oh->name, MPU_INITIATOR_NAME))
		mpu_oh = oh;	/* cache the MPU subsystem hwmod for early setup */

	return 0;
}

/**
 * _add_link - add an interconnect between two IP blocks
 * @oi: pointer to a struct omap_hwmod_ocp_if record
 *
 * Add struct omap_hwmod_link records connecting the slave IP block
 * specified in @oi->slave to @oi.  This code is assumed to run before
 * preemption or SMP has been enabled, thus avoiding the need for
 * locking in this code.  Changes to this assumption will require
 * additional locking.  Returns 0.
 */
static int _add_link(struct omap_hwmod_ocp_if *oi)
{
	pr_debug("omap_hwmod: %s -> %s: adding link\n", oi->master->name,
		 oi->slave->name);

	list_add(&oi->node, &oi->slave->slave_ports);

	oi->slave->slaves_cnt++;

	return 0;
}

/**
 * _register_link - register a struct omap_hwmod_ocp_if
 * @oi: struct omap_hwmod_ocp_if *
 *
 * Registers the omap_hwmod_ocp_if record @oi.  Returns -EEXIST if it
 * has already been registered; -EINVAL if @oi is NULL or if the
 * record pointed to by @oi is missing required fields; or 0 upon
 * success.
 *
 * XXX The data should be copied into bootmem, so the original data
 * should be marked __initdata and freed after init.  This would allow
 * unneeded omap_hwmods to be freed on multi-OMAP configurations.
 */
static int __init _register_link(struct omap_hwmod_ocp_if *oi)
{
	if (!oi || !oi->master || !oi->slave || !oi->user)
		return -EINVAL;

	if (oi->_int_flags & _OCPIF_INT_FLAGS_REGISTERED)
		return -EEXIST;

	pr_debug("omap_hwmod: registering link from %s to %s\n",
		 oi->master->name, oi->slave->name);

	/*
	 * Register the connected hwmods, if they haven't been
	 * registered already
	 */
	if (oi->master->_state != _HWMOD_STATE_REGISTERED)
		_register(oi->master);

	if (oi->slave->_state != _HWMOD_STATE_REGISTERED)
		_register(oi->slave);

	_add_link(oi);

	oi->_int_flags |= _OCPIF_INT_FLAGS_REGISTERED;

	return 0;
}

/* Static functions intended only for use in soc_ops field function pointers */

/**
 * _omap2xxx_3xxx_wait_target_ready - wait for a module to leave slave idle
 * @oh: struct omap_hwmod *
 *
 * Wait for a module @oh to leave slave idle.  Returns 0 if the module
 * does not have an IDLEST bit or if the module successfully leaves
 * slave idle; otherwise, pass along the return value of the
 * appropriate *_cm*_wait_module_ready() function.
 */
static int _omap2xxx_3xxx_wait_target_ready(struct omap_hwmod *oh)
{
	if (!oh)
		return -EINVAL;

	if (oh->flags & HWMOD_NO_IDLEST)
		return 0;

	/* Nothing to poll for modules without an MPU register target */
	if (!_find_mpu_rt_port(oh))
		return 0;

	/* XXX check module SIDLEMODE, hardreset status, enabled clocks */

	return omap_cm_wait_module_ready(0, oh->prcm.omap2.module_offs,
					 oh->prcm.omap2.idlest_reg_id,
					 oh->prcm.omap2.idlest_idle_bit);
}

/**
 * _omap4_wait_target_ready - wait for a module to leave slave idle
 * @oh: struct omap_hwmod *
 *
 * Wait for a module @oh to leave slave idle.  Returns 0 if the module
 * does not have an IDLEST bit or if the module successfully leaves
 * slave idle; otherwise, pass along the return value of the
 * appropriate *_cm*_wait_module_ready() function.
 */
static int _omap4_wait_target_ready(struct omap_hwmod *oh)
{
	if (!oh)
		return -EINVAL;

	if (oh->flags & HWMOD_NO_IDLEST || !oh->clkdm)
		return 0;

	/* Nothing to poll for modules without an MPU register target */
	if (!_find_mpu_rt_port(oh))
		return 0;

	/* The clock framework, not hwmod, controls this module's CLKCTRL */
	if (_omap4_clkctrl_managed_by_clkfwk(oh))
		return 0;

	if (!_omap4_has_clkctrl_clock(oh))
		return 0;

	/* XXX check module SIDLEMODE, hardreset status */

	return omap_cm_wait_module_ready(oh->clkdm->prcm_partition,
					 oh->clkdm->cm_inst,
					 oh->prcm.omap4.clkctrl_offs, 0);
}

/**
 * _omap2_assert_hardreset - call OMAP2 PRM hardreset fn with hwmod args
 * @oh: struct omap_hwmod * to assert hardreset
 * @ohri: hardreset line data
 *
 * Call omap2_prm_assert_hardreset() with parameters extracted from
 * the hwmod @oh and the hardreset line data @ohri.  Only intended for
 * use as an soc_ops function pointer.  Passes along the return value
 * from omap2_prm_assert_hardreset().  XXX This function is scheduled
 * for removal when the PRM code is moved into drivers/.
 */
static int _omap2_assert_hardreset(struct omap_hwmod *oh,
				   struct omap_hwmod_rst_info *ohri)
{
	return omap_prm_assert_hardreset(ohri->rst_shift, 0,
					 oh->prcm.omap2.module_offs, 0);
}

/**
 * _omap2_deassert_hardreset - call OMAP2 PRM hardreset fn with hwmod args
 * @oh: struct omap_hwmod * to deassert hardreset
 * @ohri: hardreset line data
 *
 * Call omap2_prm_deassert_hardreset() with parameters extracted from
 * the hwmod @oh and the hardreset line data @ohri.  Only intended for
 * use as an soc_ops function pointer.  Passes along the return value
 * from omap2_prm_deassert_hardreset().  XXX This function is
 * scheduled for removal when the PRM code is moved into drivers/.
 */
static int _omap2_deassert_hardreset(struct omap_hwmod *oh,
				     struct omap_hwmod_rst_info *ohri)
{
	return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, 0,
					   oh->prcm.omap2.module_offs, 0, 0);
}

/**
 * _omap2_is_hardreset_asserted - call OMAP2 PRM hardreset fn with hwmod args
 * @oh: struct omap_hwmod * to test hardreset
 * @ohri: hardreset line data
 *
 * Call omap2_prm_is_hardreset_asserted() with parameters extracted
 * from the hwmod @oh and the hardreset line data @ohri.  Only
 * intended for use as an soc_ops function pointer.  Passes along the
 * return value from omap2_prm_is_hardreset_asserted().  XXX This
 * function is scheduled for removal when the PRM code is moved into
 * drivers/.
 */
static int _omap2_is_hardreset_asserted(struct omap_hwmod *oh,
					struct omap_hwmod_rst_info *ohri)
{
	return omap_prm_is_hardreset_asserted(ohri->st_shift, 0,
					      oh->prcm.omap2.module_offs, 0);
}

/**
 * _omap4_assert_hardreset - call OMAP4 PRM hardreset fn with hwmod args
 * @oh: struct omap_hwmod * to assert hardreset
 * @ohri: hardreset line data
 *
 * Call omap4_prminst_assert_hardreset() with parameters extracted
 * from the hwmod @oh and the hardreset line data @ohri.  Only
 * intended for use as an soc_ops function pointer.  Passes along the
 * return value from omap4_prminst_assert_hardreset().  XXX This
 * function is scheduled for removal when the PRM code is moved into
 * drivers/.
 */
static int _omap4_assert_hardreset(struct omap_hwmod *oh,
				   struct omap_hwmod_rst_info *ohri)
{
	/* OMAP4 reset registers live in the clockdomain's PRM partition */
	if (!oh->clkdm)
		return -EINVAL;

	return omap_prm_assert_hardreset(ohri->rst_shift,
					 oh->clkdm->pwrdm.ptr->prcm_partition,
					 oh->clkdm->pwrdm.ptr->prcm_offs,
					 oh->prcm.omap4.rstctrl_offs);
}

/**
 * _omap4_deassert_hardreset - call OMAP4 PRM hardreset fn with hwmod args
 * @oh: struct omap_hwmod * to deassert hardreset
 * @ohri: hardreset line data
 *
 * Call omap4_prminst_deassert_hardreset() with parameters extracted
 * from the hwmod @oh and the hardreset line data @ohri.
 * Only
 * intended for use as an soc_ops function pointer.  Passes along the
 * return value from omap4_prminst_deassert_hardreset().  XXX This
 * function is scheduled for removal when the PRM code is moved into
 * drivers/.
 */
static int _omap4_deassert_hardreset(struct omap_hwmod *oh,
				     struct omap_hwmod_rst_info *ohri)
{
	if (!oh->clkdm)
		return -EINVAL;

	/* On OMAP4 the reset status bit mirrors the reset shift */
	if (ohri->st_shift)
		pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
		       oh->name, ohri->name);
	return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->rst_shift,
					   oh->clkdm->pwrdm.ptr->prcm_partition,
					   oh->clkdm->pwrdm.ptr->prcm_offs,
					   oh->prcm.omap4.rstctrl_offs,
					   oh->prcm.omap4.rstctrl_offs +
					   OMAP4_RST_CTRL_ST_OFFSET);
}

/**
 * _omap4_is_hardreset_asserted - call OMAP4 PRM hardreset fn with hwmod args
 * @oh: struct omap_hwmod * to test hardreset
 * @ohri: hardreset line data
 *
 * Call omap4_prminst_is_hardreset_asserted() with parameters
 * extracted from the hwmod @oh and the hardreset line data @ohri.
 * Only intended for use as an soc_ops function pointer.  Passes along
 * the return value from omap4_prminst_is_hardreset_asserted().  XXX
 * This function is scheduled for removal when the PRM code is moved
 * into drivers/.
 */
static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh,
					struct omap_hwmod_rst_info *ohri)
{
	if (!oh->clkdm)
		return -EINVAL;

	return omap_prm_is_hardreset_asserted(ohri->rst_shift,
					      oh->clkdm->pwrdm.ptr->
					      prcm_partition,
					      oh->clkdm->pwrdm.ptr->prcm_offs,
					      oh->prcm.omap4.rstctrl_offs);
}

/**
 * _omap4_disable_direct_prcm - disable direct PRCM control for hwmod
 * @oh: struct omap_hwmod * to disable control for
 *
 * Disables direct PRCM clkctrl done by hwmod core.  Instead, the hwmod
 * will be using its main_clk to enable/disable the module.  Returns
 * 0 if successful.
 */
static int _omap4_disable_direct_prcm(struct omap_hwmod *oh)
{
	if (!oh)
		return -EINVAL;

	oh->prcm.omap4.flags |= HWMOD_OMAP4_CLKFWK_CLKCTR_CLOCK;

	return 0;
}

/**
 * _am33xx_deassert_hardreset - call AM33XX PRM hardreset fn with hwmod args
 * @oh: struct omap_hwmod * to deassert hardreset
 * @ohri: hardreset line data
 *
 * Call am33xx_prminst_deassert_hardreset() with parameters extracted
 * from the hwmod @oh and the hardreset line data @ohri.  Only
 * intended for use as an soc_ops function pointer.  Passes along the
 * return value from am33xx_prminst_deassert_hardreset().  XXX This
 * function is scheduled for removal when the PRM code is moved into
 * drivers/.
 */
static int _am33xx_deassert_hardreset(struct omap_hwmod *oh,
				      struct omap_hwmod_rst_info *ohri)
{
	return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift,
					   oh->clkdm->pwrdm.ptr->prcm_partition,
					   oh->clkdm->pwrdm.ptr->prcm_offs,
					   oh->prcm.omap4.rstctrl_offs,
					   oh->prcm.omap4.rstst_offs);
}

/* Public functions */

/*
 * omap_hwmod_read - read a register from the hwmod's MPU register target.
 * Uses a 16-bit access when the hwmod declares HWMOD_16BIT_REG, 32-bit
 * otherwise.  Requires oh->_mpu_rt_va to have been set up by _init().
 */
u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs)
{
	if (oh->flags & HWMOD_16BIT_REG)
		return readw_relaxed(oh->_mpu_rt_va + reg_offs);
	else
		return readl_relaxed(oh->_mpu_rt_va + reg_offs);
}

/*
 * omap_hwmod_write - write @v to a register in the hwmod's MPU register
 * target, honouring HWMOD_16BIT_REG as in omap_hwmod_read().
 */
void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs)
{
	if (oh->flags & HWMOD_16BIT_REG)
		writew_relaxed(v, oh->_mpu_rt_va + reg_offs);
	else
		writel_relaxed(v, oh->_mpu_rt_va + reg_offs);
}

/**
 * omap_hwmod_softreset - reset a module via SYSCONFIG.SOFTRESET bit
 * @oh: struct omap_hwmod *
 *
 * This is a public function exposed to drivers.  Some drivers may need to do
 * some settings before and after resetting the device.  Those drivers after
 * doing the necessary settings could use this function to start a reset by
 * setting the SYSCONFIG.SOFTRESET bit.
 */
int omap_hwmod_softreset(struct omap_hwmod *oh)
{
	u32 v;
	int ret;

	if (!oh || !(oh->_sysc_cache))
		return -EINVAL;

	/* Set then clear the SOFTRESET bit in the cached SYSCONFIG value */
	v = oh->_sysc_cache;
	ret = _set_softreset(oh, &v);
	if (ret)
		goto error;
	_write_sysconfig(v, oh);

	ret = _clear_softreset(oh, &v);
	if (ret)
		goto error;
	_write_sysconfig(v, oh);

error:
	return ret;
}

/**
 * omap_hwmod_lookup - look up a registered omap_hwmod by name
 * @name: name of the omap_hwmod to look up
 *
 * Given a @name of an omap_hwmod, return a pointer to the registered
 * struct omap_hwmod *, or NULL upon error.
 */
struct omap_hwmod *omap_hwmod_lookup(const char *name)
{
	struct omap_hwmod *oh;

	if (!name)
		return NULL;

	oh = _lookup(name);

	return oh;
}

/**
 * omap_hwmod_for_each - call function for each registered omap_hwmod
 * @fn: pointer to a callback function
 * @data: void * data to pass to callback function
 *
 * Call @fn for each registered omap_hwmod, passing @data to each
 * function.  @fn must return 0 for success or any other value for
 * failure.  If @fn returns non-zero, the iteration across omap_hwmods
 * will stop and the non-zero return value will be passed to the
 * caller of omap_hwmod_for_each().  @fn is called with
 * omap_hwmod_for_each() held.
 */
int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
			void *data)
{
	struct omap_hwmod *temp_oh;
	int ret = 0;

	if (!fn)
		return -EINVAL;

	list_for_each_entry(temp_oh, &omap_hwmod_list, node) {
		ret = (*fn)(temp_oh, data);
		if (ret)
			break;
	}

	return ret;
}

/**
 * omap_hwmod_register_links - register an array of hwmod links
 * @ois: pointer to an array of omap_hwmod_ocp_if to register
 *
 * Intended to be called early in boot before the clock framework is
 * initialized.  If @ois is not null, will register all omap_hwmods
 * listed in @ois that are valid for this chip.  Returns -EINVAL if
 * omap_hwmod_init() hasn't been called before calling this function,
 * -ENOMEM if the link memory area can't be allocated, or 0 upon
 * success.
 */
int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois)
{
	int r, i;

	if (!inited)
		return -EINVAL;

	if (!ois)
		return 0;

	if (ois[0] == NULL) /* Empty list */
		return 0;

	/* Register every link; duplicates (-EEXIST) are tolerated silently */
	i = 0;
	do {
		r = _register_link(ois[i]);
		WARN(r && r != -EEXIST,
		     "omap_hwmod: _register_link(%s -> %s) returned %d\n",
		     ois[i]->master->name, ois[i]->slave->name,
		     r);
	} while (ois[++i]);

	return 0;
}

static int __init omap_hwmod_setup_one(const char *oh_name);

/**
 * _ensure_mpu_hwmod_is_setup - ensure the MPU SS hwmod is init'ed and set up
 * @oh: pointer to the hwmod currently being set up (usually not the MPU)
 *
 * If the hwmod data corresponding to the MPU subsystem IP block
 * hasn't been initialized and set up yet, do so now.  This must be
 * done first since sleep dependencies may be added from other hwmods
 * to the MPU.  Intended to be called only by omap_hwmod_setup*().  No
 * return value.
 */
static void __init _ensure_mpu_hwmod_is_setup(struct omap_hwmod *oh)
{
	if (!mpu_oh || mpu_oh->_state == _HWMOD_STATE_UNKNOWN)
		pr_err("omap_hwmod: %s: MPU initiator hwmod %s not yet registered\n",
		       __func__, MPU_INITIATOR_NAME);
	else if (mpu_oh->_state == _HWMOD_STATE_REGISTERED && oh != mpu_oh)
		omap_hwmod_setup_one(MPU_INITIATOR_NAME);
}

/**
 * omap_hwmod_setup_one - set up a single hwmod
 * @oh_name: const char * name of the already-registered hwmod to set up
 *
 * Initialize and set up a single hwmod.  Intended to be used for a
 * small number of early devices, such as the timer IP blocks used for
 * the scheduler clock.  Must be called after omap2_clk_init().
 * Resolves the struct clk names to struct clk pointers for each
 * registered omap_hwmod.  Also calls _setup() on each hwmod.  Returns
 * -EINVAL upon error or 0 upon success.
 */
static int __init omap_hwmod_setup_one(const char *oh_name)
{
	struct omap_hwmod *oh;

	pr_debug("omap_hwmod: %s: %s\n", oh_name, __func__);

	oh = _lookup(oh_name);
	if (!oh) {
		WARN(1, "omap_hwmod: %s: hwmod not yet registered\n", oh_name);
		return -EINVAL;
	}

	/* The MPU subsystem hwmod must be set up before any other hwmod */
	_ensure_mpu_hwmod_is_setup(oh);

	_init(oh, NULL);
	_setup(oh, NULL);

	return 0;
}

/*
 * omap_hwmod_check_one - warn if a legacy (s8) field differs from the
 * ti-sysc-provided (u8) value; negative v1 means "not set", so skip.
 */
static void omap_hwmod_check_one(struct device *dev,
				 const char *name, s8 v1, u8 v2)
{
	if (v1 < 0)
		return;

	if (v1 != v2)
		dev_warn(dev, "%s %d != %d\n", name, v1, v2);
}

/**
 * omap_hwmod_check_sysc - check sysc against platform sysc
 * @dev: struct device
 * @data: module data
 * @sysc_fields: new sysc configuration
 */
static int omap_hwmod_check_sysc(struct device *dev,
				 const struct ti_sysc_module_data *data,
				 struct sysc_regbits *sysc_fields)
{
	const struct sysc_regbits *regbits = data->cap->regbits;

	/* Compare every SYSCONFIG bitfield position against legacy data */
	omap_hwmod_check_one(dev, "dmadisable_shift",
			     regbits->dmadisable_shift,
			     sysc_fields->dmadisable_shift);
	omap_hwmod_check_one(dev, "midle_shift",
			     regbits->midle_shift,
			     sysc_fields->midle_shift);
	omap_hwmod_check_one(dev, "sidle_shift",
			     regbits->sidle_shift,
			     sysc_fields->sidle_shift);
	omap_hwmod_check_one(dev, "clkact_shift",
			     regbits->clkact_shift,
			     sysc_fields->clkact_shift);
	omap_hwmod_check_one(dev, "enwkup_shift",
			     regbits->enwkup_shift,
			     sysc_fields->enwkup_shift);
	omap_hwmod_check_one(dev, "srst_shift",
			     regbits->srst_shift,
			     sysc_fields->srst_shift);
	omap_hwmod_check_one(dev, "autoidle_shift",
			     regbits->autoidle_shift,
			     sysc_fields->autoidle_shift);

	return 0;
}

/**
 * omap_hwmod_init_regbits - init sysconfig specific register bits
 * @dev: struct device
 * @oh: module
 * @data: module data
 * @sysc_fields: new sysc configuration
 */
static int omap_hwmod_init_regbits(struct device *dev, struct omap_hwmod *oh,
				   const struct ti_sysc_module_data *data,
				   struct sysc_regbits **sysc_fields)
{
	/* Map the ti-sysc capability type to a known SYSCONFIG layout */
	switch (data->cap->type) {
	case TI_SYSC_OMAP2:
	case TI_SYSC_OMAP2_TIMER:
		*sysc_fields = &omap_hwmod_sysc_type1;
		break;
	case TI_SYSC_OMAP3_SHAM:
		*sysc_fields =
			&omap3_sham_sysc_fields;
		break;
	case TI_SYSC_OMAP3_AES:
		*sysc_fields = &omap3xxx_aes_sysc_fields;
		break;
	case TI_SYSC_OMAP4:
	case TI_SYSC_OMAP4_TIMER:
		*sysc_fields = &omap_hwmod_sysc_type2;
		break;
	case TI_SYSC_OMAP4_SIMPLE:
		*sysc_fields = &omap_hwmod_sysc_type3;
		break;
	case TI_SYSC_OMAP34XX_SR:
		*sysc_fields = &omap34xx_sr_sysc_fields;
		break;
	case TI_SYSC_OMAP36XX_SR:
		*sysc_fields = &omap36xx_sr_sysc_fields;
		break;
	case TI_SYSC_OMAP4_SR:
		*sysc_fields = &omap36xx_sr_sysc_fields;
		break;
	case TI_SYSC_OMAP4_MCASP:
		*sysc_fields = &omap_hwmod_sysc_type_mcasp;
		break;
	case TI_SYSC_OMAP4_USB_HOST_FS:
		*sysc_fields = &omap_hwmod_sysc_type_usb_host_fs;
		break;
	default:
		*sysc_fields = NULL;
		/* Modules with no legacy sysc_fields need no layout at all */
		if (!oh->class->sysc->sysc_fields)
			return 0;

		dev_err(dev, "sysc_fields not found\n");

		return -EINVAL;
	}

	return omap_hwmod_check_sysc(dev, data, *sysc_fields);
}

/**
 * omap_hwmod_init_reg_offs - initialize sysconfig register offsets
 * @dev: struct device
 * @data: module data
 * @rev_offs: revision register offset
 * @sysc_offs: sysc register offset
 * @syss_offs: syss register offset
 */
static int omap_hwmod_init_reg_offs(struct device *dev,
				    const struct ti_sysc_module_data *data,
				    s32 *rev_offs, s32 *sysc_offs,
				    s32 *syss_offs)
{
	/* -ENODEV marks "no revision register"; negative offsets mean unset */
	*rev_offs = -ENODEV;
	*sysc_offs = 0;
	*syss_offs = 0;

	if (data->offsets[SYSC_REVISION] >= 0)
		*rev_offs = data->offsets[SYSC_REVISION];

	if (data->offsets[SYSC_SYSCONFIG] >= 0)
		*sysc_offs = data->offsets[SYSC_SYSCONFIG];

	if (data->offsets[SYSC_SYSSTATUS] >= 0)
		*syss_offs = data->offsets[SYSC_SYSSTATUS];

	return 0;
}

/**
 * omap_hwmod_init_sysc_flags - initialize sysconfig features
 * @dev: struct device
 * @data: module data
 * @sysc_flags: module configuration
 */
static int omap_hwmod_init_sysc_flags(struct device *dev,
				      const struct ti_sysc_module_data *data,
				      u32 *sysc_flags)
{
	*sysc_flags = 0;

	switch (data->cap->type) {
	case TI_SYSC_OMAP2:
	case TI_SYSC_OMAP2_TIMER:
		/* See SYSC_OMAP2_* in include/dt-bindings/bus/ti-sysc.h */
		if (data->cfg->sysc_val & SYSC_OMAP2_CLOCKACTIVITY)
			*sysc_flags |= SYSC_HAS_CLOCKACTIVITY;
		if (data->cfg->sysc_val & SYSC_OMAP2_EMUFREE)
			*sysc_flags |= SYSC_HAS_EMUFREE;
		if (data->cfg->sysc_val & SYSC_OMAP2_ENAWAKEUP)
			*sysc_flags |= SYSC_HAS_ENAWAKEUP;
		if (data->cfg->sysc_val & SYSC_OMAP2_SOFTRESET)
			*sysc_flags |= SYSC_HAS_SOFTRESET;
		if (data->cfg->sysc_val & SYSC_OMAP2_AUTOIDLE)
			*sysc_flags |= SYSC_HAS_AUTOIDLE;
		break;
	case TI_SYSC_OMAP4:
	case TI_SYSC_OMAP4_TIMER:
		/* See SYSC_OMAP4_* in include/dt-bindings/bus/ti-sysc.h */
		if (data->cfg->sysc_val & SYSC_OMAP4_DMADISABLE)
			*sysc_flags |= SYSC_HAS_DMADISABLE;
		if (data->cfg->sysc_val & SYSC_OMAP4_FREEEMU)
			*sysc_flags |= SYSC_HAS_EMUFREE;
		if (data->cfg->sysc_val & SYSC_OMAP4_SOFTRESET)
			*sysc_flags |= SYSC_HAS_SOFTRESET;
		break;
	case TI_SYSC_OMAP34XX_SR:
	case TI_SYSC_OMAP36XX_SR:
		/* See SYSC_OMAP3_SR_* in include/dt-bindings/bus/ti-sysc.h */
		if (data->cfg->sysc_val & SYSC_OMAP3_SR_ENAWAKEUP)
			*sysc_flags |= SYSC_HAS_ENAWAKEUP;
		break;
	default:
		/* Fall back to deriving capabilities from the bitfield shifts */
		if (data->cap->regbits->emufree_shift >= 0)
			*sysc_flags |= SYSC_HAS_EMUFREE;
		if (data->cap->regbits->enwkup_shift >= 0)
			*sysc_flags |= SYSC_HAS_ENAWAKEUP;
		if (data->cap->regbits->srst_shift >= 0)
			*sysc_flags |= SYSC_HAS_SOFTRESET;
		if (data->cap->regbits->autoidle_shift >= 0)
			*sysc_flags |= SYSC_HAS_AUTOIDLE;
		break;
	}

	if (data->cap->regbits->midle_shift >= 0 &&
	    data->cfg->midlemodes)
		*sysc_flags |= SYSC_HAS_MIDLEMODE;

	if (data->cap->regbits->sidle_shift >= 0 &&
	    data->cfg->sidlemodes)
		*sysc_flags |= SYSC_HAS_SIDLEMODE;

	if (data->cfg->quirks & SYSC_QUIRK_UNCACHED)
		*sysc_flags |= SYSC_NO_CACHE;
	if (data->cfg->quirks & SYSC_QUIRK_RESET_STATUS)
		*sysc_flags |= SYSC_HAS_RESET_STATUS;

	/* Bit 0 of syss_mask indicates a SYSSTATUS reset-done bit */
	if (data->cfg->syss_mask & 1)
		*sysc_flags |= SYSS_HAS_RESET_STATUS;

	return 0;
}

/**
 * omap_hwmod_init_idlemodes - initialize module idle modes
 * @dev: struct device
 * @data: module data
 * @idlemodes: module supported idle modes
 */
static int omap_hwmod_init_idlemodes(struct device *dev,
				     const struct ti_sysc_module_data *data,
				     u32 *idlemodes)
{
	*idlemodes = 0;

	if
 (data->cfg->midlemodes & BIT(SYSC_IDLE_FORCE))
		*idlemodes |= MSTANDBY_FORCE;
	if (data->cfg->midlemodes & BIT(SYSC_IDLE_NO))
		*idlemodes |= MSTANDBY_NO;
	if (data->cfg->midlemodes & BIT(SYSC_IDLE_SMART))
		*idlemodes |= MSTANDBY_SMART;
	if (data->cfg->midlemodes & BIT(SYSC_IDLE_SMART_WKUP))
		*idlemodes |= MSTANDBY_SMART_WKUP;

	if (data->cfg->sidlemodes & BIT(SYSC_IDLE_FORCE))
		*idlemodes |= SIDLE_FORCE;
	if (data->cfg->sidlemodes & BIT(SYSC_IDLE_NO))
		*idlemodes |= SIDLE_NO;
	if (data->cfg->sidlemodes & BIT(SYSC_IDLE_SMART))
		*idlemodes |= SIDLE_SMART;
	if (data->cfg->sidlemodes & BIT(SYSC_IDLE_SMART_WKUP))
		*idlemodes |= SIDLE_SMART_WKUP;

	return 0;
}

/**
 * omap_hwmod_check_module - check new module against platform data
 * @dev: struct device
 * @oh: module
 * @data: new module data
 * @sysc_fields: sysc register bits
 * @rev_offs: revision register offset
 * @sysc_offs: sysconfig register offset
 * @syss_offs: sysstatus register offset
 * @sysc_flags: sysc specific flags
 * @idlemodes: sysc supported idlemodes
 */
static int omap_hwmod_check_module(struct device *dev,
				   struct omap_hwmod *oh,
				   const struct ti_sysc_module_data *data,
				   struct sysc_regbits *sysc_fields,
				   s32 rev_offs, s32 sysc_offs,
				   s32 syss_offs, u32 sysc_flags,
				   u32 idlemodes)
{
	/* -ENODEV means legacy data exists but has no sysconfig: allocate new */
	if (!oh->class->sysc)
		return -ENODEV;

	/* Warn on each mismatch between ti-sysc data and legacy hwmod data */
	if (oh->class->sysc->sysc_fields &&
	    sysc_fields != oh->class->sysc->sysc_fields)
		dev_warn(dev, "sysc_fields mismatch\n");

	if (rev_offs != oh->class->sysc->rev_offs)
		dev_warn(dev, "rev_offs %08x != %08x\n", rev_offs,
			 oh->class->sysc->rev_offs);

	if (sysc_offs != oh->class->sysc->sysc_offs)
		dev_warn(dev, "sysc_offs %08x != %08x\n", sysc_offs,
			 oh->class->sysc->sysc_offs);

	if (syss_offs != oh->class->sysc->syss_offs)
		dev_warn(dev, "syss_offs %08x != %08x\n", syss_offs,
			 oh->class->sysc->syss_offs);

	if (sysc_flags != oh->class->sysc->sysc_flags)
		dev_warn(dev, "sysc_flags %08x != %08x\n", sysc_flags,
			 oh->class->sysc->sysc_flags);

	if (idlemodes != oh->class->sysc->idlemodes)
		dev_warn(dev, "idlemodes %08x != %08x\n",
			 idlemodes, oh->class->sysc->idlemodes);

	if (data->cfg->srst_udelay != oh->class->sysc->srst_udelay)
		dev_warn(dev, "srst_udelay %i != %i\n",
			 data->cfg->srst_udelay,
			 oh->class->sysc->srst_udelay);

	return 0;
}

/**
 * omap_hwmod_allocate_module - allocate new module
 * @dev: struct device
 * @oh: module
 * @sysc_fields: sysc register bits
 * @clockdomain: clockdomain
 * @rev_offs: revision register offset
 * @sysc_offs: sysconfig register offset
 * @syss_offs: sysstatus register offset
 * @sysc_flags: sysc specific flags
 * @idlemodes: sysc supported idlemodes
 *
 * Note that the allocations here cannot use devm as ti-sysc can rebind.
 */
static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
				      const struct ti_sysc_module_data *data,
				      struct sysc_regbits *sysc_fields,
				      struct clockdomain *clkdm,
				      s32 rev_offs, s32 sysc_offs,
				      s32 syss_offs, u32 sysc_flags,
				      u32 idlemodes)
{
	struct omap_hwmod_class_sysconfig *sysc;
	struct omap_hwmod_class *class = NULL;
	struct omap_hwmod_ocp_if *oi = NULL;
	void __iomem *regs = NULL;
	unsigned long flags;

	sysc = kzalloc(sizeof(*sysc), GFP_KERNEL);
	if (!sysc)
		return -ENOMEM;

	sysc->sysc_fields = sysc_fields;
	sysc->rev_offs = rev_offs;
	sysc->sysc_offs = sysc_offs;
	sysc->syss_offs = syss_offs;
	sysc->sysc_flags = sysc_flags;
	sysc->idlemodes = idlemodes;
	sysc->srst_udelay = data->cfg->srst_udelay;

	if (!oh->_mpu_rt_va) {
		regs = ioremap(data->module_pa,
			       data->module_size);
		if (!regs)
			goto out_free_sysc;
	}

	/*
	 * We may need a new oh->class as the other devices in the same class
	 * may not yet have ioremapped their registers.
	 */
	if (oh->class->name && strcmp(oh->class->name, data->name)) {
		class = kmemdup(oh->class, sizeof(*oh->class), GFP_KERNEL);
		if (!class)
			goto out_unmap;
	}

	if (list_empty(&oh->slave_ports)) {
		oi = kzalloc(sizeof(*oi), GFP_KERNEL);
		if (!oi)
			goto out_free_class;

		/*
		 * Note that we assume interconnect interface clocks will be
		 * managed by the interconnect driver for OCPIF_SWSUP_IDLE case
		 * on omap24xx and omap3.
		 */
		oi->slave = oh;
		oi->user = OCP_USER_MPU | OCP_USER_SDMA;
	}

	/* Commit all new data to the hwmod atomically w.r.t. its lock */
	spin_lock_irqsave(&oh->_lock, flags);
	if (regs)
		oh->_mpu_rt_va = regs;
	if (class)
		oh->class = class;
	oh->class->sysc = sysc;
	if (oi)
		_add_link(oi);
	if (clkdm)
		oh->clkdm = clkdm;
	oh->_state = _HWMOD_STATE_INITIALIZED;
	oh->_postsetup_state = _HWMOD_STATE_DEFAULT;
	_setup(oh, NULL);
	spin_unlock_irqrestore(&oh->_lock, flags);

	return 0;

out_free_class:
	kfree(class);
out_unmap:
	iounmap(regs);
out_free_sysc:
	kfree(sysc);
	return -ENOMEM;
}

/* Module-name-prefix -> reset-function quirks, omap24xx only */
static const struct omap_hwmod_reset omap24xx_reset_quirks[] = {
	{ .match = "msdi", .len = 4, .reset = omap_msdi_reset, },
};

/* Module-name-prefix -> reset-function quirks, all OMAPs */
static const struct omap_hwmod_reset omap_reset_quirks[] = {
	{ .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
	{ .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
	{ .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
	{ .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
};

/*
 * omap_hwmod_init_reset_quirk - install a custom reset handler on oh->class
 * if the module name matches one of the @quirks entries (prefix match of
 * @quirk->len characters).  First match wins.
 */
static void
omap_hwmod_init_reset_quirk(struct device *dev, struct omap_hwmod *oh,
			    const struct ti_sysc_module_data *data,
			    const struct omap_hwmod_reset *quirks,
			    int quirks_sz)
{
	const struct omap_hwmod_reset *quirk;
	int i;

	for (i = 0; i < quirks_sz; i++) {
		quirk = &quirks[i];
		if (!strncmp(data->name, quirk->match, quirk->len)) {
			oh->class->reset = quirk->reset;

			return;
		}
	}
}

/*
 * omap_hwmod_init_reset_quirks - apply the SoC-specific and the generic
 * reset quirk tables to a newly-created hwmod class.
 */
static void
omap_hwmod_init_reset_quirks(struct device *dev, struct omap_hwmod *oh,
			     const struct ti_sysc_module_data *data)
{
	if (soc_is_omap24xx())
		omap_hwmod_init_reset_quirk(dev, oh, data,
					    omap24xx_reset_quirks,
					    ARRAY_SIZE(omap24xx_reset_quirks));

	omap_hwmod_init_reset_quirk(dev, oh, data, omap_reset_quirks,
				    ARRAY_SIZE(omap_reset_quirks));
}

/**
 * omap_hwmod_init_module - initialize new module
 * @dev: struct device
 * @data: module data
 * @cookie: cookie for the caller to use for later calls
 */
int omap_hwmod_init_module(struct device *dev,
			   const struct ti_sysc_module_data *data,
			   struct ti_sysc_cookie *cookie)
{
	struct omap_hwmod *oh;
	struct sysc_regbits *sysc_fields;
	s32 rev_offs, sysc_offs, syss_offs;
	u32 sysc_flags, idlemodes;
	int error;

	if (!dev || !data || !data->name || !cookie)
		return -EINVAL;

	oh = _lookup(data->name);
	if (!oh) {
		/* No legacy hwmod data for this module: create one on the fly */
		oh = kzalloc(sizeof(*oh), GFP_KERNEL);
		if (!oh)
			return -ENOMEM;

		oh->name = data->name;
		oh->_state = _HWMOD_STATE_UNKNOWN;
		lockdep_register_key(&oh->hwmod_key);

		/* Unused, can be handled by PRM driver handling resets */
		oh->prcm.omap4.flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT;

		oh->class = kzalloc(sizeof(*oh->class), GFP_KERNEL);
		if (!oh->class) {
			kfree(oh);
			return -ENOMEM;
		}

		omap_hwmod_init_reset_quirks(dev, oh, data);

		oh->class->name = data->name;
		mutex_lock(&list_lock);
		/* NOTE(review): _register() result is discarded here; on failure
		 * oh leaks and stays unregistered — confirm intent upstream */
		error = _register(oh);
		mutex_unlock(&list_lock);
	}

	cookie->data = oh;

	error = omap_hwmod_init_regbits(dev, oh, data, &sysc_fields);
	if (error)
		return error;

	error = omap_hwmod_init_reg_offs(dev, data, &rev_offs,
					 &sysc_offs, &syss_offs);
	if (error)
		return error;

	error = omap_hwmod_init_sysc_flags(dev, data, &sysc_flags);
	if (error)
		return error;

	error = omap_hwmod_init_idlemodes(dev, data, &idlemodes);
	if (error)
		return error;

	/* Translate ti-sysc quirk bits into the equivalent hwmod flags */
	if (data->cfg->quirks & SYSC_QUIRK_NO_IDLE)
		oh->flags |= HWMOD_NO_IDLE;
	if (data->cfg->quirks & SYSC_QUIRK_NO_IDLE_ON_INIT)
		oh->flags |= HWMOD_INIT_NO_IDLE;
	if (data->cfg->quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
		oh->flags |= HWMOD_INIT_NO_RESET;
	if (data->cfg->quirks & SYSC_QUIRK_USE_CLOCKACT)
		oh->flags |= HWMOD_SET_DEFAULT_CLOCKACT;
	if (data->cfg->quirks & SYSC_QUIRK_SWSUP_SIDLE)
		oh->flags |= HWMOD_SWSUP_SIDLE;
	if (data->cfg->quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT)
		oh->flags |= HWMOD_SWSUP_SIDLE_ACT;
	if (data->cfg->quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
		oh->flags |= HWMOD_SWSUP_MSTANDBY;
	if (data->cfg->quirks & SYSC_QUIRK_CLKDM_NOAUTO)
		oh->flags |= HWMOD_CLKDM_NOAUTO;

	/*
	 * check_module() returns 0 when legacy sysconfig data already exists
	 * and matches; only when it reports -ENODEV (no legacy sysc) do we
	 * need to allocate a fresh module description below.
	 */
	error = omap_hwmod_check_module(dev, oh, data, sysc_fields,
					rev_offs, sysc_offs, syss_offs,
					sysc_flags, idlemodes);
	if (!error)
		return error;

	return omap_hwmod_allocate_module(dev, oh, data, sysc_fields,
					  cookie->clkdm, rev_offs,
					  sysc_offs, syss_offs,
sysc_flags, idlemodes); } /** * omap_hwmod_setup_earlycon_flags - set up flags for early console * * Enable DEBUG_OMAPUART_FLAGS for uart hwmod that is being used as * early concole so that hwmod core doesn't reset and keep it in idle * that specific uart. */ #ifdef CONFIG_SERIAL_EARLYCON static void __init omap_hwmod_setup_earlycon_flags(void) { struct device_node *np; struct omap_hwmod *oh; const char *uart; np = of_find_node_by_path("/chosen"); if (np) { uart = of_get_property(np, "stdout-path", NULL); if (uart) { np = of_find_node_by_path(uart); if (np) { uart = of_get_property(np, "ti,hwmods", NULL); oh = omap_hwmod_lookup(uart); if (!oh) { uart = of_get_property(np->parent, "ti,hwmods", NULL); oh = omap_hwmod_lookup(uart); } if (oh) oh->flags |= DEBUG_OMAPUART_FLAGS; } } } } #endif /** * omap_hwmod_setup_all - set up all registered IP blocks * * Initialize and set up all IP blocks registered with the hwmod code. * Must be called after omap2_clk_init(). Resolves the struct clk * names to struct clk pointers for each registered omap_hwmod. Also * calls _setup() on each hwmod. Returns 0 upon success. */ static int __init omap_hwmod_setup_all(void) { if (!inited) return 0; _ensure_mpu_hwmod_is_setup(NULL); omap_hwmod_for_each(_init, NULL); #ifdef CONFIG_SERIAL_EARLYCON omap_hwmod_setup_earlycon_flags(); #endif omap_hwmod_for_each(_setup, NULL); return 0; } omap_postcore_initcall(omap_hwmod_setup_all); /** * omap_hwmod_enable - enable an omap_hwmod * @oh: struct omap_hwmod * * * Enable an omap_hwmod @oh. Intended to be called by omap_device_enable(). * Returns -EINVAL on error or passes along the return value from _enable(). */ int omap_hwmod_enable(struct omap_hwmod *oh) { int r; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); r = _enable(oh); spin_unlock_irqrestore(&oh->_lock, flags); return r; } /** * omap_hwmod_idle - idle an omap_hwmod * @oh: struct omap_hwmod * * * Idle an omap_hwmod @oh. 
Intended to be called by omap_device_idle(). * Returns -EINVAL on error or passes along the return value from _idle(). */ int omap_hwmod_idle(struct omap_hwmod *oh) { int r; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); r = _idle(oh); spin_unlock_irqrestore(&oh->_lock, flags); return r; } /** * omap_hwmod_shutdown - shutdown an omap_hwmod * @oh: struct omap_hwmod * * * Shutdown an omap_hwmod @oh. Intended to be called by * omap_device_shutdown(). Returns -EINVAL on error or passes along * the return value from _shutdown(). */ int omap_hwmod_shutdown(struct omap_hwmod *oh) { int r; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); r = _shutdown(oh); spin_unlock_irqrestore(&oh->_lock, flags); return r; } /* * IP block data retrieval functions */ /** * omap_hwmod_get_mpu_rt_va - return the module's base address (for the MPU) * @oh: struct omap_hwmod * * * Returns the virtual address corresponding to the beginning of the * module's register target, in the address range that is intended to * be used by the MPU. Returns the virtual address upon success or NULL * upon error. */ void __iomem *omap_hwmod_get_mpu_rt_va(struct omap_hwmod *oh) { if (!oh) return NULL; if (oh->_int_flags & _HWMOD_NO_MPU_PORT) return NULL; if (oh->_state == _HWMOD_STATE_UNKNOWN) return NULL; return oh->_mpu_rt_va; } /* * XXX what about functions for drivers to save/restore ocp_sysconfig * for context save/restore operations? */ /** * omap_hwmod_assert_hardreset - assert the HW reset line of submodules * contained in the hwmod module. * @oh: struct omap_hwmod * * @name: name of the reset line to lookup and assert * * Some IP like dsp, ipu or iva contain processor that require * an HW reset line to be assert / deassert in order to enable fully * the IP. Returns -EINVAL if @oh is null or if the operation is not * yet supported on this OMAP; otherwise, passes along the return value * from _assert_hardreset(). 
*/ int omap_hwmod_assert_hardreset(struct omap_hwmod *oh, const char *name) { int ret; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); ret = _assert_hardreset(oh, name); spin_unlock_irqrestore(&oh->_lock, flags); return ret; } /** * omap_hwmod_deassert_hardreset - deassert the HW reset line of submodules * contained in the hwmod module. * @oh: struct omap_hwmod * * @name: name of the reset line to look up and deassert * * Some IP like dsp, ipu or iva contain processor that require * an HW reset line to be assert / deassert in order to enable fully * the IP. Returns -EINVAL if @oh is null or if the operation is not * yet supported on this OMAP; otherwise, passes along the return value * from _deassert_hardreset(). */ int omap_hwmod_deassert_hardreset(struct omap_hwmod *oh, const char *name) { int ret; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); ret = _deassert_hardreset(oh, name); spin_unlock_irqrestore(&oh->_lock, flags); return ret; } /** * omap_hwmod_for_each_by_class - call @fn for each hwmod of class @classname * @classname: struct omap_hwmod_class name to search for * @fn: callback function pointer to call for each hwmod in class @classname * @user: arbitrary context data to pass to the callback function * * For each omap_hwmod of class @classname, call @fn. * If the callback function returns something other than * zero, the iterator is terminated, and the callback function's return * value is passed back to the caller. Returns 0 upon success, -EINVAL * if @classname or @fn are NULL, or passes back the error code from @fn. 
*/ int omap_hwmod_for_each_by_class(const char *classname, int (*fn)(struct omap_hwmod *oh, void *user), void *user) { struct omap_hwmod *temp_oh; int ret = 0; if (!classname || !fn) return -EINVAL; pr_debug("omap_hwmod: %s: looking for modules of class %s\n", __func__, classname); list_for_each_entry(temp_oh, &omap_hwmod_list, node) { if (!strcmp(temp_oh->class->name, classname)) { pr_debug("omap_hwmod: %s: %s: calling callback fn\n", __func__, temp_oh->name); ret = (*fn)(temp_oh, user); if (ret) break; } } if (ret) pr_debug("omap_hwmod: %s: iterator terminated early: %d\n", __func__, ret); return ret; } /** * omap_hwmod_set_postsetup_state - set the post-_setup() state for this hwmod * @oh: struct omap_hwmod * * @state: state that _setup() should leave the hwmod in * * Sets the hwmod state that @oh will enter at the end of _setup() * (called by omap_hwmod_setup_*()). See also the documentation * for _setup_postsetup(), above. Returns 0 upon success or * -EINVAL if there is a problem with the arguments or if the hwmod is * in the wrong state. */ int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state) { int ret; unsigned long flags; if (!oh) return -EINVAL; if (state != _HWMOD_STATE_DISABLED && state != _HWMOD_STATE_ENABLED && state != _HWMOD_STATE_IDLE) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); if (oh->_state != _HWMOD_STATE_REGISTERED) { ret = -EINVAL; goto ohsps_unlock; } oh->_postsetup_state = state; ret = 0; ohsps_unlock: spin_unlock_irqrestore(&oh->_lock, flags); return ret; } /** * omap_hwmod_init - initialize the hwmod code * * Sets up some function pointers needed by the hwmod code to operate on the * currently-booted SoC. Intended to be called once during kernel init * before any hwmods are registered. No return value. 
*/ void __init omap_hwmod_init(void) { if (cpu_is_omap24xx()) { soc_ops.wait_target_ready = _omap2xxx_3xxx_wait_target_ready; soc_ops.assert_hardreset = _omap2_assert_hardreset; soc_ops.deassert_hardreset = _omap2_deassert_hardreset; soc_ops.is_hardreset_asserted = _omap2_is_hardreset_asserted; } else if (cpu_is_omap34xx()) { soc_ops.wait_target_ready = _omap2xxx_3xxx_wait_target_ready; soc_ops.assert_hardreset = _omap2_assert_hardreset; soc_ops.deassert_hardreset = _omap2_deassert_hardreset; soc_ops.is_hardreset_asserted = _omap2_is_hardreset_asserted; soc_ops.init_clkdm = _init_clkdm; } else if (cpu_is_omap44xx() || soc_is_omap54xx() || soc_is_dra7xx()) { soc_ops.enable_module = _omap4_enable_module; soc_ops.disable_module = _omap4_disable_module; soc_ops.wait_target_ready = _omap4_wait_target_ready; soc_ops.assert_hardreset = _omap4_assert_hardreset; soc_ops.deassert_hardreset = _omap4_deassert_hardreset; soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted; soc_ops.init_clkdm = _init_clkdm; soc_ops.update_context_lost = _omap4_update_context_lost; soc_ops.get_context_lost = _omap4_get_context_lost; soc_ops.disable_direct_prcm = _omap4_disable_direct_prcm; soc_ops.xlate_clkctrl = _omap4_xlate_clkctrl; } else if (cpu_is_ti814x() || cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) { soc_ops.enable_module = _omap4_enable_module; soc_ops.disable_module = _omap4_disable_module; soc_ops.wait_target_ready = _omap4_wait_target_ready; soc_ops.assert_hardreset = _omap4_assert_hardreset; soc_ops.deassert_hardreset = _am33xx_deassert_hardreset; soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted; soc_ops.init_clkdm = _init_clkdm; soc_ops.disable_direct_prcm = _omap4_disable_direct_prcm; soc_ops.xlate_clkctrl = _omap4_xlate_clkctrl; } else { WARN(1, "omap_hwmod: unknown SoC type\n"); } _init_clkctrl_providers(); inited = true; }
linux-master
arch/arm/mach-omap2/omap_hwmod.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-omap2/usb-tusb6010.c * * Copyright (C) 2006 Nokia Corporation */ #include <linux/err.h> #include <linux/string.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/export.h> #include <linux/platform_data/usb-omap.h> #include <linux/usb/musb.h> #include "usb-tusb6010.h" #include "gpmc.h" static u8 async_cs, sync_cs; static unsigned refclk_psec; static struct gpmc_settings tusb_async = { .wait_on_read = true, .wait_on_write = true, .device_width = GPMC_DEVWIDTH_16BIT, .mux_add_data = GPMC_MUX_AD, }; static struct gpmc_settings tusb_sync = { .burst_read = true, .burst_write = true, .sync_read = true, .sync_write = true, .wait_on_read = true, .wait_on_write = true, .burst_len = GPMC_BURST_16, .device_width = GPMC_DEVWIDTH_16BIT, .mux_add_data = GPMC_MUX_AD, }; /* NOTE: timings are from tusb 6010 datasheet Rev 1.8, 12-Sept 2006 */ static int tusb_set_async_mode(unsigned sysclk_ps) { struct gpmc_device_timings dev_t; struct gpmc_timings t; unsigned t_acsnh_advnh = sysclk_ps + 3000; memset(&dev_t, 0, sizeof(dev_t)); dev_t.t_ceasu = 8 * 1000; dev_t.t_avdasu = t_acsnh_advnh - 7000; dev_t.t_ce_avd = 1000; dev_t.t_avdp_r = t_acsnh_advnh; dev_t.t_oeasu = t_acsnh_advnh + 1000; dev_t.t_oe = 300; dev_t.t_cez_r = 7000; dev_t.t_cez_w = dev_t.t_cez_r; dev_t.t_avdp_w = t_acsnh_advnh; dev_t.t_weasu = t_acsnh_advnh + 1000; dev_t.t_wpl = 300; dev_t.cyc_aavdh_we = 1; gpmc_calc_timings(&t, &tusb_async, &dev_t); return gpmc_cs_set_timings(async_cs, &t, &tusb_async); } static int tusb_set_sync_mode(unsigned sysclk_ps) { struct gpmc_device_timings dev_t; struct gpmc_timings t; unsigned t_scsnh_advnh = sysclk_ps + 3000; memset(&dev_t, 0, sizeof(dev_t)); dev_t.clk = 11100; dev_t.t_bacc = 1000; dev_t.t_ces = 1000; dev_t.t_ceasu = 8 * 1000; dev_t.t_avdasu = t_scsnh_advnh - 7000; dev_t.t_ce_avd = 1000; dev_t.t_avdp_r = t_scsnh_advnh; dev_t.cyc_aavdh_oe = 3; 
dev_t.cyc_oe = 5; dev_t.t_ce_rdyz = 7000; dev_t.t_avdp_w = t_scsnh_advnh; dev_t.cyc_aavdh_we = 3; dev_t.cyc_wpl = 6; gpmc_calc_timings(&t, &tusb_sync, &dev_t); return gpmc_cs_set_timings(sync_cs, &t, &tusb_sync); } /* tusb driver calls this when it changes the chip's clocking */ static int tusb6010_platform_retime(unsigned is_refclk) { static const char error[] = KERN_ERR "tusb6010 %s retime error %d\n"; unsigned sysclk_ps; int status; if (!refclk_psec) return -ENODEV; sysclk_ps = is_refclk ? refclk_psec : TUSB6010_OSCCLK_60; status = tusb_set_async_mode(sysclk_ps); if (status < 0) { printk(error, "async", status); goto done; } status = tusb_set_sync_mode(sysclk_ps); if (status < 0) printk(error, "sync", status); done: return status; } static struct resource tusb_resources[] = { /* Order is significant! The start/end fields * are updated during setup.. */ { /* Asynchronous access */ .flags = IORESOURCE_MEM, }, { /* Synchronous access */ .flags = IORESOURCE_MEM, }, }; static u64 tusb_dmamask = ~(u32)0; static struct platform_device tusb_device = { .name = "musb-tusb", .id = -1, .dev = { .dma_mask = &tusb_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(tusb_resources), .resource = tusb_resources, }; /* this may be called only from board-*.c setup code */ int __init tusb6010_setup_interface(struct musb_hdrc_platform_data *data, unsigned int ps_refclk, unsigned int waitpin, unsigned int async, unsigned int sync, unsigned int dmachan) { int status; static char error[] __initdata = KERN_ERR "tusb6010 init error %d, %d\n"; /* ASYNC region, primarily for PIO */ status = gpmc_cs_request(async, SZ_16M, (unsigned long *) &tusb_resources[0].start); if (status < 0) { printk(error, 1, status); return status; } tusb_resources[0].end = tusb_resources[0].start + 0x9ff; tusb_async.wait_pin = waitpin; async_cs = async; status = gpmc_cs_program_settings(async_cs, &tusb_async); if (status < 0) return status; /* SYNC region, primarily for DMA */ status = 
gpmc_cs_request(sync, SZ_16M, (unsigned long *) &tusb_resources[1].start); if (status < 0) { printk(error, 2, status); return status; } tusb_resources[1].end = tusb_resources[1].start + 0x9ff; tusb_sync.wait_pin = waitpin; sync_cs = sync; status = gpmc_cs_program_settings(sync_cs, &tusb_sync); if (status < 0) return status; /* set up memory timings ... can speed them up later */ if (!ps_refclk) { printk(error, 4, status); return -ENODEV; } refclk_psec = ps_refclk; status = tusb6010_platform_retime(1); if (status < 0) { printk(error, 5, status); return status; } /* finish device setup ... */ if (!data) { printk(error, 6, status); return -ENODEV; } tusb_device.dev.platform_data = data; /* so far so good ... register the device */ status = platform_device_register(&tusb_device); if (status < 0) { printk(error, 7, status); return status; } return 0; }
linux-master
arch/arm/mach-omap2/usb-tusb6010.c
// SPDX-License-Identifier: GPL-2.0 /* * AM33XX Arch Power Management Routines * * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ * Dave Gerlach */ #include <linux/cpuidle.h> #include <linux/platform_data/pm33xx.h> #include <linux/suspend.h> #include <asm/cpuidle.h> #include <asm/smp_scu.h> #include <asm/suspend.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/cpu.h> #include <linux/platform_data/gpio-omap.h> #include <linux/wkup_m3_ipc.h> #include <linux/of.h> #include <linux/rtc.h> #include "cm33xx.h" #include "common.h" #include "control.h" #include "clockdomain.h" #include "iomap.h" #include "pm.h" #include "powerdomain.h" #include "prm33xx.h" #include "soc.h" #include "sram.h" #include "omap-secure.h" static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm, *mpu_pwrdm; static struct clockdomain *gfx_l4ls_clkdm; static void __iomem *scu_base; static int (*idle_fn)(u32 wfi_flags); struct amx3_idle_state { int wfi_flags; }; static struct amx3_idle_state *idle_states; static int am43xx_map_scu(void) { scu_base = ioremap(scu_a9_get_base(), SZ_256); if (!scu_base) return -ENOMEM; return 0; } static int am33xx_check_off_mode_enable(void) { if (enable_off_mode) pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n"); /* off mode not supported on am335x so return 0 always */ return 0; } static int am43xx_check_off_mode_enable(void) { /* * Check for am437x-gp-evm which has the right Hardware design to * support this mode reliably. 
*/ if (of_machine_is_compatible("ti,am437x-gp-evm") && enable_off_mode) return enable_off_mode; else if (enable_off_mode) pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n"); return 0; } static int amx3_common_init(int (*idle)(u32 wfi_flags)) { gfx_pwrdm = pwrdm_lookup("gfx_pwrdm"); per_pwrdm = pwrdm_lookup("per_pwrdm"); mpu_pwrdm = pwrdm_lookup("mpu_pwrdm"); if ((!gfx_pwrdm) || (!per_pwrdm) || (!mpu_pwrdm)) return -ENODEV; (void)clkdm_for_each(omap_pm_clkdms_setup, NULL); /* CEFUSE domain can be turned off post bootup */ cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm"); if (!cefuse_pwrdm) pr_err("PM: Failed to get cefuse_pwrdm\n"); else if (omap_type() != OMAP2_DEVICE_TYPE_GP) pr_info("PM: Leaving EFUSE power domain active\n"); else omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF); idle_fn = idle; return 0; } static int am33xx_suspend_init(int (*idle)(u32 wfi_flags)) { gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm"); if (!gfx_l4ls_clkdm) { pr_err("PM: Cannot lookup gfx_l4ls_clkdm clockdomains\n"); return -ENODEV; } return amx3_common_init(idle); } static int am43xx_suspend_init(int (*idle)(u32 wfi_flags)) { int ret = 0; ret = am43xx_map_scu(); if (ret) { pr_err("PM: Could not ioremap SCU\n"); return ret; } ret = amx3_common_init(idle); return ret; } static int amx3_suspend_deinit(void) { idle_fn = NULL; return 0; } static void amx3_pre_suspend_common(void) { omap_set_pwrdm_state(gfx_pwrdm, PWRDM_POWER_OFF); } static void amx3_post_suspend_common(void) { int status; /* * Because gfx_pwrdm is the only one under MPU control, * comment on transition status */ status = pwrdm_read_pwrst(gfx_pwrdm); if (status != PWRDM_POWER_OFF) pr_err("PM: GFX domain did not transition: %x\n", status); } static int am33xx_suspend(unsigned int state, int (*fn)(unsigned long), unsigned long args) { int ret = 0; amx3_pre_suspend_common(); ret = cpu_suspend(args, fn); amx3_post_suspend_common(); /* * BUG: GFX_L4LS clock domain needs to be woken up 
to * ensure thet L4LS clock domain does not get stuck in * transition. If that happens L3 module does not get * disabled, thereby leading to PER power domain * transition failing */ clkdm_wakeup(gfx_l4ls_clkdm); clkdm_sleep(gfx_l4ls_clkdm); return ret; } static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long), unsigned long args) { int ret = 0; /* Suspend secure side on HS devices */ if (omap_type() != OMAP2_DEVICE_TYPE_GP) { if (optee_available) omap_smccc_smc(AM43xx_PPA_SVC_PM_SUSPEND, 0); else omap_secure_dispatcher(AM43xx_PPA_SVC_PM_SUSPEND, FLAG_START_CRITICAL, 0, 0, 0, 0, 0); } amx3_pre_suspend_common(); scu_power_mode(scu_base, SCU_PM_POWEROFF); ret = cpu_suspend(args, fn); scu_power_mode(scu_base, SCU_PM_NORMAL); if (!am43xx_check_off_mode_enable()) amx3_post_suspend_common(); /* * Resume secure side on HS devices. * * Note that even on systems with OP-TEE available this resume call is * issued to the ROM. This is because upon waking from suspend the ROM * is restored as the secure monitor. On systems with OP-TEE ROM will * restore OP-TEE during this call. 
*/ if (omap_type() != OMAP2_DEVICE_TYPE_GP) omap_secure_dispatcher(AM43xx_PPA_SVC_PM_RESUME, FLAG_START_CRITICAL, 0, 0, 0, 0, 0); return ret; } static int am33xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args) { int ret = 0; if (omap_irq_pending() || need_resched()) return ret; ret = cpu_suspend(args, fn); return ret; } static int am43xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args) { int ret = 0; if (!scu_base) return 0; scu_power_mode(scu_base, SCU_PM_DORMANT); ret = cpu_suspend(args, fn); scu_power_mode(scu_base, SCU_PM_NORMAL); return ret; } static void amx3_begin_suspend(void) { cpu_idle_poll_ctrl(true); } static void amx3_finish_suspend(void) { cpu_idle_poll_ctrl(false); } static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void) { if (soc_is_am33xx()) return &am33xx_pm_sram; else if (soc_is_am437x()) return &am43xx_pm_sram; else return NULL; } static void am43xx_save_context(void) { } static void am33xx_save_context(void) { omap_intc_save_context(); } static void am33xx_restore_context(void) { omap_intc_restore_context(); } static void am43xx_restore_context(void) { /* * HACK: restore dpll_per_clkdcoldo register contents, to avoid * breaking suspend-resume */ writel_relaxed(0x0, AM33XX_L4_WK_IO_ADDRESS(0x44df2e14)); } static struct am33xx_pm_platform_data am33xx_ops = { .init = am33xx_suspend_init, .deinit = amx3_suspend_deinit, .soc_suspend = am33xx_suspend, .cpu_suspend = am33xx_cpu_suspend, .begin_suspend = amx3_begin_suspend, .finish_suspend = amx3_finish_suspend, .get_sram_addrs = amx3_get_sram_addrs, .save_context = am33xx_save_context, .restore_context = am33xx_restore_context, .check_off_mode_enable = am33xx_check_off_mode_enable, }; static struct am33xx_pm_platform_data am43xx_ops = { .init = am43xx_suspend_init, .deinit = amx3_suspend_deinit, .soc_suspend = am43xx_suspend, .cpu_suspend = am43xx_cpu_suspend, .begin_suspend = amx3_begin_suspend, .finish_suspend = amx3_finish_suspend, .get_sram_addrs = amx3_get_sram_addrs, 
.save_context = am43xx_save_context, .restore_context = am43xx_restore_context, .check_off_mode_enable = am43xx_check_off_mode_enable, }; static struct am33xx_pm_platform_data *am33xx_pm_get_pdata(void) { if (soc_is_am33xx()) return &am33xx_ops; else if (soc_is_am437x()) return &am43xx_ops; else return NULL; } #ifdef CONFIG_SUSPEND /* * Block system suspend initially. Later on pm33xx sets up it's own * platform_suspend_ops after probe. That depends also on loaded * wkup_m3_ipc and booted am335x-pm-firmware.elf. */ static int amx3_suspend_block(suspend_state_t state) { pr_warn("PM not initialized for pm33xx, wkup_m3_ipc, or am335x-pm-firmware.elf\n"); return -EINVAL; } static int amx3_pm_valid(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: return 1; default: return 0; } } static const struct platform_suspend_ops amx3_blocked_pm_ops = { .begin = amx3_suspend_block, .valid = amx3_pm_valid, }; static void __init amx3_block_suspend(void) { suspend_set_ops(&amx3_blocked_pm_ops); } #else static inline void amx3_block_suspend(void) { } #endif /* CONFIG_SUSPEND */ int __init amx3_common_pm_init(void) { struct am33xx_pm_platform_data *pdata; struct platform_device_info devinfo; pdata = am33xx_pm_get_pdata(); memset(&devinfo, 0, sizeof(devinfo)); devinfo.name = "pm33xx"; devinfo.data = pdata; devinfo.size_data = sizeof(*pdata); devinfo.id = -1; platform_device_register_full(&devinfo); amx3_block_suspend(); return 0; } static int __init amx3_idle_init(struct device_node *cpu_node, int cpu) { struct device_node *state_node; struct amx3_idle_state states[CPUIDLE_STATE_MAX]; int i; int state_count = 1; for (i = 0; ; i++) { state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); if (!state_node) break; if (!of_device_is_available(state_node)) continue; if (i == CPUIDLE_STATE_MAX) { pr_warn("%s: cpuidle states reached max possible\n", __func__); break; } states[state_count].wfi_flags = 0; if (of_property_read_bool(state_node, "ti,idle-wkup-m3")) 
states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 | WFI_FLAG_FLUSH_CACHE; state_count++; } idle_states = kcalloc(state_count, sizeof(*idle_states), GFP_KERNEL); if (!idle_states) return -ENOMEM; for (i = 1; i < state_count; i++) idle_states[i].wfi_flags = states[i].wfi_flags; return 0; } static int amx3_idle_enter(unsigned long index) { struct amx3_idle_state *idle_state = &idle_states[index]; if (!idle_state) return -EINVAL; if (idle_fn) idle_fn(idle_state->wfi_flags); return 0; } static struct cpuidle_ops amx3_cpuidle_ops __initdata = { .init = amx3_idle_init, .suspend = amx3_idle_enter, }; CPUIDLE_METHOD_OF_DECLARE(pm33xx_idle, "ti,am3352", &amx3_cpuidle_ops); CPUIDLE_METHOD_OF_DECLARE(pm43xx_idle, "ti,am4372", &amx3_cpuidle_ops);
linux-master
arch/arm/mach-omap2/pm33xx-core.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP2plus display device setup / initialization. * * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/ * Senthilvadivu Guruswamy * Sumit Semwal */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/platform_data/omapdss.h> #include "omap_hwmod.h" #include "omap_device.h" #include "common.h" #include "soc.h" #include "iomap.h" #include "control.h" #include "display.h" #include "prm.h" #define DISPC_CONTROL 0x0040 #define DISPC_CONTROL2 0x0238 #define DISPC_CONTROL3 0x0848 #define DISPC_IRQSTATUS 0x0018 #define DSS_CONTROL 0x40 #define DSS_SDI_CONTROL 0x44 #define DSS_PLL_CONTROL 0x48 #define LCD_EN_MASK (0x1 << 0) #define DIGIT_EN_MASK (0x1 << 1) #define FRAMEDONE_IRQ_SHIFT 0 #define EVSYNC_EVEN_IRQ_SHIFT 2 #define EVSYNC_ODD_IRQ_SHIFT 3 #define FRAMEDONE2_IRQ_SHIFT 22 #define FRAMEDONE3_IRQ_SHIFT 30 #define FRAMEDONETV_IRQ_SHIFT 24 /* * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC * reset before deciding that something has gone wrong */ #define FRAMEDONE_IRQ_TIMEOUT 100 #if defined(CONFIG_FB_OMAP2) static struct platform_device omap_display_device = { .name = "omapdss", .id = -1, .dev = { .platform_data = NULL, }, }; #define OMAP4_DSIPHY_SYSCON_OFFSET 0x78 static struct regmap *omap4_dsi_mux_syscon; static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) { u32 enable_mask, enable_shift; u32 pipd_mask, pipd_shift; u32 reg; int ret; if (dsi_id == 0) { enable_mask = OMAP4_DSI1_LANEENABLE_MASK; enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT; pipd_mask = OMAP4_DSI1_PIPD_MASK; pipd_shift = OMAP4_DSI1_PIPD_SHIFT; } else if (dsi_id == 1) { enable_mask = OMAP4_DSI2_LANEENABLE_MASK; 
enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT; pipd_mask = OMAP4_DSI2_PIPD_MASK; pipd_shift = OMAP4_DSI2_PIPD_SHIFT; } else { return -ENODEV; } ret = regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg); if (ret) return ret; reg &= ~enable_mask; reg &= ~pipd_mask; reg |= (lanes << enable_shift) & enable_mask; reg |= (lanes << pipd_shift) & pipd_mask; regmap_write(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, reg); return 0; } static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) { if (cpu_is_omap44xx()) return omap4_dsi_mux_pads(dsi_id, lane_mask); return 0; } static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) { if (cpu_is_omap44xx()) omap4_dsi_mux_pads(dsi_id, 0); } static enum omapdss_version __init omap_display_get_version(void) { if (cpu_is_omap24xx()) return OMAPDSS_VER_OMAP24xx; else if (cpu_is_omap3630()) return OMAPDSS_VER_OMAP3630; else if (cpu_is_omap34xx()) { if (soc_is_am35xx()) { return OMAPDSS_VER_AM35xx; } else { if (omap_rev() < OMAP3430_REV_ES3_0) return OMAPDSS_VER_OMAP34xx_ES1; else return OMAPDSS_VER_OMAP34xx_ES3; } } else if (omap_rev() == OMAP4430_REV_ES1_0) return OMAPDSS_VER_OMAP4430_ES1; else if (omap_rev() == OMAP4430_REV_ES2_0 || omap_rev() == OMAP4430_REV_ES2_1 || omap_rev() == OMAP4430_REV_ES2_2) return OMAPDSS_VER_OMAP4430_ES2; else if (cpu_is_omap44xx()) return OMAPDSS_VER_OMAP4; else if (soc_is_omap54xx()) return OMAPDSS_VER_OMAP5; else if (soc_is_am43xx()) return OMAPDSS_VER_AM43xx; else if (soc_is_dra7xx()) return OMAPDSS_VER_DRA7xx; else return OMAPDSS_VER_UNKNOWN; } static int __init omapdss_init_fbdev(void) { static struct omap_dss_board_info board_data = { .dsi_enable_pads = omap_dsi_enable_pads, .dsi_disable_pads = omap_dsi_disable_pads, }; struct device_node *node; int r; board_data.version = omap_display_get_version(); if (board_data.version == OMAPDSS_VER_UNKNOWN) { pr_err("DSS not supported on this SoC\n"); return -ENODEV; } omap_display_device.dev.platform_data = &board_data; r = 
platform_device_register(&omap_display_device); if (r < 0) { pr_err("Unable to register omapdss device\n"); return r; } /* create vrfb device */ r = omap_init_vrfb(); if (r < 0) { pr_err("Unable to register omapvrfb device\n"); return r; } /* create FB device */ r = omap_init_fb(); if (r < 0) { pr_err("Unable to register omapfb device\n"); return r; } /* create V4L2 display device */ r = omap_init_vout(); if (r < 0) { pr_err("Unable to register omap_vout device\n"); return r; } /* add DSI info for omap4 */ node = of_find_node_by_name(NULL, "omap4_padconf_global"); if (node) omap4_dsi_mux_syscon = syscon_node_to_regmap(node); of_node_put(node); return 0; } static const char * const omapdss_compat_names[] __initconst = { "ti,omap2-dss", "ti,omap3-dss", "ti,omap4-dss", "ti,omap5-dss", "ti,dra7-dss", }; static struct device_node * __init omapdss_find_dss_of_node(void) { struct device_node *node; int i; for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) { node = of_find_compatible_node(NULL, NULL, omapdss_compat_names[i]); if (node) return node; } return NULL; } static int __init omapdss_init_of(void) { int r; struct device_node *node; struct platform_device *pdev; /* only create dss helper devices if dss is enabled in the .dts */ node = omapdss_find_dss_of_node(); if (!node) return 0; if (!of_device_is_available(node)) { of_node_put(node); return 0; } pdev = of_find_device_by_node(node); if (!pdev) { pr_err("Unable to find DSS platform device\n"); of_node_put(node); return -ENODEV; } r = of_platform_populate(node, NULL, NULL, &pdev->dev); put_device(&pdev->dev); of_node_put(node); if (r) { pr_err("Unable to populate DSS submodule devices\n"); return r; } return omapdss_init_fbdev(); } omap_device_initcall(omapdss_init_of); #endif /* CONFIG_FB_OMAP2 */ static void dispc_disable_outputs(void) { u32 v, irq_mask = 0; bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; int i; struct omap_dss_dispc_dev_attr *da; struct omap_hwmod *oh; oh = 
omap_hwmod_lookup("dss_dispc"); if (!oh) { WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n"); return; } if (!oh->dev_attr) { pr_err("display: could not disable outputs during reset due to missing dev_attr\n"); return; } da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr; /* store value of LCDENABLE and DIGITENABLE bits */ v = omap_hwmod_read(oh, DISPC_CONTROL); lcd_en = v & LCD_EN_MASK; digit_en = v & DIGIT_EN_MASK; /* store value of LCDENABLE for LCD2 */ if (da->manager_count > 2) { v = omap_hwmod_read(oh, DISPC_CONTROL2); lcd2_en = v & LCD_EN_MASK; } /* store value of LCDENABLE for LCD3 */ if (da->manager_count > 3) { v = omap_hwmod_read(oh, DISPC_CONTROL3); lcd3_en = v & LCD_EN_MASK; } if (!(lcd_en | digit_en | lcd2_en | lcd3_en)) return; /* no managers currently enabled */ /* * If any manager was enabled, we need to disable it before * DSS clocks are disabled or DISPC module is reset */ if (lcd_en) irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT; if (digit_en) { if (da->has_framedonetv_irq) { irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT; } else { irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT | 1 << EVSYNC_ODD_IRQ_SHIFT; } } if (lcd2_en) irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT; if (lcd3_en) irq_mask |= 1 << FRAMEDONE3_IRQ_SHIFT; /* * clear any previous FRAMEDONE, FRAMEDONETV, * EVSYNC_EVEN/ODD, FRAMEDONE2 or FRAMEDONE3 interrupts */ omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS); /* disable LCD and TV managers */ v = omap_hwmod_read(oh, DISPC_CONTROL); v &= ~(LCD_EN_MASK | DIGIT_EN_MASK); omap_hwmod_write(v, oh, DISPC_CONTROL); /* disable LCD2 manager */ if (da->manager_count > 2) { v = omap_hwmod_read(oh, DISPC_CONTROL2); v &= ~LCD_EN_MASK; omap_hwmod_write(v, oh, DISPC_CONTROL2); } /* disable LCD3 manager */ if (da->manager_count > 3) { v = omap_hwmod_read(oh, DISPC_CONTROL3); v &= ~LCD_EN_MASK; omap_hwmod_write(v, oh, DISPC_CONTROL3); } i = 0; while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) != irq_mask) { i++; if (i > 
FRAMEDONE_IRQ_TIMEOUT) { pr_err("didn't get FRAMEDONE1/2/3 or TV interrupt\n"); break; } mdelay(1); } } int omap_dss_reset(struct omap_hwmod *oh) { struct omap_hwmod_opt_clk *oc; int c = 0; int i, r; if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) { pr_err("dss_core: hwmod data doesn't contain reset data\n"); return -EINVAL; } for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) clk_prepare_enable(oc->_clk); dispc_disable_outputs(); /* clear SDI registers */ if (cpu_is_omap3430()) { omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL); omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL); } /* * clear DSS_CONTROL register to switch DSS clock sources to * PRCM clock, if any */ omap_hwmod_write(0x0, oh, DSS_CONTROL); omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs) & SYSS_RESETDONE_MASK), MAX_MODULE_SOFTRESET_WAIT, c); if (c == MAX_MODULE_SOFTRESET_WAIT) pr_warn("dss_core: waiting for reset to finish failed\n"); else pr_debug("dss_core: softreset done\n"); for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) clk_disable_unprepare(oc->_clk); r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0; return r; }
linux-master
arch/arm/mach-omap2/display.c
// SPDX-License-Identifier: GPL-2.0-only /* * omap_hwmod_3xxx_data.c - hardware modules present on the OMAP3xxx chips * * Copyright (C) 2009-2011 Nokia Corporation * Copyright (C) 2012 Texas Instruments, Inc. * Paul Walmsley * * The data in this file should be completely autogeneratable from * the TI hardware database or other technical documentation. * * XXX these should be marked initdata for multi-OMAP kernels */ #include <linux/platform_data/i2c-omap.h> #include <linux/power/smartreflex.h> #include <linux/platform_data/hsmmc-omap.h> #include "l3_3xxx.h" #include "l4_3xxx.h" #include "soc.h" #include "omap_hwmod.h" #include "omap_hwmod_common_data.h" #include "prm-regbits-34xx.h" #include "cm-regbits-34xx.h" #include "i2c.h" #include "wd_timer.h" /* * OMAP3xxx hardware module integration data * * All of the data in this section should be autogeneratable from the * TI hardware database or other technical documentation. Data that * is driver-specific or driver-kernel integration-specific belongs * elsewhere. 
*/ #define AM35XX_IPSS_USBOTGSS_BASE 0x5C040000 /* * IP blocks */ /* L3 */ static struct omap_hwmod omap3xxx_l3_main_hwmod = { .name = "l3_main", .class = &l3_hwmod_class, .flags = HWMOD_NO_IDLEST, }; /* L4 CORE */ static struct omap_hwmod omap3xxx_l4_core_hwmod = { .name = "l4_core", .class = &l4_hwmod_class, .flags = HWMOD_NO_IDLEST, }; /* L4 PER */ static struct omap_hwmod omap3xxx_l4_per_hwmod = { .name = "l4_per", .class = &l4_hwmod_class, .flags = HWMOD_NO_IDLEST, }; /* L4 WKUP */ static struct omap_hwmod omap3xxx_l4_wkup_hwmod = { .name = "l4_wkup", .class = &l4_hwmod_class, .flags = HWMOD_NO_IDLEST, }; /* L4 SEC */ static struct omap_hwmod omap3xxx_l4_sec_hwmod = { .name = "l4_sec", .class = &l4_hwmod_class, .flags = HWMOD_NO_IDLEST, }; /* MPU */ static struct omap_hwmod omap3xxx_mpu_hwmod = { .name = "mpu", .class = &mpu_hwmod_class, .main_clk = "arm_fck", }; /* IVA2 (IVA2) */ static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = { { .name = "logic", .rst_shift = 0, .st_shift = 8 }, { .name = "seq0", .rst_shift = 1, .st_shift = 9 }, { .name = "seq1", .rst_shift = 2, .st_shift = 10 }, }; static struct omap_hwmod omap3xxx_iva_hwmod = { .name = "iva", .class = &iva_hwmod_class, .clkdm_name = "iva2_clkdm", .rst_lines = omap3xxx_iva_resets, .rst_lines_cnt = ARRAY_SIZE(omap3xxx_iva_resets), .main_clk = "iva2_ck", .prcm = { .omap2 = { .module_offs = OMAP3430_IVA2_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT, }, }, }; /* * 'debugss' class * debug and emulation sub system */ static struct omap_hwmod_class omap3xxx_debugss_hwmod_class = { .name = "debugss", }; /* debugss */ static struct omap_hwmod omap3xxx_debugss_hwmod = { .name = "debugss", .class = &omap3xxx_debugss_hwmod_class, .clkdm_name = "emu_clkdm", .main_clk = "emu_src_ck", .flags = HWMOD_NO_IDLEST, }; /* timer class */ static struct omap_hwmod_class_sysconfig omap3xxx_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | 
SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_timer_hwmod_class = { .name = "timer", .sysc = &omap3xxx_timer_sysc, }; /* timer3 */ static struct omap_hwmod omap3xxx_timer3_hwmod = { .name = "timer3", .main_clk = "gpt3_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT3_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* timer4 */ static struct omap_hwmod omap3xxx_timer4_hwmod = { .name = "timer4", .main_clk = "gpt4_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT4_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* timer5 */ static struct omap_hwmod omap3xxx_timer5_hwmod = { .name = "timer5", .main_clk = "gpt5_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT5_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* timer6 */ static struct omap_hwmod omap3xxx_timer6_hwmod = { .name = "timer6", .main_clk = "gpt6_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT6_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* timer7 */ static struct omap_hwmod omap3xxx_timer7_hwmod = { .name = "timer7", .main_clk = "gpt7_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT7_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* timer8 */ static struct omap_hwmod omap3xxx_timer8_hwmod = { .name = "timer8", .main_clk = "gpt8_fck", .prcm = { .omap2 = { 
.module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT8_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* timer9 */ static struct omap_hwmod omap3xxx_timer9_hwmod = { .name = "timer9", .main_clk = "gpt9_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT9_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* timer10 */ static struct omap_hwmod omap3xxx_timer10_hwmod = { .name = "timer10", .main_clk = "gpt10_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT10_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* timer11 */ static struct omap_hwmod omap3xxx_timer11_hwmod = { .name = "timer11", .main_clk = "gpt11_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT11_SHIFT, }, }, .class = &omap3xxx_timer_hwmod_class, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; /* * 'wd_timer' class * 32-bit watchdog upward counter that generates a pulse on the reset pin on * overflow condition */ static struct omap_hwmod_class_sysconfig omap3xxx_wd_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; /* I2C common */ static struct omap_hwmod_class_sysconfig i2c_sysc = { .rev_offs = 0x00, .sysc_offs = 0x20, .syss_offs = 0x10, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct 
omap_hwmod_class omap3xxx_wd_timer_hwmod_class = { .name = "wd_timer", .sysc = &omap3xxx_wd_timer_sysc, .pre_shutdown = &omap2_wd_timer_disable, .reset = &omap2_wd_timer_reset, }; static struct omap_hwmod omap3xxx_wd_timer2_hwmod = { .name = "wd_timer2", .class = &omap3xxx_wd_timer_hwmod_class, .main_clk = "wdt2_fck", .prcm = { .omap2 = { .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_WDT2_SHIFT, }, }, /* * XXX: Use software supervised mode, HW supervised smartidle seems to * block CORE power domain idle transitions. Maybe a HW bug in wdt2? */ .flags = HWMOD_SWSUP_SIDLE, }; /* UART1 */ static struct omap_hwmod omap3xxx_uart1_hwmod = { .name = "uart1", .main_clk = "uart1_fck", .flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE, .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART1_SHIFT, }, }, .class = &omap2_uart_class, }; /* UART2 */ static struct omap_hwmod omap3xxx_uart2_hwmod = { .name = "uart2", .main_clk = "uart2_fck", .flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE, .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART2_SHIFT, }, }, .class = &omap2_uart_class, }; /* UART3 */ static struct omap_hwmod omap3xxx_uart3_hwmod = { .name = "uart3", .main_clk = "uart3_fck", .flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS | HWMOD_SWSUP_SIDLE, .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART3_SHIFT, }, }, .class = &omap2_uart_class, }; /* UART4 */ static struct omap_hwmod omap36xx_uart4_hwmod = { .name = "uart4", .main_clk = "uart4_fck", .flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE, .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3630_EN_UART4_SHIFT, }, }, .class = &omap2_uart_class, }; /* * XXX AM35xx UART4 cannot complete its softreset without uart1_fck or * uart2_fck being enabled. 
So we add uart1_fck as an optional clock, * below, and set the HWMOD_CONTROL_OPT_CLKS_IN_RESET. This really * should not be needed. The functional clock structure of the AM35xx * UART4 is extremely unclear and opaque; it is unclear what the role * of uart1/2_fck is for the UART4. Any clarification from either * empirical testing or the AM3505/3517 hardware designers would be * most welcome. */ static struct omap_hwmod_opt_clk am35xx_uart4_opt_clks[] = { { .role = "softreset_uart1_fck", .clk = "uart1_fck" }, }; static struct omap_hwmod am35xx_uart4_hwmod = { .name = "uart4", .main_clk = "uart4_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = AM35XX_ST_UART4_SHIFT, }, }, .opt_clks = am35xx_uart4_opt_clks, .opt_clks_cnt = ARRAY_SIZE(am35xx_uart4_opt_clks), .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .class = &omap2_uart_class, }; static struct omap_hwmod_class i2c_class = { .name = "i2c", .sysc = &i2c_sysc, .reset = &omap_i2c_reset, }; /* dss */ static struct omap_hwmod_opt_clk dss_opt_clks[] = { /* * The DSS HW needs all DSS clocks enabled during reset. The dss_core * driver does not use these clocks. 
*/ { .role = "sys_clk", .clk = "dss2_alwon_fck" }, { .role = "tv_clk", .clk = "dss_tv_fck" }, /* required only on OMAP3430 */ { .role = "tv_dac_clk", .clk = "dss_96m_fck" }, }; static struct omap_hwmod omap3430es1_dss_core_hwmod = { .name = "dss_core", .class = &omap2_dss_hwmod_class, .main_clk = "dss1_alwon_fck", /* instead of dss_fck */ .prcm = { .omap2 = { .module_offs = OMAP3430_DSS_MOD, .idlest_reg_id = 1, }, }, .opt_clks = dss_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks), .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET, }; static struct omap_hwmod omap3xxx_dss_core_hwmod = { .name = "dss_core", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .class = &omap2_dss_hwmod_class, .main_clk = "dss1_alwon_fck", /* instead of dss_fck */ .prcm = { .omap2 = { .module_offs = OMAP3430_DSS_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT, }, }, .opt_clks = dss_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks), }; /* * 'dispc' class * display controller */ static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3_dispc_hwmod_class = { .name = "dispc", .sysc = &omap3_dispc_sysc, }; static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { .name = "dss_dispc", .class = &omap3_dispc_hwmod_class, .main_clk = "dss1_alwon_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_DSS_MOD, }, }, .flags = HWMOD_NO_IDLEST, .dev_attr = &omap2_3_dss_dispc_dev_attr, }; /* * 'dsi' class * display serial interface controller */ static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | 
SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = { .name = "dsi", .sysc = &omap3xxx_dsi_sysc, }; /* dss_dsi1 */ static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = { { .role = "sys_clk", .clk = "dss2_alwon_fck" }, }; static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = { .name = "dss_dsi1", .class = &omap3xxx_dsi_hwmod_class, .main_clk = "dss1_alwon_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_DSS_MOD, }, }, .opt_clks = dss_dsi1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks), .flags = HWMOD_NO_IDLEST, }; static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = { { .role = "ick", .clk = "dss_ick" }, }; static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = { .name = "dss_rfbi", .class = &omap2_rfbi_hwmod_class, .main_clk = "dss1_alwon_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_DSS_MOD, }, }, .opt_clks = dss_rfbi_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks), .flags = HWMOD_NO_IDLEST, }; static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = { /* required only on OMAP3430 */ { .role = "tv_dac_clk", .clk = "dss_96m_fck" }, }; static struct omap_hwmod omap3xxx_dss_venc_hwmod = { .name = "dss_venc", .class = &omap2_venc_hwmod_class, .main_clk = "dss_tv_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_DSS_MOD, }, }, .opt_clks = dss_venc_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks), .flags = HWMOD_NO_IDLEST, }; /* I2C1 */ static struct omap_hwmod omap3xxx_i2c1_hwmod = { .name = "i2c1", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .main_clk = "i2c1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_I2C1_SHIFT, }, }, .class = &i2c_class, }; /* I2C2 */ static struct omap_hwmod omap3xxx_i2c2_hwmod = { .name = "i2c2", .flags = HWMOD_16BIT_REG | 
HWMOD_SET_DEFAULT_CLOCKACT, .main_clk = "i2c2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_I2C2_SHIFT, }, }, .class = &i2c_class, }; /* I2C3 */ static struct omap_hwmod omap3xxx_i2c3_hwmod = { .name = "i2c3", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .main_clk = "i2c3_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_I2C3_SHIFT, }, }, .class = &i2c_class, }; /* * 'gpio' class * general purpose io module */ static struct omap_hwmod_class_sysconfig omap3xxx_gpio_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_gpio_hwmod_class = { .name = "gpio", .sysc = &omap3xxx_gpio_sysc, }; /* gpio1 */ static struct omap_hwmod_opt_clk gpio1_opt_clks[] = { { .role = "dbclk", .clk = "gpio1_dbck", }, }; static struct omap_hwmod omap3xxx_gpio1_hwmod = { .name = "gpio1", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .main_clk = "gpio1_ick", .opt_clks = gpio1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks), .prcm = { .omap2 = { .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO1_SHIFT, }, }, .class = &omap3xxx_gpio_hwmod_class, }; /* gpio2 */ static struct omap_hwmod_opt_clk gpio2_opt_clks[] = { { .role = "dbclk", .clk = "gpio2_dbck", }, }; static struct omap_hwmod omap3xxx_gpio2_hwmod = { .name = "gpio2", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .main_clk = "gpio2_ick", .opt_clks = gpio2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks), .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO2_SHIFT, }, }, .class = &omap3xxx_gpio_hwmod_class, }; /* gpio3 */ static struct omap_hwmod_opt_clk 
gpio3_opt_clks[] = { { .role = "dbclk", .clk = "gpio3_dbck", }, }; static struct omap_hwmod omap3xxx_gpio3_hwmod = { .name = "gpio3", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .main_clk = "gpio3_ick", .opt_clks = gpio3_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks), .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO3_SHIFT, }, }, .class = &omap3xxx_gpio_hwmod_class, }; /* gpio4 */ static struct omap_hwmod_opt_clk gpio4_opt_clks[] = { { .role = "dbclk", .clk = "gpio4_dbck", }, }; static struct omap_hwmod omap3xxx_gpio4_hwmod = { .name = "gpio4", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .main_clk = "gpio4_ick", .opt_clks = gpio4_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks), .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO4_SHIFT, }, }, .class = &omap3xxx_gpio_hwmod_class, }; /* gpio5 */ static struct omap_hwmod_opt_clk gpio5_opt_clks[] = { { .role = "dbclk", .clk = "gpio5_dbck", }, }; static struct omap_hwmod omap3xxx_gpio5_hwmod = { .name = "gpio5", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .main_clk = "gpio5_ick", .opt_clks = gpio5_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks), .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO5_SHIFT, }, }, .class = &omap3xxx_gpio_hwmod_class, }; /* gpio6 */ static struct omap_hwmod_opt_clk gpio6_opt_clks[] = { { .role = "dbclk", .clk = "gpio6_dbck", }, }; static struct omap_hwmod omap3xxx_gpio6_hwmod = { .name = "gpio6", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .main_clk = "gpio6_ick", .opt_clks = gpio6_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks), .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO6_SHIFT, }, }, .class = &omap3xxx_gpio_hwmod_class, }; /* * 'mcbsp' class * multi channel buffered serial port controller */ static struct omap_hwmod_class_sysconfig 
omap3xxx_mcbsp_sysc = { .rev_offs = -ENODEV, .sysc_offs = 0x008c, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_mcbsp_hwmod_class = { .name = "mcbsp", .sysc = &omap3xxx_mcbsp_sysc, }; /* McBSP functional clock mapping */ static struct omap_hwmod_opt_clk mcbsp15_opt_clks[] = { { .role = "pad_fck", .clk = "mcbsp_clks" }, { .role = "prcm_fck", .clk = "core_96m_fck" }, }; static struct omap_hwmod_opt_clk mcbsp234_opt_clks[] = { { .role = "pad_fck", .clk = "mcbsp_clks" }, { .role = "prcm_fck", .clk = "per_96m_fck" }, }; /* mcbsp1 */ static struct omap_hwmod omap3xxx_mcbsp1_hwmod = { .name = "mcbsp1", .class = &omap3xxx_mcbsp_hwmod_class, .main_clk = "mcbsp1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP1_SHIFT, }, }, .opt_clks = mcbsp15_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp15_opt_clks), }; /* mcbsp2 */ static struct omap_hwmod omap3xxx_mcbsp2_hwmod = { .name = "mcbsp2", .class = &omap3xxx_mcbsp_hwmod_class, .main_clk = "mcbsp2_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT, }, }, .opt_clks = mcbsp234_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp234_opt_clks), }; /* mcbsp3 */ static struct omap_hwmod omap3xxx_mcbsp3_hwmod = { .name = "mcbsp3", .class = &omap3xxx_mcbsp_hwmod_class, .main_clk = "mcbsp3_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT, }, }, .opt_clks = mcbsp234_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp234_opt_clks), }; /* mcbsp4 */ static struct omap_hwmod omap3xxx_mcbsp4_hwmod = { .name = "mcbsp4", .class = &omap3xxx_mcbsp_hwmod_class, .main_clk = "mcbsp4_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = 
OMAP3430_ST_MCBSP4_SHIFT, }, }, .opt_clks = mcbsp234_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp234_opt_clks), }; /* mcbsp5 */ static struct omap_hwmod omap3xxx_mcbsp5_hwmod = { .name = "mcbsp5", .class = &omap3xxx_mcbsp_hwmod_class, .main_clk = "mcbsp5_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP5_SHIFT, }, }, .opt_clks = mcbsp15_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp15_opt_clks), }; /* 'mcbsp sidetone' class */ static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sidetone_sysc = { .rev_offs = -ENODEV, .sysc_offs = 0x0010, .sysc_flags = SYSC_HAS_AUTOIDLE, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_mcbsp_sidetone_hwmod_class = { .name = "mcbsp_sidetone", .sysc = &omap3xxx_mcbsp_sidetone_sysc, }; /* mcbsp2_sidetone */ static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = { .name = "mcbsp2_sidetone", .class = &omap3xxx_mcbsp_sidetone_hwmod_class, .main_clk = "mcbsp2_ick", .flags = HWMOD_NO_IDLEST, }; /* mcbsp3_sidetone */ static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = { .name = "mcbsp3_sidetone", .class = &omap3xxx_mcbsp_sidetone_hwmod_class, .main_clk = "mcbsp3_ick", .flags = HWMOD_NO_IDLEST, }; /* SR common */ static struct omap_hwmod_class_sysconfig omap34xx_sr_sysc = { .rev_offs = -ENODEV, .sysc_offs = 0x24, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_NO_CACHE), .sysc_fields = &omap34xx_sr_sysc_fields, }; static struct omap_hwmod_class omap34xx_smartreflex_hwmod_class = { .name = "smartreflex", .sysc = &omap34xx_sr_sysc, }; static struct omap_hwmod_class_sysconfig omap36xx_sr_sysc = { .rev_offs = -ENODEV, .sysc_offs = 0x38, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_NO_CACHE), .sysc_fields = &omap36xx_sr_sysc_fields, }; static struct omap_hwmod_class omap36xx_smartreflex_hwmod_class = { .name = "smartreflex", .sysc = &omap36xx_sr_sysc, }; /* SR1 */ static struct 
omap_smartreflex_dev_attr sr1_dev_attr = { .sensor_voltdm_name = "mpu_iva", }; static struct omap_hwmod omap34xx_sr1_hwmod = { .name = "smartreflex_mpu_iva", .class = &omap34xx_smartreflex_hwmod_class, .main_clk = "sr1_fck", .prcm = { .omap2 = { .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_SR1_SHIFT, }, }, .dev_attr = &sr1_dev_attr, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; static struct omap_hwmod omap36xx_sr1_hwmod = { .name = "smartreflex_mpu_iva", .class = &omap36xx_smartreflex_hwmod_class, .main_clk = "sr1_fck", .prcm = { .omap2 = { .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_SR1_SHIFT, }, }, .dev_attr = &sr1_dev_attr, }; /* SR2 */ static struct omap_smartreflex_dev_attr sr2_dev_attr = { .sensor_voltdm_name = "core", }; static struct omap_hwmod omap34xx_sr2_hwmod = { .name = "smartreflex_core", .class = &omap34xx_smartreflex_hwmod_class, .main_clk = "sr2_fck", .prcm = { .omap2 = { .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_SR2_SHIFT, }, }, .dev_attr = &sr2_dev_attr, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; static struct omap_hwmod omap36xx_sr2_hwmod = { .name = "smartreflex_core", .class = &omap36xx_smartreflex_hwmod_class, .main_clk = "sr2_fck", .prcm = { .omap2 = { .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_SR2_SHIFT, }, }, .dev_attr = &sr2_dev_attr, }; /* * 'mailbox' class * mailbox module allowing communication between the on-chip processors * using a queued mailbox-interrupt mechanism. 
*/ static struct omap_hwmod_class_sysconfig omap3xxx_mailbox_sysc = { .rev_offs = 0x000, .sysc_offs = 0x010, .syss_offs = 0x014, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_mailbox_hwmod_class = { .name = "mailbox", .sysc = &omap3xxx_mailbox_sysc, }; static struct omap_hwmod omap3xxx_mailbox_hwmod = { .name = "mailbox", .class = &omap3xxx_mailbox_hwmod_class, .main_clk = "mailboxes_ick", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MAILBOXES_SHIFT, }, }, }; /* * 'mcspi' class * multichannel serial port interface (mcspi) / master/slave synchronous serial * bus */ static struct omap_hwmod_class_sysconfig omap34xx_mcspi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap34xx_mcspi_class = { .name = "mcspi", .sysc = &omap34xx_mcspi_sysc, }; /* mcspi1 */ static struct omap_hwmod omap34xx_mcspi1 = { .name = "mcspi1", .main_clk = "mcspi1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCSPI1_SHIFT, }, }, .class = &omap34xx_mcspi_class, }; /* mcspi2 */ static struct omap_hwmod omap34xx_mcspi2 = { .name = "mcspi2", .main_clk = "mcspi2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCSPI2_SHIFT, }, }, .class = &omap34xx_mcspi_class, }; /* mcspi3 */ static struct omap_hwmod omap34xx_mcspi3 = { .name = "mcspi3", .main_clk = "mcspi3_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = 
OMAP3430_ST_MCSPI3_SHIFT, }, }, .class = &omap34xx_mcspi_class, }; /* mcspi4 */ static struct omap_hwmod omap34xx_mcspi4 = { .name = "mcspi4", .main_clk = "mcspi4_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCSPI4_SHIFT, }, }, .class = &omap34xx_mcspi_class, }; /* MMC/SD/SDIO common */ static struct omap_hwmod_class_sysconfig omap34xx_mmc_sysc = { .rev_offs = 0x1fc, .sysc_offs = 0x10, .syss_offs = 0x14, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap34xx_mmc_class = { .name = "mmc", .sysc = &omap34xx_mmc_sysc, }; /* MMC/SD/SDIO1 */ static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = { { .role = "dbck", .clk = "omap_32k_fck", }, }; static struct omap_hsmmc_dev_attr mmc1_dev_attr = { .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT, }; /* See 35xx errata 2.1.1.128 in SPRZ278F */ static struct omap_hsmmc_dev_attr mmc1_pre_es3_dev_attr = { .flags = (OMAP_HSMMC_SUPPORTS_DUAL_VOLT | OMAP_HSMMC_BROKEN_MULTIBLOCK_READ), }; static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = { .name = "mmc1", .opt_clks = omap34xx_mmc1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks), .main_clk = "mmchs1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT, }, }, .dev_attr = &mmc1_pre_es3_dev_attr, .class = &omap34xx_mmc_class, }; static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = { .name = "mmc1", .opt_clks = omap34xx_mmc1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks), .main_clk = "mmchs1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT, }, }, .dev_attr = &mmc1_dev_attr, .class = &omap34xx_mmc_class, }; /* MMC/SD/SDIO2 */ static struct 
omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = { { .role = "dbck", .clk = "omap_32k_fck", }, }; /* See 35xx errata 2.1.1.128 in SPRZ278F */ static struct omap_hsmmc_dev_attr mmc2_pre_es3_dev_attr = { .flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ, }; static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = { .name = "mmc2", .opt_clks = omap34xx_mmc2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks), .main_clk = "mmchs2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT, }, }, .dev_attr = &mmc2_pre_es3_dev_attr, .class = &omap34xx_mmc_class, }; static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = { .name = "mmc2", .opt_clks = omap34xx_mmc2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks), .main_clk = "mmchs2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT, }, }, .class = &omap34xx_mmc_class, }; /* MMC/SD/SDIO3 */ static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = { { .role = "dbck", .clk = "omap_32k_fck", }, }; static struct omap_hwmod omap3xxx_mmc3_hwmod = { .name = "mmc3", .opt_clks = omap34xx_mmc3_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc3_opt_clks), .main_clk = "mmchs3_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC3_SHIFT, }, }, .class = &omap34xx_mmc_class, }; /* * 'usb_host_hs' class * high-speed multi-port usb host controller */ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_usb_host_hs_hwmod_class = { .name = 
"usb_host_hs", .sysc = &omap3xxx_usb_host_hs_sysc, }; static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = { .name = "usb_host_hs", .class = &omap3xxx_usb_host_hs_hwmod_class, .clkdm_name = "usbhost_clkdm", .main_clk = "usbhost_48m_fck", .prcm = { .omap2 = { .module_offs = OMAP3430ES2_USBHOST_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430ES2_ST_USBHOST_IDLE_SHIFT, }, }, /* * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock * id: i660 * * Description: * In the following configuration : * - USBHOST module is set to smart-idle mode * - PRCM asserts idle_req to the USBHOST module ( This typically * happens when the system is going to a low power mode : all ports * have been suspended, the master part of the USBHOST module has * entered the standby state, and SW has cut the functional clocks) * - an USBHOST interrupt occurs before the module is able to answer * idle_ack, typically a remote wakeup IRQ. * Then the USB HOST module will enter a deadlock situation where it * is no more accessible nor functional. * * Workaround: * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE */ /* * Errata: USB host EHCI may stall when entering smart-standby mode * Id: i571 * * Description: * When the USBHOST module is set to smart-standby mode, and when it is * ready to enter the standby state (i.e. all ports are suspended and * all attached devices are in suspend mode), then it can wrongly assert * the Mstandby signal too early while there are still some residual OCP * transactions ongoing. If this condition occurs, the internal state * machine may go to an undefined state and the USB link may be stuck * upon the next resume. 
* * Workaround: * Don't use smart standby; use only force standby, * hence HWMOD_SWSUP_MSTANDBY */ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, }; /* * 'usb_tll_hs' class * usb_tll_hs module is the adapter on the usb_host_hs ports */ static struct omap_hwmod_class_sysconfig omap3xxx_usb_tll_hs_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_usb_tll_hs_hwmod_class = { .name = "usb_tll_hs", .sysc = &omap3xxx_usb_tll_hs_sysc, }; static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = { .name = "usb_tll_hs", .class = &omap3xxx_usb_tll_hs_hwmod_class, .clkdm_name = "core_l4_clkdm", .main_clk = "usbtll_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 3, .idlest_idle_bit = OMAP3430ES2_ST_USBTLL_SHIFT, }, }, }; static struct omap_hwmod omap3xxx_hdq1w_hwmod = { .name = "hdq1w", .main_clk = "hdq_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_HDQ_SHIFT, }, }, .class = &omap2_hdq1w_class, }; /* SAD2D */ static struct omap_hwmod_rst_info omap3xxx_sad2d_resets[] = { { .name = "rst_modem_pwron_sw", .rst_shift = 0 }, { .name = "rst_modem_sw", .rst_shift = 1 }, }; static struct omap_hwmod_class omap3xxx_sad2d_class = { .name = "sad2d", }; static struct omap_hwmod omap3xxx_sad2d_hwmod = { .name = "sad2d", .rst_lines = omap3xxx_sad2d_resets, .rst_lines_cnt = ARRAY_SIZE(omap3xxx_sad2d_resets), .main_clk = "sad2d_ick", .prcm = { .omap2 = { .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_SAD2D_SHIFT, }, }, .class = &omap3xxx_sad2d_class, }; /* * 'gpmc' class * general purpose memory controller */ static struct omap_hwmod_class_sysconfig omap3xxx_gpmc_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, 
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
	.name	= "gpmc",
	.sysc	= &omap3xxx_gpmc_sysc,
};

static struct omap_hwmod omap3xxx_gpmc_hwmod = {
	.name		= "gpmc",
	.class		= &omap3xxx_gpmc_hwmod_class,
	.clkdm_name	= "core_l3_clkdm",
	.main_clk	= "gpmc_fck",
	/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
	.flags		= HWMOD_NO_IDLEST | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
};

/*
 * interfaces
 */

/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = {
	.master	= &omap3xxx_l3_main_hwmod,
	.slave	= &omap3xxx_l4_core_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L3 -> L4_PER interface */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_per = {
	.master	= &omap3xxx_l3_main_hwmod,
	.slave	= &omap3xxx_l4_per_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* MPU -> L3 interface */
static struct omap_hwmod_ocp_if omap3xxx_mpu__l3_main = {
	.master	= &omap3xxx_mpu_hwmod,
	.slave	= &omap3xxx_l3_main_hwmod,
	.user	= OCP_USER_MPU,
};

/* l3 -> debugss */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_debugss = {
	.master	= &omap3xxx_l3_main_hwmod,
	.slave	= &omap3xxx_debugss_hwmod,
	.user	= OCP_USER_MPU,
};

/* DSS -> l3 */
static struct omap_hwmod_ocp_if omap3430es1_dss__l3 = {
	.master	= &omap3430es1_dss_core_hwmod,
	.slave	= &omap3xxx_l3_main_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_ocp_if omap3xxx_dss__l3 = {
	.master	= &omap3xxx_dss_core_hwmod,
	.slave	= &omap3xxx_l3_main_hwmod,
	.fw = {
		.omap2 = {
			.l3_perm_bit = OMAP3_L3_CORE_FW_INIT_ID_DSS,
			.flags	= OMAP_FIREWALL_L3,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l3_core -> sad2d interface */
static struct omap_hwmod_ocp_if omap3xxx_sad2d__l3 = {
	.master	= &omap3xxx_sad2d_hwmod,
	.slave	= &omap3xxx_l3_main_hwmod,
	.clk	= "core_l3_ick",
	.user	= OCP_USER_MPU,
};

/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_l4_wkup_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> MMC1 interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__pre_es3_mmc1 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_pre_es3_mmc1_hwmod,
	.clk	= "mmchs1_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
	.flags	= OMAP_FIREWALL_L4,
};

static struct omap_hwmod_ocp_if omap3xxx_l4_core__es3plus_mmc1 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_es3plus_mmc1_hwmod,
	.clk	= "mmchs1_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
	.flags	= OMAP_FIREWALL_L4,
};

/* L4 CORE -> MMC2 interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__pre_es3_mmc2 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_pre_es3_mmc2_hwmod,
	.clk	= "mmchs2_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
	.flags	= OMAP_FIREWALL_L4,
};

static struct omap_hwmod_ocp_if omap3xxx_l4_core__es3plus_mmc2 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_es3plus_mmc2_hwmod,
	.clk	= "mmchs2_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
	.flags	= OMAP_FIREWALL_L4,
};

/* L4 CORE -> MMC3 interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_mmc3_hwmod,
	.clk	= "mmchs3_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
	.flags	= OMAP_FIREWALL_L4,
};

/* L4 CORE -> UART1 interface */
static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_uart1_hwmod,
	.clk	= "uart1_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> UART2 interface */
static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_uart2_hwmod,
	.clk	= "uart2_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 PER -> UART3 interface */
static struct omap_hwmod_ocp_if omap3_l4_per__uart3 =
{
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_uart3_hwmod,
	.clk	= "uart3_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 PER -> UART4 interface */
static struct omap_hwmod_ocp_if omap36xx_l4_per__uart4 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap36xx_uart4_hwmod,
	.clk	= "uart4_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* AM35xx: L4 CORE -> UART4 interface */
static struct omap_hwmod_ocp_if am35xx_l4_core__uart4 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &am35xx_uart4_hwmod,
	.clk	= "uart4_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> I2C1 interface */
static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_i2c1_hwmod,
	.clk	= "i2c1_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3_L4_CORE_FW_I2C1_REGION,
			.l4_prot_group = 7,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> I2C2 interface */
static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_i2c2_hwmod,
	.clk	= "i2c2_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3_L4_CORE_FW_I2C2_REGION,
			.l4_prot_group = 7,
			.flags = OMAP_FIREWALL_L4,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> I2C3 interface */
static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_i2c3_hwmod,
	.clk	= "i2c3_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3_L4_CORE_FW_I2C3_REGION,
			.l4_prot_group = 7,
			.flags = OMAP_FIREWALL_L4,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> SR1 interface (SmartReflex) */
static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap34xx_sr1_hwmod,
	.clk	= "sr_l4_ick",
	.user	= OCP_USER_MPU,
};

static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap36xx_sr1_hwmod,
	.clk	= "sr_l4_ick",
	.user	= OCP_USER_MPU,
};

/* L4 CORE -> SR2 interface */
static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap34xx_sr2_hwmod,
	.clk	= "sr_l4_ick",
	.user	= OCP_USER_MPU,
};

static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap36xx_sr2_hwmod,
	.clk	= "sr_l4_ick",
	.user	= OCP_USER_MPU,
};

/* L4_WKUP -> L4_SEC interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__l4_sec = {
	.master	= &omap3xxx_l4_wkup_hwmod,
	.slave	= &omap3xxx_l4_sec_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* IVA2 <- L3 interface */
static struct omap_hwmod_ocp_if omap3xxx_l3__iva = {
	.master	= &omap3xxx_l3_main_hwmod,
	.slave	= &omap3xxx_iva_hwmod,
	.clk	= "core_l3_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> timer3 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_timer3_hwmod,
	.clk	= "gpt3_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> timer4 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer4 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_timer4_hwmod,
	.clk	= "gpt4_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> timer5 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer5 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_timer5_hwmod,
	.clk	= "gpt5_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> timer6 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer6 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_timer6_hwmod,
	.clk	= "gpt6_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> timer7 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer7 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_timer7_hwmod,
	.clk	= "gpt7_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> timer8 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer8 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_timer8_hwmod,
	.clk	= "gpt8_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> timer9 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer9 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_timer9_hwmod,
	.clk	= "gpt9_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer10 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer10 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_timer10_hwmod,
	.clk	= "gpt10_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer11 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_timer11_hwmod,
	.clk	= "gpt11_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
	.master	= &omap3xxx_l4_wkup_hwmod,
	.slave	= &omap3xxx_wd_timer2_hwmod,
	.clk	= "wdt2_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss */
static struct omap_hwmod_ocp_if omap3430es1_l4_core__dss = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3430es1_dss_core_hwmod,
	.clk	= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_dss_core_hwmod,
	.clk	= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3_L4_CORE_FW_DSS_CORE_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss_dispc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_dss_dispc_hwmod,
	.clk	= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3_L4_CORE_FW_DSS_DISPC_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss_dsi1 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_dss_dsi1_hwmod,
	.clk	= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3_L4_CORE_FW_DSS_DSI_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss_rfbi */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_rfbi = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_dss_rfbi_hwmod,
	.clk	= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3_L4_CORE_FW_DSS_RFBI_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss_venc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_dss_venc_hwmod,
	.clk	= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region = OMAP3_L4_CORE_FW_DSS_VENC_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.flags	= OCPIF_SWSUP_IDLE,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio1 */
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__gpio1 = {
	.master	= &omap3xxx_l4_wkup_hwmod,
	.slave	= &omap3xxx_gpio1_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio2 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio2 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_gpio2_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio3 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio3 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_gpio3_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/*
 * 'mmu' class
 * The memory management unit performs virtual to physical address translation
 * for its requestors.
 */

static struct omap_hwmod_class_sysconfig mmu_sysc = {
	.rev_offs	= 0x000,
	.sysc_offs	= 0x010,
	.syss_offs	= 0x014,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_mmu_hwmod_class = {
	.name = "mmu",
	.sysc = &mmu_sysc,
};

/* mmu isp */
static struct omap_hwmod omap3xxx_mmu_isp_hwmod;

/* l4_core -> mmu isp */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmu_isp = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_mmu_isp_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod omap3xxx_mmu_isp_hwmod = {
	.name		= "mmu_isp",
	.class		= &omap3xxx_mmu_hwmod_class,
	.main_clk	= "cam_ick",
	.flags		= HWMOD_NO_IDLEST,
};

/* mmu iva */
static struct omap_hwmod omap3xxx_mmu_iva_hwmod;

static struct omap_hwmod_rst_info omap3xxx_mmu_iva_resets[] = {
	{ .name = "mmu", .rst_shift = 1, .st_shift = 9 },
};

/* l3_main -> iva mmu */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__mmu_iva = {
	.master	= &omap3xxx_l3_main_hwmod,
	.slave	= &omap3xxx_mmu_iva_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod omap3xxx_mmu_iva_hwmod = {
	.name		= "mmu_iva",
	.class		= &omap3xxx_mmu_hwmod_class,
	.clkdm_name	= "iva2_clkdm",
	.rst_lines	= omap3xxx_mmu_iva_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap3xxx_mmu_iva_resets),
	.main_clk	= "iva2_ck",
	.prcm = {
		.omap2 = {
			.module_offs = OMAP3430_IVA2_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
		},
	},
	.flags		= HWMOD_NO_IDLEST,
};

/* l4_per -> gpio4 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio4 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_gpio4_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio5 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio5 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_gpio5_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio6 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_gpio6_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp1 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_mcbsp1_hwmod,
	.clk	= "mcbsp1_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> mcbsp2 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_mcbsp2_hwmod,
	.clk	= "mcbsp2_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> mcbsp3 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_mcbsp3_hwmod,
	.clk	= "mcbsp3_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> mcbsp4 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp4 = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_mcbsp4_hwmod,
	.clk	= "mcbsp4_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp5 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp5 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_mcbsp5_hwmod,
	.clk	= "mcbsp5_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> mcbsp2_sidetone */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2_sidetone = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_mcbsp2_sidetone_hwmod,
	.clk	= "mcbsp2_ick",
	.user	= OCP_USER_MPU,
};

/* l4_per -> mcbsp3_sidetone */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3_sidetone = {
	.master	= &omap3xxx_l4_per_hwmod,
	.slave	= &omap3xxx_mcbsp3_sidetone_hwmod,
	.clk	= "mcbsp3_ick",
	.user	= OCP_USER_MPU,
};

/* l4_core -> mailbox */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mailbox = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_mailbox_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi1 interface */
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = {
	.master	=
	&omap3xxx_l4_core_hwmod,
	.slave	= &omap34xx_mcspi1,
	.clk	= "mcspi1_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi2 interface */
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap34xx_mcspi2,
	.clk	= "mcspi2_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi3 interface */
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap34xx_mcspi3,
	.clk	= "mcspi3_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi4 interface */
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap34xx_mcspi4,
	.clk	= "mcspi4_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_ocp_if omap3xxx_usb_host_hs__l3_main_2 = {
	.master	= &omap3xxx_usb_host_hs_hwmod,
	.slave	= &omap3xxx_l3_main_hwmod,
	.clk	= "core_l3_ick",
	.user	= OCP_USER_MPU,
};

static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_host_hs = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_usb_host_hs_hwmod,
	.clk	= "usbhost_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_tll_hs = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_usb_tll_hs_hwmod,
	.clk	= "usbtll_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> hdq1w interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__hdq1w = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_hdq1w_hwmod,
	.clk	= "hdq_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
	.flags	= OMAP_FIREWALL_L4 | OCPIF_SWSUP_IDLE,
};

/* am35xx has Davinci MDIO & EMAC */
static struct omap_hwmod_class am35xx_mdio_class = {
	.name = "davinci_mdio",
};

static struct omap_hwmod am35xx_mdio_hwmod = {
	.name		= "davinci_mdio",
	.class		= &am35xx_mdio_class,
	.flags		= HWMOD_NO_IDLEST,
};

/*
 * XXX Should be connected to an IPSS hwmod, not the L3 directly;
 * but this will probably require some additional hwmod core support,
 * so is left as a future to-do item.
 */
static struct omap_hwmod_ocp_if am35xx_mdio__l3 = {
	.master	= &am35xx_mdio_hwmod,
	.slave	= &omap3xxx_l3_main_hwmod,
	.clk	= "emac_fck",
	.user	= OCP_USER_MPU,
};

/* l4_core -> davinci mdio */
/*
 * XXX Should be connected to an IPSS hwmod, not the L4_CORE directly;
 * but this will probably require some additional hwmod core support,
 * so is left as a future to-do item.
 */
static struct omap_hwmod_ocp_if am35xx_l4_core__mdio = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &am35xx_mdio_hwmod,
	.clk	= "emac_fck",
	.user	= OCP_USER_MPU,
};

static struct omap_hwmod_class am35xx_emac_class = {
	.name = "davinci_emac",
};

static struct omap_hwmod am35xx_emac_hwmod = {
	.name		= "davinci_emac",
	.class		= &am35xx_emac_class,
	/*
	 * According to Mark Greer, the MPU will not return from WFI
	 * when the EMAC signals an interrupt.
	 * https://lore.kernel.org/all/[email protected]/
	 */
	.flags		= (HWMOD_NO_IDLEST | HWMOD_BLOCK_WFI),
};

/* l3_core -> davinci emac interface */
/*
 * XXX Should be connected to an IPSS hwmod, not the L3 directly;
 * but this will probably require some additional hwmod core support,
 * so is left as a future to-do item.
 */
static struct omap_hwmod_ocp_if am35xx_emac__l3 = {
	.master	= &am35xx_emac_hwmod,
	.slave	= &omap3xxx_l3_main_hwmod,
	.clk	= "emac_ick",
	.user	= OCP_USER_MPU,
};

/* l4_core -> davinci emac */
/*
 * XXX Should be connected to an IPSS hwmod, not the L4_CORE directly;
 * but this will probably require some additional hwmod core support,
 * so is left as a future to-do item.
 */
static struct omap_hwmod_ocp_if am35xx_l4_core__emac = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &am35xx_emac_hwmod,
	.clk	= "emac_ick",
	.user	= OCP_USER_MPU,
};

static struct omap_hwmod_ocp_if omap3xxx_l3_main__gpmc = {
	.master	= &omap3xxx_l3_main_hwmod,
	.slave	= &omap3xxx_gpmc_hwmod,
	.clk	= "core_l3_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> SHAM2 (SHA1/MD5) (similar to omap24xx) */
static struct omap_hwmod_class_sysconfig omap3_sham_sysc = {
	.rev_offs	= 0x5c,
	.sysc_offs	= 0x60,
	.syss_offs	= 0x64,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
	.sysc_fields	= &omap3_sham_sysc_fields,
};

static struct omap_hwmod_class omap3xxx_sham_class = {
	.name	= "sham",
	.sysc	= &omap3_sham_sysc,
};

static struct omap_hwmod omap3xxx_sham_hwmod = {
	.name		= "sham",
	.main_clk	= "sha12_ick",
	.prcm = {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_SHA12_SHIFT,
		},
	},
	.class		= &omap3xxx_sham_class,
};

static struct omap_hwmod_ocp_if omap3xxx_l4_core__sham = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_sham_hwmod,
	.clk	= "sha12_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/*
 * 'ssi' class
 * synchronous serial interface (multichannel and full-duplex serial if)
 */
static struct omap_hwmod_class_sysconfig omap34xx_ssi_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_MIDLEMODE |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_ssi_hwmod_class = {
	.name	= "ssi",
	.sysc	= &omap34xx_ssi_sysc,
};

static struct omap_hwmod omap3xxx_ssi_hwmod = {
	.name		= "ssi",
	.class		= &omap3xxx_ssi_hwmod_class,
	.clkdm_name	= "core_l4_clkdm",
	.main_clk	= "ssi_ssr_fck",
	.prcm = {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT,
		},
	},
};

/* L4 CORE -> SSI */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__ssi = {
	.master	= &omap3xxx_l4_core_hwmod,
	.slave	= &omap3xxx_ssi_hwmod,
	.clk	= "ssi_ick",
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* Interconnect links registered for every OMAP3 variant */
static struct omap_hwmod_ocp_if *omap3xxx_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_l3_main__l4_core,
	&omap3xxx_l3_main__l4_per,
	&omap3xxx_mpu__l3_main,
	&omap3xxx_l3_main__l4_debugss,
	&omap3xxx_l4_core__l4_wkup,
	&omap3xxx_l4_core__mmc3,
	&omap3_l4_core__uart1,
	&omap3_l4_core__uart2,
	&omap3_l4_per__uart3,
	&omap3_l4_core__i2c1,
	&omap3_l4_core__i2c2,
	&omap3_l4_core__i2c3,
	&omap3xxx_l4_wkup__l4_sec,
	&omap3xxx_l4_per__timer3,
	&omap3xxx_l4_per__timer4,
	&omap3xxx_l4_per__timer5,
	&omap3xxx_l4_per__timer6,
	&omap3xxx_l4_per__timer7,
	&omap3xxx_l4_per__timer8,
	&omap3xxx_l4_per__timer9,
	&omap3xxx_l4_core__timer10,
	&omap3xxx_l4_core__timer11,
	&omap3xxx_l4_wkup__wd_timer2,
	&omap3xxx_l4_wkup__gpio1,
	&omap3xxx_l4_per__gpio2,
	&omap3xxx_l4_per__gpio3,
	&omap3xxx_l4_per__gpio4,
	&omap3xxx_l4_per__gpio5,
	&omap3xxx_l4_per__gpio6,
	&omap3xxx_l4_core__mcbsp1,
	&omap3xxx_l4_per__mcbsp2,
	&omap3xxx_l4_per__mcbsp3,
	&omap3xxx_l4_per__mcbsp4,
	&omap3xxx_l4_core__mcbsp5,
	&omap3xxx_l4_per__mcbsp2_sidetone,
	&omap3xxx_l4_per__mcbsp3_sidetone,
	&omap34xx_l4_core__mcspi1,
	&omap34xx_l4_core__mcspi2,
	&omap34xx_l4_core__mcspi3,
	&omap34xx_l4_core__mcspi4,
	&omap3xxx_l3_main__gpmc,
	NULL,
};

/* crypto hwmod links */
static struct omap_hwmod_ocp_if *omap34xx_sham_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_l4_core__sham,
	NULL,
};

static struct omap_hwmod_ocp_if *omap36xx_sham_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_l4_core__sham,
	NULL
};

/*
 * Apparently the SHA/MD5 and AES accelerator IP blocks are
 * only present on some AM35xx chips, and no one knows which
 * ones.
 * See https://lore.kernel.org/all/[email protected]/
 * So if you need these IP blocks on an AM35xx, try uncommenting
 * the following lines.
 */
static struct omap_hwmod_ocp_if *am35xx_sham_hwmod_ocp_ifs[] __initdata = {
	/* &omap3xxx_l4_core__sham, */
	NULL
};

/* 3430ES1-only hwmod links */
static struct omap_hwmod_ocp_if *omap3430es1_hwmod_ocp_ifs[] __initdata = {
	&omap3430es1_dss__l3,
	&omap3430es1_l4_core__dss,
	NULL,
};

/* 3430ES2+-only hwmod links */
static struct omap_hwmod_ocp_if *omap3430es2plus_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_dss__l3,
	&omap3xxx_l4_core__dss,
	&omap3xxx_usb_host_hs__l3_main_2,
	&omap3xxx_l4_core__usb_host_hs,
	&omap3xxx_l4_core__usb_tll_hs,
	NULL,
};

/* <= 3430ES3-only hwmod links */
static struct omap_hwmod_ocp_if *omap3430_pre_es3_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_l4_core__pre_es3_mmc1,
	&omap3xxx_l4_core__pre_es3_mmc2,
	NULL,
};

/* 3430ES3+-only hwmod links */
static struct omap_hwmod_ocp_if *omap3430_es3plus_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_l4_core__es3plus_mmc1,
	&omap3xxx_l4_core__es3plus_mmc2,
	NULL,
};

/* 34xx-only hwmod links (all ES revisions) */
static struct omap_hwmod_ocp_if *omap34xx_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_l3__iva,
	&omap34xx_l4_core__sr1,
	&omap34xx_l4_core__sr2,
	&omap3xxx_l4_core__mailbox,
	&omap3xxx_l4_core__hdq1w,
	&omap3xxx_sad2d__l3,
	&omap3xxx_l4_core__mmu_isp,
	&omap3xxx_l3_main__mmu_iva,
	&omap3xxx_l4_core__ssi,
	NULL,
};

/* 36xx-only hwmod links (all ES revisions) */
static struct omap_hwmod_ocp_if *omap36xx_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_l3__iva,
	&omap36xx_l4_per__uart4,
	&omap3xxx_dss__l3,
	&omap3xxx_l4_core__dss,
	&omap36xx_l4_core__sr1,
	&omap36xx_l4_core__sr2,
	&omap3xxx_l4_core__mailbox,
	&omap3xxx_usb_host_hs__l3_main_2,
	&omap3xxx_l4_core__usb_host_hs,
	&omap3xxx_l4_core__usb_tll_hs,
	&omap3xxx_l4_core__es3plus_mmc1,
	&omap3xxx_l4_core__es3plus_mmc2,
	&omap3xxx_l4_core__hdq1w,
	&omap3xxx_sad2d__l3,
	&omap3xxx_l4_core__mmu_isp,
	&omap3xxx_l3_main__mmu_iva,
	&omap3xxx_l4_core__ssi,
	NULL,
};

static struct omap_hwmod_ocp_if *am35xx_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_dss__l3,
	&omap3xxx_l4_core__dss,
	&am35xx_l4_core__uart4,
	&omap3xxx_usb_host_hs__l3_main_2,
	&omap3xxx_l4_core__usb_host_hs,
	&omap3xxx_l4_core__usb_tll_hs,
	&omap3xxx_l4_core__es3plus_mmc1,
	&omap3xxx_l4_core__es3plus_mmc2,
	&omap3xxx_l4_core__hdq1w,
	&am35xx_mdio__l3,
	&am35xx_l4_core__mdio,
	&am35xx_emac__l3,
	&am35xx_l4_core__emac,
	NULL,
};

/* Common DSS links; must be registered after dss_core (see init below) */
static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
	&omap3xxx_l4_core__dss_dispc,
	&omap3xxx_l4_core__dss_dsi1,
	&omap3xxx_l4_core__dss_rfbi,
	&omap3xxx_l4_core__dss_venc,
	NULL,
};

/**
 * omap3xxx_hwmod_is_hs_ip_block_usable - is a security IP block accessible?
 * @bus: struct device_node * for the top-level OMAP DT data
 * @dev_name: device name used in the DT file
 *
 * Determine whether a "secure" IP block @dev_name is usable by Linux.
 * There doesn't appear to be a 100% reliable way to determine this,
 * so we rely on heuristics.  If @bus is null, meaning there's no DT
 * data, then we only assume the IP block is accessible if the OMAP is
 * fused as a 'general-purpose' SoC.  If however DT data is present,
 * test to see if the IP block is described in the DT data and set to
 * 'status = "okay"'.  If so then we assume the ODM has configured the
 * OMAP firewalls to allow access to the IP block.
 *
 * Return: 0 if device named @dev_name is not likely to be accessible,
 * or 1 if it is likely to be accessible.
*/ static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus, const char *dev_name) { struct device_node *node; bool available; if (!bus) return omap_type() == OMAP2_DEVICE_TYPE_GP; node = of_get_child_by_name(bus, dev_name); available = of_device_is_available(node); of_node_put(node); return available; } int __init omap3xxx_hwmod_init(void) { int r; struct omap_hwmod_ocp_if **h = NULL, **h_sham = NULL; struct device_node *bus; unsigned int rev; omap_hwmod_init(); /* Register hwmod links common to all OMAP3 */ r = omap_hwmod_register_links(omap3xxx_hwmod_ocp_ifs); if (r < 0) return r; rev = omap_rev(); /* * Register hwmod links common to individual OMAP3 families, all * silicon revisions (e.g., 34xx, or AM3505/3517, or 36xx) * All possible revisions should be included in this conditional. */ if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 || rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) { h = omap34xx_hwmod_ocp_ifs; h_sham = omap34xx_sham_hwmod_ocp_ifs; } else if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) { h = am35xx_hwmod_ocp_ifs; h_sham = am35xx_sham_hwmod_ocp_ifs; } else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2) { h = omap36xx_hwmod_ocp_ifs; h_sham = omap36xx_sham_hwmod_ocp_ifs; } else { WARN(1, "OMAP3 hwmod family init: unknown chip type\n"); return -EINVAL; } r = omap_hwmod_register_links(h); if (r < 0) return r; /* * Register crypto hwmod links only if they are not disabled in DT. * If DT information is missing, enable them only for GP devices. 
*/ bus = of_find_node_by_name(NULL, "ocp"); if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) { r = omap_hwmod_register_links(h_sham); if (r < 0) goto put_node; } of_node_put(bus); /* * Register hwmod links specific to certain ES levels of a * particular family of silicon (e.g., 34xx ES1.0) */ h = NULL; if (rev == OMAP3430_REV_ES1_0) { h = omap3430es1_hwmod_ocp_ifs; } else if (rev == OMAP3430_REV_ES2_0 || rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) { h = omap3430es2plus_hwmod_ocp_ifs; } if (h) { r = omap_hwmod_register_links(h); if (r < 0) return r; } h = NULL; if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 || rev == OMAP3430_REV_ES2_1) { h = omap3430_pre_es3_hwmod_ocp_ifs; } else if (rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) { h = omap3430_es3plus_hwmod_ocp_ifs; } if (h) r = omap_hwmod_register_links(h); if (r < 0) return r; /* * DSS code presumes that dss_core hwmod is handled first, * _before_ any other DSS related hwmods so register common * DSS hwmod links last to ensure that dss_core is already * registered. Otherwise some change things may happen, for * ex. if dispc is handled before dss_core and DSS is enabled * in bootloader DISPC will be reset with outputs enabled * which sometimes leads to unrecoverable L3 error. XXX The * long-term fix to this is to ensure hwmods are set up in * dependency order in the hwmod core code. */ r = omap_hwmod_register_links(omap3xxx_dss_hwmod_ocp_ifs); return r; put_node: of_node_put(bus); return r; }
linux-master
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4 PRM instance functions
 *
 * Copyright (C) 2009 Nokia Corporation
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Paul Walmsley
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>

#include "iomap.h"
#include "common.h"
#include "prcm-common.h"
#include "prm44xx.h"
#include "prm54xx.h"
#include "prm7xx.h"
#include "prminst44xx.h"
#include "prm-regbits-44xx.h"
#include "prcm44xx.h"
#include "prcm43xx.h"
#include "prcm_mpu44xx.h"
#include "soc.h"

/* Per-partition PRM base addresses, indexed by PRCM partition ID */
static struct omap_domain_base _prm_bases[OMAP4_MAX_PRCM_PARTITIONS];

/* PRM device instance offset; PRM_INSTANCE_UNKNOWN until SoC init sets it */
static s32 prm_dev_inst = PRM_INSTANCE_UNKNOWN;

/**
 * omap_prm_base_init - Populates the prm partitions
 *
 * Populates the base addresses of the _prm_bases
 * array used for read/write of prm module registers.
 */
void omap_prm_base_init(void)
{
	memcpy(&_prm_bases[OMAP4430_PRM_PARTITION], &prm_base,
	       sizeof(prm_base));
	memcpy(&_prm_bases[OMAP4430_PRCM_MPU_PARTITION], &prcm_mpu_base,
	       sizeof(prcm_mpu_base));
}

/* Return the PRM device instance offset, or PRM_INSTANCE_UNKNOWN */
s32 omap4_prmst_get_prm_dev_inst(void)
{
	return prm_dev_inst;
}

/* Record the PRM device instance offset for this SoC */
void omap4_prminst_set_prm_dev_inst(s32 dev_inst)
{
	prm_dev_inst = dev_inst;
}

/* Read a register in a PRM instance */
u32 omap4_prminst_read_inst_reg(u8 part, s16 inst, u16 idx)
{
	/* A bad partition or an unmapped base is a caller bug, not a runtime error */
	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
	       part == OMAP4430_INVALID_PRCM_PARTITION ||
	       !_prm_bases[part].va);
	return readl_relaxed(_prm_bases[part].va + inst + idx);
}

/* Write into a register in a PRM instance */
void omap4_prminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx)
{
	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
	       part == OMAP4430_INVALID_PRCM_PARTITION ||
	       !_prm_bases[part].va);
	writel_relaxed(val, _prm_bases[part].va + inst + idx);
}

/*
 * Read-modify-write a register in PRM.  Caller must lock: the
 * read-modify-write sequence is not atomic against other writers.
 * Returns the value written back.
 */
u32 omap4_prminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
				    u16 idx)
{
	u32 v;

	v = omap4_prminst_read_inst_reg(part, inst, idx);
	v &= ~mask;
	v |= bits;
	omap4_prminst_write_inst_reg(v, part, inst, idx);

	return v;
}

/**
 * omap4_prminst_is_hardreset_asserted - read the HW reset line state of
 * submodules contained in the hwmod module
 * @rstctrl_reg: RM_RSTCTRL register address for this module
 * @shift: register bit shift corresponding to the reset line to check
 *
 * Returns 1 if the (sub)module hardreset line is currently asserted,
 * 0 if the (sub)module hardreset line is not currently asserted, or
 * -EINVAL upon parameter error.
 */
int omap4_prminst_is_hardreset_asserted(u8 shift, u8 part, s16 inst,
					u16 rstctrl_offs)
{
	u32 v;

	v = omap4_prminst_read_inst_reg(part, inst, rstctrl_offs);
	v &= 1 << shift;
	v >>= shift;

	return v;
}

/**
 * omap4_prminst_assert_hardreset - assert the HW reset line of a submodule
 * @rstctrl_reg: RM_RSTCTRL register address for this module
 * @shift: register bit shift corresponding to the reset line to assert
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * place the submodule into reset.  Returns 0 upon success or -EINVAL
 * upon an argument error.
 */
int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
				   u16 rstctrl_offs)
{
	u32 mask = 1 << shift;

	/* Setting the RSTCTRL bit asserts (holds) the reset line */
	omap4_prminst_rmw_inst_reg_bits(mask, mask, part, inst, rstctrl_offs);

	return 0;
}

/**
 * omap4_prminst_deassert_hardreset - deassert a submodule hardreset line and
 * wait
 * @shift: register bit shift corresponding to the reset line to deassert
 * @st_shift: status bit offset corresponding to the reset line
 * @part: PRM partition
 * @inst: PRM instance offset
 * @rstctrl_offs: reset register offset
 * @rstst_offs: reset status register offset
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * take the submodule out of reset and wait until the PRCM indicates
 * that the reset has completed before returning.  Returns 0 upon success or
 * -EINVAL upon an argument error, -EEXIST if the submodule was already out
 * of reset, or -EBUSY if the submodule did not exit reset promptly.
 */
int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part,
				     s16 inst, u16 rstctrl_offs,
				     u16 rstst_offs)
{
	int c;
	u32 mask = 1 << shift;
	u32 st_mask = 1 << st_shift;

	/* Check the current status to avoid de-asserting the line twice */
	if (omap4_prminst_is_hardreset_asserted(shift, part, inst,
						rstctrl_offs) == 0)
		return -EEXIST;

	/* Clear the reset status by writing 1 to the status bit */
	omap4_prminst_rmw_inst_reg_bits(0xffffffff, st_mask, part, inst,
					rstst_offs);
	/* de-assert the reset control line */
	omap4_prminst_rmw_inst_reg_bits(mask, 0, part, inst, rstctrl_offs);
	/* wait the status to be set */
	omap_test_timeout(omap4_prminst_is_hardreset_asserted(st_shift, part,
							      inst,
							      rstst_offs),
			  MAX_MODULE_HARDRESET_WAIT, c);

	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
}

/*
 * Trigger a global warm software reset of the SoC by setting the
 * RST_GLOBAL_WARM_SW bit in PRM_RSTCTRL.  No-op if the PRM device
 * instance has not been set yet.
 */
void omap4_prminst_global_warm_sw_reset(void)
{
	u32 v;
	s32 inst = omap4_prmst_get_prm_dev_inst();

	if (inst == PRM_INSTANCE_UNKNOWN)
		return;

	v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, inst,
					OMAP4_PRM_RSTCTRL_OFFSET);
	v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
	omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION, inst,
				     OMAP4_PRM_RSTCTRL_OFFSET);

	/* OCP barrier: read back to ensure the write has reached the PRM */
	v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, inst,
					OMAP4_PRM_RSTCTRL_OFFSET);
}
linux-master
arch/arm/mach-omap2/prminst44xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP2xxx PRM module functions
 *
 * Copyright (C) 2010-2012 Texas Instruments, Inc.
 * Copyright (C) 2010 Nokia Corporation
 * Benoît Cousson
 * Paul Walmsley
 * Rajendra Nayak <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>

#include "powerdomain.h"
#include "clockdomain.h"
#include "prm2xxx.h"
#include "cm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"

/*
 * OMAP24xx PM_PWSTCTRL_*.POWERSTATE and PM_PWSTST_*.LASTSTATEENTERED bits -
 * these are reversed from the bits used on OMAP3+
 */
#define OMAP24XX_PWRDM_POWER_ON		0x0
#define OMAP24XX_PWRDM_POWER_RET	0x1
#define OMAP24XX_PWRDM_POWER_OFF	0x3

/*
 * omap2xxx_prm_reset_src_map - map from bits in the PRM_RSTST_WKUP
 * hardware register (which are specific to the OMAP2xxx SoCs) to
 * reset source ID bit shifts (which is an OMAP SoC-independent
 * enumeration)
 */
static struct prm_reset_src_map omap2xxx_prm_reset_src_map[] = {
	{ OMAP_GLOBALCOLD_RST_SHIFT, OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT },
	{ OMAP_GLOBALWARM_RST_SHIFT, OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT },
	{ OMAP24XX_SECU_VIOL_RST_SHIFT, OMAP_SECU_VIOL_RST_SRC_ID_SHIFT },
	{ OMAP24XX_MPU_WD_RST_SHIFT, OMAP_MPU_WD_RST_SRC_ID_SHIFT },
	{ OMAP24XX_SECU_WD_RST_SHIFT, OMAP_SECU_WD_RST_SRC_ID_SHIFT },
	{ OMAP24XX_EXTWMPU_RST_SHIFT, OMAP_EXTWARM_RST_SRC_ID_SHIFT },
	{ -1, -1 },	/* sentinel: terminates the walk below */
};

/**
 * omap2xxx_prm_read_reset_sources - return the last SoC reset source
 *
 * Return a u32 representing the last reset sources of the SoC.  The
 * returned reset source bits are standardized across OMAP SoCs.
 */
static u32 omap2xxx_prm_read_reset_sources(void)
{
	struct prm_reset_src_map *p;
	u32 r = 0;
	u32 v;

	v = omap2_prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST);

	/* Translate each set HW status bit into its standardized shift */
	p = omap2xxx_prm_reset_src_map;
	while (p->reg_shift >= 0 && p->std_shift >= 0) {
		if (v & (1 << p->reg_shift))
			r |= 1 << p->std_shift;
		p++;
	}

	return r;
}

/**
 * omap2xxx_pwrst_to_common_pwrst - convert OMAP2xxx pwrst to common pwrst
 * @omap2xxx_pwrst: OMAP2xxx hardware power state to convert
 *
 * Return the common power state bits corresponding to the OMAP2xxx
 * hardware power state bits @omap2xxx_pwrst, or -EINVAL upon error.
 */
static int omap2xxx_pwrst_to_common_pwrst(u8 omap2xxx_pwrst)
{
	u8 pwrst;

	switch (omap2xxx_pwrst) {
	case OMAP24XX_PWRDM_POWER_OFF:
		pwrst = PWRDM_POWER_OFF;
		break;
	case OMAP24XX_PWRDM_POWER_RET:
		pwrst = PWRDM_POWER_RET;
		break;
	case OMAP24XX_PWRDM_POWER_ON:
		pwrst = PWRDM_POWER_ON;
		break;
	default:
		return -EINVAL;
	}

	return pwrst;
}

/**
 * omap2xxx_prm_dpll_reset - use DPLL reset to reboot the OMAP SoC
 *
 * Set the DPLL reset bit, which should reboot the SoC.  This is the
 * recommended way to restart the SoC.  No return value.
 */
static void omap2xxx_prm_dpll_reset(void)
{
	omap2_prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, WKUP_MOD,
				   OMAP2_RM_RSTCTRL);
	/* OCP barrier: read back to ensure the write has posted */
	omap2_prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTCTRL);
}

/**
 * omap2xxx_prm_clear_mod_irqs - clear wakeup status bits for a module
 * @module: PRM module to clear wakeups from
 * @regs: register offset to clear
 * @wkst_mask: wakeup status mask to clear
 *
 * Clears wakeup status bits for a given module, so that the device can
 * re-enter idle.  Always returns 0.
 */
static int omap2xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
{
	u32 wkst;

	/* WKST bits are write-1-to-clear: write back only the set bits */
	wkst = omap2_prm_read_mod_reg(module, regs);
	wkst &= wkst_mask;
	omap2_prm_write_mod_reg(wkst, module, regs);
	return 0;
}

/* Force the clockdomain into sleep by setting FORCESTATE; returns 0 */
int omap2xxx_clkdm_sleep(struct clockdomain *clkdm)
{
	omap2_prm_set_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
				   clkdm->pwrdm.ptr->prcm_offs,
				   OMAP2_PM_PWSTCTRL);
	return 0;
}

/* Wake the clockdomain by clearing FORCESTATE; returns 0 */
int omap2xxx_clkdm_wakeup(struct clockdomain *clkdm)
{
	omap2_prm_clear_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
				     clkdm->pwrdm.ptr->prcm_offs,
				     OMAP2_PM_PWSTCTRL);
	return 0;
}

/*
 * Program the next power state for @pwrdm, translating the common
 * PWRDM_POWER_* encoding to the reversed OMAP24xx encoding (see the
 * OMAP24XX_PWRDM_POWER_* defines above).  Returns 0 or -EINVAL.
 */
static int omap2xxx_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
{
	u8 omap24xx_pwrst;

	switch (pwrst) {
	case PWRDM_POWER_OFF:
		omap24xx_pwrst = OMAP24XX_PWRDM_POWER_OFF;
		break;
	case PWRDM_POWER_RET:
		omap24xx_pwrst = OMAP24XX_PWRDM_POWER_RET;
		break;
	case PWRDM_POWER_ON:
		omap24xx_pwrst = OMAP24XX_PWRDM_POWER_ON;
		break;
	default:
		return -EINVAL;
	}

	omap2_prm_rmw_mod_reg_bits(OMAP_POWERSTATE_MASK,
				   (omap24xx_pwrst << OMAP_POWERSTATE_SHIFT),
				   pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL);
	return 0;
}

/* Read the programmed next power state, in common PWRDM_POWER_* encoding */
static int omap2xxx_pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
{
	u8 omap2xxx_pwrst;

	omap2xxx_pwrst = omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
						       OMAP2_PM_PWSTCTRL,
						       OMAP_POWERSTATE_MASK);

	return omap2xxx_pwrst_to_common_pwrst(omap2xxx_pwrst);
}

/* Read the current power state, in common PWRDM_POWER_* encoding */
static int omap2xxx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
{
	u8 omap2xxx_pwrst;

	omap2xxx_pwrst = omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
						       OMAP2_PM_PWSTST,
						       OMAP_POWERSTATEST_MASK);

	return omap2xxx_pwrst_to_common_pwrst(omap2xxx_pwrst);
}

/* Powerdomain operations for OMAP2; shared omap2_* helpers live elsewhere */
struct pwrdm_ops omap2_pwrdm_operations = {
	.pwrdm_set_next_pwrst	= omap2xxx_pwrdm_set_next_pwrst,
	.pwrdm_read_next_pwrst	= omap2xxx_pwrdm_read_next_pwrst,
	.pwrdm_read_pwrst	= omap2xxx_pwrdm_read_pwrst,
	.pwrdm_set_logic_retst	= omap2_pwrdm_set_logic_retst,
	.pwrdm_set_mem_onst	= omap2_pwrdm_set_mem_onst,
	.pwrdm_set_mem_retst	= omap2_pwrdm_set_mem_retst,
	.pwrdm_read_mem_pwrst	= omap2_pwrdm_read_mem_pwrst,
	.pwrdm_read_mem_retst	= omap2_pwrdm_read_mem_retst,
	.pwrdm_wait_transition	= omap2_pwrdm_wait_transition,
};

/* OMAP2xxx low-level PRM operations, registered with the PRM core below */
static struct prm_ll_data omap2xxx_prm_ll_data = {
	.read_reset_sources = &omap2xxx_prm_read_reset_sources,
	.assert_hardreset = &omap2_prm_assert_hardreset,
	.deassert_hardreset = &omap2_prm_deassert_hardreset,
	.is_hardreset_asserted = &omap2_prm_is_hardreset_asserted,
	.reset_system = &omap2xxx_prm_dpll_reset,
	.clear_mod_irqs = &omap2xxx_prm_clear_mod_irqs,
};

/* Register the OMAP2xxx PRM ops with the PRM core; @data is unused here */
int __init omap2xxx_prm_init(const struct omap_prcm_init_data *data)
{
	return prm_register(&omap2xxx_prm_ll_data);
}

static void __exit omap2xxx_prm_exit(void)
{
	prm_unregister(&omap2xxx_prm_ll_data);
}
__exitcall(omap2xxx_prm_exit);
linux-master
arch/arm/mach-omap2/prm2xxx.c
/*
 * linux/arch/arm/mach-omap2/timer.c
 *
 * OMAP2 GP timer support.
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Update to use new clocksource/clockevent layers
 * Author: Kevin Hilman, MontaVista Software, Inc. <[email protected]>
 * Copyright (C) 2007 MontaVista Software, Inc.
 *
 * Original driver:
 * Copyright (C) 2005 Nokia Corporation
 * Author: Paul Mundt <[email protected]>
 *         Juha Yrjölä <[email protected]>
 * OMAP Dual-mode timer framework support by Timo Teras
 *
 * Some parts based off of TI's 24xx code:
 *
 * Copyright (C) 2004-2009 Texas Instruments, Inc.
 *
 * Roughly modelled after the OMAP1 MPU timer code.
 * Added OMAP4 support - Santosh Shilimkar <[email protected]>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/clk.h>
#include <linux/clocksource.h>

#include "soc.h"
#include "common.h"
#include "control.h"
#include "omap-secure.h"

#define REALTIME_COUNTER_BASE				0x48243200
#define INCREMENTER_NUMERATOR_OFFSET			0x10
#define INCREMENTER_DENUMERATOR_RELOAD_OFFSET		0x14
#define NUMERATOR_DENUMERATOR_MASK			0xfffff000

/* Master counter frequency programmed by realtime_counter_init() */
static unsigned long arch_timer_freq;

/* Tell the secure monitor the CNTFRQ value via an SMC call */
void set_cntfreq(void)
{
	omap_smc1(OMAP5_DRA7_MON_SET_CNTFRQ_INDEX, arch_timer_freq);
}

/*
 * The realtime counter also called master counter, is a free-running
 * counter, which is related to real time.  It produces the count used
 * by the CPU local timer peripherals in the MPU cluster.  The timer counts
 * at a rate of 6.144 MHz.  Because the device operates on different clocks
 * in different power modes, the master counter shifts operation between
 * clocks, adjusting the increment per clock in hardware accordingly to
 * maintain a constant count rate.
 *
 * Reads the sys_clkin rate, programs the increment numerator/denominator
 * pair that scales it to the 6.144 MHz master-counter rate (with the
 * DRA7 errata i856 correction when required), records the resulting
 * frequency in arch_timer_freq and pushes it to the secure side.
 */
static void __init realtime_counter_init(void)
{
	void __iomem *base;
	/*
	 * The clk handle is acquired and released within this one call, so
	 * there is no reason for it to be static (a static local would only
	 * waste .bss and make this __init function needlessly non-reentrant).
	 */
	struct clk *sys_clk;
	unsigned long rate;
	unsigned int reg;
	unsigned long long num, den;

	base = ioremap(REALTIME_COUNTER_BASE, SZ_32);
	if (!base) {
		pr_err("%s: ioremap failed\n", __func__);
		return;
	}
	sys_clk = clk_get(NULL, "sys_clkin");
	if (IS_ERR(sys_clk)) {
		pr_err("%s: failed to get system clock handle\n", __func__);
		iounmap(base);
		return;
	}

	rate = clk_get_rate(sys_clk);
	clk_put(sys_clk);

	if (soc_is_dra7xx()) {
		/*
		 * Errata i856 says the 32.768KHz crystal does not start at
		 * power on, so the CPU falls back to an emulated 32KHz clock
		 * based on sysclk / 610 instead. This causes the master counter
		 * frequency to not be 6.144MHz but at sysclk / 610 * 375 / 2
		 * (OR sysclk * 75 / 244)
		 *
		 * This affects at least the DRA7/AM572x 1.0, 1.1 revisions.
		 * Of course any board built without a populated 32.768KHz
		 * crystal would also need this fix even if the CPU is fixed
		 * later.
		 *
		 * Either case can be detected by using the two speedselect bits
		 * If they are not 0, then the 32.768KHz clock driving the
		 * coarse counter that corrects the fine counter every time it
		 * ticks is actually rate/610 rather than 32.768KHz and we
		 * should compensate to avoid the 570ppm (at 20MHz, much worse
		 * at other rates) too fast system time.
		 */
		reg = omap_ctrl_readl(DRA7_CTRL_CORE_BOOTSTRAP);
		if (reg & DRA7_SPEEDSELECT_MASK) {
			num = 75;
			den = 244;
			goto sysclk1_based;
		}
	}

	/* Numerator/denumerator values refer TRM Realtime Counter section */
	switch (rate) {
	case 12000000:
		num = 64;
		den = 125;
		break;
	case 13000000:
		num = 768;
		den = 1625;
		break;
	case 19200000:
		num = 8;
		den = 25;
		break;
	case 20000000:
		num = 192;
		den = 625;
		break;
	case 26000000:
		num = 384;
		den = 1625;
		break;
	case 27000000:
		num = 256;
		den = 1125;
		break;
	case 38400000:
	default:
		/* Program it for 38.4 MHz */
		num = 4;
		den = 25;
		break;
	}

sysclk1_based:
	/* Program numerator and denumerator registers */
	reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) &
			NUMERATOR_DENUMERATOR_MASK;
	reg |= num;
	writel_relaxed(reg, base + INCREMENTER_NUMERATOR_OFFSET);

	reg = readl_relaxed(base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET) &
			NUMERATOR_DENUMERATOR_MASK;
	reg |= den;
	writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);

	arch_timer_freq = DIV_ROUND_UP_ULL(rate * num, den);
	set_cntfreq();

	iounmap(base);
}

/* Entry point: init clocks, program the master counter, probe timers */
void __init omap5_realtime_timer_init(void)
{
	omap_clk_init();
	realtime_counter_init();

	timer_probe();
}
linux-master
arch/arm/mach-omap2/timer.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DRA7xx Power domains framework
 *
 * Copyright (C) 2009-2013 Texas Instruments, Inc.
 * Copyright (C) 2009-2011 Nokia Corporation
 *
 * Generated by code originally written by:
 * Abhijit Pagare ([email protected])
 * Benoit Cousson ([email protected])
 * Paul Walmsley ([email protected])
 *
 * This file is automatically generated from the OMAP hardware databases.
 * We respectfully ask that any modifications to this file be coordinated
 * with the public [email protected] mailing list and the
 * authors above to ensure that the autogeneration scripts are kept
 * up-to-date with the file contents.
 */

#include <linux/kernel.h>
#include <linux/init.h>

#include "powerdomain.h"

#include "prcm-common.h"
#include "prcm44xx.h"
#include "prm7xx.h"
#include "prcm_mpu7xx.h"
#include "soc.h"

/* iva_7xx_pwrdm: IVA-HD power domain */
static struct powerdomain iva_7xx_pwrdm = {
	.name		  = "iva_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_IVA_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 4,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* hwa_mem */
		[1] = PWRSTS_ON,	/* sl2_mem */
		[2] = PWRSTS_ON,	/* tcm1_mem */
		[3] = PWRSTS_ON,	/* tcm2_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* rtc_7xx_pwrdm: always-on RTC power domain */
static struct powerdomain rtc_7xx_pwrdm = {
	.name		  = "rtc_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_RTC_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
};

/* custefuse_7xx_pwrdm: Customer efuse controller power domain */
static struct powerdomain custefuse_7xx_pwrdm = {
	.name		  = "custefuse_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_CUSTEFUSE_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/*
 * custefuse_aon_7xx_pwrdm: Customer efuse controller power domain,
 * always-on variant.  Deliberately shares the "custefuse_pwrdm" name
 * with custefuse_7xx_pwrdm above: only one of the two is registered,
 * chosen per SoC in dra7xx_powerdomains_init() below.
 */
static struct powerdomain custefuse_aon_7xx_pwrdm = {
	.name		  = "custefuse_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_CUSTEFUSE_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
};

/*
 * ipu_7xx_pwrdm: IPU power domain.  NOTE(review): the autogenerated
 * description said "Audio back end power domain" and the banks are still
 * named aessmem/periphmem, presumably inherited from the OMAP4 ABE
 * template — confirm against the DRA7xx TRM.
 */
static struct powerdomain ipu_7xx_pwrdm = {
	.name		  = "ipu_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_IPU_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 2,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* aessmem */
		[1] = PWRSTS_ON,	/* periphmem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* dss_7xx_pwrdm: Display subsystem power domain */
static struct powerdomain dss_7xx_pwrdm = {
	.name		  = "dss_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_DSS_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* dss_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* l4per_7xx_pwrdm: Target peripherals power domain */
static struct powerdomain l4per_7xx_pwrdm = {
	.name		  = "l4per_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_L4PER_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
	.banks		  = 2,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* nonretained_bank */
		[1] = PWRSTS_ON,	/* retained_bank */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* gpu_7xx_pwrdm: 3D accelerator power domain */
static struct powerdomain gpu_7xx_pwrdm = {
	.name		  = "gpu_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_GPU_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* gpu_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* wkupaon_7xx_pwrdm: Wake-up power domain */
static struct powerdomain wkupaon_7xx_pwrdm = {
	.name		  = "wkupaon_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_WKUPAON_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* wkup_bank */
	},
};

/* core_7xx_pwrdm: CORE power domain */
static struct powerdomain core_7xx_pwrdm = {
	.name		  = "core_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_CORE_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
	.banks		  = 5,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* core_nret_bank */
		[1] = PWRSTS_ON,	/* core_ocmram */
		[2] = PWRSTS_ON,	/* core_other_bank */
		[3] = PWRSTS_ON,	/* ipu_l2ram */
		[4] = PWRSTS_ON,	/* ipu_unicache */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* coreaon_7xx_pwrdm: Always ON logic that sits in VDD_CORE voltage domain */
static struct powerdomain coreaon_7xx_pwrdm = {
	.name		  = "coreaon_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_COREAON_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
};

/* cpu0_7xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
static struct powerdomain cpu0_7xx_pwrdm = {
	.name		  = "cpu0_pwrdm",
	.prcm_offs	  = DRA7XX_MPU_PRCM_PRM_C0_INST,
	.prcm_partition	  = DRA7XX_MPU_PRCM_PARTITION,
	.pwrsts		  = PWRSTS_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* cpu0_l1 */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* cpu0_l1 */
	},
};

/* cpu1_7xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
static struct powerdomain cpu1_7xx_pwrdm = {
	.name		  = "cpu1_pwrdm",
	.prcm_offs	  = DRA7XX_MPU_PRCM_PRM_C1_INST,
	.prcm_partition	  = DRA7XX_MPU_PRCM_PARTITION,
	.pwrsts		  = PWRSTS_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* cpu1_l1 */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* cpu1_l1 */
	},
};

/* vpe_7xx_pwrdm: Video Processing Engine power domain */
static struct powerdomain vpe_7xx_pwrdm = {
	.name		  = "vpe_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_VPE_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* vpe_bank */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* mpu_7xx_pwrdm: Modena processor and the Neon coprocessor power domain */
static struct powerdomain mpu_7xx_pwrdm = {
	.name		  = "mpu_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_MPU_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 2,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* mpu_l2 */
		[1] = PWRSTS_RET,	/* mpu_ram */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* mpu_l2 */
		[1] = PWRSTS_ON,	/* mpu_ram */
	},
};

/* l3init_7xx_pwrdm: L3 initators pheripherals power domain */
static struct powerdomain l3init_7xx_pwrdm = {
	.name		  = "l3init_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_L3INIT_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
	.banks		  = 3,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* gmac_bank */
		[1] = PWRSTS_ON,	/* l3init_bank1 */
		[2] = PWRSTS_ON,	/* l3init_bank2 */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* eve3_7xx_pwrdm: EVE3 subsystem power domain */
static struct powerdomain eve3_7xx_pwrdm = {
	.name		  = "eve3_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_EVE3_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* eve3_bank */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* emu_7xx_pwrdm: Emulation power domain */
static struct powerdomain emu_7xx_pwrdm = {
	.name		  = "emu_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_EMU_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* emu_bank */
	},
};

/* dsp2_7xx_pwrdm: DSP2 processor power domain */
static struct powerdomain dsp2_7xx_pwrdm = {
	.name		  = "dsp2_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_DSP2_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 3,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* dsp2_edma */
		[1] = PWRSTS_ON,	/* dsp2_l1 */
		[2] = PWRSTS_ON,	/* dsp2_l2 */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* dsp1_7xx_pwrdm: Tesla processor power domain */
static struct powerdomain dsp1_7xx_pwrdm = {
	.name		  = "dsp1_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_DSP1_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 3,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* dsp1_edma */
		[1] = PWRSTS_ON,	/* dsp1_l1 */
		[2] = PWRSTS_ON,	/* dsp1_l2 */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* cam_7xx_pwrdm: Camera subsystem power domain */
static struct powerdomain cam_7xx_pwrdm = {
	.name		  = "cam_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_CAM_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* vip_bank */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* eve4_7xx_pwrdm: EVE4 subsystem power domain */
static struct powerdomain eve4_7xx_pwrdm = {
	.name		  = "eve4_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_EVE4_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* eve4_bank */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* eve2_7xx_pwrdm: EVE2 subsystem power domain */
static struct powerdomain eve2_7xx_pwrdm = {
	.name		  = "eve2_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_EVE2_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* eve2_bank */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* eve1_7xx_pwrdm: EVE1 subsystem power domain */
static struct powerdomain eve1_7xx_pwrdm = {
	.name		  = "eve1_pwrdm",
	.prcm_offs	  = DRA7XX_PRM_EVE1_INST,
	.prcm_partition	  = DRA7XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* eve1_bank */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/*
 * The following power domains are not under SW control
 *
 * mpuaon
 * mmaon
 */

/* As powerdomains are added or removed above, this list must also be changed */
static struct powerdomain *powerdomains_dra7xx[] __initdata = {
	&iva_7xx_pwrdm,
	&rtc_7xx_pwrdm,
	&ipu_7xx_pwrdm,
	&dss_7xx_pwrdm,
	&l4per_7xx_pwrdm,
	&gpu_7xx_pwrdm,
	&wkupaon_7xx_pwrdm,
	&core_7xx_pwrdm,
	&coreaon_7xx_pwrdm,
	&cpu0_7xx_pwrdm,
	&cpu1_7xx_pwrdm,
	&vpe_7xx_pwrdm,
	&mpu_7xx_pwrdm,
	&l3init_7xx_pwrdm,
	&eve3_7xx_pwrdm,
	&emu_7xx_pwrdm,
	&dsp2_7xx_pwrdm,
	&dsp1_7xx_pwrdm,
	&cam_7xx_pwrdm,
	&eve4_7xx_pwrdm,
	&eve2_7xx_pwrdm,
	&eve1_7xx_pwrdm,
	NULL
};

/* Per-SoC custefuse variant: always-on on DRA76x/DRA72x, gateable on DRA74x */
static struct powerdomain *powerdomains_dra76x[] __initdata = {
	&custefuse_aon_7xx_pwrdm,
	NULL
};

static struct powerdomain *powerdomains_dra74x[] __initdata = {
	&custefuse_7xx_pwrdm,
	NULL
};

static struct powerdomain *powerdomains_dra72x[] __initdata = {
	&custefuse_aon_7xx_pwrdm,
	NULL
};

/*
 * dra7xx_powerdomains_init - register the DRA7xx powerdomains
 *
 * Registers the common DRA7xx powerdomains plus the SoC-specific
 * custefuse variant, then finalizes powerdomain setup.
 */
void __init dra7xx_powerdomains_init(void)
{
	pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
	pwrdm_register_pwrdms(powerdomains_dra7xx);
	if (soc_is_dra76x())
		pwrdm_register_pwrdms(powerdomains_dra76x);
	else if (soc_is_dra74x())
		pwrdm_register_pwrdms(powerdomains_dra74x);
	else if (soc_is_dra72x())
		pwrdm_register_pwrdms(powerdomains_dra72x);
	pwrdm_complete_init();
}
linux-master
arch/arm/mach-omap2/powerdomains7xx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * AM33XX CM functions * * Copyright (C) 2011-2012 Texas Instruments Incorporated - https://www.ti.com/ * Vaibhav Hiremath <[email protected]> * * Reference taken from OMAP4 cminst44xx.c */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include "clockdomain.h" #include "cm.h" #include "cm33xx.h" #include "cm-regbits-34xx.h" #include "cm-regbits-33xx.h" #include "prm33xx.h" /* * CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield: * * 0x0 func: Module is fully functional, including OCP * 0x1 trans: Module is performing transition: wakeup, or sleep, or sleep * abortion * 0x2 idle: Module is in Idle mode (only OCP part). It is functional if * using separate functional clock * 0x3 disabled: Module is disabled and cannot be accessed * */ #define CLKCTRL_IDLEST_FUNCTIONAL 0x0 #define CLKCTRL_IDLEST_INTRANSITION 0x1 #define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2 #define CLKCTRL_IDLEST_DISABLED 0x3 /* Private functions */ /* Read a register in a CM instance */ static inline u32 am33xx_cm_read_reg(u16 inst, u16 idx) { return readl_relaxed(cm_base.va + inst + idx); } /* Write into a register in a CM */ static inline void am33xx_cm_write_reg(u32 val, u16 inst, u16 idx) { writel_relaxed(val, cm_base.va + inst + idx); } /* Read-modify-write a register in CM */ static inline u32 am33xx_cm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx) { u32 v; v = am33xx_cm_read_reg(inst, idx); v &= ~mask; v |= bits; am33xx_cm_write_reg(v, inst, idx); return v; } static inline u32 am33xx_cm_read_reg_bits(u16 inst, s16 idx, u32 mask) { u32 v; v = am33xx_cm_read_reg(inst, idx); v &= mask; v >>= __ffs(mask); return v; } /** * _clkctrl_idlest - read a CM_*_CLKCTRL register; mask & shift IDLEST bitfield * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * Return the IDLEST bitfield of a 
CM_*_CLKCTRL register, shifted down to * bit 0. */ static u32 _clkctrl_idlest(u16 inst, u16 clkctrl_offs) { u32 v = am33xx_cm_read_reg(inst, clkctrl_offs); v &= AM33XX_IDLEST_MASK; v >>= AM33XX_IDLEST_SHIFT; return v; } /** * _is_module_ready - can module registers be accessed without causing an abort? * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * Returns true if the module's CM_*_CLKCTRL.IDLEST bitfield is either * *FUNCTIONAL or *INTERFACE_IDLE; false otherwise. */ static bool _is_module_ready(u16 inst, u16 clkctrl_offs) { u32 v; v = _clkctrl_idlest(inst, clkctrl_offs); return (v == CLKCTRL_IDLEST_FUNCTIONAL || v == CLKCTRL_IDLEST_INTERFACE_IDLE) ? true : false; } /** * _clktrctrl_write - write @c to a CM_CLKSTCTRL.CLKTRCTRL register bitfield * @c: CLKTRCTRL register bitfield (LSB = bit 0, i.e., unshifted) * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * @c must be the unshifted value for CLKTRCTRL - i.e., this function * will handle the shift itself. */ static void _clktrctrl_write(u8 c, u16 inst, u16 cdoffs) { u32 v; v = am33xx_cm_read_reg(inst, cdoffs); v &= ~AM33XX_CLKTRCTRL_MASK; v |= c << AM33XX_CLKTRCTRL_SHIFT; am33xx_cm_write_reg(v, inst, cdoffs); } /* Public functions */ /** * am33xx_cm_is_clkdm_in_hwsup - is a clockdomain in hwsup idle mode? * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Returns true if the clockdomain referred to by (@inst, @cdoffs) * is in hardware-supervised idle mode, or 0 otherwise. */ static bool am33xx_cm_is_clkdm_in_hwsup(u16 inst, u16 cdoffs) { u32 v; v = am33xx_cm_read_reg(inst, cdoffs); v &= AM33XX_CLKTRCTRL_MASK; v >>= AM33XX_CLKTRCTRL_SHIFT; return (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? 
true : false; } /** * am33xx_cm_clkdm_enable_hwsup - put a clockdomain in hwsup-idle mode * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Put a clockdomain referred to by (@inst, @cdoffs) into * hardware-supervised idle mode. No return value. */ static void am33xx_cm_clkdm_enable_hwsup(u16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, inst, cdoffs); } /** * am33xx_cm_clkdm_disable_hwsup - put a clockdomain in swsup-idle mode * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Put a clockdomain referred to by (@inst, @cdoffs) into * software-supervised idle mode, i.e., controlled manually by the * Linux OMAP clockdomain code. No return value. */ static void am33xx_cm_clkdm_disable_hwsup(u16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, inst, cdoffs); } /** * am33xx_cm_clkdm_force_sleep - try to put a clockdomain into idle * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Put a clockdomain referred to by (@inst, @cdoffs) into idle * No return value. */ static void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, inst, cdoffs); } /** * am33xx_cm_clkdm_force_wakeup - try to take a clockdomain out of idle * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Take a clockdomain referred to by (@inst, @cdoffs) out of idle, * waking it up. No return value. 
*/ static void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, inst, cdoffs); } /* * */ /** * am33xx_cm_wait_module_ready - wait for a module to be in 'func' state * @part: PRCM partition, ignored for AM33xx * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * @bit_shift: bit shift for the register, ignored for AM33xx * * Wait for the module IDLEST to be functional. If the idle state is in any * the non functional state (trans, idle or disabled), module and thus the * sysconfig cannot be accessed and will probably lead to an "imprecise * external abort" */ static int am33xx_cm_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs, u8 bit_shift) { int i = 0; omap_test_timeout(_is_module_ready(inst, clkctrl_offs), MAX_MODULE_READY_TIME, i); return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY; } /** * am33xx_cm_wait_module_idle - wait for a module to be in 'disabled' * state * @part: CM partition, ignored for AM33xx * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * @bit_shift: bit shift for the register, ignored for AM33xx * * Wait for the module IDLEST to be disabled. Some PRCM transition, * like reset assertion or parent clock de-activation must wait the * module to be fully disabled. */ static int am33xx_cm_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs, u8 bit_shift) { int i = 0; omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) == CLKCTRL_IDLEST_DISABLED), MAX_MODULE_READY_TIME, i); return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY; } /** * am33xx_cm_module_enable - Enable the modulemode inside CLKCTRL * @mode: Module mode (SW or HW) * @part: CM partition, ignored for AM33xx * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * No return value. 
*/ static void am33xx_cm_module_enable(u8 mode, u8 part, u16 inst, u16 clkctrl_offs) { u32 v; v = am33xx_cm_read_reg(inst, clkctrl_offs); v &= ~AM33XX_MODULEMODE_MASK; v |= mode << AM33XX_MODULEMODE_SHIFT; am33xx_cm_write_reg(v, inst, clkctrl_offs); } /** * am33xx_cm_module_disable - Disable the module inside CLKCTRL * @part: CM partition, ignored for AM33xx * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * No return value. */ static void am33xx_cm_module_disable(u8 part, u16 inst, u16 clkctrl_offs) { u32 v; v = am33xx_cm_read_reg(inst, clkctrl_offs); v &= ~AM33XX_MODULEMODE_MASK; am33xx_cm_write_reg(v, inst, clkctrl_offs); } /* * Clockdomain low-level functions */ static int am33xx_clkdm_sleep(struct clockdomain *clkdm) { am33xx_cm_clkdm_force_sleep(clkdm->cm_inst, clkdm->clkdm_offs); return 0; } static int am33xx_clkdm_wakeup(struct clockdomain *clkdm) { am33xx_cm_clkdm_force_wakeup(clkdm->cm_inst, clkdm->clkdm_offs); return 0; } static void am33xx_clkdm_allow_idle(struct clockdomain *clkdm) { am33xx_cm_clkdm_enable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs); } static void am33xx_clkdm_deny_idle(struct clockdomain *clkdm) { am33xx_cm_clkdm_disable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs); } static int am33xx_clkdm_clk_enable(struct clockdomain *clkdm) { if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) return am33xx_clkdm_wakeup(clkdm); return 0; } static int am33xx_clkdm_clk_disable(struct clockdomain *clkdm) { bool hwsup = false; hwsup = am33xx_cm_is_clkdm_in_hwsup(clkdm->cm_inst, clkdm->clkdm_offs); if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) am33xx_clkdm_sleep(clkdm); return 0; } static u32 am33xx_cm_xlate_clkctrl(u8 part, u16 inst, u16 offset) { return cm_base.pa + inst + offset; } /** * am33xx_clkdm_save_context - Save the clockdomain transition context * @clkdm: The clockdomain pointer whose context needs to be saved * * Save the clockdomain transition context. 
*/ static int am33xx_clkdm_save_context(struct clockdomain *clkdm) { clkdm->context = am33xx_cm_read_reg_bits(clkdm->cm_inst, clkdm->clkdm_offs, AM33XX_CLKTRCTRL_MASK); return 0; } /** * am33xx_restore_save_context - Restore the clockdomain transition context * @clkdm: The clockdomain pointer whose context needs to be restored * * Restore the clockdomain transition context. */ static int am33xx_clkdm_restore_context(struct clockdomain *clkdm) { switch (clkdm->context) { case OMAP34XX_CLKSTCTRL_DISABLE_AUTO: am33xx_clkdm_deny_idle(clkdm); break; case OMAP34XX_CLKSTCTRL_FORCE_SLEEP: am33xx_clkdm_sleep(clkdm); break; case OMAP34XX_CLKSTCTRL_FORCE_WAKEUP: am33xx_clkdm_wakeup(clkdm); break; case OMAP34XX_CLKSTCTRL_ENABLE_AUTO: am33xx_clkdm_allow_idle(clkdm); break; } return 0; } struct clkdm_ops am33xx_clkdm_operations = { .clkdm_sleep = am33xx_clkdm_sleep, .clkdm_wakeup = am33xx_clkdm_wakeup, .clkdm_allow_idle = am33xx_clkdm_allow_idle, .clkdm_deny_idle = am33xx_clkdm_deny_idle, .clkdm_clk_enable = am33xx_clkdm_clk_enable, .clkdm_clk_disable = am33xx_clkdm_clk_disable, .clkdm_save_context = am33xx_clkdm_save_context, .clkdm_restore_context = am33xx_clkdm_restore_context, }; static const struct cm_ll_data am33xx_cm_ll_data = { .wait_module_ready = &am33xx_cm_wait_module_ready, .wait_module_idle = &am33xx_cm_wait_module_idle, .module_enable = &am33xx_cm_module_enable, .module_disable = &am33xx_cm_module_disable, .xlate_clkctrl = &am33xx_cm_xlate_clkctrl, }; int __init am33xx_cm_init(const struct omap_prcm_init_data *data) { return cm_register(&am33xx_cm_ll_data); } static void __exit am33xx_cm_exit(void) { cm_unregister(&am33xx_cm_ll_data); } __exitcall(am33xx_cm_exit);
linux-master
arch/arm/mach-omap2/cm33xx.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-omap2/board-n8x0.c * * Copyright (C) 2005-2009 Nokia Corporation * Author: Juha Yrjola <[email protected]> * * Modified from mach-omap2/board-generic.c */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/machine.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/stddef.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/usb/musb.h> #include <linux/mmc/host.h> #include <linux/platform_data/spi-omap2-mcspi.h> #include <linux/platform_data/mmc-omap.h> #include <linux/mfd/menelaus.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include "common.h" #include "mmc.h" #include "usb-tusb6010.h" #include "soc.h" #include "common-board-devices.h" #define TUSB6010_ASYNC_CS 1 #define TUSB6010_SYNC_CS 4 #define TUSB6010_DMACHAN 0x3f #define NOKIA_N810_WIMAX (1 << 2) #define NOKIA_N810 (1 << 1) #define NOKIA_N800 (1 << 0) static u32 board_caps; #define board_is_n800() (board_caps & NOKIA_N800) #define board_is_n810() (board_caps & NOKIA_N810) #define board_is_n810_wimax() (board_caps & NOKIA_N810_WIMAX) static void board_check_revision(void) { if (of_machine_is_compatible("nokia,n800")) board_caps = NOKIA_N800; else if (of_machine_is_compatible("nokia,n810")) board_caps = NOKIA_N810; else if (of_machine_is_compatible("nokia,n810-wimax")) board_caps = NOKIA_N810_WIMAX; if (!board_caps) pr_err("Unknown board\n"); } #if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010) static struct musb_hdrc_config musb_config = { .multipoint = 1, .dyn_fifo = 1, .num_eps = 16, .ram_bits = 12, }; static struct musb_hdrc_platform_data tusb_data = { .mode = MUSB_OTG, .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ .power = 100, /* Max 100 mA VBUS for host mode */ .config = &musb_config, }; static struct gpiod_lookup_table tusb_gpio_table = { .dev_id = "musb-tusb", .table = { GPIO_LOOKUP("gpio-0-15", 0, "enable", 
GPIO_ACTIVE_HIGH), GPIO_LOOKUP("gpio-48-63", 10, "int", GPIO_ACTIVE_HIGH), { } }, }; static void __init n8x0_usb_init(void) { int ret = 0; gpiod_add_lookup_table(&tusb_gpio_table); ret = tusb6010_setup_interface(&tusb_data, TUSB6010_REFCLK_19, 2, TUSB6010_ASYNC_CS, TUSB6010_SYNC_CS, TUSB6010_DMACHAN); if (ret != 0) return; pr_info("TUSB 6010\n"); return; } #else static void __init n8x0_usb_init(void) {} #endif /*CONFIG_USB_MUSB_TUSB6010 */ static struct omap2_mcspi_device_config p54spi_mcspi_config = { .turbo_mode = 0, }; static struct spi_board_info n800_spi_board_info[] __initdata = { { .modalias = "p54spi", .bus_num = 2, .chip_select = 0, .max_speed_hz = 48000000, .controller_data = &p54spi_mcspi_config, }, }; #if defined(CONFIG_MENELAUS) && IS_ENABLED(CONFIG_MMC_OMAP) /* * On both N800 and N810, only the first of the two MMC controllers is in use. * The two MMC slots are multiplexed via Menelaus companion chip over I2C. * On N800, both slots are powered via Menelaus. On N810, only one of the * slots is powered via Menelaus. The N810 EMMC is powered via GPIO. * * VMMC slot 1 on both N800 and N810 * VDCDC3_APE and VMCS2_APE slot 2 on N800 * GPIO23 and GPIO9 slot 2 EMMC on N810 * */ static int slot1_cover_open; static int slot2_cover_open; static struct device *mmc_device; static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = { .dev_id = "mmci-omap.0", .table = { /* Slot switch, GPIO 96 */ GPIO_LOOKUP("gpio-80-111", 16, "switch", GPIO_ACTIVE_HIGH), { } }, }; static struct gpiod_lookup_table nokia810_mmc_gpio_table = { .dev_id = "mmci-omap.0", .table = { /* Slot index 1, VSD power, GPIO 23 */ GPIO_LOOKUP_IDX("gpio-16-31", 7, "vsd", 1, GPIO_ACTIVE_HIGH), /* Slot index 1, VIO power, GPIO 9 */ GPIO_LOOKUP_IDX("gpio-0-15", 9, "vio", 1, GPIO_ACTIVE_HIGH), { } }, }; static int n8x0_mmc_set_power_menelaus(struct device *dev, int slot, int power_on, int vdd) { int mV; #ifdef CONFIG_MMC_DEBUG dev_dbg(dev, "Set slot %d power: %s (vdd %d)\n", slot + 1, power_on ? 
"on" : "off", vdd); #endif if (slot == 0) { if (!power_on) return menelaus_set_vmmc(0); switch (1 << vdd) { case MMC_VDD_33_34: case MMC_VDD_32_33: case MMC_VDD_31_32: mV = 3100; break; case MMC_VDD_30_31: mV = 3000; break; case MMC_VDD_28_29: mV = 2800; break; case MMC_VDD_165_195: mV = 1850; break; default: BUG(); } return menelaus_set_vmmc(mV); } else { if (!power_on) return menelaus_set_vdcdc(3, 0); switch (1 << vdd) { case MMC_VDD_33_34: case MMC_VDD_32_33: mV = 3300; break; case MMC_VDD_30_31: case MMC_VDD_29_30: mV = 3000; break; case MMC_VDD_28_29: case MMC_VDD_27_28: mV = 2800; break; case MMC_VDD_24_25: case MMC_VDD_23_24: mV = 2400; break; case MMC_VDD_22_23: case MMC_VDD_21_22: mV = 2200; break; case MMC_VDD_20_21: mV = 2000; break; case MMC_VDD_165_195: mV = 1800; break; default: BUG(); } return menelaus_set_vdcdc(3, mV); } return 0; } static int n8x0_mmc_set_power(struct device *dev, int slot, int power_on, int vdd) { if (board_is_n800() || slot == 0) return n8x0_mmc_set_power_menelaus(dev, slot, power_on, vdd); /* The n810 power will be handled by GPIO code in the driver */ return 0; } static int n8x0_mmc_set_bus_mode(struct device *dev, int slot, int bus_mode) { int r; dev_dbg(dev, "Set slot %d bus mode %s\n", slot + 1, bus_mode == MMC_BUSMODE_OPENDRAIN ? 
"open-drain" : "push-pull"); BUG_ON(slot != 0 && slot != 1); slot++; switch (bus_mode) { case MMC_BUSMODE_OPENDRAIN: r = menelaus_set_mmc_opendrain(slot, 1); break; case MMC_BUSMODE_PUSHPULL: r = menelaus_set_mmc_opendrain(slot, 0); break; default: BUG(); } if (r != 0 && printk_ratelimit()) dev_err(dev, "MMC: unable to set bus mode for slot %d\n", slot); return r; } static int n8x0_mmc_get_cover_state(struct device *dev, int slot) { slot++; BUG_ON(slot != 1 && slot != 2); if (slot == 1) return slot1_cover_open; else return slot2_cover_open; } static void n8x0_mmc_callback(void *data, u8 card_mask) { #ifdef CONFIG_MMC_OMAP int bit, *openp, index; if (board_is_n800()) { bit = 1 << 1; openp = &slot2_cover_open; index = 1; } else { bit = 1; openp = &slot1_cover_open; index = 0; } if (card_mask & bit) *openp = 1; else *openp = 0; omap_mmc_notify_cover_event(mmc_device, index, *openp); #else pr_warn("MMC: notify cover event not available\n"); #endif } static int n8x0_mmc_late_init(struct device *dev) { int r, bit, *openp; int vs2sel; mmc_device = dev; r = menelaus_set_slot_sel(1); if (r < 0) return r; if (board_is_n800()) vs2sel = 0; else vs2sel = 2; r = menelaus_set_mmc_slot(2, 0, vs2sel, 1); if (r < 0) return r; n8x0_mmc_set_power(dev, 0, MMC_POWER_ON, 16); /* MMC_VDD_28_29 */ n8x0_mmc_set_power(dev, 1, MMC_POWER_ON, 16); r = menelaus_set_mmc_slot(1, 1, 0, 1); if (r < 0) return r; r = menelaus_set_mmc_slot(2, 1, vs2sel, 1); if (r < 0) return r; r = menelaus_get_slot_pin_states(); if (r < 0) return r; if (board_is_n800()) { bit = 1 << 1; openp = &slot2_cover_open; } else { bit = 1; openp = &slot1_cover_open; slot2_cover_open = 0; } /* All slot pin bits seem to be inversed until first switch change */ if (r == 0xf || r == (0xf & ~bit)) r = ~r; if (r & bit) *openp = 1; else *openp = 0; r = menelaus_register_mmc_callback(n8x0_mmc_callback, NULL); return r; } static void n8x0_mmc_shutdown(struct device *dev) { int vs2sel; if (board_is_n800()) vs2sel = 0; else vs2sel = 2; 
menelaus_set_mmc_slot(1, 0, 0, 0); menelaus_set_mmc_slot(2, 0, vs2sel, 0); } static void n8x0_mmc_cleanup(struct device *dev) { menelaus_unregister_mmc_callback(); } /* * MMC controller1 has two slots that are multiplexed via I2C. * MMC controller2 is not in use. */ static struct omap_mmc_platform_data mmc1_data = { .nr_slots = 0, .init = n8x0_mmc_late_init, .cleanup = n8x0_mmc_cleanup, .shutdown = n8x0_mmc_shutdown, .max_freq = 24000000, .slots[0] = { .wires = 4, .set_power = n8x0_mmc_set_power, .set_bus_mode = n8x0_mmc_set_bus_mode, .get_cover_state = n8x0_mmc_get_cover_state, .ocr_mask = MMC_VDD_165_195 | MMC_VDD_30_31 | MMC_VDD_32_33 | MMC_VDD_33_34, .name = "internal", }, .slots[1] = { .set_power = n8x0_mmc_set_power, .set_bus_mode = n8x0_mmc_set_bus_mode, .get_cover_state = n8x0_mmc_get_cover_state, .ocr_mask = MMC_VDD_165_195 | MMC_VDD_20_21 | MMC_VDD_21_22 | MMC_VDD_22_23 | MMC_VDD_23_24 | MMC_VDD_24_25 | MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_32_33 | MMC_VDD_33_34, .name = "external", }, }; static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC]; static void __init n8x0_mmc_init(void) { gpiod_add_lookup_table(&nokia8xx_mmc_gpio_table); if (board_is_n810()) { mmc1_data.slots[0].name = "external"; /* * Some Samsung Movinand chips do not like open-ended * multi-block reads and fall to braind-dead state * while doing so. 
Reducing the number of blocks in * the transfer or delays in clock disable do not help */ mmc1_data.slots[1].name = "internal"; mmc1_data.slots[1].ban_openended = 1; gpiod_add_lookup_table(&nokia810_mmc_gpio_table); } mmc1_data.nr_slots = 2; mmc_data[0] = &mmc1_data; } #else static struct omap_mmc_platform_data mmc1_data; static void __init n8x0_mmc_init(void) { } #endif /* CONFIG_MMC_OMAP */ #ifdef CONFIG_MENELAUS static int n8x0_auto_sleep_regulators(void) { u32 val; int ret; val = EN_VPLL_SLEEP | EN_VMMC_SLEEP \ | EN_VAUX_SLEEP | EN_VIO_SLEEP \ | EN_VMEM_SLEEP | EN_DC3_SLEEP \ | EN_VC_SLEEP | EN_DC2_SLEEP; ret = menelaus_set_regulator_sleep(1, val); if (ret < 0) { pr_err("Could not set regulators to sleep on menelaus: %u\n", ret); return ret; } return 0; } static int n8x0_auto_voltage_scale(void) { int ret; ret = menelaus_set_vcore_hw(1400, 1050); if (ret < 0) { pr_err("Could not set VCORE voltage on menelaus: %u\n", ret); return ret; } return 0; } static int n8x0_menelaus_late_init(struct device *dev) { int ret; ret = n8x0_auto_voltage_scale(); if (ret < 0) return ret; ret = n8x0_auto_sleep_regulators(); if (ret < 0) return ret; return 0; } #else static int n8x0_menelaus_late_init(struct device *dev) { return 0; } #endif struct menelaus_platform_data n8x0_menelaus_platform_data = { .late_init = n8x0_menelaus_late_init, }; static int __init n8x0_late_initcall(void) { if (!board_caps) return -ENODEV; n8x0_mmc_init(); n8x0_usb_init(); return 0; } omap_late_initcall(n8x0_late_initcall); /* * Legacy init pdata init for n8x0. Note that we want to follow the * I2C bus numbering starting at 0 for device tree like other omaps. */ void * __init n8x0_legacy_init(void) { board_check_revision(); spi_register_board_info(n800_spi_board_info, ARRAY_SIZE(n800_spi_board_info)); return &mmc1_data; }
linux-master
arch/arm/mach-omap2/board-n8x0.c
// SPDX-License-Identifier: GPL-2.0-only /* * omap_device implementation * * Copyright (C) 2009-2010 Nokia Corporation * Paul Walmsley, Kevin Hilman * * Developed in collaboration with (alphabetical order): Benoit * Cousson, Thara Gopinath, Tony Lindgren, Rajendra Nayak, Vikram * Pandita, Sakari Poussa, Anand Sawant, Santosh Shilimkar, Richard * Woodruff * * This code provides a consistent interface for OMAP device drivers * to control power management and interconnect properties of their * devices. * * In the medium- to long-term, this code should be implemented as a * proper omap_bus/omap_device in Linux, no more platform_data func * pointers */ #undef DEBUG #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/notifier.h> #include "common.h" #include "soc.h" #include "omap_device.h" #include "omap_hwmod.h" static struct omap_device *omap_device_alloc(struct platform_device *pdev, struct omap_hwmod **ohs, int oh_cnt); static void omap_device_delete(struct omap_device *od); static struct dev_pm_domain omap_device_fail_pm_domain; static struct dev_pm_domain omap_device_pm_domain; /* Private functions */ static void _add_clkdev(struct omap_device *od, const char *clk_alias, const char *clk_name) { struct clk *r; int rc; if (!clk_alias || !clk_name) return; dev_dbg(&od->pdev->dev, "Creating %s -> %s\n", clk_alias, clk_name); r = clk_get_sys(dev_name(&od->pdev->dev), clk_alias); if (!IS_ERR(r)) { dev_dbg(&od->pdev->dev, "alias %s already exists\n", clk_alias); clk_put(r); return; } r = clk_get_sys(NULL, clk_name); if (IS_ERR(r)) { struct of_phandle_args clkspec; clkspec.np = of_find_node_by_name(NULL, clk_name); r = of_clk_get_from_provider(&clkspec); rc = clk_register_clkdev(r, clk_alias, 
dev_name(&od->pdev->dev)); } else { rc = clk_add_alias(clk_alias, dev_name(&od->pdev->dev), clk_name, NULL); } if (rc) { if (rc == -ENODEV || rc == -ENOMEM) dev_err(&od->pdev->dev, "clkdev_alloc for %s failed\n", clk_alias); else dev_err(&od->pdev->dev, "clk_get for %s failed\n", clk_name); } } /** * _add_hwmod_clocks_clkdev - Add clkdev entry for hwmod optional clocks * and main clock * @od: struct omap_device *od * @oh: struct omap_hwmod *oh * * For the main clock and every optional clock present per hwmod per * omap_device, this function adds an entry in the clkdev table of the * form <dev-id=dev_name, con-id=role> if it does not exist already. * * This allows drivers to get a pointer to its optional clocks based on its role * by calling clk_get(<dev*>, <role>). * In the case of the main clock, a "fck" alias is used. * * No return value. */ static void _add_hwmod_clocks_clkdev(struct omap_device *od, struct omap_hwmod *oh) { int i; _add_clkdev(od, "fck", oh->main_clk); for (i = 0; i < oh->opt_clks_cnt; i++) _add_clkdev(od, oh->opt_clks[i].role, oh->opt_clks[i].clk); } /** * omap_device_build_from_dt - build an omap_device with multiple hwmods * @pdev: The platform device to update. * * Function for building an omap_device already registered from device-tree * * Returns 0 or PTR_ERR() on error. 
*/ static int omap_device_build_from_dt(struct platform_device *pdev) { struct omap_hwmod **hwmods; struct omap_device *od; struct omap_hwmod *oh; struct device_node *node = pdev->dev.of_node; struct resource res; const char *oh_name; int oh_cnt, i, ret = 0; bool device_active = false, skip_pm_domain = false; oh_cnt = of_property_count_strings(node, "ti,hwmods"); if (oh_cnt <= 0) { dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n"); return -ENODEV; } /* SDMA still needs special handling for omap_device_build() */ ret = of_property_read_string_index(node, "ti,hwmods", 0, &oh_name); if (!ret && (!strncmp("dma_system", oh_name, 10) || !strncmp("dma", oh_name, 3))) skip_pm_domain = true; /* Use ti-sysc driver instead of omap_device? */ if (!skip_pm_domain && !omap_hwmod_parse_module_range(NULL, node, &res)) return -ENODEV; hwmods = kcalloc(oh_cnt, sizeof(struct omap_hwmod *), GFP_KERNEL); if (!hwmods) { ret = -ENOMEM; goto odbfd_exit; } for (i = 0; i < oh_cnt; i++) { of_property_read_string_index(node, "ti,hwmods", i, &oh_name); oh = omap_hwmod_lookup(oh_name); if (!oh) { dev_err(&pdev->dev, "Cannot lookup hwmod '%s'\n", oh_name); ret = -EINVAL; goto odbfd_exit1; } hwmods[i] = oh; if (oh->flags & HWMOD_INIT_NO_IDLE) device_active = true; } od = omap_device_alloc(pdev, hwmods, oh_cnt); if (IS_ERR(od)) { dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n", oh_name); ret = PTR_ERR(od); goto odbfd_exit1; } /* Fix up missing resource names */ for (i = 0; i < pdev->num_resources; i++) { struct resource *r = &pdev->resource[i]; if (r->name == NULL) r->name = dev_name(&pdev->dev); } if (!skip_pm_domain) { dev_pm_domain_set(&pdev->dev, &omap_device_pm_domain); if (device_active) { omap_device_enable(pdev); pm_runtime_set_active(&pdev->dev); } } odbfd_exit1: kfree(hwmods); odbfd_exit: /* if data/we are at fault.. 
load up a fail handler */ if (ret) dev_pm_domain_set(&pdev->dev, &omap_device_fail_pm_domain); return ret; } static int _omap_device_notifier_call(struct notifier_block *nb, unsigned long event, void *dev) { struct platform_device *pdev = to_platform_device(dev); struct omap_device *od; int err; switch (event) { case BUS_NOTIFY_REMOVED_DEVICE: if (pdev->archdata.od) omap_device_delete(pdev->archdata.od); break; case BUS_NOTIFY_UNBOUND_DRIVER: od = to_omap_device(pdev); if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED)) { dev_info(dev, "enabled after unload, idling\n"); err = omap_device_idle(pdev); if (err) dev_err(dev, "failed to idle\n"); } break; case BUS_NOTIFY_BIND_DRIVER: od = to_omap_device(pdev); if (od) { od->_driver_status = BUS_NOTIFY_BIND_DRIVER; if (od->_state == OMAP_DEVICE_STATE_ENABLED && pm_runtime_status_suspended(dev)) { pm_runtime_set_active(dev); } } break; case BUS_NOTIFY_ADD_DEVICE: if (pdev->dev.of_node) omap_device_build_from_dt(pdev); fallthrough; default: od = to_omap_device(pdev); if (od) od->_driver_status = event; } return NOTIFY_DONE; } /** * _omap_device_enable_hwmods - call omap_hwmod_enable() on all hwmods * @od: struct omap_device *od * * Enable all underlying hwmods. Returns 0. */ static int _omap_device_enable_hwmods(struct omap_device *od) { int ret = 0; int i; for (i = 0; i < od->hwmods_cnt; i++) ret |= omap_hwmod_enable(od->hwmods[i]); return ret; } /** * _omap_device_idle_hwmods - call omap_hwmod_idle() on all hwmods * @od: struct omap_device *od * * Idle all underlying hwmods. Returns 0. 
*/ static int _omap_device_idle_hwmods(struct omap_device *od) { int ret = 0; int i; for (i = 0; i < od->hwmods_cnt; i++) ret |= omap_hwmod_idle(od->hwmods[i]); return ret; } /* Public functions for use by core code */ /** * omap_device_alloc - allocate an omap_device * @pdev: platform_device that will be included in this omap_device * @ohs: ptr to the omap_hwmod for this omap_device * @oh_cnt: the size of the ohs list * * Convenience function for allocating an omap_device structure and filling * hwmods, and resources. * * Returns an struct omap_device pointer or ERR_PTR() on error; */ static struct omap_device *omap_device_alloc(struct platform_device *pdev, struct omap_hwmod **ohs, int oh_cnt) { int ret = -ENOMEM; struct omap_device *od; int i; struct omap_hwmod **hwmods; od = kzalloc(sizeof(struct omap_device), GFP_KERNEL); if (!od) goto oda_exit1; od->hwmods_cnt = oh_cnt; hwmods = kmemdup(ohs, sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL); if (!hwmods) goto oda_exit2; od->hwmods = hwmods; od->pdev = pdev; pdev->archdata.od = od; for (i = 0; i < oh_cnt; i++) { hwmods[i]->od = od; _add_hwmod_clocks_clkdev(od, hwmods[i]); } return od; oda_exit2: kfree(od); oda_exit1: dev_err(&pdev->dev, "omap_device: build failed (%d)\n", ret); return ERR_PTR(ret); } static void omap_device_delete(struct omap_device *od) { if (!od) return; od->pdev->archdata.od = NULL; kfree(od->hwmods); kfree(od); } #ifdef CONFIG_PM static int _od_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); int ret; ret = pm_generic_runtime_suspend(dev); if (ret) return ret; return omap_device_idle(pdev); } static int _od_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); int ret; ret = omap_device_enable(pdev); if (ret) { dev_err(dev, "use pm_runtime_put_sync_suspend() in driver?\n"); return ret; } return pm_generic_runtime_resume(dev); } static int _od_fail_runtime_suspend(struct device *dev) { dev_warn(dev, "%s: 
FIXME: missing hwmod/omap_dev info\n", __func__); return -ENODEV; } static int _od_fail_runtime_resume(struct device *dev) { dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__); return -ENODEV; } #endif #ifdef CONFIG_SUSPEND static int _od_suspend_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct omap_device *od = to_omap_device(pdev); int ret; /* Don't attempt late suspend on a driver that is not bound */ if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) return 0; ret = pm_generic_suspend_noirq(dev); if (!ret && !pm_runtime_status_suspended(dev)) { if (pm_generic_runtime_suspend(dev) == 0) { omap_device_idle(pdev); od->flags |= OMAP_DEVICE_SUSPENDED; } } return ret; } static int _od_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct omap_device *od = to_omap_device(pdev); if (od->flags & OMAP_DEVICE_SUSPENDED) { od->flags &= ~OMAP_DEVICE_SUSPENDED; omap_device_enable(pdev); pm_generic_runtime_resume(dev); } return pm_generic_resume_noirq(dev); } #else #define _od_suspend_noirq NULL #define _od_resume_noirq NULL #endif static struct dev_pm_domain omap_device_fail_pm_domain = { .ops = { SET_RUNTIME_PM_OPS(_od_fail_runtime_suspend, _od_fail_runtime_resume, NULL) } }; static struct dev_pm_domain omap_device_pm_domain = { .ops = { SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, NULL) USE_PLATFORM_PM_SLEEP_OPS SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq) } }; /* Public functions for use by device drivers through struct platform_data */ /** * omap_device_enable - fully activate an omap_device * @pdev: the platform device to activate * * Do whatever is necessary for the hwmods underlying omap_device @od * to be accessible and ready to operate. This generally involves * enabling clocks, setting SYSCONFIG registers; and in the future may * involve remuxing pins. Device drivers should call this function * indirectly via pm_runtime_get*(). 
Returns -EINVAL if called when * the omap_device is already enabled, or passes along the return * value of _omap_device_enable_hwmods(). */ int omap_device_enable(struct platform_device *pdev) { int ret; struct omap_device *od; od = to_omap_device(pdev); if (od->_state == OMAP_DEVICE_STATE_ENABLED) { dev_warn(&pdev->dev, "omap_device: %s() called from invalid state %d\n", __func__, od->_state); return -EINVAL; } ret = _omap_device_enable_hwmods(od); if (ret == 0) od->_state = OMAP_DEVICE_STATE_ENABLED; return ret; } /** * omap_device_idle - idle an omap_device * @pdev: The platform_device (omap_device) to idle * * Idle omap_device @od. Device drivers call this function indirectly * via pm_runtime_put*(). Returns -EINVAL if the omap_device is not * currently enabled, or passes along the return value of * _omap_device_idle_hwmods(). */ int omap_device_idle(struct platform_device *pdev) { int ret; struct omap_device *od; od = to_omap_device(pdev); if (od->_state != OMAP_DEVICE_STATE_ENABLED) { dev_warn(&pdev->dev, "omap_device: %s() called from invalid state %d\n", __func__, od->_state); return -EINVAL; } ret = _omap_device_idle_hwmods(od); if (ret == 0) od->_state = OMAP_DEVICE_STATE_IDLE; return ret; } /** * omap_device_assert_hardreset - set a device's hardreset line * @pdev: struct platform_device * to reset * @name: const char * name of the reset line * * Set the hardreset line identified by @name on the IP blocks * associated with the hwmods backing the platform_device @pdev. All * of the hwmods associated with @pdev must have the same hardreset * line linked to them for this to work. Passes along the return value * of omap_hwmod_assert_hardreset() in the event of any failure, or * returns 0 upon success. 
*/ int omap_device_assert_hardreset(struct platform_device *pdev, const char *name) { struct omap_device *od = to_omap_device(pdev); int ret = 0; int i; for (i = 0; i < od->hwmods_cnt; i++) { ret = omap_hwmod_assert_hardreset(od->hwmods[i], name); if (ret) break; } return ret; } /** * omap_device_deassert_hardreset - release a device's hardreset line * @pdev: struct platform_device * to reset * @name: const char * name of the reset line * * Release the hardreset line identified by @name on the IP blocks * associated with the hwmods backing the platform_device @pdev. All * of the hwmods associated with @pdev must have the same hardreset * line linked to them for this to work. Passes along the return * value of omap_hwmod_deassert_hardreset() in the event of any * failure, or returns 0 upon success. */ int omap_device_deassert_hardreset(struct platform_device *pdev, const char *name) { struct omap_device *od = to_omap_device(pdev); int ret = 0; int i; for (i = 0; i < od->hwmods_cnt; i++) { ret = omap_hwmod_deassert_hardreset(od->hwmods[i], name); if (ret) break; } return ret; } static struct notifier_block platform_nb = { .notifier_call = _omap_device_notifier_call, }; static int __init omap_device_init(void) { bus_register_notifier(&platform_bus_type, &platform_nb); return 0; } omap_postcore_initcall(omap_device_init); /** * omap_device_late_idle - idle devices without drivers * @dev: struct device * associated with omap_device * @data: unused * * Check the driver bound status of this device, and idle it * if there is no driver attached. */ static int __init omap_device_late_idle(struct device *dev, void *data) { struct platform_device *pdev = to_platform_device(dev); struct omap_device *od = to_omap_device(pdev); int i; if (!od) return 0; /* * If omap_device state is enabled, but has no driver bound, * idle it. */ /* * Some devices (like memory controllers) are always kept * enabled, and should not be idled even with no drivers. 
*/ for (i = 0; i < od->hwmods_cnt; i++) if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE) return 0; if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER && od->_driver_status != BUS_NOTIFY_BIND_DRIVER) { if (od->_state == OMAP_DEVICE_STATE_ENABLED) { dev_warn(dev, "%s: enabled but no driver. Idling\n", __func__); omap_device_idle(pdev); } } return 0; } static int __init omap_device_late_init(void) { bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle); return 0; } omap_late_initcall_sync(omap_device_late_init);
linux-master
arch/arm/mach-omap2/omap_device.c
// SPDX-License-Identifier: GPL-2.0 /* * TI AM33XX and AM43XX PM Assembly Offsets * * Copyright (C) 2017-2018 Texas Instruments Inc. */ #include <linux/kbuild.h> #include <linux/platform_data/pm33xx.h> #include <linux/ti-emif-sram.h> int main(void) { ti_emif_asm_offsets(); DEFINE(AMX3_PM_WFI_FLAGS_OFFSET, offsetof(struct am33xx_pm_sram_data, wfi_flags)); DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET, offsetof(struct am33xx_pm_sram_data, l2_aux_ctrl_val)); DEFINE(AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET, offsetof(struct am33xx_pm_sram_data, l2_prefetch_ctrl_val)); DEFINE(AMX3_PM_SRAM_DATA_SIZE, sizeof(struct am33xx_pm_sram_data)); BLANK(); DEFINE(AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET, offsetof(struct am33xx_pm_ro_sram_data, amx3_pm_sram_data_virt)); DEFINE(AMX3_PM_RO_SRAM_DATA_PHYS_OFFSET, offsetof(struct am33xx_pm_ro_sram_data, amx3_pm_sram_data_phys)); DEFINE(AMX3_PM_RTC_BASE_VIRT_OFFSET, offsetof(struct am33xx_pm_ro_sram_data, rtc_base_virt)); DEFINE(AMX3_PM_RO_SRAM_DATA_SIZE, sizeof(struct am33xx_pm_ro_sram_data)); return 0; }
linux-master
arch/arm/mach-omap2/pm-asm-offsets.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP3xxx PRM module functions * * Copyright (C) 2010-2012 Texas Instruments, Inc. * Copyright (C) 2010 Nokia Corporation * Benoît Cousson * Paul Walmsley * Rajendra Nayak <[email protected]> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/of_irq.h> #include "soc.h" #include "common.h" #include "vp.h" #include "powerdomain.h" #include "prm3xxx.h" #include "prm2xxx_3xxx.h" #include "cm2xxx_3xxx.h" #include "prm-regbits-34xx.h" #include "cm3xxx.h" #include "cm-regbits-34xx.h" #include "clock.h" static void omap3xxx_prm_read_pending_irqs(unsigned long *events); static void omap3xxx_prm_ocp_barrier(void); static void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask); static void omap3xxx_prm_restore_irqen(u32 *saved_mask); static void omap3xxx_prm_iva_idle(void); static const struct omap_prcm_irq omap3_prcm_irqs[] = { OMAP_PRCM_IRQ("wkup", 0, 0), OMAP_PRCM_IRQ("io", 9, 1), }; static struct omap_prcm_irq_setup omap3_prcm_irq_setup = { .ack = OMAP3_PRM_IRQSTATUS_MPU_OFFSET, .mask = OMAP3_PRM_IRQENABLE_MPU_OFFSET, .nr_regs = 1, .irqs = omap3_prcm_irqs, .nr_irqs = ARRAY_SIZE(omap3_prcm_irqs), .irq = 11 + OMAP_INTC_START, .read_pending_irqs = &omap3xxx_prm_read_pending_irqs, .ocp_barrier = &omap3xxx_prm_ocp_barrier, .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen, .restore_irqen = &omap3xxx_prm_restore_irqen, .reconfigure_io_chain = NULL, }; /* * omap3_prm_reset_src_map - map from bits in the PRM_RSTST hardware * register (which are specific to OMAP3xxx SoCs) to reset source ID * bit shifts (which is an OMAP SoC-independent enumeration) */ static struct prm_reset_src_map omap3xxx_prm_reset_src_map[] = { { OMAP3430_GLOBAL_COLD_RST_SHIFT, OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT }, { OMAP3430_GLOBAL_SW_RST_SHIFT, OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT }, { OMAP3430_SECURITY_VIOL_RST_SHIFT, OMAP_SECU_VIOL_RST_SRC_ID_SHIFT }, { 
OMAP3430_MPU_WD_RST_SHIFT, OMAP_MPU_WD_RST_SRC_ID_SHIFT }, { OMAP3430_SECURE_WD_RST_SHIFT, OMAP_MPU_WD_RST_SRC_ID_SHIFT }, { OMAP3430_EXTERNAL_WARM_RST_SHIFT, OMAP_EXTWARM_RST_SRC_ID_SHIFT }, { OMAP3430_VDD1_VOLTAGE_MANAGER_RST_SHIFT, OMAP_VDD_MPU_VM_RST_SRC_ID_SHIFT }, { OMAP3430_VDD2_VOLTAGE_MANAGER_RST_SHIFT, OMAP_VDD_CORE_VM_RST_SRC_ID_SHIFT }, { OMAP3430_ICEPICK_RST_SHIFT, OMAP_ICEPICK_RST_SRC_ID_SHIFT }, { OMAP3430_ICECRUSHER_RST_SHIFT, OMAP_ICECRUSHER_RST_SRC_ID_SHIFT }, { -1, -1 }, }; /* PRM VP */ /* * struct omap3_vp - OMAP3 VP register access description. * @tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg */ struct omap3_vp { u32 tranxdone_status; }; static struct omap3_vp omap3_vp[] = { [OMAP3_VP_VDD_MPU_ID] = { .tranxdone_status = OMAP3430_VP1_TRANXDONE_ST_MASK, }, [OMAP3_VP_VDD_CORE_ID] = { .tranxdone_status = OMAP3430_VP2_TRANXDONE_ST_MASK, }, }; #define MAX_VP_ID ARRAY_SIZE(omap3_vp); static u32 omap3_prm_vp_check_txdone(u8 vp_id) { struct omap3_vp *vp = &omap3_vp[vp_id]; u32 irqstatus; irqstatus = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); return irqstatus & vp->tranxdone_status; } static void omap3_prm_vp_clear_txdone(u8 vp_id) { struct omap3_vp *vp = &omap3_vp[vp_id]; omap2_prm_write_mod_reg(vp->tranxdone_status, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); } u32 omap3_prm_vcvp_read(u8 offset) { return omap2_prm_read_mod_reg(OMAP3430_GR_MOD, offset); } void omap3_prm_vcvp_write(u32 val, u8 offset) { omap2_prm_write_mod_reg(val, OMAP3430_GR_MOD, offset); } u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset) { return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset); } /** * omap3xxx_prm_dpll3_reset - use DPLL3 reset to reboot the OMAP SoC * * Set the DPLL3 reset bit, which should reboot the SoC. This is the * recommended way to restart the SoC, considering Errata i520. No * return value. 
*/ static void omap3xxx_prm_dpll3_reset(void) { omap2_prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, OMAP3430_GR_MOD, OMAP2_RM_RSTCTRL); /* OCP barrier */ omap2_prm_read_mod_reg(OMAP3430_GR_MOD, OMAP2_RM_RSTCTRL); } /** * omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events * @events: ptr to a u32, preallocated by caller * * Read PRM_IRQSTATUS_MPU bits, AND'ed with the currently-enabled PRM * MPU IRQs, and store the result into the u32 pointed to by @events. * No return value. */ static void omap3xxx_prm_read_pending_irqs(unsigned long *events) { u32 mask, st; /* XXX Can the mask read be avoided (e.g., can it come from RAM?) */ mask = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); st = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); events[0] = mask & st; } /** * omap3xxx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete * * Force any buffered writes to the PRM IP block to complete. Needed * by the PRM IRQ handler, which reads and writes directly to the IP * block, to avoid race conditions after acknowledging or clearing IRQ * bits. No return value. */ static void omap3xxx_prm_ocp_barrier(void) { omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET); } /** * omap3xxx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU reg * @saved_mask: ptr to a u32 array to save IRQENABLE bits * * Save the PRM_IRQENABLE_MPU register to @saved_mask. @saved_mask * must be allocated by the caller. Intended to be used in the PRM * interrupt handler suspend callback. The OCP barrier is needed to * ensure the write to disable PRM interrupts reaches the PRM before * returning; otherwise, spurious interrupts might occur. No return * value. 
*/ static void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask) { saved_mask[0] = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); /* OCP barrier */ omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET); } /** * omap3xxx_prm_restore_irqen - set PRM_IRQENABLE_MPU register from args * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously * * Restore the PRM_IRQENABLE_MPU register from @saved_mask. Intended * to be used in the PRM interrupt handler resume callback to restore * values saved by omap3xxx_prm_save_and_clear_irqen(). No OCP * barrier should be needed here; any pending PRM interrupts will fire * once the writes reach the PRM. No return value. */ static void omap3xxx_prm_restore_irqen(u32 *saved_mask) { omap2_prm_write_mod_reg(saved_mask[0], OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); } /** * omap3xxx_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt * @module: PRM module to clear wakeups from * @regs: register set to clear, 1 or 3 * @wkst_mask: wkst bits to clear * * The purpose of this function is to clear any wake-up events latched * in the PRCM PM_WKST_x registers. It is possible that a wake-up event * may occur whilst attempting to clear a PM_WKST_x register and thus * set another bit in this register. A while loop is used to ensure * that any peripheral wake-up events occurring while attempting to * clear the PM_WKST_x are detected and cleared. */ static int omap3xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask) { u32 wkst, fclk, iclk, clken; u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1; u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1; u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1; u16 grpsel_off = (regs == 3) ? 
OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL; int c = 0; wkst = omap2_prm_read_mod_reg(module, wkst_off); wkst &= omap2_prm_read_mod_reg(module, grpsel_off); wkst &= wkst_mask; if (wkst) { iclk = omap2_cm_read_mod_reg(module, iclk_off); fclk = omap2_cm_read_mod_reg(module, fclk_off); while (wkst) { clken = wkst; omap2_cm_set_mod_reg_bits(clken, module, iclk_off); /* * For USBHOST, we don't know whether HOST1 or * HOST2 woke us up, so enable both f-clocks */ if (module == OMAP3430ES2_USBHOST_MOD) clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT; omap2_cm_set_mod_reg_bits(clken, module, fclk_off); omap2_prm_write_mod_reg(wkst, module, wkst_off); wkst = omap2_prm_read_mod_reg(module, wkst_off); wkst &= wkst_mask; c++; } omap2_cm_write_mod_reg(iclk, module, iclk_off); omap2_cm_write_mod_reg(fclk, module, fclk_off); } return c; } /** * omap3_prm_reset_modem - toggle reset signal for modem * * Toggles the reset signal to modem IP block. Required to allow * OMAP3430 without stacked modem to idle properly. */ static void __init omap3_prm_reset_modem(void) { omap2_prm_write_mod_reg( OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK | OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK, CORE_MOD, OMAP2_RM_RSTCTRL); omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL); } /** * omap3_prm_init_pm - initialize PM related registers for PRM * @has_uart4: SoC has UART4 * @has_iva: SoC has IVA * * Initializes PRM registers for PM use. Called from PM init. */ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva) { u32 en_uart4_mask; u32 grpsel_uart4_mask; /* * Enable control of expternal oscillator through * sys_clkreq. In the long run clock framework should * take care of this. 
*/ omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK, 1 << OMAP_AUTOEXTCLKMODE_SHIFT, OMAP3430_GR_MOD, OMAP3_PRM_CLKSRC_CTRL_OFFSET); /* setup wakup source */ omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK | OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK, WKUP_MOD, PM_WKEN); /* No need to write EN_IO, that is always enabled */ omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK | OMAP3430_GRPSEL_GPT1_MASK | OMAP3430_GRPSEL_GPT12_MASK, WKUP_MOD, OMAP3430_PM_MPUGRPSEL); /* Enable PM_WKEN to support DSS LPR */ omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK, OMAP3430_DSS_MOD, PM_WKEN); if (has_uart4) { en_uart4_mask = OMAP3630_EN_UART4_MASK; grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK; } else { en_uart4_mask = 0; grpsel_uart4_mask = 0; } /* Enable wakeups in PER */ omap2_prm_write_mod_reg(en_uart4_mask | OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK | OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK | OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK | OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK | OMAP3430_EN_MCBSP4_MASK, OMAP3430_PER_MOD, PM_WKEN); /* and allow them to wake up MPU */ omap2_prm_write_mod_reg(grpsel_uart4_mask | OMAP3430_GRPSEL_GPIO2_MASK | OMAP3430_GRPSEL_GPIO3_MASK | OMAP3430_GRPSEL_GPIO4_MASK | OMAP3430_GRPSEL_GPIO5_MASK | OMAP3430_GRPSEL_GPIO6_MASK | OMAP3430_GRPSEL_UART3_MASK | OMAP3430_GRPSEL_MCBSP2_MASK | OMAP3430_GRPSEL_MCBSP3_MASK | OMAP3430_GRPSEL_MCBSP4_MASK, OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL); /* Don't attach IVA interrupts */ if (has_iva) { omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL); omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1); omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3); omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL); } /* Clear any pending 'reset' flags */ omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, 
OMAP3430_PER_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST); /* Clear any pending PRCM interrupts */ omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); /* We need to idle iva2_pwrdm even on am3703 with no iva2. */ omap3xxx_prm_iva_idle(); omap3_prm_reset_modem(); } /** * omap3430_pre_es3_1_reconfigure_io_chain - restart wake-up daisy chain * * The ST_IO_CHAIN bit does not exist in 3430 before es3.1. The only * thing we can do is toggle EN_IO bit for earlier omaps. */ static void omap3430_pre_es3_1_reconfigure_io_chain(void) { omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN); omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN); omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN); } /** * omap3_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain * * Clear any previously-latched I/O wakeup events and ensure that the * I/O wakeup gates are aligned with the current mux settings. Works * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No * return value. These registers are only available in 3430 es3.1 and later. 
*/ static void omap3_prm_reconfigure_io_chain(void) { int i = 0; omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD, PM_WKEN); omap_test_timeout(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST) & OMAP3430_ST_IO_CHAIN_MASK, MAX_IOPAD_LATCH_TIME, i); if (i == MAX_IOPAD_LATCH_TIME) pr_warn("PRM: I/O chain clock line assertion timed out\n"); omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD, PM_WKEN); omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK, WKUP_MOD, PM_WKST); omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST); } /** * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches * * Activates the I/O wakeup event latches and allows events logged by * those latches to signal a wakeup event to the PRCM. For I/O * wakeups to occur, WAKEUPENABLE bits must be set in the pad mux * registers, and omap3xxx_prm_reconfigure_io_chain() must be called. * No return value. */ static void omap3xxx_prm_enable_io_wakeup(void) { if (prm_features & PRM_HAS_IO_WAKEUP) omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN); } /** * omap3xxx_prm_read_reset_sources - return the last SoC reset source * * Return a u32 representing the last reset sources of the SoC. The * returned reset source bits are standardized across OMAP SoCs. */ static u32 omap3xxx_prm_read_reset_sources(void) { struct prm_reset_src_map *p; u32 r = 0; u32 v; v = omap2_prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST); p = omap3xxx_prm_reset_src_map; while (p->reg_shift >= 0 && p->std_shift >= 0) { if (v & (1 << p->reg_shift)) r |= 1 << p->std_shift; p++; } return r; } /** * omap3xxx_prm_iva_idle - ensure IVA is in idle so it can be put into retention * * In cases where IVA2 is activated by bootcode, it may prevent * full-chip retention or off-mode because it is not idle. This * function forces the IVA2 into idle state so it can go * into retention/off and thus allow full-chip retention/off. 
*/ static void omap3xxx_prm_iva_idle(void) { /* ensure IVA2 clock is disabled */ omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN); /* if no clock activity, nothing else to do */ if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) & OMAP3430_CLKACTIVITY_IVA2_MASK)) return; /* Reset IVA2 */ omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); /* Enable IVA2 clock */ omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK, OMAP3430_IVA2_MOD, CM_FCLKEN); /* Un-reset IVA2 */ omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); /* Disable IVA2 clock */ omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN); /* Reset IVA2 */ omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); } /** * omap3xxx_prm_clear_global_cold_reset - checks the global cold reset status * and clears it if asserted * * Checks if cold-reset has occurred and clears the status bit if yes. Returns * 1 if cold-reset has occurred, 0 otherwise. 
*/ int omap3xxx_prm_clear_global_cold_reset(void) { if (omap2_prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET) & OMAP3430_GLOBAL_COLD_RST_MASK) { omap2_prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST_MASK, OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET); return 1; } return 0; } void omap3_prm_save_scratchpad_contents(u32 *ptr) { *ptr++ = omap2_prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_CLKSRC_CTRL_OFFSET); *ptr++ = omap2_prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_CLKSEL_OFFSET); } /* Powerdomain low-level functions */ static int omap3_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) { omap2_prm_rmw_mod_reg_bits(OMAP_POWERSTATE_MASK, (pwrst << OMAP_POWERSTATE_SHIFT), pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL); return 0; } static int omap3_pwrdm_read_next_pwrst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL, OMAP_POWERSTATE_MASK); } static int omap3_pwrdm_read_pwrst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP2_PM_PWSTST, OMAP_POWERSTATEST_MASK); } /* Applicable only for OMAP3. 
Not supported on OMAP2 */ static int omap3_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST, OMAP3430_LASTPOWERSTATEENTERED_MASK); } static int omap3_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP2_PM_PWSTST, OMAP3430_LOGICSTATEST_MASK); } static int omap3_pwrdm_read_logic_retst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL, OMAP3430_LOGICSTATEST_MASK); } static int omap3_pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST, OMAP3430_LASTLOGICSTATEENTERED_MASK); } static int omap3_get_mem_bank_lastmemst_mask(u8 bank) { switch (bank) { case 0: return OMAP3430_LASTMEM1STATEENTERED_MASK; case 1: return OMAP3430_LASTMEM2STATEENTERED_MASK; case 2: return OMAP3430_LASTSHAREDL2CACHEFLATSTATEENTERED_MASK; case 3: return OMAP3430_LASTL2FLATMEMSTATEENTERED_MASK; default: WARN_ON(1); /* should never happen */ return -EEXIST; } return 0; } static int omap3_pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank) { u32 m; m = omap3_get_mem_bank_lastmemst_mask(bank); return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST, m); } static int omap3_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm) { omap2_prm_write_mod_reg(0, pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST); return 0; } static int omap3_pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm) { return omap2_prm_rmw_mod_reg_bits(0, 1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT, pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL); } static int omap3_pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm) { return omap2_prm_rmw_mod_reg_bits(1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT, 0, pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL); } struct pwrdm_ops omap3_pwrdm_operations = { .pwrdm_set_next_pwrst = omap3_pwrdm_set_next_pwrst, .pwrdm_read_next_pwrst = 
omap3_pwrdm_read_next_pwrst, .pwrdm_read_pwrst = omap3_pwrdm_read_pwrst, .pwrdm_read_prev_pwrst = omap3_pwrdm_read_prev_pwrst, .pwrdm_set_logic_retst = omap2_pwrdm_set_logic_retst, .pwrdm_read_logic_pwrst = omap3_pwrdm_read_logic_pwrst, .pwrdm_read_logic_retst = omap3_pwrdm_read_logic_retst, .pwrdm_read_prev_logic_pwrst = omap3_pwrdm_read_prev_logic_pwrst, .pwrdm_set_mem_onst = omap2_pwrdm_set_mem_onst, .pwrdm_set_mem_retst = omap2_pwrdm_set_mem_retst, .pwrdm_read_mem_pwrst = omap2_pwrdm_read_mem_pwrst, .pwrdm_read_mem_retst = omap2_pwrdm_read_mem_retst, .pwrdm_read_prev_mem_pwrst = omap3_pwrdm_read_prev_mem_pwrst, .pwrdm_clear_all_prev_pwrst = omap3_pwrdm_clear_all_prev_pwrst, .pwrdm_enable_hdwr_sar = omap3_pwrdm_enable_hdwr_sar, .pwrdm_disable_hdwr_sar = omap3_pwrdm_disable_hdwr_sar, .pwrdm_wait_transition = omap2_pwrdm_wait_transition, }; /* * */ static int omap3xxx_prm_late_init(void); static struct prm_ll_data omap3xxx_prm_ll_data = { .read_reset_sources = &omap3xxx_prm_read_reset_sources, .late_init = &omap3xxx_prm_late_init, .assert_hardreset = &omap2_prm_assert_hardreset, .deassert_hardreset = &omap2_prm_deassert_hardreset, .is_hardreset_asserted = &omap2_prm_is_hardreset_asserted, .reset_system = &omap3xxx_prm_dpll3_reset, .clear_mod_irqs = &omap3xxx_prm_clear_mod_irqs, .vp_check_txdone = &omap3_prm_vp_check_txdone, .vp_clear_txdone = &omap3_prm_vp_clear_txdone, }; int __init omap3xxx_prm_init(const struct omap_prcm_init_data *data) { omap2_clk_legacy_provider_init(TI_CLKM_PRM, prm_base.va + OMAP3430_IVA2_MOD); if (omap3_has_io_wakeup()) prm_features |= PRM_HAS_IO_WAKEUP; return prm_register(&omap3xxx_prm_ll_data); } static const struct of_device_id omap3_prm_dt_match_table[] = { { .compatible = "ti,omap3-prm" }, { } }; static int omap3xxx_prm_late_init(void) { struct device_node *np; int irq_num; if (!(prm_features & PRM_HAS_IO_WAKEUP)) return 0; if (omap3_has_io_chain_ctrl()) omap3_prcm_irq_setup.reconfigure_io_chain = omap3_prm_reconfigure_io_chain; 
else omap3_prcm_irq_setup.reconfigure_io_chain = omap3430_pre_es3_1_reconfigure_io_chain; np = of_find_matching_node(NULL, omap3_prm_dt_match_table); if (!np) { pr_err("PRM: no device tree node for interrupt?\n"); return -ENODEV; } irq_num = of_irq_get(np, 0); of_node_put(np); if (irq_num == -EPROBE_DEFER) return irq_num; omap3_prcm_irq_setup.irq = irq_num; omap3xxx_prm_enable_io_wakeup(); return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); } static void __exit omap3xxx_prm_exit(void) { prm_unregister(&omap3xxx_prm_ll_data); } __exitcall(omap3xxx_prm_exit);
linux-master
arch/arm/mach-omap2/prm3xxx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AM43xx Power domains framework
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 */

#include <linux/kernel.h>
#include <linux/init.h>

#include "powerdomain.h"

#include "prcm-common.h"
#include "prcm44xx.h"
#include "prcm43xx.h"

/*
 * Static description of every AM43xx power domain, registered with the
 * OMAP powerdomain core at init.  .pwrsts encodes the reachable power
 * states; .pwrsts_mem_ret / .pwrsts_mem_on list per-memory-bank states
 * in retention and on, one entry per bank (see .banks).
 */

/* SGX graphics domain: only off/on, single memory bank. */
static struct powerdomain gfx_43xx_pwrdm = {
	.name		  = "gfx_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = AM43XX_PRM_GFX_INST,
	.prcm_partition	  = AM43XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* gfx_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* Cortex-A9 MPU domain: off/ret/on with three retainable memory banks. */
static struct powerdomain mpu_43xx_pwrdm = {
	.name		  = "mpu_pwrdm",
	.voltdm		  = { .name = "mpu" },
	.prcm_offs	  = AM43XX_PRM_MPU_INST,
	.prcm_partition	  = AM43XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 3,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* mpu_l1 */
		[1] = PWRSTS_OFF_RET,	/* mpu_l2 */
		[2] = PWRSTS_OFF_RET,	/* mpu_ram */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* mpu_l1 */
		[1] = PWRSTS_ON,	/* mpu_l2 */
		[2] = PWRSTS_ON,	/* mpu_ram */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* RTC domain: always on. */
static struct powerdomain rtc_43xx_pwrdm = {
	.name		  = "rtc_pwrdm",
	.voltdm		  = { .name = "rtc" },
	.prcm_offs	  = AM43XX_PRM_RTC_INST,
	.prcm_partition	  = AM43XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
};

/* Wakeup domain: always on; holds the debug subsystem memory. */
static struct powerdomain wkup_43xx_pwrdm = {
	.name		  = "wkup_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = AM43XX_PRM_WKUP_INST,
	.prcm_partition	  = AM43XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
	.banks		  = 1,
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* debugss_mem */
	},
};

/* Tamper domain: always on. */
static struct powerdomain tamper_43xx_pwrdm = {
	.name		  = "tamper_pwrdm",
	.voltdm		  = { .name = "tamper" },
	.prcm_offs	  = AM43XX_PRM_TAMPER_INST,
	.prcm_partition	  = AM43XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_ON,
};

/* Customer efuse domain: off/on, no memory banks. */
static struct powerdomain cefuse_43xx_pwrdm = {
	.name		  = "cefuse_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = AM43XX_PRM_CEFUSE_INST,
	.prcm_partition	  = AM43XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_ON,
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* Peripheral domain: off/ret/on with four retainable memory banks. */
static struct powerdomain per_43xx_pwrdm = {
	.name		  = "per_pwrdm",
	.voltdm		  = { .name = "core" },
	.prcm_offs	  = AM43XX_PRM_PER_INST,
	.prcm_partition	  = AM43XX_PRM_PARTITION,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 4,
	.pwrsts_mem_ret	= {
		[0] = PWRSTS_OFF_RET,	/* icss_mem */
		[1] = PWRSTS_OFF_RET,	/* per_mem */
		[2] = PWRSTS_OFF_RET,	/* ram1_mem */
		[3] = PWRSTS_OFF_RET,	/* ram2_mem */
	},
	.pwrsts_mem_on	= {
		[0] = PWRSTS_ON,	/* icss_mem */
		[1] = PWRSTS_ON,	/* per_mem */
		[2] = PWRSTS_ON,	/* ram1_mem */
		[3] = PWRSTS_ON,	/* ram2_mem */
	},
	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
};

/* NULL-terminated list handed to pwrdm_register_pwrdms(). */
static struct powerdomain *powerdomains_am43xx[] __initdata = {
	&gfx_43xx_pwrdm,
	&mpu_43xx_pwrdm,
	&rtc_43xx_pwrdm,
	&wkup_43xx_pwrdm,
	&tamper_43xx_pwrdm,
	&cefuse_43xx_pwrdm,
	&per_43xx_pwrdm,
	NULL
};

/*
 * Override for pwrdm_has_voltdm: always reports "no VC/VP present" (0),
 * since AM43xx has no voltage controller/processor handled by this layer.
 */
static int am43xx_check_vcvp(void)
{
	return 0;
}

/*
 * Register the AM43xx power domains with the powerdomain core, reusing
 * the OMAP4 low-level operations with the VC/VP check patched out.
 */
void __init am43xx_powerdomains_init(void)
{
	omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;

	pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
	pwrdm_register_pwrdms(powerdomains_am43xx);
	pwrdm_complete_init();
}
linux-master
arch/arm/mach-omap2/powerdomains43xx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP4 SMP cpu-hotplug support * * Copyright (C) 2010 Texas Instruments, Inc. * Author: * Santosh Shilimkar <[email protected]> * * Platform file needed for the OMAP4 SMP. This file is based on arm * realview smp platform. * Copyright (c) 2002 ARM Limited. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <linux/io.h> #include "omap-wakeupgen.h" #include "common.h" #include "powerdomain.h" /* * platform-specific code to shutdown a CPU * Called with IRQs disabled */ void omap4_cpu_die(unsigned int cpu) { unsigned int boot_cpu = 0; void __iomem *base = omap_get_wakeupgen_base(); /* * we're ready for shutdown now, so do it */ if (omap_secure_apis_support()) { if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0) pr_err("Secure clear status failed\n"); } else { writel_relaxed(0, base + OMAP_AUX_CORE_BOOT_0); } for (;;) { /* * Enter into low power state */ omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF); if (omap_secure_apis_support()) boot_cpu = omap_read_auxcoreboot0() >> 9; else boot_cpu = readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5; if (boot_cpu == smp_processor_id()) { /* * OK, proper wakeup, we're done */ break; } pr_debug("CPU%u: spurious wakeup call\n", cpu); } } /* Needed by kexec and platform_can_cpu_hotplug() */ int omap4_cpu_kill(unsigned int cpu) { return 1; }
linux-master
arch/arm/mach-omap2/omap-hotplug.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <[email protected]>
 * Rajendra Nayak <[email protected]>
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/tick.h>

#include <asm/cpuidle.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "soc.h"
#include "clockdomain.h"

#define MAX_CPUS	2

/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;		/* target CPU powerdomain state */
	u32 mpu_logic_state;	/* MPUSS logic retention state */
	u32 mpu_state;		/* target MPUSS powerdomain state */
	u32 mpu_state_vote;	/* CPUs voting for this MPU state (omap5) */
};

static struct idle_statedata omap4_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};

static struct idle_statedata omap5_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_ON,
	},
	{
		.cpu_state = PWRDM_POWER_RET,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
};

static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

static atomic_t abort_barrier;
static bool cpu_done[MAX_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
static DEFINE_RAW_SPINLOCK(mpu_lock);

/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the amount of time spent in the low power state.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	omap_do_wfi();
	return index;
}

static int omap_enter_idle_smp(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv,
			       int index)
{
	struct idle_statedata *cx = state_ptr + index;
	unsigned long flag;

	/* Program the MPU state only once all online CPUs have voted */
	raw_spin_lock_irqsave(&mpu_lock, flag);
	cx->mpu_state_vote++;
	if (cx->mpu_state_vote == num_online_cpus()) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	omap4_enter_lowpower(dev->cpu, cx->cpu_state, true);

	/* On the way out, the last full voter restores MPU to ON */
	raw_spin_lock_irqsave(&mpu_lock, flag);
	if (cx->mpu_state_vote == num_online_cpus())
		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	cx->mpu_state_vote--;
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	return index;
}

static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;
	int error;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
	 * This is necessary to honour hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;

		}
	}

	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	/* Enter broadcast mode for periodic timers */
	tick_broadcast_enable();

	/* Enter broadcast mode for one-shot timers */
	tick_broadcast_enter();

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	error = cpu_pm_enter();
	if (error)
		goto cpu_pm_out;

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context) {
			error = cpu_cluster_pm_enter();
			if (error) {
				/* Fall back to the safe C1 state */
				index = 0;
				cx = state_ptr + index;
				pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
				omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
				mpuss_can_lose_context = 0;
			}
		}
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state, true);
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		clkdm_deny_idle(cpu_clkdm[1]);
		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
		clkdm_allow_idle(cpu_clkdm[1]);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			/* Wait for CPU1's ROM code to re-enable the GIC */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

cpu_pm_out:
	tick_broadcast_exit();

fail:
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}

static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_COUPLED |
				 CPUIDLE_FLAG_RCU_IDLE,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED |
				 CPUIDLE_FLAG_RCU_IDLE,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};

static struct cpuidle_driver omap5_idle_driver = {
	.name				= "omap5_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx WFI, MPUSS ON"
		},
		{
			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
			.exit_latency = 48 + 60,
			.target_residency = 100,
			.flags = CPUIDLE_FLAG_TIMER_STOP |
				 CPUIDLE_FLAG_RCU_IDLE,
			.enter = omap_enter_idle_smp,
			.name = "C2",
			.desc = "CPUx CSWR, MPUSS CSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap5_idle_data),
	.safe_state_index = 0,
};

/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4+ idle
 *
 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
	struct cpuidle_driver *idle_driver;

	if (soc_is_omap54xx()) {
		state_ptr = &omap5_idle_data[0];
		idle_driver = &omap5_idle_driver;
	} else {
		state_ptr = &omap4_idle_data[0];
		idle_driver = &omap4_idle_driver;
	}

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
		return -ENODEV;

	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;

	return cpuidle_register(idle_driver, cpu_online_mask);
}
linux-master
arch/arm/mach-omap2/cpuidle44xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP2XXX powerdomain definitions
 *
 * Copyright (C) 2007-2008, 2011 Texas Instruments, Inc.
 * Copyright (C) 2007-2011 Nokia Corporation
 *
 * Paul Walmsley, Jouni Högander
 */

#include <linux/kernel.h>
#include <linux/init.h>

#include "soc.h"
#include "powerdomain.h"
#include "powerdomains2xxx_3xxx_data.h"

#include "prcm-common.h"
#include "prm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"

/* 24XX powerdomains and dependencies */

/* Powerdomains */

static struct powerdomain dsp_pwrdm = {
	.name		  = "dsp_pwrdm",
	.prcm_offs	  = OMAP24XX_DSP_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET,
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain mpu_24xx_pwrdm = {
	.name		  = "mpu_pwrdm",
	.prcm_offs	  = MPU_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET,
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain core_24xx_pwrdm = {
	.name		  = "core_pwrdm",
	.prcm_offs	  = CORE_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 3,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
		[2] = PWRSTS_OFF_RET,	 /* MEM3RETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
		[2] = PWRSTS_OFF_RET_ON, /* MEM3ONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/*
 * 2430-specific powerdomains
 */

/* XXX 2430 KILLDOMAINWKUP bit?  No current users apparently */
static struct powerdomain mdm_pwrdm = {
	.name		  = "mdm_pwrdm",
	.prcm_offs	  = OMAP2430_MDM_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET,	/* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,	/* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/*
 * Powerdomain arrays: common 24xx domains, plus the 2430-only MDM domain
 */

static struct powerdomain *powerdomains_omap24xx[] __initdata = {
	&wkup_omap2_pwrdm,
	&gfx_omap2_pwrdm,
	&dsp_pwrdm,
	&mpu_24xx_pwrdm,
	&core_24xx_pwrdm,
	NULL
};

static struct powerdomain *powerdomains_omap2430[] __initdata = {
	&mdm_pwrdm,
	NULL
};

/* Register the OMAP2420 powerdomains (common 24xx set only) */
void __init omap242x_powerdomains_init(void)
{
	if (!cpu_is_omap2420())
		return;

	pwrdm_register_platform_funcs(&omap2_pwrdm_operations);
	pwrdm_register_pwrdms(powerdomains_omap24xx);
	pwrdm_complete_init();
}

/* Register the OMAP2430 powerdomains (common 24xx set + MDM) */
void __init omap243x_powerdomains_init(void)
{
	if (!cpu_is_omap2430())
		return;

	pwrdm_register_platform_funcs(&omap2_pwrdm_operations);
	pwrdm_register_pwrdms(powerdomains_omap24xx);
	pwrdm_register_pwrdms(powerdomains_omap2430);
	pwrdm_complete_init();
}
linux-master
arch/arm/mach-omap2/powerdomains2xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * pmic-cpcap.c - CPCAP-specific functions for the OPP code
 *
 * Adapted from Motorola Mapphone Android Linux kernel
 * Copyright (C) 2011 Motorola, Inc.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>

#include "soc.h"
#include "pm.h"
#include "voltage.h"
#include <linux/init.h>
#include "vc.h"

/**
 * omap_cpcap_vsel_to_uv - convert CPCAP VSEL value to microvolts DC
 * @vsel: CPCAP VSEL value to convert
 *
 * Returns the microvolts DC that the CPCAP PMIC should generate when
 * programmed with @vsel.
 */
static unsigned long omap_cpcap_vsel_to_uv(unsigned char vsel)
{
	if (vsel > 0x44)
		vsel = 0x44;
	return (((vsel * 125) + 6000)) * 100;
}

/**
 * omap_cpcap_uv_to_vsel - convert microvolts DC to CPCAP VSEL value
 * @uv: microvolts DC to convert
 *
 * Returns the VSEL value necessary for the CPCAP PMIC to
 * generate an output voltage equal to or greater than @uv microvolts DC.
 */
static unsigned char omap_cpcap_uv_to_vsel(unsigned long uv)
{
	if (uv < 600000)
		uv = 600000;
	else if (uv > 1450000)
		uv = 1450000;
	return DIV_ROUND_UP(uv - 600000, 12500);
}

static struct omap_voltdm_pmic omap_cpcap_core = {
	.slew_rate = 4000,
	.step_size = 12500,
	.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin = 900000,
	.vddmax = 1350000,
	.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr = 0x02,
	.volt_reg_addr = 0x00,
	.cmd_reg_addr = 0x01,
	.i2c_high_speed = false,
	.vsel_to_uv = omap_cpcap_vsel_to_uv,
	.uv_to_vsel = omap_cpcap_uv_to_vsel,
};

static struct omap_voltdm_pmic omap_cpcap_iva = {
	.slew_rate = 4000,
	.step_size = 12500,
	.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin = 900000,
	.vddmax = 1375000,
	.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr = 0x44,
	.volt_reg_addr = 0x0,
	.cmd_reg_addr = 0x01,
	.i2c_high_speed = false,
	.vsel_to_uv = omap_cpcap_vsel_to_uv,
	.uv_to_vsel = omap_cpcap_uv_to_vsel,
};

/**
 * omap_max8952_vsel_to_uv - convert MAX8952 VSEL value to microvolts DC
 * @vsel: MAX8952 VSEL value to convert
 *
 * Returns the microvolts DC that the MAX8952 Regulator should generate when
 * programmed with @vsel.
 */
static unsigned long omap_max8952_vsel_to_uv(unsigned char vsel)
{
	if (vsel > 0x3F)
		vsel = 0x3F;
	return (((vsel * 100) + 7700)) * 100;
}

/**
 * omap_max8952_uv_to_vsel - convert microvolts DC to MAX8952 VSEL value
 * @uv: microvolts DC to convert
 *
 * Returns the VSEL value necessary for the MAX8952 Regulator to
 * generate an output voltage equal to or greater than @uv microvolts DC.
 */
static unsigned char omap_max8952_uv_to_vsel(unsigned long uv)
{
	if (uv < 770000)
		uv = 770000;
	else if (uv > 1400000)
		uv = 1400000;
	return DIV_ROUND_UP(uv - 770000, 10000);
}

static struct omap_voltdm_pmic omap443x_max8952_mpu = {
	.slew_rate = 16000,
	.step_size = 10000,
	.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin = 900000,
	.vddmax = 1400000,
	.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr = 0x60,
	.volt_reg_addr = 0x03,
	.cmd_reg_addr = 0x03,
	.i2c_high_speed = false,
	.vsel_to_uv = omap_max8952_vsel_to_uv,
	.uv_to_vsel = omap_max8952_uv_to_vsel,
};

/**
 * omap_fan535503_vsel_to_uv - convert FAN535503 VSEL value to microvolts DC
 * @vsel: FAN535503 VSEL value to convert
 *
 * Returns the microvolts DC that the FAN535503 Regulator should generate when
 * programmed with @vsel.
 */
static unsigned long omap_fan535503_vsel_to_uv(unsigned char vsel)
{
	/* Extract bits[5:0] */
	vsel &= 0x3F;
	return (((vsel * 125) + 7500)) * 100;
}

/**
 * omap_fan535508_vsel_to_uv - convert FAN535508 VSEL value to microvolts DC
 * @vsel: FAN535508 VSEL value to convert
 *
 * Returns the microvolts DC that the FAN535508 Regulator should generate when
 * programmed with @vsel.
 */
static unsigned long omap_fan535508_vsel_to_uv(unsigned char vsel)
{
	/* Extract bits[5:0] */
	vsel &= 0x3F;
	if (vsel > 0x37)
		vsel = 0x37;
	return (((vsel * 125) + 7500)) * 100;
}

/**
 * omap_fan535503_uv_to_vsel - convert microvolts DC to FAN535503 VSEL value
 * @uv: microvolts DC to convert
 *
 * Returns the VSEL value necessary for the FAN535503 Regulator to
 * generate an output voltage equal to or greater than @uv microvolts DC.
 */
static unsigned char omap_fan535503_uv_to_vsel(unsigned long uv)
{
	unsigned char vsel;

	if (uv < 750000)
		uv = 750000;
	else if (uv > 1537500)
		uv = 1537500;

	vsel = DIV_ROUND_UP(uv - 750000, 12500);
	/* Set the 2 MSBs as required by the chip's register layout */
	return vsel | 0xC0;
}

/**
 * omap_fan535508_uv_to_vsel - convert microvolts DC to FAN535508 VSEL value
 * @uv: microvolts DC to convert
 *
 * Returns the VSEL value necessary for the FAN535508 Regulator to
 * generate an output voltage equal to or greater than @uv microvolts DC.
 */
static unsigned char omap_fan535508_uv_to_vsel(unsigned long uv)
{
	unsigned char vsel;

	if (uv < 750000)
		uv = 750000;
	else if (uv > 1437500)
		uv = 1437500;

	vsel = DIV_ROUND_UP(uv - 750000, 12500);
	/* Set the 2 MSBs as required by the chip's register layout */
	return vsel | 0xC0;
}

/* fan5335-core */
static struct omap_voltdm_pmic omap4_fan_core = {
	.slew_rate = 4000,
	.step_size = 12500,
	.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin = 850000,
	.vddmax = 1375000,
	.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr = 0x4A,
	.i2c_high_speed = false,
	.volt_reg_addr = 0x01,
	.cmd_reg_addr = 0x01,
	.vsel_to_uv = omap_fan535508_vsel_to_uv,
	.uv_to_vsel = omap_fan535508_uv_to_vsel,
};

/* fan5335 iva */
static struct omap_voltdm_pmic omap4_fan_iva = {
	.slew_rate = 4000,
	.step_size = 12500,
	.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin = 850000,
	.vddmax = 1375000,
	.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr = 0x48,
	.volt_reg_addr = 0x01,
	.cmd_reg_addr = 0x01,
	.i2c_high_speed = false,
	.vsel_to_uv = omap_fan535503_vsel_to_uv,
	.uv_to_vsel = omap_fan535503_uv_to_vsel,
};

/*
 * Register the board's PMICs with the voltage layer: MAX8952 for MPU on
 * all CPCAP boards; CPCAP itself for core/iva on Droid Bionic, FAN5335
 * variants elsewhere.
 */
int __init omap4_cpcap_init(void)
{
	struct voltagedomain *voltdm;

	if (!of_find_compatible_node(NULL, NULL, "motorola,cpcap"))
		return -ENODEV;

	voltdm = voltdm_lookup("mpu");
	omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);

	if (of_machine_is_compatible("motorola,droid-bionic")) {
		voltdm = voltdm_lookup("core");
		omap_voltage_register_pmic(voltdm, &omap_cpcap_core);

		voltdm = voltdm_lookup("iva");
		omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
	} else {
		voltdm = voltdm_lookup("core");
		omap_voltage_register_pmic(voltdm, &omap4_fan_core);

		voltdm = voltdm_lookup("iva");
		omap_voltage_register_pmic(voltdm, &omap4_fan_iva);
	}

	return 0;
}

static int __init cpcap_late_init(void)
{
	omap4_vc_set_pmic_signaling(PWRDM_POWER_RET);

	return 0;
}
omap_late_initcall(cpcap_late_init);
linux-master
arch/arm/mach-omap2/pmic-cpcap.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-omap2/io.c
 *
 * OMAP2 I/O mapping code
 *
 * Copyright (C) 2005 Nokia Corporation
 * Copyright (C) 2007-2009 Texas Instruments
 *
 * Author:
 *	Juha Yrjola <[email protected]>
 *	Syed Khasim <[email protected]>
 *
 * Added OMAP4 support - Santosh Shilimkar <[email protected]>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>

#include <asm/tlb.h>
#include <asm/mach/map.h>

#include <linux/omap-dma.h>

#include "omap_hwmod.h"
#include "soc.h"
#include "iomap.h"
#include "voltage.h"
#include "powerdomain.h"
#include "clockdomain.h"
#include "common.h"
#include "clock.h"
#include "sdrc.h"
#include "control.h"
#include "sram.h"
#include "cm2xxx.h"
#include "cm3xxx.h"
#include "cm33xx.h"
#include "cm44xx.h"
#include "prm.h"
#include "cm.h"
#include "prcm_mpu44xx.h"
#include "prminst44xx.h"
#include "prm2xxx.h"
#include "prm3xxx.h"
#include "prm33xx.h"
#include "prm44xx.h"
#include "opp2xxx.h"
#include "omap-secure.h"

/*
 * omap_clk_soc_init: points to a function that does the SoC-specific
 * clock initializations
 */
static int (*omap_clk_soc_init)(void);

/*
 * The machine specific code may provide the extra mapping besides the
 * default mapping provided here.
 */

#if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430)
static struct map_desc omap24xx_io_desc[] __initdata = {
	{
		.virtual	= L3_24XX_VIRT,
		.pfn		= __phys_to_pfn(L3_24XX_PHYS),
		.length		= L3_24XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_24XX_VIRT,
		.pfn		= __phys_to_pfn(L4_24XX_PHYS),
		.length		= L4_24XX_SIZE,
		.type		= MT_DEVICE
	},
};

#ifdef CONFIG_SOC_OMAP2420
static struct map_desc omap242x_io_desc[] __initdata = {
	{
		.virtual	= DSP_MEM_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_MEM_2420_PHYS),
		.length		= DSP_MEM_2420_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= DSP_IPI_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_IPI_2420_PHYS),
		.length		= DSP_IPI_2420_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= DSP_MMU_2420_VIRT,
		.pfn		= __phys_to_pfn(DSP_MMU_2420_PHYS),
		.length		= DSP_MMU_2420_SIZE,
		.type		= MT_DEVICE
	},
};

#endif

#ifdef CONFIG_SOC_OMAP2430
static struct map_desc omap243x_io_desc[] __initdata = {
	{
		.virtual	= L4_WK_243X_VIRT,
		.pfn		= __phys_to_pfn(L4_WK_243X_PHYS),
		.length		= L4_WK_243X_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_GPMC_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_GPMC_PHYS),
		.length		= OMAP243X_GPMC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_SDRC_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_SDRC_PHYS),
		.length		= OMAP243X_SDRC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP243X_SMS_VIRT,
		.pfn		= __phys_to_pfn(OMAP243X_SMS_PHYS),
		.length		= OMAP243X_SMS_SIZE,
		.type		= MT_DEVICE
	},
};
#endif
#endif

#ifdef CONFIG_ARCH_OMAP3
static struct map_desc omap34xx_io_desc[] __initdata = {
	{
		.virtual	= L3_34XX_VIRT,
		.pfn		= __phys_to_pfn(L3_34XX_PHYS),
		.length		= L3_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
		.length		= L4_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP34XX_GPMC_VIRT,
		.pfn		= __phys_to_pfn(OMAP34XX_GPMC_PHYS),
		.length		= OMAP34XX_GPMC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP343X_SMS_VIRT,
		.pfn		= __phys_to_pfn(OMAP343X_SMS_PHYS),
		.length		= OMAP343X_SMS_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= OMAP343X_SDRC_VIRT,
		.pfn		= __phys_to_pfn(OMAP343X_SDRC_PHYS),
		.length		= OMAP343X_SDRC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_PER_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER_34XX_PHYS),
		.length		= L4_PER_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_EMU_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_EMU_34XX_PHYS),
		.length		= L4_EMU_34XX_SIZE,
		.type		= MT_DEVICE
	},
};
#endif

#ifdef CONFIG_SOC_TI81XX
static struct map_desc omapti81xx_io_desc[] __initdata = {
	{
		.virtual	= L4_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
		.length		= L4_34XX_SIZE,
		.type		= MT_DEVICE
	}
};
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static struct map_desc omapam33xx_io_desc[] __initdata = {
	{
		.virtual	= L4_34XX_VIRT,
		.pfn		= __phys_to_pfn(L4_34XX_PHYS),
		.length		= L4_34XX_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= L4_WK_AM33XX_VIRT,
		.pfn		= __phys_to_pfn(L4_WK_AM33XX_PHYS),
		.length		= L4_WK_AM33XX_SIZE,
		.type		= MT_DEVICE
	}
};
#endif

#ifdef CONFIG_ARCH_OMAP4
static struct map_desc omap44xx_io_desc[] __initdata = {
	{
		.virtual	= L3_44XX_VIRT,
		.pfn		= __phys_to_pfn(L3_44XX_PHYS),
		.length		= L3_44XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_44XX_VIRT,
		.pfn		= __phys_to_pfn(L4_44XX_PHYS),
		.length		= L4_44XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_PER_44XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER_44XX_PHYS),
		.length		= L4_PER_44XX_SIZE,
		.type		= MT_DEVICE,
	},
};
#endif

#ifdef CONFIG_SOC_OMAP5
static struct map_desc omap54xx_io_desc[] __initdata = {
	{
		.virtual	= L3_54XX_VIRT,
		.pfn		= __phys_to_pfn(L3_54XX_PHYS),
		.length		= L3_54XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_54XX_VIRT,
		.pfn		= __phys_to_pfn(L4_54XX_PHYS),
		.length		= L4_54XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_WK_54XX_VIRT,
		.pfn		= __phys_to_pfn(L4_WK_54XX_PHYS),
		.length		= L4_WK_54XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_PER_54XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER_54XX_PHYS),
		.length		= L4_PER_54XX_SIZE,
		.type		= MT_DEVICE,
	},
};
#endif

#ifdef CONFIG_SOC_DRA7XX
static struct map_desc dra7xx_io_desc[] __initdata = {
	{
		.virtual	= L4_CFG_MPU_DRA7XX_VIRT,
		.pfn		= __phys_to_pfn(L4_CFG_MPU_DRA7XX_PHYS),
		.length		= L4_CFG_MPU_DRA7XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L3_MAIN_SN_DRA7XX_VIRT,
		.pfn		= __phys_to_pfn(L3_MAIN_SN_DRA7XX_PHYS),
		.length		= L3_MAIN_SN_DRA7XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_PER1_DRA7XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER1_DRA7XX_PHYS),
		.length		= L4_PER1_DRA7XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_PER2_DRA7XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER2_DRA7XX_PHYS),
		.length		= L4_PER2_DRA7XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_PER3_DRA7XX_VIRT,
		.pfn		= __phys_to_pfn(L4_PER3_DRA7XX_PHYS),
		.length		= L4_PER3_DRA7XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_CFG_DRA7XX_VIRT,
		.pfn		= __phys_to_pfn(L4_CFG_DRA7XX_PHYS),
		.length		= L4_CFG_DRA7XX_SIZE,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= L4_WKUP_DRA7XX_VIRT,
		.pfn		= __phys_to_pfn(L4_WKUP_DRA7XX_PHYS),
		.length		= L4_WKUP_DRA7XX_SIZE,
		.type		= MT_DEVICE,
	},
};
#endif

#ifdef CONFIG_SOC_OMAP2420
void __init omap242x_map_io(void)
{
	iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
	iotable_init(omap242x_io_desc, ARRAY_SIZE(omap242x_io_desc));
}
#endif

#ifdef CONFIG_SOC_OMAP2430
void __init omap243x_map_io(void)
{
	iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
	iotable_init(omap243x_io_desc, ARRAY_SIZE(omap243x_io_desc));
}
#endif

#ifdef CONFIG_ARCH_OMAP3
void __init omap3_map_io(void)
{
	iotable_init(omap34xx_io_desc, ARRAY_SIZE(omap34xx_io_desc));
}
#endif

#ifdef CONFIG_SOC_TI81XX
void __init ti81xx_map_io(void)
{
	iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc));
}
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
void __init am33xx_map_io(void)
{
	iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc));
}
#endif

#ifdef CONFIG_ARCH_OMAP4
void __init omap4_map_io(void)
{
	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
	omap_barriers_init();
}
#endif

#ifdef CONFIG_SOC_OMAP5
void __init omap5_map_io(void)
{
	iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
	omap_barriers_init();
}
#endif

#ifdef CONFIG_SOC_DRA7XX
void __init dra7xx_map_io(void)
{
	iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
	omap_barriers_init();
}
#endif

/*
 * omap2_init_reprogram_sdrc - reprogram SDRC timing parameters
 *
 * Sets the CORE DPLL3 M2 divider to the same value that it's at
 * currently.  This has the effect of setting the SDRC SDRAM AC timing
 * registers to the values currently defined by the kernel.  Currently
 * only defined for OMAP3; will return 0 if called on OMAP2.  Returns
 * -EINVAL if the dpll3_m2_ck cannot be found, 0 if called on OMAP2,
 * or passes along the return value of clk_set_rate().
 */
static int __init _omap2_init_reprogram_sdrc(void)
{
	struct clk *dpll3_m2_ck;
	int v = -EINVAL;
	long rate;

	if (!cpu_is_omap34xx())
		return 0;

	dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck");
	if (IS_ERR(dpll3_m2_ck))
		return -EINVAL;

	rate = clk_get_rate(dpll3_m2_ck);
	pr_info("Reprogramming SDRC clock to %ld Hz\n", rate);
	v = clk_set_rate(dpll3_m2_ck, rate);
	if (v)
		pr_err("dpll3_m2_clk rate change failed: %d\n", v);

	clk_put(dpll3_m2_ck);

	return v;
}

#ifdef CONFIG_OMAP_HWMOD
static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
{
	return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
}

static void __init __maybe_unused omap_hwmod_init_postsetup(void)
{
	u8 postsetup_state = _HWMOD_STATE_DEFAULT;

	/* Set the default postsetup state for all hwmods */
	omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state);
}
#else
static inline void omap_hwmod_init_postsetup(void)
{
}
#endif

#ifdef CONFIG_SOC_OMAP2420
void __init omap2420_init_early(void)
{
	omap2_set_globals_tap(OMAP242X_CLASS, OMAP2_L4_IO_ADDRESS(0x48014000));
	omap2_set_globals_sdrc(OMAP2_L3_IO_ADDRESS(OMAP2420_SDRC_BASE),
			       OMAP2_L3_IO_ADDRESS(OMAP2420_SMS_BASE));
	omap2_control_base_init();
	omap2xxx_check_revision();
	omap2_prcm_base_init();
	omap2xxx_voltagedomains_init();
	omap242x_powerdomains_init();
	omap242x_clockdomains_init();
	omap2420_hwmod_init();
	omap_hwmod_init_postsetup();
	omap_clk_soc_init = omap2420_dt_clk_init;
	rate_table = omap2420_rate_table;
}
#endif

#ifdef CONFIG_SOC_OMAP2430
void __init omap2430_init_early(void)
{
	omap2_set_globals_tap(OMAP243X_CLASS, OMAP2_L4_IO_ADDRESS(0x4900a000));
	omap2_set_globals_sdrc(OMAP2_L3_IO_ADDRESS(OMAP243X_SDRC_BASE),
			       OMAP2_L3_IO_ADDRESS(OMAP243X_SMS_BASE));
	omap2_control_base_init();
	omap2xxx_check_revision();
	omap2_prcm_base_init();
	omap2xxx_voltagedomains_init();
	omap243x_powerdomains_init();
	omap243x_clockdomains_init();
	omap2430_hwmod_init();
	omap_hwmod_init_postsetup();
	omap_clk_soc_init = omap2430_dt_clk_init;
	rate_table = omap2430_rate_table;
}
#endif

/*
 * Currently only board-omap3beagle.c should call this because of the
 * same machine_id for 34xx and 36xx beagle.. Will get fixed with DT.
 */
#ifdef CONFIG_ARCH_OMAP3
static void __init omap3_init_early(void)
{
	omap2_set_globals_tap(OMAP343X_CLASS, OMAP2_L4_IO_ADDRESS(0x4830A000));
	omap2_set_globals_sdrc(OMAP2_L3_IO_ADDRESS(OMAP343X_SDRC_BASE),
			       OMAP2_L3_IO_ADDRESS(OMAP343X_SMS_BASE));
	omap2_control_base_init();
	omap3xxx_check_revision();
	omap3xxx_check_features();
	omap2_prcm_base_init();
	omap3xxx_voltagedomains_init();
	omap3xxx_powerdomains_init();
	omap3xxx_clockdomains_init();
	omap3xxx_hwmod_init();
	omap_hwmod_init_postsetup();
	omap_secure_init();
}

void __init omap3430_init_early(void)
{
	omap3_init_early();
	omap_clk_soc_init = omap3430_dt_clk_init;
}

void __init omap3630_init_early(void)
{
	omap3_init_early();
	omap_clk_soc_init = omap3630_dt_clk_init;
}

void __init am35xx_init_early(void)
{
	omap3_init_early();
	omap_clk_soc_init = am35xx_dt_clk_init;
}

void __init omap3_init_late(void)
{
	omap_pm_soc_init = omap3_pm_init;
}

void __init ti81xx_init_late(void)
{
	omap_pm_soc_init = omap_pm_nop_init;
}
#endif

#ifdef CONFIG_SOC_TI81XX
void __init ti814x_init_early(void)
{
	omap2_set_globals_tap(TI814X_CLASS,
			      OMAP2_L4_IO_ADDRESS(TI81XX_TAP_BASE));
	omap2_control_base_init();
	omap3xxx_check_revision();
	ti81xx_check_features();
	omap2_prcm_base_init();
	omap3xxx_voltagedomains_init();
	omap3xxx_powerdomains_init();
	ti814x_clockdomains_init();
	dm814x_hwmod_init();
	omap_hwmod_init_postsetup();
	omap_clk_soc_init = dm814x_dt_clk_init;
	omap_secure_init();
}

void __init ti816x_init_early(void)
{
	omap2_set_globals_tap(TI816X_CLASS,
			      OMAP2_L4_IO_ADDRESS(TI81XX_TAP_BASE));
	omap2_control_base_init();
	omap3xxx_check_revision();
	ti81xx_check_features();
	omap2_prcm_base_init();
	omap3xxx_voltagedomains_init();
	omap3xxx_powerdomains_init();
	ti816x_clockdomains_init();
	dm816x_hwmod_init();
	omap_hwmod_init_postsetup();
	omap_clk_soc_init = dm816x_dt_clk_init;
	omap_secure_init();
}
#endif

#ifdef CONFIG_SOC_AM33XX
void __init am33xx_init_early(void)
{
	omap2_set_globals_tap(AM335X_CLASS,
			      AM33XX_L4_WK_IO_ADDRESS(AM33XX_TAP_BASE));
	omap2_control_base_init();
	omap3xxx_check_revision();
	am33xx_check_features();
	omap2_prcm_base_init();
	am33xx_powerdomains_init();
	am33xx_clockdomains_init();
	omap_clk_soc_init = am33xx_dt_clk_init;
	omap_secure_init();
}

void __init am33xx_init_late(void)
{
	omap_pm_soc_init = amx3_common_pm_init;
}
#endif

#ifdef CONFIG_SOC_AM43XX
void __init am43xx_init_early(void)
{
	omap2_set_globals_tap(AM335X_CLASS,
			      AM33XX_L4_WK_IO_ADDRESS(AM33XX_TAP_BASE));
	omap2_control_base_init();
	omap3xxx_check_revision();
	am33xx_check_features();
	omap2_prcm_base_init();
	am43xx_powerdomains_init();
	am43xx_clockdomains_init();
	omap_l2_cache_init();
	omap_clk_soc_init = am43xx_dt_clk_init;
	omap_secure_init();
}

void __init am43xx_init_late(void)
{
	omap_pm_soc_init = amx3_common_pm_init;
}
#endif

#ifdef CONFIG_ARCH_OMAP4
void __init omap4430_init_early(void)
{
	omap2_set_globals_tap(OMAP443X_CLASS,
			      OMAP2_L4_IO_ADDRESS(OMAP443X_SCM_BASE));
	omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE));
	omap2_control_base_init();
	omap4xxx_check_revision();
	omap4xxx_check_features();
	omap2_prcm_base_init();
	omap4_sar_ram_init();
	omap4_mpuss_early_init();
	omap4_pm_init_early();
	omap44xx_voltagedomains_init();
	omap44xx_powerdomains_init();
	omap44xx_clockdomains_init();
	omap_l2_cache_init();
	omap_clk_soc_init = omap4xxx_dt_clk_init;
	omap_secure_init();
}

void __init omap4430_init_late(void)
{
	omap_pm_soc_init = omap4_pm_init;
}
#endif

#ifdef CONFIG_SOC_OMAP5
void __init omap5_init_early(void)
{
	omap2_set_globals_tap(OMAP54XX_CLASS,
			      OMAP2_L4_IO_ADDRESS(OMAP54XX_SCM_BASE));
	omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
	omap2_control_base_init();
	omap2_prcm_base_init();
	omap5xxx_check_revision();
	omap4_sar_ram_init();
	omap4_mpuss_early_init();
	omap4_pm_init_early();
	omap54xx_voltagedomains_init();
	omap54xx_powerdomains_init();
	omap54xx_clockdomains_init();
	omap_clk_soc_init = omap5xxx_dt_clk_init;
	omap_secure_init();
}

void __init omap5_init_late(void)
{
	omap_pm_soc_init = omap4_pm_init;
}
#endif

#ifdef CONFIG_SOC_DRA7XX
void __init dra7xx_init_early(void)
{
	omap2_set_globals_tap(DRA7XX_CLASS,
			      OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
	omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
	omap2_control_base_init();
	omap4_pm_init_early();
	omap2_prcm_base_init();
	dra7xxx_check_revision();
	dra7xx_powerdomains_init();
	dra7xx_clockdomains_init();
	omap_clk_soc_init = dra7xx_dt_clk_init;
	omap_secure_init();
}

void __init dra7xx_init_late(void)
{
	omap_pm_soc_init = omap4_pm_init;
}
#endif

void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
			   struct omap_sdrc_params *sdrc_cs1)
{
	omap_sram_init();

	if (cpu_is_omap24xx() || omap3_has_sdrc()) {
		omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
		_omap2_init_reprogram_sdrc();
	}
}

/* Set up the clock framework and run the SoC-specific clock init hook */
int __init omap_clk_init(void)
{
	int ret = 0;

	if (!omap_clk_soc_init)
		return 0;

	ti_clk_init_features();

	omap2_clk_setup_ll_ops();

	ret = omap_control_init();
	if (ret)
		return ret;

	ret = omap_prcm_init();
	if (ret)
		return ret;

	of_clk_init(NULL);

	ti_dt_clk_init_retry_clks();

	ti_dt_clockdomains_setup();

	ret = omap_clk_soc_init();

	return ret;
}
arch/arm/mach-omap2/io.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4 SMP source file. It contains platform specific functions
 * needed for the linux smp kernel.
 *
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Author:
 *      Santosh Shilimkar <[email protected]>
 *
 * Platform file needed for the OMAP4 SMP. This file is based on arm
 * realview smp platform.
 *
 * Copyright (c) 2002 ARM Limited.
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>

#include "omap-secure.h"
#include "omap-wakeupgen.h"
#include <asm/cputype.h>

#include "soc.h"
#include "iomap.h"
#include "common.h"
#include "clockdomain.h"
#include "pm.h"

/* Mask to extract implementer/part from MIDR, ignoring revision fields */
#define CPU_MASK			0xff0ffff0
#define CPU_CORTEX_A9			0x410FC090
#define CPU_CORTEX_A15			0x410FC0F0

#define OMAP5_CORE_COUNT		0x2

/* AuxCoreBoot0 release values for GP (register write) vs HS (secure API) */
#define AUX_CORE_BOOT0_GP_RELEASE	0x020
#define AUX_CORE_BOOT0_HS_RELEASE	0x200

/*
 * Per-SoC SMP bring-up parameters: CPU1 reset control register (phys and
 * mapped), SCU and WakeupGen bases, and the secondary startup entry point.
 */
struct omap_smp_config {
	unsigned long cpu1_rstctrl_pa;
	void __iomem *cpu1_rstctrl_va;
	void __iomem *scu_base;
	void __iomem *wakeupgen_base;
	void *startup_addr;
};

/* Runtime copy selected in omap4_smp_prepare_cpus() */
static struct omap_smp_config cfg;

static const struct omap_smp_config omap443x_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x4824380c,
	.startup_addr = omap4_secondary_startup,
};

static const struct omap_smp_config omap446x_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x4824380c,
	.startup_addr = omap4460_secondary_startup,
};

static const struct omap_smp_config omap5_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x48243810,
	.startup_addr = omap5_secondary_startup,
};

/* Accessor for the SCU base cached by omap4_smp_init_cpus() */
void __iomem *omap4_get_scu_base(void)
{
	return cfg.scu_base;
}

#ifdef CONFIG_OMAP5_ERRATA_801819
/*
 * Apply the ARM erratum 801819 workaround (disable write streaming) via a
 * secure monitor call, unless the silicon already carries the fix
 * (REVIDR[3]) or the ACR bits are already set.
 */
static void omap5_erratum_workaround_801819(void)
{
	u32 acr, revidr;
	u32 acr_mask;

	/* REVIDR[3] indicates erratum fix available on silicon */
	asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
	if (revidr & (0x1 << 3))
		return;

	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
	/*
	 * BIT(27) - Disables streaming. All write-allocate lines allocate in
	 * the L1 or L2 cache.
	 * BIT(25) - Disables streaming. All write-allocate lines allocate in
	 * the L1 cache.
	 */
	acr_mask = (0x3 << 25) | (0x3 << 27);
	/* do we already have it done.. if yes, skip expensive smc */
	if ((acr & acr_mask) == acr_mask)
		return;

	acr |= acr_mask;
	/* ACR is secure-only writable; go through the monitor */
	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);

	pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
		 __func__, smp_processor_id());
}
#else
static inline void omap5_erratum_workaround_801819(void) { }
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/*
 * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
 * ICIALLU) to activate the workaround for secondary Core.
 * NOTE: it is assumed that the primary core's configuration is done
 * by the boot loader (kernel will detect a misconfiguration and complain
 * if this is not done).
 *
 * In General Purpose(GP) devices, ACR bit settings can only be done
 * by ROM code in "secure world" using the smc call and there is no
 * option to update the "firmware" on such devices. This also works for
 * High security(HS) devices, as a backup option in case the
 * "update" is not done in the "security firmware".
 */
static void omap5_secondary_harden_predictor(void)
{
	u32 acr, acr_mask;

	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));

	/*
	 * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
	 */
	acr_mask = BIT(0);

	/* Do we already have it done.. if yes, skip expensive smc */
	if ((acr & acr_mask) == acr_mask)
		return;

	acr |= acr_mask;
	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);

	pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
		 __func__, smp_processor_id());
}
#else
static inline void omap5_secondary_harden_predictor(void) { }
#endif

/*
 * Per-CPU init run on the secondary core as it comes up: NS SMP bit
 * enablement on HS OMAP443x, and timer/errata setup on OMAP5/DRA7.
 */
static void omap4_secondary_init(unsigned int cpu)
{
	/*
	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
	 * OMAP443X GP devices- SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	if (soc_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
							4, 0, 0, 0, 0, 0);

	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		/*
		 * Configure the CNTFRQ register for the secondary cpu's which
		 * indicates the frequency of the cpu local timers.
		 */
		set_cntfreq();

		/* Configure ACR to disable streaming WA for 801819 */
		omap5_erratum_workaround_801819();
		/* Enable ACR to allow for ICUALLU workaround */
		omap5_secondary_harden_predictor();
	}
}

/*
 * Release CPU1 out of its parked WFE loop.  First boot uses dsb_sev();
 * subsequent wakeups (hotplug/resume) force the CPU1 clock/power domains
 * ON and work around the ROM GIC-restore erratum before the wakeup IPI.
 */
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* static: looked up once, reused across repeated CPU1 boots */
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap4_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
					 0xfffffdff);
	else
		writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);

	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * The SGI(Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. The clockdomain is then put back to
	 * hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section :
	 *	4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * GIC distributor control register has changed between
		 * CortexA9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 * the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 * it's wakeup path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that CPU power state is set to ON to avoid CPU
		 * powerdomain transition on wfi
		 */
		clkdm_deny_idle_nolock(cpu1_clkdm);
		pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle_nolock(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			/* Wait for CPU1's wakeup path to re-enable the GIC */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		dsb_sev();
		booted = true;
	}

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	return 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init omap4_smp_init_cpus(void)
{
	unsigned int i = 0, ncores = 1, cpu_id;

	/* Use ARM cpuid check here, as SoC detection will not work so early */
	cpu_id = read_cpuid_id() & CPU_MASK;
	if (cpu_id == CPU_CORTEX_A9) {
		/*
		 * Currently we can't call ioremap here because
		 * SoC detection won't work until after init_early.
		 */
		cfg.scu_base = OMAP2_L4_IO_ADDRESS(scu_a9_get_base());
		BUG_ON(!cfg.scu_base);
		ncores = scu_get_core_count(cfg.scu_base);
	} else if (cpu_id == CPU_CORTEX_A15) {
		/* A15-based parts (OMAP5/DRA7) have no SCU; fixed count */
		ncores = OMAP5_CORE_COUNT;
	}

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

/*
 * For now, just make sure the start-up address is not within the booting
 * kernel space as that means we just overwrote whatever secondary_startup()
 * code there was.
 */
static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
{
	if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
		return false;

	return true;
}

/*
 * We may need to reset CPU1 before configuring, otherwise kexec boot can end
 * up trying to use old kernel startup address or suspend-resume will
 * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
 * idle states.
 */
static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
{
	unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
	bool needs_reset = false;
	u32 released;

	if (omap_secure_apis_support())
		released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
	else
		released = readl_relaxed(cfg.wakeupgen_base +
					 OMAP_AUX_CORE_BOOT_0) &
						AUX_CORE_BOOT0_GP_RELEASE;
	if (released) {
		/* CPU1 already running: resetting it now would be unsafe */
		pr_warn("smp: CPU1 not parked?\n");

		return;
	}

	cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
					OMAP_AUX_CORE_BOOT_1);

	/* Did the configured secondary_startup() get overwritten? */
	if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
		needs_reset = true;

	/*
	 * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
	 * deeper idle state in WFI and will wake to an invalid address.
	 */
	if ((soc_is_omap44xx() || soc_is_omap54xx())) {
		cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
		if (!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
			needs_reset = true;
	} else {
		cpu1_ns_pa_addr = 0;
	}

	if (!needs_reset || !c->cpu1_rstctrl_va)
		return;

	pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
		cpu1_startup_pa, cpu1_ns_pa_addr);

	/* Pulse the CPU1 reset line: assert, read back to post, deassert */
	writel_relaxed(1, c->cpu1_rstctrl_va);
	readl_relaxed(c->cpu1_rstctrl_va);
	writel_relaxed(0, c->cpu1_rstctrl_va);
}

/*
 * Select the per-SoC config, map CPU1's reset control, enable the SCU,
 * reset CPU1 if it is parked at a stale address, and publish the
 * secondary startup address in AuxCoreBoot1.
 */
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
	const struct omap_smp_config *c = NULL;

	if (soc_is_omap443x())
		c = &omap443x_cfg;
	else if (soc_is_omap446x())
		c = &omap446x_cfg;
	else if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x())
		c = &omap5_cfg;

	if (!c) {
		pr_err("%s Unknown SMP SoC?\n", __func__);
		return;
	}

	/* Must preserve cfg.scu_base set earlier */
	cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
	cfg.startup_addr = c->startup_addr;
	cfg.wakeupgen_base = omap_get_wakeupgen_base();

	if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x()) {
		/* If booted in HYP mode, CPU1 must come up in HYP too */
		if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
			cfg.startup_addr = omap5_secondary_hyp_startup;
		omap5_erratum_workaround_801819();
	}

	cfg.cpu1_rstctrl_va = ioremap(cfg.cpu1_rstctrl_pa, 4);
	if (!cfg.cpu1_rstctrl_va)
		return;

	/*
	 * Initialise the SCU and wake up the secondary core using
	 * wakeup_secondary().
	 */
	if (cfg.scu_base)
		scu_enable(cfg.scu_base);

	omap4_smp_maybe_reset_cpu1(&cfg);

	/*
	 * Write the address of secondary startup routine into the
	 * AuxCoreBoot1 where ROM code will jump and start executing
	 * on secondary core once out of WFE
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
	else
		writel_relaxed(__pa_symbol(cfg.startup_addr),
			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}

const struct smp_operations omap4_smp_ops __initconst = {
	.smp_init_cpus		= omap4_smp_init_cpus,
	.smp_prepare_cpus	= omap4_smp_prepare_cpus,
	.smp_secondary_init	= omap4_secondary_init,
	.smp_boot_secondary	= omap4_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= omap4_cpu_die,
	.cpu_kill		= omap4_cpu_kill,
#endif
};
linux-master
arch/arm/mach-omap2/omap-smp.c
// SPDX-License-Identifier: GPL-2.0-only /* * omap3-restart.c - Code common to all OMAP3xxx machines. * * Copyright (C) 2009, 2012 Texas Instruments * Copyright (C) 2010 Nokia Corporation * Tony Lindgren <[email protected]> * Santosh Shilimkar <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/reboot.h> #include "common.h" #include "control.h" #include "prm.h" /* Global address base setup code */ /** * omap3xxx_restart - trigger a software restart of the SoC * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c * @cmd: passed from the userspace program rebooting the system (if provided) * * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ void omap3xxx_restart(enum reboot_mode mode, const char *cmd) { omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0)); omap_prm_reset_system(); }
linux-master
arch/arm/mach-omap2/omap3-restart.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP2-specific DPLL control functions * * Copyright (C) 2011 Nokia Corporation * Paul Walmsley */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/io.h> #include "clock.h" #include "cm2xxx.h" #include "cm-regbits-24xx.h" /* Private functions */ /** * _allow_idle - enable DPLL autoidle bits * @clk: struct clk * of the DPLL to operate on * * Enable DPLL automatic idle control. The DPLL will enter low-power * stop when its downstream clocks are gated. No return value. * REVISIT: DPLL can optionally enter low-power bypass by writing 0x1 * instead. Add some mechanism to optionally enter this mode. */ static void _allow_idle(struct clk_hw_omap *clk) { if (!clk || !clk->dpll_data) return; omap2xxx_cm_set_dpll_auto_low_power_stop(); } /** * _deny_idle - prevent DPLL from automatically idling * @clk: struct clk * of the DPLL to operate on * * Disable DPLL automatic idle control. No return value. */ static void _deny_idle(struct clk_hw_omap *clk) { if (!clk || !clk->dpll_data) return; omap2xxx_cm_set_dpll_disable_autoidle(); } /* Public data */ const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll = { .allow_idle = _allow_idle, .deny_idle = _deny_idle, };
linux-master
arch/arm/mach-omap2/clkt2xxx_dpll.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP2/3 common powerdomain definitions * * Copyright (C) 2007-2008, 2011 Texas Instruments, Inc. * Copyright (C) 2007-2011 Nokia Corporation * * Paul Walmsley, Jouni Högander */ /* * The names for the DSP/IVA2 powerdomains are confusing. * * Most OMAP chips have an on-board DSP. * * On the 2420, this is a 'C55 DSP called, simply, the DSP. Its * powerdomain is called the "DSP power domain." On the 2430, the * on-board DSP is a 'C64 DSP, now called (along with its hardware * accelerators) the IVA2 or IVA2.1. Its powerdomain is still called * the "DSP power domain." On the 3430, the DSP is a 'C64 DSP like the * 2430, also known as the IVA2; but its powerdomain is now called the * "IVA2 power domain." * * The 2420 also has something called the IVA, which is a separate ARM * core, and has nothing to do with the DSP/IVA2. * * Ideally the DSP/IVA2 could just be the same powerdomain, but the PRCM * address offset is different between the C55 and C64 DSPs. */ #include "powerdomain.h" #include "prcm-common.h" #include "prm.h" /* OMAP2/3-common powerdomains */ /* * The GFX powerdomain is not present on 3430ES2, but currently we do not * have a macro to filter it out at compile-time. */ struct powerdomain gfx_omap2_pwrdm = { .name = "gfx_pwrdm", .prcm_offs = GFX_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; struct powerdomain wkup_omap2_pwrdm = { .name = "wkup_pwrdm", .prcm_offs = WKUP_MOD, .pwrsts = PWRSTS_ON, .voltdm = { .name = "wakeup" }, };
linux-master
arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4 Voltage Controller (VC) data
 *
 * Copyright (C) 2007, 2010 Texas Instruments, Inc.
 * Rajendra Nayak <[email protected]>
 * Lesly A M <[email protected]>
 * Thara Gopinath <[email protected]>
 *
 * Copyright (C) 2008, 2011 Nokia Corporation
 * Kalle Jokiniemi
 * Paul Walmsley
 */
#include <linux/io.h>
#include <linux/err.h>
#include <linux/init.h>

#include "common.h"

#include "prm44xx.h"
#include "prm-regbits-44xx.h"
#include "voltage.h"
#include "vc.h"

/*
 * VC data common to 44xx chips: PRM register offsets and bitfield
 * shifts/masks shared by all three voltage channels below.
 * XXX This stuff presumably belongs in the vc3xxx.c or vc.c file.
 */
static const struct omap_vc_common omap4_vc_common = {
	.bypass_val_reg = OMAP4_PRM_VC_VAL_BYPASS_OFFSET,
	.data_shift = OMAP4430_DATA_SHIFT,
	.slaveaddr_shift = OMAP4430_SLAVEADDR_SHIFT,
	.regaddr_shift = OMAP4430_REGADDR_SHIFT,
	.valid = OMAP4430_VALID_MASK,
	.cmd_on_shift = OMAP4430_ON_SHIFT,
	.cmd_on_mask = OMAP4430_ON_MASK,
	.cmd_onlp_shift = OMAP4430_ONLP_SHIFT,
	.cmd_ret_shift = OMAP4430_RET_SHIFT,
	.cmd_off_shift = OMAP4430_OFF_SHIFT,
	.i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
	.i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK,
	.i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
	.i2c_mcode_mask	 = OMAP4430_HSMCODE_MASK,
};

/* VC instance data for each controllable voltage line */

/* MPU voltage channel (VDD_MPU_L) */
struct omap_vc_channel omap4_vc_mpu = {
	.flags = OMAP_VC_CHANNEL_DEFAULT | OMAP_VC_CHANNEL_CFG_MUTANT,
	.common = &omap4_vc_common,
	.smps_sa_reg = OMAP4_PRM_VC_SMPS_SA_OFFSET,
	.smps_volra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET,
	.smps_cmdra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_CMD_OFFSET,
	.cfg_channel_reg = OMAP4_PRM_VC_CFG_CHANNEL_OFFSET,
	.cmdval_reg = OMAP4_PRM_VC_VAL_CMD_VDD_MPU_L_OFFSET,
	.smps_sa_mask = OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_MASK,
	.smps_volra_mask = OMAP4430_VOLRA_VDD_MPU_L_MASK,
	.smps_cmdra_mask = OMAP4430_CMDRA_VDD_MPU_L_MASK,
	.cfg_channel_sa_shift = OMAP4430_SA_VDD_MPU_L_SHIFT,
};

/* IVA voltage channel (VDD_IVA_L) */
struct omap_vc_channel omap4_vc_iva = {
	.common = &omap4_vc_common,
	.smps_sa_reg = OMAP4_PRM_VC_SMPS_SA_OFFSET,
	.smps_volra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET,
	.smps_cmdra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_CMD_OFFSET,
	.cfg_channel_reg = OMAP4_PRM_VC_CFG_CHANNEL_OFFSET,
	.cmdval_reg = OMAP4_PRM_VC_VAL_CMD_VDD_IVA_L_OFFSET,
	.smps_sa_mask = OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_MASK,
	.smps_volra_mask = OMAP4430_VOLRA_VDD_IVA_L_MASK,
	.smps_cmdra_mask = OMAP4430_CMDRA_VDD_IVA_L_MASK,
	.cfg_channel_sa_shift = OMAP4430_SA_VDD_IVA_L_SHIFT,
};

/* CORE voltage channel (VDD_CORE_L) */
struct omap_vc_channel omap4_vc_core = {
	.common = &omap4_vc_common,
	.smps_sa_reg = OMAP4_PRM_VC_SMPS_SA_OFFSET,
	.smps_volra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET,
	.smps_cmdra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_CMD_OFFSET,
	.cfg_channel_reg = OMAP4_PRM_VC_CFG_CHANNEL_OFFSET,
	.cmdval_reg = OMAP4_PRM_VC_VAL_CMD_VDD_CORE_L_OFFSET,
	.smps_sa_mask = OMAP4430_SA_VDD_CORE_L_0_6_MASK,
	.smps_volra_mask = OMAP4430_VOLRA_VDD_CORE_L_MASK,
	.smps_cmdra_mask = OMAP4430_CMDRA_VDD_CORE_L_MASK,
	.cfg_channel_sa_shift = OMAP4430_SA_VDD_CORE_L_SHIFT,
};

/*
 * Voltage levels for different operating modes: on, sleep, retention and off
 */
#define OMAP4_ON_VOLTAGE_UV		1375000
#define OMAP4_ONLP_VOLTAGE_UV		1375000
#define OMAP4_RET_VOLTAGE_UV		837500
#define OMAP4_OFF_VOLTAGE_UV		0

/* Per-channel voltage targets (microvolts) for each power mode */
struct omap_vc_param omap4_mpu_vc_data = {
	.on			= OMAP4_ON_VOLTAGE_UV,
	.onlp			= OMAP4_ONLP_VOLTAGE_UV,
	.ret			= OMAP4_RET_VOLTAGE_UV,
	.off			= OMAP4_OFF_VOLTAGE_UV,
};

struct omap_vc_param omap4_iva_vc_data = {
	.on			= OMAP4_ON_VOLTAGE_UV,
	.onlp			= OMAP4_ONLP_VOLTAGE_UV,
	.ret			= OMAP4_RET_VOLTAGE_UV,
	.off			= OMAP4_OFF_VOLTAGE_UV,
};

struct omap_vc_param omap4_core_vc_data = {
	.on			= OMAP4_ON_VOLTAGE_UV,
	.onlp			= OMAP4_ONLP_VOLTAGE_UV,
	.ret			= OMAP4_RET_VOLTAGE_UV,
	.off			= OMAP4_OFF_VOLTAGE_UV,
};
linux-master
arch/arm/mach-omap2/vc44xx_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AM33XX Power domain data
 *
 * Copyright (C) 2011-2012 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/kernel.h>
#include <linux/init.h>

#include "powerdomain.h"
#include "prcm-common.h"
#include "prm-regbits-33xx.h"
#include "prm33xx.h"

/* SGX graphics powerdomain: one memory bank (gfx_mem) */
static struct powerdomain gfx_33xx_pwrdm = {
	.name			= "gfx_pwrdm",
	.voltdm			= { .name = "core" },
	.prcm_offs		= AM33XX_PRM_GFX_MOD,
	.pwrstctrl_offs		= AM33XX_PM_GFX_PWRSTCTRL_OFFSET,
	.pwrstst_offs		= AM33XX_PM_GFX_PWRSTST_OFFSET,
	.pwrsts			= PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret	= PWRSTS_OFF_RET,
	.flags			= PWRDM_HAS_LOWPOWERSTATECHANGE,
	.banks			= 1,
	.logicretstate_mask	= AM33XX_LOGICRETSTATE_MASK,
	.mem_on_mask		= {
		[0]	= AM33XX_GFX_MEM_ONSTATE_MASK,	/* gfx_mem */
	},
	.mem_ret_mask		= {
		[0]	= AM33XX_GFX_MEM_RETSTATE_MASK,	/* gfx_mem */
	},
	.mem_pwrst_mask		= {
		[0]	= AM33XX_GFX_MEM_STATEST_MASK,	/* gfx_mem */
	},
	.mem_retst_mask		= {
		[0]	= AM33XX_GFX_MEM_RETSTATE_MASK,	/* gfx_mem */
	},
	.pwrsts_mem_ret		= {
		[0]	= PWRSTS_OFF_RET,	/* gfx_mem */
	},
	.pwrsts_mem_on		= {
		[0]	= PWRSTS_ON,		/* gfx_mem */
	},
};

/* RTC powerdomain: always on, own voltage domain */
static struct powerdomain rtc_33xx_pwrdm = {
	.name			= "rtc_pwrdm",
	.voltdm			= { .name = "rtc" },
	.prcm_offs		= AM33XX_PRM_RTC_MOD,
	.pwrstctrl_offs		= AM33XX_PM_RTC_PWRSTCTRL_OFFSET,
	.pwrstst_offs		= AM33XX_PM_RTC_PWRSTST_OFFSET,
	.pwrsts			= PWRSTS_ON,
	.logicretstate_mask	= AM33XX_LOGICRETSTATE_MASK,
};

/* Wakeup powerdomain: always on */
static struct powerdomain wkup_33xx_pwrdm = {
	.name			= "wkup_pwrdm",
	.voltdm			= { .name = "core" },
	.prcm_offs		= AM33XX_PRM_WKUP_MOD,
	.pwrstctrl_offs		= AM33XX_PM_WKUP_PWRSTCTRL_OFFSET,
	.pwrstst_offs		= AM33XX_PM_WKUP_PWRSTST_OFFSET,
	.pwrsts			= PWRSTS_ON,
	.logicretstate_mask	= AM33XX_LOGICRETSTATE_3_3_MASK,
};

/* Peripheral powerdomain: three memory banks (pruss/per/ram) */
static struct powerdomain per_33xx_pwrdm = {
	.name			= "per_pwrdm",
	.voltdm			= { .name = "core" },
	.prcm_offs		= AM33XX_PRM_PER_MOD,
	.pwrstctrl_offs		= AM33XX_PM_PER_PWRSTCTRL_OFFSET,
	.pwrstst_offs		= AM33XX_PM_PER_PWRSTST_OFFSET,
	.pwrsts			= PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret	= PWRSTS_OFF_RET,
	.flags			= PWRDM_HAS_LOWPOWERSTATECHANGE,
	.banks			= 3,
	.logicretstate_mask	= AM33XX_LOGICRETSTATE_3_3_MASK,
	.mem_on_mask		= {
		[0]	= AM33XX_PRUSS_MEM_ONSTATE_MASK,	/* pruss_mem */
		[1]	= AM33XX_PER_MEM_ONSTATE_MASK,		/* per_mem */
		[2]	= AM33XX_RAM_MEM_ONSTATE_MASK,		/* ram_mem */
	},
	.mem_ret_mask		= {
		[0]	= AM33XX_PRUSS_MEM_RETSTATE_MASK,	/* pruss_mem */
		[1]	= AM33XX_PER_MEM_RETSTATE_MASK,		/* per_mem */
		[2]	= AM33XX_RAM_MEM_RETSTATE_MASK,		/* ram_mem */
	},
	.mem_pwrst_mask		= {
		[0]	= AM33XX_PRUSS_MEM_STATEST_MASK,	/* pruss_mem */
		[1]	= AM33XX_PER_MEM_STATEST_MASK,		/* per_mem */
		[2]	= AM33XX_RAM_MEM_STATEST_MASK,		/* ram_mem */
	},
	.mem_retst_mask		= {
		[0]	= AM33XX_PRUSS_MEM_RETSTATE_MASK,	/* pruss_mem */
		[1]	= AM33XX_PER_MEM_RETSTATE_MASK,		/* per_mem */
		[2]	= AM33XX_RAM_MEM_RETSTATE_MASK,		/* ram_mem */
	},
	.pwrsts_mem_ret		= {
		[0]	= PWRSTS_OFF_RET,	/* pruss_mem */
		[1]	= PWRSTS_OFF_RET,	/* per_mem */
		[2]	= PWRSTS_OFF_RET,	/* ram_mem */
	},
	.pwrsts_mem_on		= {
		[0]	= PWRSTS_ON,		/* pruss_mem */
		[1]	= PWRSTS_ON,		/* per_mem */
		[2]	= PWRSTS_ON,		/* ram_mem */
	},
};

/* MPU powerdomain: three memory banks (L1/L2/RAM) */
static struct powerdomain mpu_33xx_pwrdm = {
	.name			= "mpu_pwrdm",
	.voltdm			= { .name = "mpu" },
	.prcm_offs		= AM33XX_PRM_MPU_MOD,
	.pwrstctrl_offs		= AM33XX_PM_MPU_PWRSTCTRL_OFFSET,
	.pwrstst_offs		= AM33XX_PM_MPU_PWRSTST_OFFSET,
	.pwrsts			= PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret	= PWRSTS_OFF_RET,
	.flags			= PWRDM_HAS_LOWPOWERSTATECHANGE,
	.banks			= 3,
	.logicretstate_mask	= AM33XX_LOGICRETSTATE_MASK,
	.mem_on_mask		= {
		[0]	= AM33XX_MPU_L1_ONSTATE_MASK,	/* mpu_l1 */
		[1]	= AM33XX_MPU_L2_ONSTATE_MASK,	/* mpu_l2 */
		[2]	= AM33XX_MPU_RAM_ONSTATE_MASK,	/* mpu_ram */
	},
	.mem_ret_mask		= {
		[0]	= AM33XX_MPU_L1_RETSTATE_MASK,	/* mpu_l1 */
		[1]	= AM33XX_MPU_L2_RETSTATE_MASK,	/* mpu_l2 */
		[2]	= AM33XX_MPU_RAM_RETSTATE_MASK,	/* mpu_ram */
	},
	.mem_pwrst_mask		= {
		[0]	= AM33XX_MPU_L1_STATEST_MASK,	/* mpu_l1 */
		[1]	= AM33XX_MPU_L2_STATEST_MASK,	/* mpu_l2 */
		[2]	= AM33XX_MPU_RAM_STATEST_MASK,	/* mpu_ram */
	},
	.mem_retst_mask		= {
		[0]	= AM33XX_MPU_L1_RETSTATE_MASK,	/* mpu_l1 */
		[1]	= AM33XX_MPU_L2_RETSTATE_MASK,	/* mpu_l2 */
		[2]	= AM33XX_MPU_RAM_RETSTATE_MASK,	/* mpu_ram */
	},
	.pwrsts_mem_ret		= {
		[0]	= PWRSTS_OFF_RET,	/* mpu_l1 */
		[1]	= PWRSTS_OFF_RET,	/* mpu_l2 */
		[2]	= PWRSTS_OFF_RET,	/* mpu_ram */
	},
	.pwrsts_mem_on		= {
		[0]	= PWRSTS_ON,		/* mpu_l1 */
		[1]	= PWRSTS_ON,		/* mpu_l2 */
		[2]	= PWRSTS_ON,		/* mpu_ram */
	},
};

/* Customer e-fuse powerdomain: OFF/ON only, no retention */
static struct powerdomain cefuse_33xx_pwrdm = {
	.name		= "cefuse_pwrdm",
	.voltdm		= { .name = "core" },
	.prcm_offs	= AM33XX_PRM_CEFUSE_MOD,
	.pwrstctrl_offs	= AM33XX_PM_CEFUSE_PWRSTCTRL_OFFSET,
	.pwrstst_offs	= AM33XX_PM_CEFUSE_PWRSTST_OFFSET,
	.pwrsts		= PWRSTS_OFF_ON,
};

/* NULL-terminated list consumed by pwrdm_register_pwrdms() */
static struct powerdomain *powerdomains_am33xx[] __initdata = {
	&gfx_33xx_pwrdm,
	&rtc_33xx_pwrdm,
	&wkup_33xx_pwrdm,
	&per_33xx_pwrdm,
	&mpu_33xx_pwrdm,
	&cefuse_33xx_pwrdm,
	NULL,
};

/* Register the AM33xx platform operations and powerdomain list */
void __init am33xx_powerdomains_init(void)
{
	pwrdm_register_platform_funcs(&am33xx_pwrdm_operations);
	pwrdm_register_pwrdms(powerdomains_am33xx);
	pwrdm_complete_init();
}
linux-master
arch/arm/mach-omap2/powerdomains33xx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * MSDI IP block reset * * Copyright (C) 2012 Texas Instruments, Inc. * Paul Walmsley * * XXX What about pad muxing? */ #include <linux/kernel.h> #include <linux/err.h> #include "prm.h" #include "common.h" #include "control.h" #include "omap_hwmod.h" #include "omap_device.h" #include "mmc.h" /* * MSDI_CON_OFFSET: offset in bytes of the MSDI IP block's CON register * from the IP block's base address */ #define MSDI_CON_OFFSET 0x0c /* Register bitfields in the CON register */ #define MSDI_CON_POW_MASK BIT(11) #define MSDI_CON_CLKD_MASK (0x3f << 0) #define MSDI_CON_CLKD_SHIFT 0 /* MSDI_TARGET_RESET_CLKD: clock divisor to use throughout the reset */ #define MSDI_TARGET_RESET_CLKD 0x3ff /** * omap_msdi_reset - reset the MSDI IP block * @oh: struct omap_hwmod * * * The MSDI IP block on OMAP2420 has to have both the POW and CLKD * fields set inside its CON register for a reset to complete * successfully. This is not documented in the TRM. For CLKD, we use * the value that results in the lowest possible clock rate, to attempt * to avoid disturbing any cards. */ int omap_msdi_reset(struct omap_hwmod *oh) { u16 v = 0; int c = 0; /* Write to the SOFTRESET bit */ omap_hwmod_softreset(oh); /* Enable the MSDI core and internal clock */ v |= MSDI_CON_POW_MASK; v |= MSDI_TARGET_RESET_CLKD << MSDI_CON_CLKD_SHIFT; omap_hwmod_write(v, oh, MSDI_CON_OFFSET); /* Poll on RESETDONE bit */ omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs) & SYSS_RESETDONE_MASK), MAX_MODULE_SOFTRESET_WAIT, c); if (c == MAX_MODULE_SOFTRESET_WAIT) pr_warn("%s: %s: softreset failed (waited %d usec)\n", __func__, oh->name, MAX_MODULE_SOFTRESET_WAIT); else pr_debug("%s: %s: softreset in %d usec\n", __func__, oh->name, c); /* Disable the MSDI internal clock */ v &= ~MSDI_CON_CLKD_MASK; omap_hwmod_write(v, oh, MSDI_CON_OFFSET); return 0; }
linux-master
arch/arm/mach-omap2/msdi.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP4 Power domains framework * * Copyright (C) 2009-2011 Texas Instruments, Inc. * Copyright (C) 2009-2011 Nokia Corporation * * Abhijit Pagare ([email protected]) * Benoit Cousson ([email protected]) * Paul Walmsley ([email protected]) * * This file is automatically generated from the OMAP hardware databases. * We respectfully ask that any modifications to this file be coordinated * with the public [email protected] mailing list and the * authors above to ensure that the autogeneration scripts are kept * up-to-date with the file contents. */ #include <linux/kernel.h> #include <linux/init.h> #include "powerdomain.h" #include "prcm-common.h" #include "prcm44xx.h" #include "prm-regbits-44xx.h" #include "prm44xx.h" #include "prcm_mpu44xx.h" /* core_44xx_pwrdm: CORE power domain */ static struct powerdomain core_44xx_pwrdm = { .name = "core_pwrdm", .voltdm = { .name = "core" }, .prcm_offs = OMAP4430_PRM_CORE_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 5, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* core_nret_bank */ [1] = PWRSTS_RET, /* core_ocmram */ [2] = PWRSTS_RET, /* core_other_bank */ [3] = PWRSTS_OFF_RET, /* ducati_l2ram */ [4] = PWRSTS_OFF_RET, /* ducati_unicache */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* core_nret_bank */ [1] = PWRSTS_ON, /* core_ocmram */ [2] = PWRSTS_ON, /* core_other_bank */ [3] = PWRSTS_ON, /* ducati_l2ram */ [4] = PWRSTS_ON, /* ducati_unicache */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* gfx_44xx_pwrdm: 3D accelerator power domain */ static struct powerdomain gfx_44xx_pwrdm = { .name = "gfx_pwrdm", .voltdm = { .name = "core" }, .prcm_offs = OMAP4430_PRM_GFX_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_OFF_ON, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* gfx_mem */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* gfx_mem */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* abe_44xx_pwrdm: Audio 
back end power domain */ static struct powerdomain abe_44xx_pwrdm = { .name = "abe_pwrdm", .voltdm = { .name = "iva" }, .prcm_offs = OMAP4430_PRM_ABE_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF, .banks = 2, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* aessmem */ [1] = PWRSTS_OFF, /* periphmem */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* aessmem */ [1] = PWRSTS_ON, /* periphmem */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* dss_44xx_pwrdm: Display subsystem power domain */ static struct powerdomain dss_44xx_pwrdm = { .name = "dss_pwrdm", .voltdm = { .name = "core" }, .prcm_offs = OMAP4430_PRM_DSS_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* dss_mem */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* dss_mem */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* tesla_44xx_pwrdm: Tesla processor power domain */ static struct powerdomain tesla_44xx_pwrdm = { .name = "tesla_pwrdm", .voltdm = { .name = "iva" }, .prcm_offs = OMAP4430_PRM_TESLA_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 3, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* tesla_edma */ [1] = PWRSTS_OFF_RET, /* tesla_l1 */ [2] = PWRSTS_OFF_RET, /* tesla_l2 */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* tesla_edma */ [1] = PWRSTS_ON, /* tesla_l1 */ [2] = PWRSTS_ON, /* tesla_l2 */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* wkup_44xx_pwrdm: Wake-up power domain */ static struct powerdomain wkup_44xx_pwrdm = { .name = "wkup_pwrdm", .voltdm = { .name = "wakeup" }, .prcm_offs = OMAP4430_PRM_WKUP_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_ON, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* wkup_bank */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* wkup_bank */ }, }; /* cpu0_44xx_pwrdm: MPU0 processor and Neon coprocessor power domain */ static 
struct powerdomain cpu0_44xx_pwrdm = { .name = "cpu0_pwrdm", .voltdm = { .name = "mpu" }, .prcm_offs = OMAP4430_PRCM_MPU_CPU0_INST, .prcm_partition = OMAP4430_PRCM_MPU_PARTITION, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, /* cpu0_l1 */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* cpu0_l1 */ }, }; /* cpu1_44xx_pwrdm: MPU1 processor and Neon coprocessor power domain */ static struct powerdomain cpu1_44xx_pwrdm = { .name = "cpu1_pwrdm", .voltdm = { .name = "mpu" }, .prcm_offs = OMAP4430_PRCM_MPU_CPU1_INST, .prcm_partition = OMAP4430_PRCM_MPU_PARTITION, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, /* cpu1_l1 */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* cpu1_l1 */ }, }; /* emu_44xx_pwrdm: Emulation power domain */ static struct powerdomain emu_44xx_pwrdm = { .name = "emu_pwrdm", .voltdm = { .name = "wakeup" }, .prcm_offs = OMAP4430_PRM_EMU_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_OFF_ON, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* emu_bank */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* emu_bank */ }, }; /* mpu_44xx_pwrdm: Modena processor and the Neon coprocessor power domain */ static struct powerdomain mpu_44xx_pwrdm = { .name = "mpu_pwrdm", .voltdm = { .name = "mpu" }, .prcm_offs = OMAP4430_PRM_MPU_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 3, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, /* mpu_l1 */ [1] = PWRSTS_OFF_RET, /* mpu_l2 */ [2] = PWRSTS_RET, /* mpu_ram */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* mpu_l1 */ [1] = PWRSTS_ON, /* mpu_l2 */ [2] = PWRSTS_ON, /* mpu_ram */ }, }; /* ivahd_44xx_pwrdm: IVA-HD power domain */ static struct powerdomain ivahd_44xx_pwrdm = { .name = "ivahd_pwrdm", .voltdm = { .name = "iva" }, .prcm_offs = OMAP4430_PRM_IVAHD_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = 
PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF, .banks = 4, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* hwa_mem */ [1] = PWRSTS_OFF_RET, /* sl2_mem */ [2] = PWRSTS_OFF_RET, /* tcm1_mem */ [3] = PWRSTS_OFF_RET, /* tcm2_mem */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* hwa_mem */ [1] = PWRSTS_ON, /* sl2_mem */ [2] = PWRSTS_ON, /* tcm1_mem */ [3] = PWRSTS_ON, /* tcm2_mem */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* cam_44xx_pwrdm: Camera subsystem power domain */ static struct powerdomain cam_44xx_pwrdm = { .name = "cam_pwrdm", .voltdm = { .name = "core" }, .prcm_offs = OMAP4430_PRM_CAM_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_OFF_ON, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* cam_mem */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* cam_mem */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* l3init_44xx_pwrdm: L3 initators pheripherals power domain */ static struct powerdomain l3init_44xx_pwrdm = { .name = "l3init_pwrdm", .voltdm = { .name = "core" }, .prcm_offs = OMAP4430_PRM_L3INIT_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* l3init_bank1 */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* l3init_bank1 */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* l4per_44xx_pwrdm: Target peripherals power domain */ static struct powerdomain l4per_44xx_pwrdm = { .name = "l4per_pwrdm", .voltdm = { .name = "core" }, .prcm_offs = OMAP4430_PRM_L4PER_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 2, .pwrsts_mem_ret = { [0] = PWRSTS_OFF, /* nonretained_bank */ [1] = PWRSTS_RET, /* retained_bank */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* nonretained_bank */ [1] = PWRSTS_ON, /* retained_bank */ }, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* * always_on_core_44xx_pwrdm: Always ON logic that sits in VDD_CORE voltage * domain */ static struct powerdomain 
always_on_core_44xx_pwrdm = { .name = "always_on_core_pwrdm", .voltdm = { .name = "core" }, .prcm_offs = OMAP4430_PRM_ALWAYS_ON_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_ON, }; /* cefuse_44xx_pwrdm: Customer efuse controller power domain */ static struct powerdomain cefuse_44xx_pwrdm = { .name = "cefuse_pwrdm", .voltdm = { .name = "core" }, .prcm_offs = OMAP4430_PRM_CEFUSE_INST, .prcm_partition = OMAP4430_PRM_PARTITION, .pwrsts = PWRSTS_OFF_ON, .flags = PWRDM_HAS_LOWPOWERSTATECHANGE, }; /* * The following power domains are not under SW control * * always_on_iva * always_on_mpu * stdefuse */ /* As powerdomains are added or removed above, this list must also be changed */ static struct powerdomain *powerdomains_omap44xx[] __initdata = { &core_44xx_pwrdm, &gfx_44xx_pwrdm, &abe_44xx_pwrdm, &dss_44xx_pwrdm, &tesla_44xx_pwrdm, &wkup_44xx_pwrdm, &cpu0_44xx_pwrdm, &cpu1_44xx_pwrdm, &emu_44xx_pwrdm, &mpu_44xx_pwrdm, &ivahd_44xx_pwrdm, &cam_44xx_pwrdm, &l3init_44xx_pwrdm, &l4per_44xx_pwrdm, &always_on_core_44xx_pwrdm, &cefuse_44xx_pwrdm, NULL }; void __init omap44xx_powerdomains_init(void) { pwrdm_register_platform_funcs(&omap4_pwrdm_operations); pwrdm_register_pwrdms(powerdomains_omap44xx); pwrdm_complete_init(); }
linux-master
arch/arm/mach-omap2/powerdomains44xx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * omap_hwmod_common_ipblock_data.c - common IP block data for OMAP2+ * * Copyright (C) 2011 Nokia Corporation * Copyright (C) 2012 Texas Instruments, Inc. * Paul Walmsley */ #include "omap_hwmod.h" #include "omap_hwmod_common_data.h" /* * 'dss' class * display sub-system */ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .sysc_fields = &omap_hwmod_sysc_type1, }; struct omap_hwmod_class omap2_dss_hwmod_class = { .name = "dss", .sysc = &omap2_dss_sysc, .reset = omap_dss_reset, }; /* * 'rfbi' class * remote frame buffer interface */ static struct omap_hwmod_class_sysconfig omap2_rfbi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; struct omap_hwmod_class omap2_rfbi_hwmod_class = { .name = "rfbi", .sysc = &omap2_rfbi_sysc, };
linux-master
arch/arm/mach-omap2/omap_hwmod_common_ipblock_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP Voltage Controller (VC) interface * * Copyright (C) 2011 Texas Instruments, Inc. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/bug.h> #include <linux/io.h> #include <asm/div64.h> #include "iomap.h" #include "soc.h" #include "voltage.h" #include "vc.h" #include "prm-regbits-34xx.h" #include "prm-regbits-44xx.h" #include "prm44xx.h" #include "pm.h" #include "scrm44xx.h" #include "control.h" #define OMAP4430_VDD_IVA_I2C_DISABLE BIT(14) #define OMAP4430_VDD_MPU_I2C_DISABLE BIT(13) #define OMAP4430_VDD_CORE_I2C_DISABLE BIT(12) #define OMAP4430_VDD_IVA_PRESENCE BIT(9) #define OMAP4430_VDD_MPU_PRESENCE BIT(8) #define OMAP4430_AUTO_CTRL_VDD_IVA(x) ((x) << 4) #define OMAP4430_AUTO_CTRL_VDD_MPU(x) ((x) << 2) #define OMAP4430_AUTO_CTRL_VDD_CORE(x) ((x) << 0) #define OMAP4430_AUTO_CTRL_VDD_RET 2 #define OMAP4430_VDD_I2C_DISABLE_MASK \ (OMAP4430_VDD_IVA_I2C_DISABLE | \ OMAP4430_VDD_MPU_I2C_DISABLE | \ OMAP4430_VDD_CORE_I2C_DISABLE) #define OMAP4_VDD_DEFAULT_VAL \ (OMAP4430_VDD_I2C_DISABLE_MASK | \ OMAP4430_VDD_IVA_PRESENCE | OMAP4430_VDD_MPU_PRESENCE | \ OMAP4430_AUTO_CTRL_VDD_IVA(OMAP4430_AUTO_CTRL_VDD_RET) | \ OMAP4430_AUTO_CTRL_VDD_MPU(OMAP4430_AUTO_CTRL_VDD_RET) | \ OMAP4430_AUTO_CTRL_VDD_CORE(OMAP4430_AUTO_CTRL_VDD_RET)) #define OMAP4_VDD_RET_VAL \ (OMAP4_VDD_DEFAULT_VAL & ~OMAP4430_VDD_I2C_DISABLE_MASK) /** * struct omap_vc_channel_cfg - describe the cfg_channel bitfield * @sa: bit for slave address * @rav: bit for voltage configuration register * @rac: bit for command configuration register * @racen: enable bit for RAC * @cmd: bit for command value set selection * * Channel configuration bits, common for OMAP3+ * OMAP3 register: PRM_VC_CH_CONF * OMAP4 register: PRM_VC_CFG_CHANNEL * OMAP5 register: PRM_VC_SMPS_<voltdm>_CONFIG */ struct omap_vc_channel_cfg { u8 sa; u8 rav; u8 rac; u8 racen; u8 cmd; }; static struct omap_vc_channel_cfg vc_default_channel_cfg = { .sa = BIT(0), 
.rav = BIT(1), .rac = BIT(2), .racen = BIT(3), .cmd = BIT(4), }; /* * On OMAP3+, all VC channels have the above default bitfield * configuration, except the OMAP4 MPU channel. This appears * to be a freak accident as every other VC channel has the * default configuration, thus creating a mutant channel config. */ static struct omap_vc_channel_cfg vc_mutant_channel_cfg = { .sa = BIT(0), .rav = BIT(2), .rac = BIT(3), .racen = BIT(4), .cmd = BIT(1), }; static struct omap_vc_channel_cfg *vc_cfg_bits; /* Default I2C trace length on pcb, 6.3cm. Used for capacitance calculations. */ static u32 sr_i2c_pcb_length = 63; #define CFG_CHANNEL_MASK 0x1f /** * omap_vc_config_channel - configure VC channel to PMIC mappings * @voltdm: pointer to voltagdomain defining the desired VC channel * * Configures the VC channel to PMIC mappings for the following * PMIC settings * - i2c slave address (SA) * - voltage configuration address (RAV) * - command configuration address (RAC) and enable bit (RACEN) * - command values for ON, ONLP, RET and OFF (CMD) * * This function currently only allows flexible configuration of the * non-default channel. Starting with OMAP4, there are more than 2 * channels, with one defined as the default (on OMAP4, it's MPU.) * Only the non-default channel can be configured. */ static int omap_vc_config_channel(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; /* * For default channel, the only configurable bit is RACEN. * All others must stay at zero (see function comment above.) 
*/ if (vc->flags & OMAP_VC_CHANNEL_DEFAULT) vc->cfg_channel &= vc_cfg_bits->racen; voltdm->rmw(CFG_CHANNEL_MASK << vc->cfg_channel_sa_shift, vc->cfg_channel << vc->cfg_channel_sa_shift, vc->cfg_channel_reg); return 0; } /* Voltage scale and accessory APIs */ int omap_vc_pre_scale(struct voltagedomain *voltdm, unsigned long target_volt, u8 *target_vsel, u8 *current_vsel) { struct omap_vc_channel *vc = voltdm->vc; u32 vc_cmdval; /* Check if sufficient pmic info is available for this vdd */ if (!voltdm->pmic) { pr_err("%s: Insufficient pmic info to scale the vdd_%s\n", __func__, voltdm->name); return -EINVAL; } if (!voltdm->pmic->uv_to_vsel) { pr_err("%s: PMIC function to convert voltage in uV to vsel not registered. Hence unable to scale voltage for vdd_%s\n", __func__, voltdm->name); return -ENODATA; } if (!voltdm->read || !voltdm->write) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); return -EINVAL; } *target_vsel = voltdm->pmic->uv_to_vsel(target_volt); *current_vsel = voltdm->pmic->uv_to_vsel(voltdm->nominal_volt); /* Setting the ON voltage to the new target voltage */ vc_cmdval = voltdm->read(vc->cmdval_reg); vc_cmdval &= ~vc->common->cmd_on_mask; vc_cmdval |= (*target_vsel << vc->common->cmd_on_shift); voltdm->write(vc_cmdval, vc->cmdval_reg); voltdm->vc_param->on = target_volt; omap_vp_update_errorgain(voltdm, target_volt); return 0; } void omap_vc_post_scale(struct voltagedomain *voltdm, unsigned long target_volt, u8 target_vsel, u8 current_vsel) { u32 smps_steps = 0, smps_delay = 0; smps_steps = abs(target_vsel - current_vsel); /* SMPS slew rate / step size. 2us added as buffer. 
*/ smps_delay = ((smps_steps * voltdm->pmic->step_size) / voltdm->pmic->slew_rate) + 2; udelay(smps_delay); } /* vc_bypass_scale - VC bypass method of voltage scaling */ int omap_vc_bypass_scale(struct voltagedomain *voltdm, unsigned long target_volt) { struct omap_vc_channel *vc = voltdm->vc; u32 loop_cnt = 0, retries_cnt = 0; u32 vc_valid, vc_bypass_val_reg, vc_bypass_value; u8 target_vsel, current_vsel; int ret; ret = omap_vc_pre_scale(voltdm, target_volt, &target_vsel, &current_vsel); if (ret) return ret; vc_valid = vc->common->valid; vc_bypass_val_reg = vc->common->bypass_val_reg; vc_bypass_value = (target_vsel << vc->common->data_shift) | (vc->volt_reg_addr << vc->common->regaddr_shift) | (vc->i2c_slave_addr << vc->common->slaveaddr_shift); voltdm->write(vc_bypass_value, vc_bypass_val_reg); voltdm->write(vc_bypass_value | vc_valid, vc_bypass_val_reg); vc_bypass_value = voltdm->read(vc_bypass_val_reg); /* * Loop till the bypass command is acknowledged from the SMPS. * NOTE: This is legacy code. The loop count and retry count needs * to be revisited. 
*/ while (!(vc_bypass_value & vc_valid)) { loop_cnt++; if (retries_cnt > 10) { pr_warn("%s: Retry count exceeded\n", __func__); return -ETIMEDOUT; } if (loop_cnt > 50) { retries_cnt++; loop_cnt = 0; udelay(10); } vc_bypass_value = voltdm->read(vc_bypass_val_reg); } omap_vc_post_scale(voltdm, target_volt, target_vsel, current_vsel); return 0; } /* Convert microsecond value to number of 32kHz clock cycles */ static inline u32 omap_usec_to_32k(u32 usec) { return DIV_ROUND_UP_ULL(32768ULL * (u64)usec, 1000000ULL); } struct omap3_vc_timings { u32 voltsetup1; u32 voltsetup2; }; struct omap3_vc { struct voltagedomain *vd; u32 voltctrl; u32 voltsetup1; u32 voltsetup2; struct omap3_vc_timings timings[2]; }; static struct omap3_vc vc; void omap3_vc_set_pmic_signaling(int core_next_state) { struct voltagedomain *vd = vc.vd; struct omap3_vc_timings *c = vc.timings; u32 voltctrl, voltsetup1, voltsetup2; voltctrl = vc.voltctrl; voltsetup1 = vc.voltsetup1; voltsetup2 = vc.voltsetup2; switch (core_next_state) { case PWRDM_POWER_OFF: voltctrl &= ~(OMAP3430_PRM_VOLTCTRL_AUTO_RET | OMAP3430_PRM_VOLTCTRL_AUTO_SLEEP); voltctrl |= OMAP3430_PRM_VOLTCTRL_AUTO_OFF; if (voltctrl & OMAP3430_PRM_VOLTCTRL_SEL_OFF) voltsetup2 = c->voltsetup2; else voltsetup1 = c->voltsetup1; break; case PWRDM_POWER_RET: default: c++; voltctrl &= ~(OMAP3430_PRM_VOLTCTRL_AUTO_OFF | OMAP3430_PRM_VOLTCTRL_AUTO_SLEEP); voltctrl |= OMAP3430_PRM_VOLTCTRL_AUTO_RET; voltsetup1 = c->voltsetup1; break; } if (voltctrl != vc.voltctrl) { vd->write(voltctrl, OMAP3_PRM_VOLTCTRL_OFFSET); vc.voltctrl = voltctrl; } if (voltsetup1 != vc.voltsetup1) { vd->write(c->voltsetup1, OMAP3_PRM_VOLTSETUP1_OFFSET); vc.voltsetup1 = voltsetup1; } if (voltsetup2 != vc.voltsetup2) { vd->write(c->voltsetup2, OMAP3_PRM_VOLTSETUP2_OFFSET); vc.voltsetup2 = voltsetup2; } } void omap4_vc_set_pmic_signaling(int core_next_state) { struct voltagedomain *vd = vc.vd; u32 val; if (!vd) return; switch (core_next_state) { case PWRDM_POWER_RET: val = 
OMAP4_VDD_RET_VAL; break; default: val = OMAP4_VDD_DEFAULT_VAL; break; } vd->write(val, OMAP4_PRM_VOLTCTRL_OFFSET); } /* * Configure signal polarity for sys_clkreq and sys_off_mode pins * as the default values are wrong and can cause the system to hang * if any twl4030 scripts are loaded. */ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm) { u32 val; if (vc.vd) return; vc.vd = voltdm; val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET); if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) || (val & OMAP3430_PRM_POLCTRL_OFFMODE_POL)) { val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL; val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL; pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n", val); voltdm->write(val, OMAP3_PRM_POLCTRL_OFFSET); } /* * By default let's use I2C4 signaling for retention idle * and sys_off_mode pin signaling for off idle. This way we * have sys_clk_req pin go down for retention and both * sys_clk_req and sys_off_mode pins will go down for off * idle. And we can also scale voltages to zero for off-idle. * Note that no actual voltage scaling during off-idle will * happen unless the board specific twl4030 PMIC scripts are * loaded. See also omap_vc_i2c_init for comments regarding * erratum i531. 
*/ val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET); if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) { val |= OMAP3430_PRM_VOLTCTRL_SEL_OFF; pr_debug("PM: setting voltctrl sys_off_mode signaling to 0x%x\n", val); voltdm->write(val, OMAP3_PRM_VOLTCTRL_OFFSET); } vc.voltctrl = val; omap3_vc_set_pmic_signaling(PWRDM_POWER_ON); } static void omap3_init_voltsetup1(struct voltagedomain *voltdm, struct omap3_vc_timings *c, u32 idle) { unsigned long val; val = (voltdm->vc_param->on - idle) / voltdm->pmic->slew_rate; val *= voltdm->sys_clk.rate / 8 / 1000000 + 1; val <<= __ffs(voltdm->vfsm->voltsetup_mask); c->voltsetup1 &= ~voltdm->vfsm->voltsetup_mask; c->voltsetup1 |= val; } /** * omap3_set_i2c_timings - sets i2c sleep timings for a channel * @voltdm: channel to configure * @off_mode: select whether retention or off mode values used * * Calculates and sets up voltage controller to use I2C based * voltage scaling for sleep modes. This can be used for either off mode * or retention. Off mode has additionally an option to use sys_off_mode * pad, which uses a global signal to program the whole power IC to * off-mode. * * Note that pmic is not controlling the voltage scaling during * retention signaled over I2C4, so we can keep voltsetup2 as 0. * And the oscillator is not shut off over I2C4, so no need to * set clksetup. */ static void omap3_set_i2c_timings(struct voltagedomain *voltdm) { struct omap3_vc_timings *c = vc.timings; /* Configure PRWDM_POWER_OFF over I2C4 */ omap3_init_voltsetup1(voltdm, c, voltdm->vc_param->off); c++; /* Configure PRWDM_POWER_RET over I2C4 */ omap3_init_voltsetup1(voltdm, c, voltdm->vc_param->ret); } /** * omap3_set_off_timings - sets off-mode timings for a channel * @voltdm: channel to configure * * Calculates and sets up off-mode timings for a channel. 
Off-mode * can use either I2C based voltage scaling, or alternatively * sys_off_mode pad can be used to send a global command to power IC.n, * sys_off_mode has the additional benefit that voltages can be * scaled to zero volt level with TWL4030 / TWL5030, I2C can only * scale to 600mV. * * Note that omap is not controlling the voltage scaling during * off idle signaled by sys_off_mode, so we can keep voltsetup1 * as 0. */ static void omap3_set_off_timings(struct voltagedomain *voltdm) { struct omap3_vc_timings *c = vc.timings; u32 tstart, tshut, clksetup, voltoffset; if (c->voltsetup2) return; omap_pm_get_oscillator(&tstart, &tshut); if (tstart == ULONG_MAX) { pr_debug("PM: oscillator start-up time not initialized, using 10ms\n"); clksetup = omap_usec_to_32k(10000); } else { clksetup = omap_usec_to_32k(tstart); } /* * For twl4030 errata 27, we need to allow minimum ~488.32 us wait to * switch from HFCLKIN to internal oscillator. That means timings * have voltoffset fixed to 0xa in rounded up 32 KiHz cycles. And * that means we can calculate the value based on the oscillator * start-up time since voltoffset2 = clksetup - voltoffset. */ voltoffset = omap_usec_to_32k(488); c->voltsetup2 = clksetup - voltoffset; voltdm->write(clksetup, OMAP3_PRM_CLKSETUP_OFFSET); voltdm->write(voltoffset, OMAP3_PRM_VOLTOFFSET_OFFSET); } static void __init omap3_vc_init_channel(struct voltagedomain *voltdm) { omap3_vc_init_pmic_signaling(voltdm); omap3_set_off_timings(voltdm); omap3_set_i2c_timings(voltdm); } /** * omap4_calc_volt_ramp - calculates voltage ramping delays on omap4 * @voltdm: channel to calculate values for * @voltage_diff: voltage difference in microvolts * * Calculates voltage ramp prescaler + counter values for a voltage * difference on omap4. Returns a field value suitable for writing to * VOLTSETUP register for a channel in following format: * bits[8:9] prescaler ... bits[0:5] counter. See OMAP4 TRM for reference. 
*/ static u32 omap4_calc_volt_ramp(struct voltagedomain *voltdm, u32 voltage_diff) { u32 prescaler; u32 cycles; u32 time; time = voltage_diff / voltdm->pmic->slew_rate; cycles = voltdm->sys_clk.rate / 1000 * time / 1000; cycles /= 64; prescaler = 0; /* shift to next prescaler until no overflow */ /* scale for div 256 = 64 * 4 */ if (cycles > 63) { cycles /= 4; prescaler++; } /* scale for div 512 = 256 * 2 */ if (cycles > 63) { cycles /= 2; prescaler++; } /* scale for div 2048 = 512 * 4 */ if (cycles > 63) { cycles /= 4; prescaler++; } /* check for overflow => invalid ramp time */ if (cycles > 63) { pr_warn("%s: invalid setuptime for vdd_%s\n", __func__, voltdm->name); return 0; } cycles++; return (prescaler << OMAP4430_RAMP_UP_PRESCAL_SHIFT) | (cycles << OMAP4430_RAMP_UP_COUNT_SHIFT); } /** * omap4_usec_to_val_scrm - convert microsecond value to SCRM module bitfield * @usec: microseconds * @shift: number of bits to shift left * @mask: bitfield mask * * Converts microsecond value to OMAP4 SCRM bitfield. Bitfield is * shifted to requested position, and checked agains the mask value. * If larger, forced to the max value of the field (i.e. the mask itself.) * Returns the SCRM bitfield value. */ static u32 omap4_usec_to_val_scrm(u32 usec, int shift, u32 mask) { u32 val; val = omap_usec_to_32k(usec) << shift; /* Check for overflow, if yes, force to max value */ if (val > mask) val = mask; return val; } /** * omap4_set_timings - set voltage ramp timings for a channel * @voltdm: channel to configure * @off_mode: whether off-mode values are used * * Calculates and sets the voltage ramp up / down values for a channel. 
*/ static void omap4_set_timings(struct voltagedomain *voltdm, bool off_mode) { u32 val; u32 ramp; int offset; u32 tstart, tshut; if (off_mode) { ramp = omap4_calc_volt_ramp(voltdm, voltdm->vc_param->on - voltdm->vc_param->off); offset = voltdm->vfsm->voltsetup_off_reg; } else { ramp = omap4_calc_volt_ramp(voltdm, voltdm->vc_param->on - voltdm->vc_param->ret); offset = voltdm->vfsm->voltsetup_reg; } if (!ramp) return; val = voltdm->read(offset); val |= ramp << OMAP4430_RAMP_DOWN_COUNT_SHIFT; val |= ramp << OMAP4430_RAMP_UP_COUNT_SHIFT; voltdm->write(val, offset); omap_pm_get_oscillator(&tstart, &tshut); val = omap4_usec_to_val_scrm(tstart, OMAP4_SETUPTIME_SHIFT, OMAP4_SETUPTIME_MASK); val |= omap4_usec_to_val_scrm(tshut, OMAP4_DOWNTIME_SHIFT, OMAP4_DOWNTIME_MASK); writel_relaxed(val, OMAP4_SCRM_CLKSETUPTIME); } static void __init omap4_vc_init_pmic_signaling(struct voltagedomain *voltdm) { if (vc.vd) return; vc.vd = voltdm; voltdm->write(OMAP4_VDD_DEFAULT_VAL, OMAP4_PRM_VOLTCTRL_OFFSET); } /* OMAP4 specific voltage init functions */ static void __init omap4_vc_init_channel(struct voltagedomain *voltdm) { omap4_vc_init_pmic_signaling(voltdm); omap4_set_timings(voltdm, true); omap4_set_timings(voltdm, false); } struct i2c_init_data { u8 loadbits; u8 load; u8 hsscll_38_4; u8 hsscll_26; u8 hsscll_19_2; u8 hsscll_16_8; u8 hsscll_12; }; static const struct i2c_init_data omap4_i2c_timing_data[] __initconst = { { .load = 50, .loadbits = 0x3, .hsscll_38_4 = 13, .hsscll_26 = 11, .hsscll_19_2 = 9, .hsscll_16_8 = 9, .hsscll_12 = 8, }, { .load = 25, .loadbits = 0x2, .hsscll_38_4 = 13, .hsscll_26 = 11, .hsscll_19_2 = 9, .hsscll_16_8 = 9, .hsscll_12 = 8, }, { .load = 12, .loadbits = 0x1, .hsscll_38_4 = 11, .hsscll_26 = 10, .hsscll_19_2 = 9, .hsscll_16_8 = 9, .hsscll_12 = 8, }, { .load = 0, .loadbits = 0x0, .hsscll_38_4 = 12, .hsscll_26 = 10, .hsscll_19_2 = 9, .hsscll_16_8 = 8, .hsscll_12 = 8, }, }; /** * omap4_vc_i2c_timing_init - sets up board I2C timing parameters * @voltdm: 
voltagedomain pointer to get data from * * Use PMIC + board supplied settings for calculating the total I2C * channel capacitance and set the timing parameters based on this. * Pre-calculated values are provided in data tables, as it is not * too straightforward to calculate these runtime. */ static void __init omap4_vc_i2c_timing_init(struct voltagedomain *voltdm) { u32 capacitance; u32 val; u16 hsscll; const struct i2c_init_data *i2c_data; if (!voltdm->pmic->i2c_high_speed) { pr_info("%s: using bootloader low-speed timings\n", __func__); return; } /* PCB trace capacitance, 0.125pF / mm => mm / 8 */ capacitance = DIV_ROUND_UP(sr_i2c_pcb_length, 8); /* OMAP pad capacitance */ capacitance += 4; /* PMIC pad capacitance */ capacitance += voltdm->pmic->i2c_pad_load; /* Search for capacitance match in the table */ i2c_data = omap4_i2c_timing_data; while (i2c_data->load > capacitance) i2c_data++; /* Select proper values based on sysclk frequency */ switch (voltdm->sys_clk.rate) { case 38400000: hsscll = i2c_data->hsscll_38_4; break; case 26000000: hsscll = i2c_data->hsscll_26; break; case 19200000: hsscll = i2c_data->hsscll_19_2; break; case 16800000: hsscll = i2c_data->hsscll_16_8; break; case 12000000: hsscll = i2c_data->hsscll_12; break; default: pr_warn("%s: unsupported sysclk rate: %d!\n", __func__, voltdm->sys_clk.rate); return; } /* Loadbits define pull setup for the I2C channels */ val = i2c_data->loadbits << 25 | i2c_data->loadbits << 29; /* Write to SYSCTRL_PADCONF_WKUP_CTRL_I2C_2 to setup I2C pull */ writel_relaxed(val, OMAP2_L4_IO_ADDRESS(OMAP4_CTRL_MODULE_PAD_WKUP + OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_I2C_2)); /* HSSCLH can always be zero */ val = hsscll << OMAP4430_HSSCLL_SHIFT; val |= (0x28 << OMAP4430_SCLL_SHIFT | 0x2c << OMAP4430_SCLH_SHIFT); /* Write setup times to I2C config register */ voltdm->write(val, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET); } /** * omap_vc_i2c_init - initialize I2C interface to PMIC * @voltdm: voltage domain containing VC data * * Use 
PMIC supplied settings for I2C high-speed mode and * master code (if set) and program the VC I2C configuration * register. * * The VC I2C configuration is common to all VC channels, * so this function only configures I2C for the first VC * channel registers. All other VC channels will use the * same configuration. */ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; static bool initialized; static bool i2c_high_speed; u8 mcode; if (initialized) { if (voltdm->pmic->i2c_high_speed != i2c_high_speed) pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n", __func__, voltdm->name, i2c_high_speed); return; } /* * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around * erratum i531 "Extra Power Consumed When Repeated Start Operation * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)". * Otherwise I2C4 eventually leads into about 23mW extra power being * consumed even during off idle using VMODE. */ i2c_high_speed = voltdm->pmic->i2c_high_speed; if (i2c_high_speed) voltdm->rmw(vc->common->i2c_cfg_clear_mask, vc->common->i2c_cfg_hsen_mask, vc->common->i2c_cfg_reg); mcode = voltdm->pmic->i2c_mcode; if (mcode) voltdm->rmw(vc->common->i2c_mcode_mask, mcode << __ffs(vc->common->i2c_mcode_mask), vc->common->i2c_cfg_reg); if (cpu_is_omap44xx()) omap4_vc_i2c_timing_init(voltdm); initialized = true; } /** * omap_vc_calc_vsel - calculate vsel value for a channel * @voltdm: channel to calculate value for * @uvolt: microvolt value to convert to vsel * * Converts a microvolt value to vsel value for the used PMIC. * This checks whether the microvolt value is out of bounds, and * adjusts the value accordingly. If unsupported value detected, * warning is thrown. 
*/ static u8 omap_vc_calc_vsel(struct voltagedomain *voltdm, u32 uvolt) { if (voltdm->pmic->vddmin > uvolt) uvolt = voltdm->pmic->vddmin; if (voltdm->pmic->vddmax < uvolt) { WARN(1, "%s: voltage not supported by pmic: %u vs max %u\n", __func__, uvolt, voltdm->pmic->vddmax); /* Lets try maximum value anyway */ uvolt = voltdm->pmic->vddmax; } return voltdm->pmic->uv_to_vsel(uvolt); } void __init omap_vc_init_channel(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; u8 on_vsel, onlp_vsel, ret_vsel, off_vsel; u32 val; if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) { pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name); return; } if (!voltdm->read || !voltdm->write) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); return; } vc->cfg_channel = 0; if (vc->flags & OMAP_VC_CHANNEL_CFG_MUTANT) vc_cfg_bits = &vc_mutant_channel_cfg; else vc_cfg_bits = &vc_default_channel_cfg; /* get PMIC/board specific settings */ vc->i2c_slave_addr = voltdm->pmic->i2c_slave_addr; vc->volt_reg_addr = voltdm->pmic->volt_reg_addr; vc->cmd_reg_addr = voltdm->pmic->cmd_reg_addr; /* Configure the i2c slave address for this VC */ voltdm->rmw(vc->smps_sa_mask, vc->i2c_slave_addr << __ffs(vc->smps_sa_mask), vc->smps_sa_reg); vc->cfg_channel |= vc_cfg_bits->sa; /* * Configure the PMIC register addresses. 
*/ voltdm->rmw(vc->smps_volra_mask, vc->volt_reg_addr << __ffs(vc->smps_volra_mask), vc->smps_volra_reg); vc->cfg_channel |= vc_cfg_bits->rav; if (vc->cmd_reg_addr) { voltdm->rmw(vc->smps_cmdra_mask, vc->cmd_reg_addr << __ffs(vc->smps_cmdra_mask), vc->smps_cmdra_reg); vc->cfg_channel |= vc_cfg_bits->rac; } if (vc->cmd_reg_addr == vc->volt_reg_addr) vc->cfg_channel |= vc_cfg_bits->racen; /* Set up the on, inactive, retention and off voltage */ on_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->on); onlp_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->onlp); ret_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->ret); off_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->off); val = ((on_vsel << vc->common->cmd_on_shift) | (onlp_vsel << vc->common->cmd_onlp_shift) | (ret_vsel << vc->common->cmd_ret_shift) | (off_vsel << vc->common->cmd_off_shift)); voltdm->write(val, vc->cmdval_reg); vc->cfg_channel |= vc_cfg_bits->cmd; /* Channel configuration */ omap_vc_config_channel(voltdm); omap_vc_i2c_init(voltdm); if (cpu_is_omap34xx()) omap3_vc_init_channel(voltdm); else if (cpu_is_omap44xx()) omap4_vc_init_channel(voltdm); }
linux-master
arch/arm/mach-omap2/vc.c
// SPDX-License-Identifier: GPL-2.0-only /* * omap4-restart.c - Common to OMAP4 and OMAP5 */ #include <linux/types.h> #include <linux/reboot.h> #include "common.h" #include "prm.h" /** * omap44xx_restart - trigger a software restart of the SoC * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c * @cmd: passed from the userspace program rebooting the system (if provided) * * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ void omap44xx_restart(enum reboot_mode mode, const char *cmd) { /* XXX Should save 'cmd' into scratchpad for use after reboot */ omap_prm_reset_system(); }
linux-master
arch/arm/mach-omap2/omap4-restart.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4 Voltage Processor (VP) data
 *
 * Copyright (C) 2007, 2010 Texas Instruments, Inc.
 * Rajendra Nayak <[email protected]>
 * Lesly A M <[email protected]>
 * Thara Gopinath <[email protected]>
 *
 * Copyright (C) 2008, 2011 Nokia Corporation
 * Kalle Jokiniemi
 * Paul Walmsley
 */
#include <linux/io.h>
#include <linux/err.h>
#include <linux/init.h>

#include "common.h"

#include "prm44xx.h"
#include "prm-regbits-44xx.h"
#include "voltage.h"

#include "vp.h"

/* TX-done handling is delegated to the common PRM VP helpers */
static const struct omap_vp_ops omap4_vp_ops = {
        .check_txdone = omap_prm_vp_check_txdone,
        .clear_txdone = omap_prm_vp_clear_txdone,
};

/*
 * VP data common to 44xx chips
 * XXX This stuff presumably belongs in the vp44xx.c or vp.c file.
 */
static const struct omap_vp_common omap4_vp_common = {
        .vpconfig_erroroffset_mask = OMAP4430_ERROROFFSET_MASK,
        .vpconfig_errorgain_mask = OMAP4430_ERRORGAIN_MASK,
        .vpconfig_initvoltage_mask = OMAP4430_INITVOLTAGE_MASK,
        .vpconfig_timeouten = OMAP4430_TIMEOUTEN_MASK,
        .vpconfig_initvdd = OMAP4430_INITVDD_MASK,
        .vpconfig_forceupdate = OMAP4430_FORCEUPDATE_MASK,
        .vpconfig_vpenable = OMAP4430_VPENABLE_MASK,
        .vstepmin_smpswaittimemin_shift = OMAP4430_SMPSWAITTIMEMIN_SHIFT,
        .vstepmax_smpswaittimemax_shift = OMAP4430_SMPSWAITTIMEMAX_SHIFT,
        .vstepmin_stepmin_shift = OMAP4430_VSTEPMIN_SHIFT,
        .vstepmax_stepmax_shift = OMAP4430_VSTEPMAX_SHIFT,
        .vlimitto_vddmin_shift = OMAP4430_VDDMIN_SHIFT,
        .vlimitto_vddmax_shift = OMAP4430_VDDMAX_SHIFT,
        .vlimitto_timeout_shift = OMAP4430_TIMEOUT_SHIFT,
        .vpvoltage_mask = OMAP4430_VPVOLTAGE_MASK,
        .ops = &omap4_vp_ops,
};

/* Per-domain VP instances: PRM register offsets for MPU/IVA/CORE rails */
struct omap_vp_instance omap4_vp_mpu = {
        .id = OMAP4_VP_VDD_MPU_ID,
        .common = &omap4_vp_common,
        .vpconfig = OMAP4_PRM_VP_MPU_CONFIG_OFFSET,
        .vstepmin = OMAP4_PRM_VP_MPU_VSTEPMIN_OFFSET,
        .vstepmax = OMAP4_PRM_VP_MPU_VSTEPMAX_OFFSET,
        .vlimitto = OMAP4_PRM_VP_MPU_VLIMITTO_OFFSET,
        .vstatus = OMAP4_PRM_VP_MPU_STATUS_OFFSET,
        .voltage = OMAP4_PRM_VP_MPU_VOLTAGE_OFFSET,
};

struct omap_vp_instance omap4_vp_iva = {
        .id = OMAP4_VP_VDD_IVA_ID,
        .common = &omap4_vp_common,
        .vpconfig = OMAP4_PRM_VP_IVA_CONFIG_OFFSET,
        .vstepmin = OMAP4_PRM_VP_IVA_VSTEPMIN_OFFSET,
        .vstepmax = OMAP4_PRM_VP_IVA_VSTEPMAX_OFFSET,
        .vlimitto = OMAP4_PRM_VP_IVA_VLIMITTO_OFFSET,
        .vstatus = OMAP4_PRM_VP_IVA_STATUS_OFFSET,
        .voltage = OMAP4_PRM_VP_IVA_VOLTAGE_OFFSET,
};

struct omap_vp_instance omap4_vp_core = {
        .id = OMAP4_VP_VDD_CORE_ID,
        .common = &omap4_vp_common,
        .vpconfig = OMAP4_PRM_VP_CORE_CONFIG_OFFSET,
        .vstepmin = OMAP4_PRM_VP_CORE_VSTEPMIN_OFFSET,
        .vstepmax = OMAP4_PRM_VP_CORE_VSTEPMAX_OFFSET,
        .vlimitto = OMAP4_PRM_VP_CORE_VLIMITTO_OFFSET,
        .vstatus = OMAP4_PRM_VP_CORE_STATUS_OFFSET,
        .voltage = OMAP4_PRM_VP_CORE_VOLTAGE_OFFSET,
};

/* Per-rail voltage limits used to program VLIMITTO */
struct omap_vp_param omap4_mpu_vp_data = {
        .vddmin = OMAP4_VP_MPU_VLIMITTO_VDDMIN,
        .vddmax = OMAP4_VP_MPU_VLIMITTO_VDDMAX,
};

struct omap_vp_param omap4_iva_vp_data = {
        .vddmin = OMAP4_VP_IVA_VLIMITTO_VDDMIN,
        .vddmax = OMAP4_VP_IVA_VLIMITTO_VDDMAX,
};

struct omap_vp_param omap4_core_vp_data = {
        .vddmin = OMAP4_VP_CORE_VLIMITTO_VDDMIN,
        .vddmax = OMAP4_VP_CORE_VLIMITTO_VDDMAX,
};
linux-master
arch/arm/mach-omap2/vp44xx_data.c
// SPDX-License-Identifier: GPL-2.0
/*
 * OMAP2420 clockdomains
 *
 * Copyright (C) 2008-2011 Texas Instruments, Inc.
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Paul Walmsley, Jouni Högander
 *
 * This file contains clockdomains and clockdomain wakeup dependencies
 * for OMAP2420 chips.  Some notes:
 *
 * A useful validation rule for struct clockdomain: Any clockdomain
 * referenced by a wkdep_srcs must have a dep_bit assigned.  So
 * wkdep_srcs are really just software-controllable dependencies.
 * Non-software-controllable dependencies do exist, but they are not
 * encoded below (yet).
 *
 * 24xx does not support programmable sleep dependencies (SLEEPDEP)
 *
 * The overly-specific dep_bit names are due to a bit name collision
 * with CM_FCLKEN_{DSP,IVA2}. The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift
 * value are the same for all powerdomains: 2
 *
 * XXX should dep_bit be a mask, so we can test to see if it is 0 as a
 * sanity check?
 * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE
 */

/*
 * To-Do List
 * -> Port the Sleep/Wakeup dependencies for the domains
 *    from the Power domain framework
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "soc.h"
#include "clockdomain.h"
#include "prm2xxx_3xxx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-24xx.h"
#include "prm-regbits-24xx.h"

/*
 * Clockdomain dependencies for wkdeps
 *
 * XXX Hardware dependencies (e.g., dependencies that cannot be
 * changed in software) are not included here yet, but should be.
 */

/* Wakeup dependency source arrays */

/* 2420-specific possible wakeup dependencies */

/* 2420 PM_WKDEP_MPU: CORE, DSP, WKUP */
static struct clkdm_dep mpu_2420_wkdeps[] = {
        { .clkdm_name = "core_l3_clkdm" },
        { .clkdm_name = "core_l4_clkdm" },
        { .clkdm_name = "dsp_clkdm" },
        { .clkdm_name = "wkup_clkdm" },
        { NULL },
};

/* 2420 PM_WKDEP_CORE: DSP, GFX, MPU, WKUP */
static struct clkdm_dep core_2420_wkdeps[] = {
        { .clkdm_name = "dsp_clkdm" },
        { .clkdm_name = "gfx_clkdm" },
        { .clkdm_name = "mpu_clkdm" },
        { .clkdm_name = "wkup_clkdm" },
        { NULL },
};

/*
 * 2420-only clockdomains
 */

static struct clockdomain mpu_2420_clkdm = {
        .name = "mpu_clkdm",
        .pwrdm = { .name = "mpu_pwrdm" },
        .flags = CLKDM_CAN_HWSUP,
        .wkdep_srcs = mpu_2420_wkdeps,
        .clktrctrl_mask = OMAP24XX_AUTOSTATE_MPU_MASK,
};

/* NOTE: shares "dsp_pwrdm" with dsp_2420_clkdm below;
 * wkdep_srcs comes from the shared 24xx data (dsp_24xx_wkdeps). */
static struct clockdomain iva1_2420_clkdm = {
        .name = "iva1_clkdm",
        .pwrdm = { .name = "dsp_pwrdm" },
        .flags = CLKDM_CAN_HWSUP_SWSUP,
        .dep_bit = OMAP24XX_PM_WKDEP_MPU_EN_DSP_SHIFT,
        .wkdep_srcs = dsp_24xx_wkdeps,
        .clktrctrl_mask = OMAP2420_AUTOSTATE_IVA_MASK,
};

static struct clockdomain dsp_2420_clkdm = {
        .name = "dsp_clkdm",
        .pwrdm = { .name = "dsp_pwrdm" },
        .flags = CLKDM_CAN_HWSUP_SWSUP,
        .clktrctrl_mask = OMAP24XX_AUTOSTATE_DSP_MASK,
};

static struct clockdomain gfx_2420_clkdm = {
        .name = "gfx_clkdm",
        .pwrdm = { .name = "gfx_pwrdm" },
        .flags = CLKDM_CAN_HWSUP_SWSUP,
        .wkdep_srcs = gfx_24xx_wkdeps,
        .clktrctrl_mask = OMAP24XX_AUTOSTATE_GFX_MASK,
};

static struct clockdomain core_l3_2420_clkdm = {
        .name = "core_l3_clkdm",
        .pwrdm = { .name = "core_pwrdm" },
        .flags = CLKDM_CAN_HWSUP,
        .wkdep_srcs = core_2420_wkdeps,
        .clktrctrl_mask = OMAP24XX_AUTOSTATE_L3_MASK,
};

static struct clockdomain core_l4_2420_clkdm = {
        .name = "core_l4_clkdm",
        .pwrdm = { .name = "core_pwrdm" },
        .flags = CLKDM_CAN_HWSUP,
        .wkdep_srcs = core_2420_wkdeps,
        .clktrctrl_mask = OMAP24XX_AUTOSTATE_L4_MASK,
};

static struct clockdomain dss_2420_clkdm = {
        .name = "dss_clkdm",
        .pwrdm = { .name = "core_pwrdm" },
        .flags = CLKDM_CAN_HWSUP,
        .clktrctrl_mask = OMAP24XX_AUTOSTATE_DSS_MASK,
};

/* NULL-terminated list handed to the clockdomain core at init */
static struct clockdomain *clockdomains_omap242x[] __initdata = {
        &wkup_common_clkdm,
        &mpu_2420_clkdm,
        &iva1_2420_clkdm,
        &dsp_2420_clkdm,
        &gfx_2420_clkdm,
        &core_l3_2420_clkdm,
        &core_l4_2420_clkdm,
        &dss_2420_clkdm,
        NULL,
};

/* Register all 2420 clockdomains with the clockdomain framework. */
void __init omap242x_clockdomains_init(void)
{
        if (!cpu_is_omap242x())
                return;

        clkdm_register_platform_funcs(&omap2_clkdm_operations);
        clkdm_register_clkdms(clockdomains_omap242x);
        clkdm_complete_init();
}
linux-master
arch/arm/mach-omap2/clockdomains2420_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-omap2/cpuidle34xx.c
 *
 * OMAP3 CPU IDLE Routines
 *
 * Copyright (C) 2008 Texas Instruments, Inc.
 * Rajendra Nayak <[email protected]>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Karthik Dasu <[email protected]>
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <[email protected]>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <[email protected]>
 *
 * Based on pm.c for omap2
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/cpu_pm.h>
#include <asm/cpuidle.h>

#include "powerdomain.h"
#include "clockdomain.h"

#include "pm.h"
#include "control.h"
#include "common.h"
#include "soc.h"

/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
        u8 mpu_state;       /* target MPU powerdomain state for this C-state */
        u8 core_state;      /* target CORE powerdomain state for this C-state */
        u8 per_min_state;   /* deepest PER state allowed while in this C-state */
        u8 flags;           /* OMAP_CPUIDLE_CX_* bits */
};

static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;

/*
 * Possible flag bits for struct omap3_idle_statedata.flags:
 *
 * OMAP_CPUIDLE_CX_NO_CLKDM_IDLE: don't allow the MPU clockdomain to go
 *    inactive. This in turn prevents the MPU DPLL from entering autoidle
 *    mode, so wakeup latency is greatly reduced, at the cost of additional
 *    energy consumption.  This also prevents the CORE clockdomain from
 *    entering idle.
 */
#define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE          BIT(0)

/*
 * Prevent PER OFF if CORE is not in RETention or OFF as this would
 * disable PER wakeups completely.
 *
 * Table index corresponds to the cpuidle driver state index (C1..C7).
 */
static struct omap3_idle_statedata omap3_idle_data[] = {
        {
                .mpu_state = PWRDM_POWER_ON,
                .core_state = PWRDM_POWER_ON,
                /* In C1 do not allow PER state lower than CORE state */
                .per_min_state = PWRDM_POWER_ON,
                .flags = OMAP_CPUIDLE_CX_NO_CLKDM_IDLE,
        },
        {
                .mpu_state = PWRDM_POWER_ON,
                .core_state = PWRDM_POWER_ON,
                .per_min_state = PWRDM_POWER_RET,
        },
        {
                .mpu_state = PWRDM_POWER_RET,
                .core_state = PWRDM_POWER_ON,
                .per_min_state = PWRDM_POWER_RET,
        },
        {
                .mpu_state = PWRDM_POWER_OFF,
                .core_state = PWRDM_POWER_ON,
                .per_min_state = PWRDM_POWER_RET,
        },
        {
                .mpu_state = PWRDM_POWER_RET,
                .core_state = PWRDM_POWER_RET,
                .per_min_state = PWRDM_POWER_OFF,
        },
        {
                .mpu_state = PWRDM_POWER_OFF,
                .core_state = PWRDM_POWER_RET,
                .per_min_state = PWRDM_POWER_OFF,
        },
        {
                .mpu_state = PWRDM_POWER_OFF,
                .core_state = PWRDM_POWER_OFF,
                .per_min_state = PWRDM_POWER_OFF,
        },
};

/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv,
                            int index)
{
        struct omap3_idle_statedata *cx = &omap3_idle_data[index];
        int error;

        /* Bail out early if a wakeup is already pending */
        if (omap_irq_pending() || need_resched())
                goto return_sleep_time;

        /* Deny idle for C1 */
        if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {
                clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
        } else {
                pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
                pwrdm_set_next_pwrst(core_pd, cx->core_state);
        }

        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP context is saved.
         */
        if (cx->mpu_state == PWRDM_POWER_OFF) {
                error = cpu_pm_enter();
                if (error)
                        goto out_clkdm_set;
        }

        /* Execute ARM wfi */
        omap_sram_idle(true);

        /*
         * Call idle CPU PM enter notifier chain to restore
         * VFP context.
         *
         * cpu_pm_exit() is only called if the MPU actually reached OFF
         * (prev pwrst check), pairing with the cpu_pm_enter() above.
         */
        if (cx->mpu_state == PWRDM_POWER_OFF &&
            pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
                cpu_pm_exit();

out_clkdm_set:
        /* Re-allow idle for C1 */
        if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
                clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);

return_sleep_time:
        return index;
}

/**
 * next_valid_state - Find next valid C-state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: Index of currently selected c-state
 *
 * If the state corresponding to index is valid, index is returned back
 * to the caller. Else, this function searches for a lower c-state which is
 * still valid (as defined in omap3_power_states[]) and returns its index.
 *
 * A state is valid if the 'valid' field is enabled and
 * if it satisfies the enable_off_mode condition.
 */
static int next_valid_state(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
{
        struct omap3_idle_statedata *cx = &omap3_idle_data[index];
        u32 mpu_deepest_state = PWRDM_POWER_RET;
        u32 core_deepest_state = PWRDM_POWER_RET;
        int idx;
        int next_index = 0; /* C1 is the default value */

        if (enable_off_mode) {
                mpu_deepest_state = PWRDM_POWER_OFF;
                /*
                 * Erratum i583: valable for ES rev < Es1.2 on 3630.
                 * CORE OFF mode is not supported in a stable form, restrict
                 * instead the CORE state to RET.
                 */
                if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
                        core_deepest_state = PWRDM_POWER_OFF;
        }

        /* Check if current state is valid */
        if ((cx->mpu_state >= mpu_deepest_state) &&
            (cx->core_state >= core_deepest_state))
                return index;

        /*
         * Drop to next valid state.
         * Start search from the next (lower) state.
         */
        for (idx = index - 1; idx >= 0; idx--) {
                cx = &omap3_idle_data[idx];
                if ((cx->mpu_state >= mpu_deepest_state) &&
                    (cx->core_state >= core_deepest_state)) {
                        next_index = idx;
                        break;
                }
        }

        return next_index;
}

/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: array index of target state to be programmed
 *
 * This function checks for any pending activity and then programs
 * the device to the specified or a safer state.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv,
                               int index)
{
        int new_state_idx, ret;
        u8 per_next_state, per_saved_state;
        struct omap3_idle_statedata *cx;

        /*
         * Use only C1 if CAM is active.
         * CAM does not have wakeup capability in OMAP3.
         */
        if (pwrdm_read_pwrst(cam_pd) == PWRDM_POWER_ON)
                new_state_idx = drv->safe_state_index;
        else
                new_state_idx = next_valid_state(dev, drv, index);

        /*
         * FIXME: we currently manage device-specific idle states
         *        for PER and CORE in combination with CPU-specific
         *        idle states.  This is wrong, and device-specific
         *        idle management needs to be separated out into
         *        its own code.
         */

        /* Program PER state */
        cx = &omap3_idle_data[new_state_idx];

        per_next_state = pwrdm_read_next_pwrst(per_pd);
        per_saved_state = per_next_state;
        if (per_next_state < cx->per_min_state) {
                per_next_state = cx->per_min_state;
                pwrdm_set_next_pwrst(per_pd, per_next_state);
        }

        ret = omap3_enter_idle(dev, drv, new_state_idx);

        /* Restore original PER state if it was modified */
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_saved_state);

        return ret;
}

/* Generic OMAP3 idle driver; latencies are rough estimates */
static struct cpuidle_driver omap3_idle_driver = {
        .name =         "omap3_idle",
        .owner =        THIS_MODULE,
        .states = {
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 2 + 2,
                        .target_residency = 5,
                        .name             = "C1",
                        .desc             = "MPU ON + CORE ON",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 10 + 10,
                        .target_residency = 30,
                        .name             = "C2",
                        .desc             = "MPU ON + CORE ON",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 50 + 50,
                        .target_residency = 300,
                        .name             = "C3",
                        .desc             = "MPU RET + CORE ON",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 1500 + 1800,
                        .target_residency = 4000,
                        .name             = "C4",
                        .desc             = "MPU OFF + CORE ON",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 2500 + 7500,
                        .target_residency = 12000,
                        .name             = "C5",
                        .desc             = "MPU RET + CORE RET",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 3000 + 8500,
                        .target_residency = 15000,
                        .name             = "C6",
                        .desc             = "MPU OFF + CORE RET",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 10000 + 30000,
                        .target_residency = 30000,
                        .name             = "C7",
                        .desc             = "MPU OFF + CORE OFF",
                },
        },
        .state_count = ARRAY_SIZE(omap3_idle_data),
        .safe_state_index = 0,
};

/*
 * Numbers based on measurements made in October 2009 for PM optimized kernel
 * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
 * and worst case latencies).
 */
static struct cpuidle_driver omap3430_idle_driver = {
        .name =         "omap3430_idle",
        .owner =        THIS_MODULE,
        .states = {
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 110 + 162,
                        .target_residency = 5,
                        .name             = "C1",
                        .desc             = "MPU ON + CORE ON",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 106 + 180,
                        .target_residency = 309,
                        .name             = "C2",
                        .desc             = "MPU ON + CORE ON",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 107 + 410,
                        .target_residency = 46057,
                        .name             = "C3",
                        .desc             = "MPU RET + CORE ON",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 121 + 3374,
                        .target_residency = 46057,
                        .name             = "C4",
                        .desc             = "MPU OFF + CORE ON",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 855 + 1146,
                        .target_residency = 46057,
                        .name             = "C5",
                        .desc             = "MPU RET + CORE RET",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 7580 + 4134,
                        .target_residency = 484329,
                        .name             = "C6",
                        .desc             = "MPU OFF + CORE RET",
                },
                {
                        .flags            = CPUIDLE_FLAG_RCU_IDLE,
                        .enter            = omap3_enter_idle_bm,
                        .exit_latency     = 7505 + 15274,
                        .target_residency = 484329,
                        .name             = "C7",
                        .desc             = "MPU OFF + CORE OFF",
                },
        },
        .state_count = ARRAY_SIZE(omap3_idle_data),
        .safe_state_index = 0,
};

/* Public functions */

/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap3_idle_init(void)
{
        mpu_pd = pwrdm_lookup("mpu_pwrdm");
        core_pd = pwrdm_lookup("core_pwrdm");
        per_pd = pwrdm_lookup("per_pwrdm");
        cam_pd = pwrdm_lookup("cam_pwrdm");
        if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
                return -ENODEV;

        /* 3430 gets the N900-measured latencies; everything else the
         * generic estimates */
        if (cpu_is_omap3430())
                return cpuidle_register(&omap3430_idle_driver, NULL);
        else
                return cpuidle_register(&omap3_idle_driver, NULL);
}
linux-master
arch/arm/mach-omap2/cpuidle34xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI81XX Clock Domain data.
 *
 * Copyright (C) 2010 Texas Instruments, Inc. - https://www.ti.com/
 * Copyright (C) 2013 SKTB SKiT, http://www.skitlab.ru/
 */

/* NOTE(review): an include guard in a .c file is unusual — presumably a
 * leftover from this data's header-file heritage; confirm before removing. */
#ifndef __ARCH_ARM_MACH_OMAP2_CLOCKDOMAINS_81XX_H
#define __ARCH_ARM_MACH_OMAP2_CLOCKDOMAINS_81XX_H

#include <linux/kernel.h>
#include <linux/io.h>

#include "clockdomain.h"
#include "cm81xx.h"

/*
 * Note that 814x seems to have HWSUP_SWSUP for many clockdomains
 * while 816x does not. According to the TRM, 816x only has HWSUP
 * for ALWON_L3_FAST. Also note that the TI tree clockdomains81xx.h
 * seems to have the related ifdef the wrong way around claiming
 * 816x supports HWSUP while 814x does not. For now, we only set
 * HWSUP for ALWON_L3_FAST as that seems to be supported for both
 * dm814x and dm816x.
 */

/* Common for 81xx */

static struct clockdomain alwon_l3_slow_81xx_clkdm = {
        .name           = "alwon_l3s_clkdm",
        .pwrdm          = { .name = "alwon_pwrdm" },
        .cm_inst        = TI81XX_CM_ALWON_MOD,
        .clkdm_offs     = TI81XX_CM_ALWON_L3_SLOW_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain alwon_l3_med_81xx_clkdm = {
        .name           = "alwon_l3_med_clkdm",
        .pwrdm          = { .name = "alwon_pwrdm" },
        .cm_inst        = TI81XX_CM_ALWON_MOD,
        .clkdm_offs     = TI81XX_CM_ALWON_L3_MED_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

/* Only clockdomain with hardware-supervised idle on both 814x and 816x */
static struct clockdomain alwon_l3_fast_81xx_clkdm = {
        .name           = "alwon_l3_fast_clkdm",
        .pwrdm          = { .name = "alwon_pwrdm" },
        .cm_inst        = TI81XX_CM_ALWON_MOD,
        .clkdm_offs     = TI81XX_CM_ALWON_L3_FAST_CLKDM,
        .flags          = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain alwon_ethernet_81xx_clkdm = {
        .name           = "alwon_ethernet_clkdm",
        .pwrdm          = { .name = "alwon_pwrdm" },
        .cm_inst        = TI81XX_CM_ALWON_MOD,
        .clkdm_offs     = TI81XX_CM_ETHERNET_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain mmu_81xx_clkdm = {
        .name           = "mmu_clkdm",
        .pwrdm          = { .name = "alwon_pwrdm" },
        .cm_inst        = TI81XX_CM_ALWON_MOD,
        .clkdm_offs     = TI81XX_CM_MMU_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain mmu_cfg_81xx_clkdm = {
        .name           = "mmu_cfg_clkdm",
        .pwrdm          = { .name = "alwon_pwrdm" },
        .cm_inst        = TI81XX_CM_ALWON_MOD,
        .clkdm_offs     = TI81XX_CM_MMUCFG_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain default_l3_slow_81xx_clkdm = {
        .name           = "default_l3_slow_clkdm",
        .pwrdm          = { .name = "default_pwrdm" },
        .cm_inst        = TI81XX_CM_DEFAULT_MOD,
        .clkdm_offs     = TI816X_CM_DEFAULT_L3_SLOW_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain default_sata_81xx_clkdm = {
        .name           = "default_clkdm",
        .pwrdm          = { .name = "default_pwrdm" },
        .cm_inst        = TI81XX_CM_DEFAULT_MOD,
        .clkdm_offs     = TI816X_CM_DEFAULT_SATA_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

/* 816x only */

static struct clockdomain alwon_mpu_816x_clkdm = {
        .name           = "alwon_mpu_clkdm",
        .pwrdm          = { .name = "alwon_pwrdm" },
        .cm_inst        = TI81XX_CM_ALWON_MOD,
        .clkdm_offs     = TI81XX_CM_ALWON_MPU_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain active_gem_816x_clkdm = {
        .name           = "active_gem_clkdm",
        .pwrdm          = { .name = "active_pwrdm" },
        .cm_inst        = TI81XX_CM_ACTIVE_MOD,
        .clkdm_offs     = TI816X_CM_ACTIVE_GEM_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain ivahd0_816x_clkdm = {
        .name           = "ivahd0_clkdm",
        .pwrdm          = { .name = "ivahd0_pwrdm" },
        .cm_inst        = TI816X_CM_IVAHD0_MOD,
        .clkdm_offs     = TI816X_CM_IVAHD0_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain ivahd1_816x_clkdm = {
        .name           = "ivahd1_clkdm",
        .pwrdm          = { .name = "ivahd1_pwrdm" },
        .cm_inst        = TI816X_CM_IVAHD1_MOD,
        .clkdm_offs     = TI816X_CM_IVAHD1_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain ivahd2_816x_clkdm = {
        .name           = "ivahd2_clkdm",
        .pwrdm          = { .name = "ivahd2_pwrdm" },
        .cm_inst        = TI816X_CM_IVAHD2_MOD,
        .clkdm_offs     = TI816X_CM_IVAHD2_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain sgx_816x_clkdm = {
        .name           = "sgx_clkdm",
        .pwrdm          = { .name = "sgx_pwrdm" },
        .cm_inst        = TI81XX_CM_SGX_MOD,
        .clkdm_offs     = TI816X_CM_SGX_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain default_l3_med_816x_clkdm = {
        .name           = "default_l3_med_clkdm",
        .pwrdm          = { .name = "default_pwrdm" },
        .cm_inst        = TI81XX_CM_DEFAULT_MOD,
        .clkdm_offs     = TI816X_CM_DEFAULT_L3_MED_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain default_ducati_816x_clkdm = {
        .name           = "default_ducati_clkdm",
        .pwrdm          = { .name = "default_pwrdm" },
        .cm_inst        = TI81XX_CM_DEFAULT_MOD,
        .clkdm_offs     = TI816X_CM_DEFAULT_DUCATI_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

static struct clockdomain default_pci_816x_clkdm = {
        .name           = "default_pci_clkdm",
        .pwrdm          = { .name = "default_pwrdm" },
        .cm_inst        = TI81XX_CM_DEFAULT_MOD,
        .clkdm_offs     = TI816X_CM_DEFAULT_PCI_CLKDM,
        .flags          = CLKDM_CAN_SWSUP,
};

/* dm814x registers only the common 81xx clockdomains */
static struct clockdomain *clockdomains_ti814x[] __initdata = {
        &alwon_l3_slow_81xx_clkdm,
        &alwon_l3_med_81xx_clkdm,
        &alwon_l3_fast_81xx_clkdm,
        &alwon_ethernet_81xx_clkdm,
        &mmu_81xx_clkdm,
        &mmu_cfg_81xx_clkdm,
        &default_l3_slow_81xx_clkdm,
        &default_sata_81xx_clkdm,
        NULL,
};

void __init ti814x_clockdomains_init(void)
{
        clkdm_register_platform_funcs(&am33xx_clkdm_operations);
        clkdm_register_clkdms(clockdomains_ti814x);
        clkdm_complete_init();
}

/* dm816x registers the common set plus the 816x-only clockdomains */
static struct clockdomain *clockdomains_ti816x[] __initdata = {
        &alwon_mpu_816x_clkdm,
        &alwon_l3_slow_81xx_clkdm,
        &alwon_l3_med_81xx_clkdm,
        &alwon_l3_fast_81xx_clkdm,
        &alwon_ethernet_81xx_clkdm,
        &mmu_81xx_clkdm,
        &mmu_cfg_81xx_clkdm,
        &active_gem_816x_clkdm,
        &ivahd0_816x_clkdm,
        &ivahd1_816x_clkdm,
        &ivahd2_816x_clkdm,
        &sgx_816x_clkdm,
        &default_l3_med_816x_clkdm,
        &default_ducati_816x_clkdm,
        &default_pci_816x_clkdm,
        &default_l3_slow_81xx_clkdm,
        &default_sata_81xx_clkdm,
        NULL,
};

void __init ti816x_clockdomains_init(void)
{
        clkdm_register_platform_funcs(&am33xx_clkdm_operations);
        clkdm_register_clkdms(clockdomains_ti816x);
        clkdm_complete_init();
}

#endif
linux-master
arch/arm/mach-omap2/clockdomains81xx_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 Voltage Processor (VP) data
 *
 * Copyright (C) 2007, 2010 Texas Instruments, Inc.
 * Rajendra Nayak <[email protected]>
 * Lesly A M <[email protected]>
 * Thara Gopinath <[email protected]>
 *
 * Copyright (C) 2008, 2011 Nokia Corporation
 * Kalle Jokiniemi
 * Paul Walmsley
 */
#include <linux/io.h>
#include <linux/err.h>
#include <linux/init.h>

#include "common.h"

#include "prm-regbits-34xx.h"
#include "voltage.h"

#include "vp.h"
#include "prm2xxx_3xxx.h"

/* TX-done handling is delegated to the common PRM VP helpers */
static const struct omap_vp_ops omap3_vp_ops = {
        .check_txdone = omap_prm_vp_check_txdone,
        .clear_txdone = omap_prm_vp_clear_txdone,
};

/*
 * VP data common to 34xx/36xx chips
 * XXX This stuff presumably belongs in the vp3xxx.c or vp.c file.
 */
static const struct omap_vp_common omap3_vp_common = {
        .vpconfig_erroroffset_mask = OMAP3430_ERROROFFSET_MASK,
        .vpconfig_errorgain_mask = OMAP3430_ERRORGAIN_MASK,
        .vpconfig_initvoltage_mask = OMAP3430_INITVOLTAGE_MASK,
        .vpconfig_timeouten = OMAP3430_TIMEOUTEN_MASK,
        .vpconfig_initvdd = OMAP3430_INITVDD_MASK,
        .vpconfig_forceupdate = OMAP3430_FORCEUPDATE_MASK,
        .vpconfig_vpenable = OMAP3430_VPENABLE_MASK,
        .vstepmin_smpswaittimemin_shift = OMAP3430_SMPSWAITTIMEMIN_SHIFT,
        .vstepmax_smpswaittimemax_shift = OMAP3430_SMPSWAITTIMEMAX_SHIFT,
        .vstepmin_stepmin_shift = OMAP3430_VSTEPMIN_SHIFT,
        .vstepmax_stepmax_shift = OMAP3430_VSTEPMAX_SHIFT,
        .vlimitto_vddmin_shift = OMAP3430_VDDMIN_SHIFT,
        .vlimitto_vddmax_shift = OMAP3430_VDDMAX_SHIFT,
        .vlimitto_timeout_shift = OMAP3430_TIMEOUT_SHIFT,
        .vpvoltage_mask = OMAP3430_VPVOLTAGE_MASK,
        .ops = &omap3_vp_ops,
};

/* VP1 controls the MPU rail on OMAP3 */
struct omap_vp_instance omap3_vp_mpu = {
        .id = OMAP3_VP_VDD_MPU_ID,
        .common = &omap3_vp_common,
        .vpconfig = OMAP3_PRM_VP1_CONFIG_OFFSET,
        .vstepmin = OMAP3_PRM_VP1_VSTEPMIN_OFFSET,
        .vstepmax = OMAP3_PRM_VP1_VSTEPMAX_OFFSET,
        .vlimitto = OMAP3_PRM_VP1_VLIMITTO_OFFSET,
        .vstatus = OMAP3_PRM_VP1_STATUS_OFFSET,
        .voltage = OMAP3_PRM_VP1_VOLTAGE_OFFSET,
};

/* VP2 controls the CORE rail on OMAP3 */
struct omap_vp_instance omap3_vp_core = {
        .id = OMAP3_VP_VDD_CORE_ID,
        .common = &omap3_vp_common,
        .vpconfig = OMAP3_PRM_VP2_CONFIG_OFFSET,
        .vstepmin = OMAP3_PRM_VP2_VSTEPMIN_OFFSET,
        .vstepmax = OMAP3_PRM_VP2_VSTEPMAX_OFFSET,
        .vlimitto = OMAP3_PRM_VP2_VLIMITTO_OFFSET,
        .vstatus = OMAP3_PRM_VP2_STATUS_OFFSET,
        .voltage = OMAP3_PRM_VP2_VOLTAGE_OFFSET,
};

/* Per-rail voltage limits used to program VLIMITTO */
struct omap_vp_param omap3_mpu_vp_data = {
        .vddmin = OMAP3430_VP1_VLIMITTO_VDDMIN,
        .vddmax = OMAP3430_VP1_VLIMITTO_VDDMAX,
};

struct omap_vp_param omap3_core_vp_data = {
        .vddmin = OMAP3430_VP2_VLIMITTO_VDDMIN,
        .vddmax = OMAP3430_VP2_VLIMITTO_VDDMAX,
};
linux-master
arch/arm/mach-omap2/vp3xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMS/SDRC (SDRAM controller) common code for OMAP2/3
 *
 * Copyright (C) 2005, 2008 Texas Instruments Inc.
 * Copyright (C) 2005, 2008 Nokia Corporation
 *
 * Tony Lindgren <[email protected]>
 * Paul Walmsley
 * Richard Woodruff <[email protected]>
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>

#include "common.h"
#include "clock.h"
#include "sdrc.h"

/* SDRC timing parameters for chip-selects 0 and 1, saved at init */
static struct omap_sdrc_params *sdrc_init_params_cs0, *sdrc_init_params_cs1;

void __iomem *omap2_sdrc_base;
void __iomem *omap2_sms_base;

struct omap2_sms_regs {
        u32     sms_sysconfig;  /* saved SMS_SYSCONFIG for off-mode restore */
};

static struct omap2_sms_regs sms_context;

/* SDRC_POWER register bits */
#define SDRC_POWER_EXTCLKDIS_SHIFT              3
#define SDRC_POWER_PWDENA_SHIFT                 2
#define SDRC_POWER_PAGEPOLICY_SHIFT             0

/**
 * omap2_sms_save_context - Save SMS registers
 *
 * Save SMS registers that need to be restored after off mode.
 */
static void omap2_sms_save_context(void)
{
        sms_context.sms_sysconfig = sms_read_reg(SMS_SYSCONFIG);
}

/**
 * omap2_sms_restore_context - Restore SMS registers
 *
 * Restore SMS registers that need to be Restored after off mode.
 */
void omap2_sms_restore_context(void)
{
        sms_write_reg(sms_context.sms_sysconfig, SMS_SYSCONFIG);
}

/* Record the ioremapped SDRC/SMS base addresses for the accessors above. */
void __init omap2_set_globals_sdrc(void __iomem *sdrc, void __iomem *sms)
{
        omap2_sdrc_base = sdrc;
        omap2_sms_base = sms;
}

/**
 * omap2_sdrc_init - initialize SMS, SDRC devices on boot
 * @sdrc_cs[01]: pointers to a null-terminated list of struct omap_sdrc_params
 *  Support for 2 chip selects timings
 *
 * Turn on smart idle modes for SDRAM scheduler and controller.
 * Program a known-good configuration for the SDRC to deal with buggy
 * bootloaders.
 */
void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
                            struct omap_sdrc_params *sdrc_cs1)
{
        u32 l;

        /* Bits [4:3] are SIDLEMODE; 0x2 selects smart-idle (per the
         * "Turn on smart idle modes" note above) for both SMS and SDRC */
        l = sms_read_reg(SMS_SYSCONFIG);
        l &= ~(0x3 << 3);
        l |= (0x2 << 3);
        sms_write_reg(l, SMS_SYSCONFIG);

        l = sdrc_read_reg(SDRC_SYSCONFIG);
        l &= ~(0x3 << 3);
        l |= (0x2 << 3);
        sdrc_write_reg(l, SDRC_SYSCONFIG);

        sdrc_init_params_cs0 = sdrc_cs0;
        sdrc_init_params_cs1 = sdrc_cs1;

        /* XXX Enable SRFRONIDLEREQ here also? */
        /*
         * PWDENA should not be set due to 34xx erratum 1.150 - PWDENA
         * can cause random memory corruption
         */
        l = (1 << SDRC_POWER_EXTCLKDIS_SHIFT) |
                (1 << SDRC_POWER_PAGEPOLICY_SHIFT);
        sdrc_write_reg(l, SDRC_POWER);
        omap2_sms_save_context();
}
linux-master
arch/arm/mach-omap2/sdrc.c
// SPDX-License-Identifier: GPL-2.0-only /* * Helper module for board specific I2C bus registration * * Copyright (C) 2009 Nokia Corporation. */ #include "soc.h" #include "omap_hwmod.h" #include "omap_device.h" #include "prm.h" #include "common.h" #include "i2c.h" /* In register I2C_CON, Bit 15 is the I2C enable bit */ #define I2C_EN BIT(15) #define OMAP2_I2C_CON_OFFSET 0x24 #define OMAP4_I2C_CON_OFFSET 0xA4 #define MAX_OMAP_I2C_HWMOD_NAME_LEN 16 /** * omap_i2c_reset - reset the omap i2c module. * @oh: struct omap_hwmod * * * The i2c moudle in omap2, omap3 had a special sequence to reset. The * sequence is: * - Disable the I2C. * - Write to SOFTRESET bit. * - Enable the I2C. * - Poll on the RESETDONE bit. * The sequence is implemented in below function. This is called for 2420, * 2430 and omap3. */ int omap_i2c_reset(struct omap_hwmod *oh) { u32 v; u16 i2c_con; int c = 0; if (soc_is_omap24xx() || soc_is_omap34xx() || soc_is_am35xx()) i2c_con = OMAP2_I2C_CON_OFFSET; else i2c_con = OMAP4_I2C_CON_OFFSET; /* Disable I2C */ v = omap_hwmod_read(oh, i2c_con); v &= ~I2C_EN; omap_hwmod_write(v, oh, i2c_con); /* Write to the SOFTRESET bit */ omap_hwmod_softreset(oh); /* Enable I2C */ v = omap_hwmod_read(oh, i2c_con); v |= I2C_EN; omap_hwmod_write(v, oh, i2c_con); /* Poll on RESETDONE bit */ omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs) & SYSS_RESETDONE_MASK), MAX_MODULE_SOFTRESET_WAIT, c); if (c == MAX_MODULE_SOFTRESET_WAIT) pr_warn("%s: %s: softreset failed (waited %d usec)\n", __func__, oh->name, MAX_MODULE_SOFTRESET_WAIT); else pr_debug("%s: %s: softreset in %d usec\n", __func__, oh->name, c); return 0; }
linux-master
arch/arm/mach-omap2/i2c.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 OPP table definitions.
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 *	Kevin Hilman
 * Copyright (C) 2010-2011 Nokia Corporation.
 *      Eduardo Valentin
 *      Paul Walmsley
 */
#include <linux/module.h>

#include "soc.h"
#include "control.h"
#include "omap_opp_data.h"
#include "pm.h"

/* 34xx */

/* VDD1 */
#define OMAP3430_VDD_MPU_OPP1_UV		975000
#define OMAP3430_VDD_MPU_OPP2_UV		1075000
#define OMAP3430_VDD_MPU_OPP3_UV		1200000
#define OMAP3430_VDD_MPU_OPP4_UV		1270000
#define OMAP3430_VDD_MPU_OPP5_UV		1350000

/*
 * Each entry: nominal voltage (uV), efuse offset holding the
 * SoC-specific calibrated voltage, and two margin/gain parameters
 * (meaning defined by VOLT_DATA_DEFINE in omap_opp_data.h).
 * Tables are terminated by an all-zero sentinel entry.
 */
struct omap_volt_data omap34xx_vddmpu_volt_data[] = {
	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP1_UV, OMAP343X_CONTROL_FUSE_OPP1_VDD1, 0xf4, 0x0c),
	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP2_UV, OMAP343X_CONTROL_FUSE_OPP2_VDD1, 0xf4, 0x0c),
	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP3_UV, OMAP343X_CONTROL_FUSE_OPP3_VDD1, 0xf9, 0x18),
	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP4_UV, OMAP343X_CONTROL_FUSE_OPP4_VDD1, 0xf9, 0x18),
	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP5_UV, OMAP343X_CONTROL_FUSE_OPP5_VDD1, 0xf9, 0x18),
	VOLT_DATA_DEFINE(0, 0, 0, 0),
};

/* VDD2 */
#define OMAP3430_VDD_CORE_OPP1_UV		975000
#define OMAP3430_VDD_CORE_OPP2_UV		1050000
#define OMAP3430_VDD_CORE_OPP3_UV		1150000

struct omap_volt_data omap34xx_vddcore_volt_data[] = {
	VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP1_UV, OMAP343X_CONTROL_FUSE_OPP1_VDD2, 0xf4, 0x0c),
	VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP2_UV, OMAP343X_CONTROL_FUSE_OPP2_VDD2, 0xf4, 0x0c),
	VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP3_UV, OMAP343X_CONTROL_FUSE_OPP3_VDD2, 0xf9, 0x18),
	VOLT_DATA_DEFINE(0, 0, 0, 0),
};

/* 36xx */

/* VDD1 */
#define OMAP3630_VDD_MPU_OPP50_UV		1012500
#define OMAP3630_VDD_MPU_OPP100_UV		1200000
#define OMAP3630_VDD_MPU_OPP120_UV		1325000
#define OMAP3630_VDD_MPU_OPP1G_UV		1375000

struct omap_volt_data omap36xx_vddmpu_volt_data[] = {
	VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP50_UV, OMAP3630_CONTROL_FUSE_OPP50_VDD1, 0xf4, 0x0c),
	VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP100_UV, OMAP3630_CONTROL_FUSE_OPP100_VDD1, 0xf9, 0x16),
	VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP120_UV, OMAP3630_CONTROL_FUSE_OPP120_VDD1, 0xfa, 0x23),
	VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP1G_UV, OMAP3630_CONTROL_FUSE_OPP1G_VDD1, 0xfa, 0x27),
	VOLT_DATA_DEFINE(0, 0, 0, 0),
};

/* VDD2 */
#define OMAP3630_VDD_CORE_OPP50_UV		1000000
#define OMAP3630_VDD_CORE_OPP100_UV		1200000

struct omap_volt_data omap36xx_vddcore_volt_data[] = {
	VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP50_UV, OMAP3630_CONTROL_FUSE_OPP50_VDD2, 0xf4, 0x0c),
	VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP100_UV, OMAP3630_CONTROL_FUSE_OPP100_VDD2, 0xf9, 0x16),
	VOLT_DATA_DEFINE(0, 0, 0, 0),
};
linux-master
arch/arm/mach-omap2/opp3xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4 specific common source file.
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 * Author:
 *	Santosh Shilimkar <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/export.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
#include <linux/genalloc.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
#include <asm/memblock.h>
#include <asm/smp_twd.h>

#include "omap-wakeupgen.h"
#include "soc.h"
#include "iomap.h"
#include "common.h"
#include "prminst44xx.h"
#include "prcm_mpu44xx.h"
#include "omap4-sar-layout.h"
#include "omap-secure.h"
#include "sram.h"

#ifdef CONFIG_CACHE_L2X0
static void __iomem *l2cache_base;
#endif

static void __iomem *sar_ram_base;
static void __iomem *gic_dist_base_addr;
static void __iomem *twd_base;

#define IRQ_LOCALTIMER		29

#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER

/* Used to implement memory barrier on DRAM path */
#define OMAP4_DRAM_BARRIER_VA			0xfe600000

static void __iomem *dram_sync, *sram_sync;
static phys_addr_t dram_sync_paddr;
static u32 dram_sync_size;

/*
 * The OMAP4 bus structure contains asynchronous bridges which can buffer
 * data writes from the MPU. These asynchronous bridges can be found on
 * paths between the MPU to EMIF, and the MPU to L3 interconnects.
 *
 * We need to be careful about re-ordering which can happen as a result
 * of different accesses being performed via different paths, and
 * therefore different asynchronous bridges.
 */

/*
 * OMAP4 interconnect barrier which is called for each mb() and wmb().
 * This is to ensure that normal paths to DRAM (normal memory, cacheable
 * accesses) are properly synchronised with writes to DMA coherent memory
 * (normal memory, uncacheable) and device writes.
 *
 * The mb() and wmb() barriers only operate only on the MPU->MA->EMIF
 * path, as we need to ensure that data is visible to other system
 * masters prior to writes to those system masters being seen.
 *
 * Note: the SRAM path is not synchronised via mb() and wmb().
 */
static void omap4_mb(void)
{
	/* Write to the strongly-ordered DRAM mapping to drain the path */
	if (dram_sync)
		writel_relaxed(0, dram_sync);
}

/*
 * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
 *
 * If a data is stalled inside asynchronous bridge because of back
 * pressure, it may be accepted multiple times, creating pointer
 * misalignment that will corrupt next transfers on that data path until
 * next reset of the system. No recovery procedure once the issue is hit,
 * the path remains consistently broken.
 *
 * Async bridges can be found on paths between MPU to EMIF and MPU to L3
 * interconnects.
 *
 * This situation can happen only when the idle is initiated by a Master
 * Request Disconnection (which is trigged by software when executing WFI
 * on the CPU).
 *
 * The work-around for this errata needs all the initiators connected
 * through an async bridge to ensure that data path is properly drained
 * before issuing WFI. This condition will be met if one Strongly ordered
 * access is performed to the target right before executing the WFI.
 *
 * In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained.
 * IO barrier ensure that there is no synchronisation loss on initiators
 * operating on both interconnect port simultaneously.
 *
 * This is a stronger version of the OMAP4 memory barrier below, and
 * operates on both the MPU->MA->EMIF path but also the MPU->OCP path
 * as well, and is necessary prior to executing a WFI.
 */
void omap_interconnect_sync(void)
{
	/* Read-then-write on both paths drains the async bridges */
	if (dram_sync && sram_sync) {
		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
		isb();
	}
}

/* Grab one page of on-chip SRAM for the i688 workaround's SRAM path */
static int __init omap4_sram_init(void)
{
	struct device_node *np;
	struct gen_pool *sram_pool;

	if (!soc_is_omap44xx() && !soc_is_omap54xx())
		return 0;

	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
	if (!np)
		pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
			__func__);
	/* NOTE(review): np may be NULL here; presumably of_gen_pool_get()
	 * tolerates that — confirm. */
	sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!sram_pool)
		pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
			__func__);
	else
		sram_sync = (void __iomem *)gen_pool_alloc(sram_pool, PAGE_SIZE);
	of_node_put(np);

	return 0;
}
omap_arch_initcall(omap4_sram_init);

/* Steal one page physical memory for barrier implementation */
void __init omap_barrier_reserve_memblock(void)
{
	dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
	dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
}

/* Map the stolen page strongly-ordered and install the soc_mb hook */
void __init omap_barriers_init(void)
{
	struct map_desc dram_io_desc[1];

	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
	dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
	dram_io_desc[0].length = dram_sync_size;
	dram_io_desc[0].type = MT_MEMORY_RW_SO;
	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
	dram_sync = (void __iomem *) dram_io_desc[0].virtual;

	pr_info("OMAP4: Map %pa to %p for dram barrier\n",
		&dram_sync_paddr, dram_sync);

	soc_mb = omap4_mb;
}

#endif

void gic_dist_disable(void)
{
	if (gic_dist_base_addr)
		writel_relaxed(0x0, gic_dist_base_addr + GIC_DIST_CTRL);
}

void gic_dist_enable(void)
{
	if (gic_dist_base_addr)
		writel_relaxed(0x1, gic_dist_base_addr + GIC_DIST_CTRL);
}

bool gic_dist_disabled(void)
{
	/* NOTE(review): unlike enable/disable above, no NULL check on
	 * gic_dist_base_addr — callers presumably guarantee mapping. */
	return !(readl_relaxed(gic_dist_base_addr + GIC_DIST_CTRL) & 0x1);
}

/* Re-raise a local timer interrupt lost while the GIC distributor was off */
void gic_timer_retrigger(void)
{
	u32 twd_int = readl_relaxed(twd_base + TWD_TIMER_INTSTAT);
	u32 gic_int = readl_relaxed(gic_dist_base_addr +
				    GIC_DIST_PENDING_SET);
	u32 twd_ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);

	if (twd_int && !(gic_int & BIT(IRQ_LOCALTIMER))) {
		/*
		 * The local timer interrupt got lost while the distributor was
		 * disabled.  Ack the pending interrupt, and retrigger it.
		 */
		pr_warn("%s: lost localtimer interrupt\n", __func__);
		writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
		if (!(twd_ctrl & TWD_TIMER_CONTROL_PERIODIC)) {
			/* One-shot mode: reload a minimal count and re-enable */
			writel_relaxed(1, twd_base + TWD_TIMER_COUNTER);
			twd_ctrl |= TWD_TIMER_CONTROL_ENABLE;
			writel_relaxed(twd_ctrl, twd_base + TWD_TIMER_CONTROL);
		}
	}
}

#ifdef CONFIG_CACHE_L2X0

void __iomem *omap4_get_l2cache_base(void)
{
	return l2cache_base;
}

/*
 * L2C-310 secure register writes must go through the ROM monitor on
 * OMAP4; translate the register offset to the matching SMC index.
 */
void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
{
	unsigned smc_op;

	switch (reg) {
	case L2X0_CTRL:
		smc_op = OMAP4_MON_L2X0_CTRL_INDEX;
		break;

	case L2X0_AUX_CTRL:
		smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX;
		break;

	case L2X0_DEBUG_CTRL:
		smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX;
		break;

	case L310_PREFETCH_CTRL:
		smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
		break;

	case L310_POWER_CTRL:
		pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
		return;

	default:
		WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
		return;
	}

	omap_smc1(smc_op, val);
}

int __init omap_l2_cache_init(void)
{
	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	if (WARN_ON(!l2cache_base))
		return -ENOMEM;
	return 0;
}
#endif

void __iomem *omap4_get_sar_ram_base(void)
{
	return sar_ram_base;
}

/*
 * SAR RAM used to save and restore the HW context in low power modes.
 * Note that we need to initialize this very early for kexec. See
 * omap4_mpuss_early_init().
 */
void __init omap4_sar_ram_init(void)
{
	unsigned long sar_base;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (cpu_is_omap44xx())
		sar_base = OMAP44XX_SAR_RAM_BASE;
	else if (soc_is_omap54xx())
		sar_base = OMAP54XX_SAR_RAM_BASE;
	else
		return;

	/* Static mapping, never released */
	sar_ram_base = ioremap(sar_base, SZ_16K);
	if (WARN_ON(!sar_ram_base))
		return;
}

static const struct of_device_id intc_match[] = {
	{ .compatible = "ti,omap4-wugen-mpu", },
	{ .compatible = "ti,omap5-wugen-mpu", },
	{ },
};

static struct device_node *intc_node;

void __init omap_gic_of_init(void)
{
	struct device_node *np;

	intc_node = of_find_matching_node(NULL, intc_match);
	if (WARN_ON(!intc_node)) {
		pr_err("No WUGEN found in DT, system will misbehave.\n");
		pr_err("UPDATE YOUR DEVICE TREE!\n");
	}

	/* Extract GIC distributor and TWD bases for OMAP4460 ROM Errata WA */
	if (!cpu_is_omap446x())
		goto skip_errata_init;

	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
	gic_dist_base_addr = of_iomap(np, 0);
	of_node_put(np);
	WARN_ON(!gic_dist_base_addr);

	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-twd-timer");
	twd_base = of_iomap(np, 0);
	of_node_put(np);
	WARN_ON(!twd_base);

skip_errata_init:
	irqchip_init();
}
linux-master
arch/arm/mach-omap2/omap4-common.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP Secure API infrastructure.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <[email protected]>
 * Copyright (C) 2012 Ivaylo Dimitrov <[email protected]>
 * Copyright (C) 2013 Pali Rohár <[email protected]>
 */

#include <linux/arm-smccc.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/of.h>

#include <asm/cacheflush.h>
#include <asm/memblock.h>

#include "common.h"
#include "omap-secure.h"
#include "soc.h"

static phys_addr_t omap_secure_memblock_base;

bool optee_available;

#define OMAP_SIP_SMC_STD_CALL_VAL(func_num) \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
	ARM_SMCCC_OWNER_SIP, (func_num))

static void __init omap_optee_init_check(void)
{
	struct device_node *np;

	/*
	 * We only check that the OP-TEE node is present and available. The
	 * OP-TEE kernel driver is not needed for the type of interaction made
	 * with OP-TEE here so the driver's status is not checked.
	 */
	np = of_find_node_by_path("/firmware/optee");
	if (np && of_device_is_available(np))
		optee_available = true;
	of_node_put(np);
}

/**
 * omap_secure_dispatcher: Routine to dispatch low power secure
 * service routines
 * @idx: The HAL API index
 * @flag: The flag indicating criticality of operation
 * @nargs: Number of valid arguments out of four.
 * @arg1, @arg2, @arg3, @arg4: Parameters passed to secure API
 *
 * Return the non-zero error value on failure.
 */
u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
							 u32 arg3, u32 arg4)
{
	/* Per-CPU parameter buffer; get_cpu() pins us to one slot */
	static u32 buf[NR_CPUS][5];
	u32 *param;
	int cpu;
	u32 ret;

	cpu = get_cpu();
	param = buf[cpu];

	param[0] = nargs;
	param[1] = arg1;
	param[2] = arg2;
	param[3] = arg3;
	param[4] = arg4;

	/*
	 * Secure API needs physical address
	 * pointer for the parameters
	 */
	flush_cache_all();
	outer_clean_range(__pa(param), __pa(param + 5));
	ret = omap_smc2(idx, flag, __pa(param));

	put_cpu();

	return ret;
}

void omap_smccc_smc(u32 fn, u32 arg)
{
	struct arm_smccc_res res;

	arm_smccc_smc(OMAP_SIP_SMC_STD_CALL_VAL(fn),
		      arg, 0, 0, 0, 0, 0, 0, &res);
	WARN(res.a0, "Secure function call 0x%08x failed\n", fn);
}

void omap_smc1(u32 fn, u32 arg)
{
	/*
	 * If this platform has OP-TEE installed we use ARM SMC calls
	 * otherwise fall back to the OMAP ROM style calls.
	 */
	if (optee_available)
		omap_smccc_smc(fn, arg);
	else
		_omap_smc1(fn, arg);
}

/* Allocate the memory to save secure ram */
int __init omap_secure_ram_reserve_memblock(void)
{
	u32 size = OMAP_SECURE_RAM_STORAGE;

	size = ALIGN(size, SECTION_SIZE);
	omap_secure_memblock_base = arm_memblock_steal(size, SECTION_SIZE);

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
u32 omap3_save_secure_ram(void *addr, int size)
{
	static u32 param[5];
	u32 ret;

	/* Caller must supply exactly the secure-RAM save area size */
	if (size != OMAP3_SAVE_SECURE_RAM_SZ)
		return OMAP3_SAVE_SECURE_RAM_SZ;

	param[0] = 4;		/* Number of arguments */
	param[1] = __pa(addr);	/* Physical address for saving */
	param[2] = 0;
	param[3] = 1;
	param[4] = 1;

	ret = save_secure_ram_context(__pa(param));

	return ret;
}
#endif

/**
 * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
 * @idx: The PPA API index
 * @process: Process ID
 * @flag: The flag indicating criticality of operation
 * @nargs: Number of valid arguments out of four.
 * @arg1, @arg2, @arg3, @arg4: Parameters passed to secure API
 *
 * Return the non-zero error value on failure.
 *
 * NOTE: rx51_secure_dispatcher differs from omap_secure_dispatcher because
 * it calls omap_smc3() instead of omap_smc2() and param[0] is nargs+1
 */
static u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
				  u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
	static u32 param[5];
	u32 ret;

	param[0] = nargs+1;	/* RX-51 needs number of arguments + 1 */
	param[1] = arg1;
	param[2] = arg2;
	param[3] = arg3;
	param[4] = arg4;

	/*
	 * Secure API needs physical address
	 * pointer for the parameters
	 */
	local_irq_disable();
	local_fiq_disable();
	flush_cache_all();
	outer_clean_range(__pa(param), __pa(param + 5));
	ret = omap_smc3(idx, process, flag, __pa(param));
	flush_cache_all();
	local_fiq_enable();
	local_irq_enable();

	return ret;
}

/**
 * rx51_secure_update_aux_cr: Routine to modify the contents of Auxiliary Control Register
 *  @set_bits: bits to set in ACR
 *  @clear_bits: bits to clear in ACR
 *
 * Return the non-zero error value on failure.
 */
u32 rx51_secure_update_aux_cr(u32 set_bits, u32 clear_bits)
{
	u32 acr;

	/* Read ACR */
	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
	acr &= ~clear_bits;
	acr |= set_bits;

	return rx51_secure_dispatcher(RX51_PPA_WRITE_ACR,
				      0,
				      FLAG_START_CRITICAL,
				      1, acr, 0, 0, 0);
}

/**
 * rx51_secure_rng_call: Routine for HW random generator
 */
u32 rx51_secure_rng_call(u32 ptr, u32 count, u32 flag)
{
	return rx51_secure_dispatcher(RX51_PPA_HWRNG,
				      0,
				      NO_FLAG,
				      3, ptr, count, flag, 0);
}

void __init omap_secure_init(void)
{
	omap_optee_init_check();
}

/*
 * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
 * address after MMU has been re-enabled after CPU1 has been woken up again.
 * Otherwise the ROM code will attempt to use the earlier physical return
 * address that got set with MMU off when waking up CPU1. Only used on secure
 * devices.
 */
static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_EXIT:
		omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
				       FLAG_START_CRITICAL,
				       0, 0, 0, 0, 0);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block secure_notifier_block = {
	.notifier_call = cpu_notifier,
};

/* Register the ROM-address-fixup notifier on secure OMAP4 devices only */
static int __init secure_pm_init(void)
{
	if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
		return 0;

	cpu_pm_register_notifier(&secure_notifier_block);

	return 0;
}
omap_arch_initcall(secure_pm_init);
linux-master
arch/arm/mach-omap2/omap-secure.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DPLL + CORE_CLK composite clock functions
 *
 * Copyright (C) 2005-2008 Texas Instruments, Inc.
 * Copyright (C) 2004-2010 Nokia Corporation
 *
 * Contacts:
 * Richard Woodruff <[email protected]>
 * Paul Walmsley
 *
 * Based on earlier work by Tuukka Tikkanen, Tony Lindgren,
 * Gordon McNutt and RidgeRun, Inc.
 *
 * XXX The DPLL and CORE clocks should be split into two separate clock
 * types.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/clk/ti.h>
#include <linux/io.h>

#include "clock.h"
#include "clock2xxx.h"
#include "opp2xxx.h"
#include "cm2xxx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "sram.h"

/* #define DOWN_VARIABLE_DPLL 1 */		/* Experimental */

/*
 * dpll_core_ck: pointer to the combined dpll_ck + core_ck on OMAP2xxx
 * (currently defined as "dpll_ck" in the OMAP2xxx clock tree).  Set
 * during dpll_ck init and used later by omap2xxx_clk_get_core_rate().
 */
static struct clk_hw_omap *dpll_core_ck;

/**
 * omap2xxx_clk_get_core_rate - return the CORE_CLK rate
 *
 * Returns the CORE_CLK rate.  CORE_CLK can have one of three rate
 * sources on OMAP2xxx: the DPLL CLKOUT rate, DPLL CLKOUTX2, or 32KHz
 * (the latter is unusual).  This currently should be called with
 * struct clk *dpll_ck, which is a composite clock of dpll_ck and
 * core_ck.
 */
unsigned long omap2xxx_clk_get_core_rate(void)
{
	long long core_clk;
	u32 v;

	WARN_ON(!dpll_core_ck);

	core_clk = omap2_get_dpll_rate(dpll_core_ck);

	/* v encodes the CORE clock source: 32K, CLKOUT, or CLKOUTX2 */
	v = omap2xxx_cm_get_core_clk_src();

	if (v == CORE_CLK_SRC_32K)
		core_clk = 32768;
	else
		core_clk *= v;

	return core_clk;
}

/*
 * Uses the current prcm set to tell if a rate is valid.
 * You can go slower, but not faster within a given rate set.
 */
static long omap2_dpllcore_round_rate(unsigned long target_rate)
{
	u32 high, low, core_clk_src;

	core_clk_src = omap2xxx_cm_get_core_clk_src();

	if (core_clk_src == CORE_CLK_SRC_DPLL) {	/* DPLL clockout */
		high = curr_prcm_set->dpll_speed * 2;
		low = curr_prcm_set->dpll_speed;
	} else {				/* DPLL clockout x 2 */
		high = curr_prcm_set->dpll_speed;
		low = curr_prcm_set->dpll_speed / 2;
	}

#ifdef DOWN_VARIABLE_DPLL
	if (target_rate > high)
		return high;
	else
		return target_rate;
#else
	/* Snap to the nearest supported endpoint of the current rate set */
	if (target_rate > low)
		return high;
	else
		return low;
#endif

}

unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	return omap2xxx_clk_get_core_rate();
}

/*
 * omap2_reprogram_dpllcore - change the CORE DPLL rate
 * @hw: struct clk_hw * of the DPLL/CORE composite clock
 * @rate: target rate in Hz
 * @parent_rate: unused
 *
 * Handles the simple x2/half transitions via SDRC reprogramming alone;
 * for other rates it rewrites the DPLL mult/div fields and the SDRC
 * refresh settings.  Returns 0 on success or -EINVAL on an unsupported
 * rate.
 */
int omap2_reprogram_dpllcore(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 cur_rate, low, mult, div, valid_rate, done_rate;
	u32 bypass = 0;
	struct prcm_config tmpset;
	const struct dpll_data *dd;

	cur_rate = omap2xxx_clk_get_core_rate();
	mult = omap2xxx_cm_get_core_clk_src();

	if ((rate == (cur_rate / 2)) && (mult == 2)) {
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
	} else if ((rate == (cur_rate * 2)) && (mult == 1)) {
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
	} else if (rate != cur_rate) {
		valid_rate = omap2_dpllcore_round_rate(rate);
		if (valid_rate != rate)
			return -EINVAL;

		if (mult == 1)
			low = curr_prcm_set->dpll_speed;
		else
			low = curr_prcm_set->dpll_speed / 2;

		dd = clk->dpll_data;
		if (!dd)
			return -EINVAL;

		tmpset.cm_clksel1_pll =
			omap_clk_ll_ops.clk_readl(&dd->mult_div1_reg);
		tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
					   dd->div1_mask);
		div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
		tmpset.cm_clksel2_pll = omap2xxx_cm_get_core_pll_config();
		tmpset.cm_clksel2_pll &= ~OMAP24XX_CORE_CLK_SRC_MASK;
		if (rate > low) {
			tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL_X2;
			mult = ((rate / 2) / 1000000);
			done_rate = CORE_CLK_SRC_DPLL_X2;
		} else {
			tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL;
			mult = (rate / 1000000);
			done_rate = CORE_CLK_SRC_DPLL;
		}
		/*
		 * NOTE(review): div is shifted into mult_mask and mult into
		 * div1_mask — this looks swapped relative to the names; it
		 * may be intentional given how dpll_data is populated for
		 * OMAP2xxx, but confirm against the dpll_data definition.
		 */
		tmpset.cm_clksel1_pll |= (div << __ffs(dd->mult_mask));
		tmpset.cm_clksel1_pll |= (mult << __ffs(dd->div1_mask));

		/* Worst case */
		tmpset.base_sdrc_rfr = SDRC_RFR_CTRL_BYPASS;

		if (rate == curr_prcm_set->xtal_speed)	/* If asking for 1-1 */
			bypass = 1;

		/* For omap2xxx_sdrc_init_params() */
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);

		/* Force dll lock mode */
		omap2_set_prcm(tmpset.cm_clksel1_pll, tmpset.base_sdrc_rfr,
			       bypass);

		/* Errata: ret dll entry state */
		omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked());
		omap2xxx_sdrc_reprogram(done_rate, 0);
	}

	return 0;
}

/**
 * omap2xxx_clkt_dpllcore_init - clk init function for dpll_ck
 * @clk: struct clk *dpll_ck
 *
 * Store a local copy of @clk in dpll_core_ck so other code can query
 * the core rate without having to clk_get(), which can sleep.  Must
 * only be called once.  No return value.  XXX If the clock
 * registration process is ever changed such that dpll_ck is no longer
 * statically defined, this code may need to change to increment some
 * kind of use count on dpll_ck.
 */
void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw)
{
	WARN(dpll_core_ck, "dpll_core_ck already set - should never happen");
	dpll_core_ck = to_clk_hw_omap(hw);
}
linux-master
arch/arm/mach-omap2/clkt2xxx_dpllcore.c
// SPDX-License-Identifier: GPL-2.0
/*
 * OMAP2xxx clockdomains
 *
 * Copyright (C) 2008-2009 Texas Instruments, Inc.
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Paul Walmsley, Jouni Högander
 *
 * This file contains clockdomains and clockdomain wakeup dependencies
 * for OMAP2xxx chips.  Some notes:
 *
 * A useful validation rule for struct clockdomain: Any clockdomain
 * referenced by a wkdep_srcs must have a dep_bit assigned.  So
 * wkdep_srcs are really just software-controllable dependencies.
 * Non-software-controllable dependencies do exist, but they are not
 * encoded below (yet).
 *
 * 24xx does not support programmable sleep dependencies (SLEEPDEP)
 *
 * The overly-specific dep_bit names are due to a bit name collision
 * with CM_FCLKEN_{DSP,IVA2}.  The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift
 * value are the same for all powerdomains: 2
 *
 * XXX should dep_bit be a mask, so we can test to see if it is 0 as a
 * sanity check?
 * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE
 */

/*
 * To-Do List
 * -> Port the Sleep/Wakeup dependencies for the domains
 *    from the Power domain framework
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "soc.h"
#include "clockdomain.h"
#include "prm2xxx_3xxx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-24xx.h"
#include "prm-regbits-24xx.h"

/*
 * Clockdomain dependencies for wkdeps
 *
 * XXX Hardware dependencies (e.g., dependencies that cannot be
 * changed in software) are not included here yet, but should be.
 */

/* Wakeup dependency source arrays */

/* 2430-specific possible wakeup dependencies */

/* 2430 PM_WKDEP_CORE: DSP, GFX, MPU, WKUP, MDM */
static struct clkdm_dep core_2430_wkdeps[] = {
	{ .clkdm_name = "dsp_clkdm" },
	{ .clkdm_name = "gfx_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ .clkdm_name = "mdm_clkdm" },
	{ NULL },
};

/* 2430 PM_WKDEP_MPU: CORE, DSP, WKUP, MDM */
static struct clkdm_dep mpu_2430_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "dsp_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ .clkdm_name = "mdm_clkdm" },
	{ NULL },
};

/* 2430 PM_WKDEP_MDM: CORE, MPU, WKUP */
static struct clkdm_dep mdm_2430_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/*
 * 2430-only clockdomains
 */

static struct clockdomain mpu_2430_clkdm = {
	.name		= "mpu_clkdm",
	.pwrdm		= { .name = "mpu_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= mpu_2430_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_MPU_MASK,
};

/* Another case of bit name collisions between several registers: EN_MDM */
static struct clockdomain mdm_clkdm = {
	.name		= "mdm_clkdm",
	.pwrdm		= { .name = "mdm_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP2430_PM_WKDEP_MPU_EN_MDM_SHIFT,
	.wkdep_srcs	= mdm_2430_wkdeps,
	.clktrctrl_mask = OMAP2430_AUTOSTATE_MDM_MASK,
};

/* dsp_24xx_wkdeps / gfx_24xx_wkdeps are shared arrays declared elsewhere */
static struct clockdomain dsp_2430_clkdm = {
	.name		= "dsp_clkdm",
	.pwrdm		= { .name = "dsp_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP24XX_PM_WKDEP_MPU_EN_DSP_SHIFT,
	.wkdep_srcs	= dsp_24xx_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSP_MASK,
};

static struct clockdomain gfx_2430_clkdm = {
	.name		= "gfx_clkdm",
	.pwrdm		= { .name = "gfx_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= gfx_24xx_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_GFX_MASK,
};

/*
 * XXX add usecounting for clkdm dependencies, otherwise the presence
 * of a single dep bit for core_l3_24xx_clkdm and core_l4_24xx_clkdm
 * could cause trouble
 */
static struct clockdomain core_l3_2430_clkdm = {
	.name		= "core_l3_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP,
	.dep_bit	= OMAP24XX_EN_CORE_SHIFT,
	.wkdep_srcs	= core_2430_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L3_MASK,
};

/*
 * XXX add usecounting for clkdm dependencies, otherwise the presence
 * of a single dep bit for core_l3_24xx_clkdm and core_l4_24xx_clkdm
 * could cause trouble
 */
static struct clockdomain core_l4_2430_clkdm = {
	.name		= "core_l4_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP,
	.dep_bit	= OMAP24XX_EN_CORE_SHIFT,
	.wkdep_srcs	= core_2430_wkdeps,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L4_MASK,
};

static struct clockdomain dss_2430_clkdm = {
	.name		= "dss_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP,
	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSS_MASK,
};

/* NULL-terminated registration list consumed by clkdm_register_clkdms() */
static struct clockdomain *clockdomains_omap243x[] __initdata = {
	&wkup_common_clkdm,
	&mpu_2430_clkdm,
	&mdm_clkdm,
	&dsp_2430_clkdm,
	&gfx_2430_clkdm,
	&core_l3_2430_clkdm,
	&core_l4_2430_clkdm,
	&dss_2430_clkdm,
	NULL,
};

void __init omap243x_clockdomains_init(void)
{
	/* Guard against running on other OMAPs in multi-OMAP builds */
	if (!cpu_is_omap243x())
		return;

	clkdm_register_platform_funcs(&omap2_clkdm_operations);
	clkdm_register_clkdms(clockdomains_omap243x);
	clkdm_complete_init();
}
linux-master
arch/arm/mach-omap2/clockdomains2430_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 Nokia Corporation
 * Author: Paul Mundt <[email protected]>
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Modified from the original mach-omap/omap2/board-generic.c did by Paul
 * to support the OMAP2+ device tree boards with an unique board file.
 */
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/mod_devicetable.h>

#include <asm/setup.h>
#include <asm/mach/arch.h>
#include <asm/system_info.h>

#include "common.h"

/* Bus compatibles whose DT children get populated as platform devices */
static const struct of_device_id omap_dt_match_table[] __initconst = {
	{ .compatible = "simple-bus", },
	{ .compatible = "ti,omap-infra", },
	{ }
};

/* Shared .init_machine: apply legacy pdata quirks, register the SoC device */
static void __init __maybe_unused omap_generic_init(void)
{
	pdata_quirks_init(omap_dt_match_table);

	omap_soc_device_init();
}

/* Clocks are needed early, see drivers/clocksource for the rest */
static void __init __maybe_unused omap_init_time_of(void)
{
	omap_clk_init();
	timer_probe();
}

/* Used by am437x for ARM timer in non-SMP configurations */
#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
void tick_broadcast(const struct cpumask *mask)
{
}
#endif

#ifdef CONFIG_SOC_OMAP2420
static const char *const omap242x_boards_compat[] __initconst = {
	"ti,omap2420",
	NULL,
};

DT_MACHINE_START(OMAP242X_DT, "Generic OMAP2420 (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap242x_map_io,
	.init_early	= omap2420_init_early,
	.init_machine	= omap_generic_init,
	.init_time	= omap_init_time_of,
	.dt_compat	= omap242x_boards_compat,
	.restart	= omap2xxx_restart,
MACHINE_END
#endif

#ifdef CONFIG_SOC_OMAP2430
static const char *const omap243x_boards_compat[] __initconst = {
	"ti,omap2430",
	NULL,
};

DT_MACHINE_START(OMAP243X_DT, "Generic OMAP2430 (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap243x_map_io,
	.init_early	= omap2430_init_early,
	.init_machine	= omap_generic_init,
	.init_time	= omap_init_time_of,
	.dt_compat	= omap243x_boards_compat,
	.restart	= omap2xxx_restart,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_OMAP3
/* Some boards need board name for legacy userspace in /proc/cpuinfo */
static const char *const n900_boards_compat[] __initconst = {
	"nokia,omap3-n900",
	NULL,
};

/* Set system_rev from atags */
static void __init rx51_set_system_rev(const struct tag *tags)
{
	const struct tag *tag;

	/* Sanity check: the list must start with the ATAG_CORE marker */
	if (tags->hdr.tag != ATAG_CORE)
		return;

	for_each_tag(tag, tags) {
		if (tag->hdr.tag == ATAG_REVISION) {
			system_rev = tag->u.revision.rev;
			break;
		}
	}
}

/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags,
 * save them while the data is still not overwritten
 */
static void __init rx51_reserve(void)
{
	/* Bootloader places the ATAG list at this fixed physical offset */
	const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100);

	save_atags(tags);
	rx51_set_system_rev(tags);
	omap_reserve();
}

DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
	.reserve	= rx51_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap3430_init_early,
	.init_machine	= omap_generic_init,
	.init_late	= omap3_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= n900_boards_compat,
	.restart	= omap3xxx_restart,
MACHINE_END

/* Generic omap3 boards, most boards can use these */
static const char *const omap3_boards_compat[] __initconst = {
	"ti,omap3430",
	"ti,omap3",
	NULL,
};

DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap3430_init_early,
	.init_machine	= omap_generic_init,
	.init_late	= omap3_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= omap3_boards_compat,
	.restart	= omap3xxx_restart,
MACHINE_END

static const char *const omap36xx_boards_compat[] __initconst = {
	"ti,omap3630",
	"ti,omap36xx",
	NULL,
};

DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap3630_init_early,
	.init_machine	= omap_generic_init,
	.init_late	= omap3_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= omap36xx_boards_compat,
	.restart	= omap3xxx_restart,
MACHINE_END

static const char *const omap3_gp_boards_compat[] __initconst = {
	"ti,omap3-beagle",
	"timll,omap3-devkit8000",
	NULL,
};

DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap3430_init_early,
	.init_machine	= omap_generic_init,
	.init_late	= omap3_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= omap3_gp_boards_compat,
	.restart	= omap3xxx_restart,
MACHINE_END

static const char *const am3517_boards_compat[] __initconst = {
	"ti,am3517",
	NULL,
};

DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= am35xx_init_early,
	.init_machine	= omap_generic_init,
	.init_late	= omap3_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= am3517_boards_compat,
	.restart	= omap3xxx_restart,
MACHINE_END
#endif

#ifdef CONFIG_SOC_TI81XX
static const char *const ti814x_boards_compat[] __initconst = {
	"ti,dm8148",
	"ti,dm814",
	NULL,
};

DT_MACHINE_START(TI814X_DT, "Generic ti814x (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= ti81xx_map_io,
	.init_early	= ti814x_init_early,
	.init_machine	= omap_generic_init,
	.init_late	= ti81xx_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= ti814x_boards_compat,
	.restart	= ti81xx_restart,
MACHINE_END

static const char *const ti816x_boards_compat[] __initconst = {
	"ti,dm8168",
	"ti,dm816",
	NULL,
};

DT_MACHINE_START(TI816X_DT, "Generic ti816x (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= ti81xx_map_io,
	.init_early	= ti816x_init_early,
	.init_machine	= omap_generic_init,
	.init_late	= ti81xx_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= ti816x_boards_compat,
	.restart	= ti81xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_SOC_AM33XX
static const char *const am33xx_boards_compat[] __initconst = {
	"ti,am33xx",
	NULL,
};

DT_MACHINE_START(AM33XX_DT, "Generic AM33XX (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= am33xx_map_io,
	.init_early	= am33xx_init_early,
	.init_machine	= omap_generic_init,
	.init_late	= am33xx_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= am33xx_boards_compat,
	.restart	= am33xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_OMAP4
static const char *const omap4_boards_compat[] __initconst = {
	"ti,omap4460",
	"ti,omap4430",
	"ti,omap4",
	NULL,
};

DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
	/* PL310 L2 cache controller setup */
	.l2c_aux_val	= OMAP_L2C_AUX_CTRL,
	.l2c_aux_mask	= 0xcf9fffff,
	.l2c_write_sec	= omap4_l2c310_write_sec,
	.reserve	= omap_reserve,
	.smp		= smp_ops(omap4_smp_ops),
	.map_io		= omap4_map_io,
	.init_early	= omap4430_init_early,
	.init_irq	= omap_gic_of_init,
	.init_machine	= omap_generic_init,
	.init_late	= omap4430_init_late,
	.init_time	= omap_init_time_of,
	.dt_compat	= omap4_boards_compat,
	.restart	= omap44xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_SOC_OMAP5
static const char *const omap5_boards_compat[] __initconst = {
	"ti,omap5432",
	"ti,omap5430",
	"ti,omap5",
	NULL,
};

DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)")
#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
	.dma_zone_size	= SZ_2G,
#endif
	.reserve	= omap_reserve,
	.smp		= smp_ops(omap4_smp_ops),
	.map_io		= omap5_map_io,
	.init_early	= omap5_init_early,
	.init_irq	= omap_gic_of_init,
	.init_machine	= omap_generic_init,
	.init_late	= omap5_init_late,
	.init_time	= omap5_realtime_timer_init,
	.dt_compat	= omap5_boards_compat,
	.restart	= omap44xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_SOC_AM43XX
static const char *const am43_boards_compat[] __initconst = {
	"ti,am4372",
	"ti,am43",
	NULL,
};

DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
	.l2c_aux_val	= OMAP_L2C_AUX_CTRL,
	.l2c_aux_mask	= 0xcf9fffff,
	.l2c_write_sec	= omap4_l2c310_write_sec,
	.map_io		= am33xx_map_io,
	.init_early	= am43xx_init_early,
	.init_late	= am43xx_init_late,
	.init_irq	= omap_gic_of_init,
	.init_machine	= omap_generic_init,
	.init_time	= omap_init_time_of,
	.dt_compat	= am43_boards_compat,
	.restart	= omap44xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_SOC_DRA7XX
static const char *const dra74x_boards_compat[] __initconst = {
	"ti,dra762",
	"ti,am5728",
	"ti,am5726",
	"ti,dra742",
	"ti,dra7",
	NULL,
};

DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)")
#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
	.dma_zone_size	= SZ_2G,
#endif
	.reserve	= omap_reserve,
	.smp		= smp_ops(omap4_smp_ops),
	.map_io		= dra7xx_map_io,
	.init_early	= dra7xx_init_early,
	.init_late	= dra7xx_init_late,
	.init_irq	= omap_gic_of_init,
	.init_machine	= omap_generic_init,
	.init_time	= omap5_realtime_timer_init,
	.dt_compat	= dra74x_boards_compat,
	.restart	= omap44xx_restart,
MACHINE_END

static const char *const dra72x_boards_compat[] __initconst = {
	"ti,am5718",
	"ti,am5716",
	"ti,dra722",
	"ti,dra718",
	NULL,
};

DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)")
#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
	.dma_zone_size	= SZ_2G,
#endif
	.reserve	= omap_reserve,
	.map_io		= dra7xx_map_io,
	.init_early	= dra7xx_init_early,
	.init_late	= dra7xx_init_late,
	.init_irq	= omap_gic_of_init,
	.init_machine	= omap_generic_init,
	.init_time	= omap5_realtime_timer_init,
	.dt_compat	= dra72x_boards_compat,
	.restart	= omap44xx_restart,
MACHINE_END
#endif
linux-master
arch/arm/mach-omap2/board-generic.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-omap2/clock.c
 *
 * Copyright (C) 2005-2008 Texas Instruments, Inc.
 * Copyright (C) 2004-2010 Nokia Corporation
 *
 * Contacts:
 * Richard Woodruff <[email protected]>
 * Paul Walmsley
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/of_address.h>
#include <asm/cpu.h>

#include <trace/events/power.h>

#include "soc.h"
#include "clockdomain.h"
#include "clock.h"
#include "cm.h"
#include "cm2xxx.h"
#include "cm3xxx.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
#include "common.h"

/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
#define OMAP3430_DPLL_FINT_BAND1_MIN	750000
#define OMAP3430_DPLL_FINT_BAND1_MAX	2100000
#define OMAP3430_DPLL_FINT_BAND2_MIN	7500000
#define OMAP3430_DPLL_FINT_BAND2_MAX	21000000

/*
 * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx.
 * From device data manual section 4.3 "DPLL and DLL Specifications".
 */
#define OMAP3PLUS_DPLL_FINT_MIN		32000
#define OMAP3PLUS_DPLL_FINT_MAX		52000000

/* Low-level clockdomain/CM callbacks handed to the TI clock driver */
struct ti_clk_ll_ops omap_clk_ll_ops = {
	.clkdm_clk_enable = clkdm_clk_enable,
	.clkdm_clk_disable = clkdm_clk_disable,
	.clkdm_lookup = clkdm_lookup,
	.cm_wait_module_ready = omap_cm_wait_module_ready,
	.cm_split_idlest_reg = cm_split_idlest_reg,
};

/**
 * omap2_clk_setup_ll_ops - setup clock driver low-level ops
 *
 * Sets up clock driver low-level platform ops. These are needed
 * for register accesses and various other misc platform operations.
 * Returns 0 on success, -EBUSY if low level ops have been registered
 * already.
 */
int __init omap2_clk_setup_ll_ops(void)
{
	return ti_clk_setup_ll_ops(&omap_clk_ll_ops);
}

/*
 * OMAP2+ specific clock functions
 */

/**
 * ti_clk_init_features - init clock features struct for the SoC
 *
 * Initializes the clock features struct based on the SoC type.
 */
void __init ti_clk_init_features(void)
{
	struct ti_clk_features features = { 0 };

	/* Fint setup for DPLLs */
	if (cpu_is_omap3430()) {
		/* 3430 has two disjoint valid bands; record all four limits */
		features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
		features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
		features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX;
		features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN;
	} else {
		features.fint_min = OMAP3PLUS_DPLL_FINT_MIN;
		features.fint_max = OMAP3PLUS_DPLL_FINT_MAX;
	}

	/* Bypass value setup for DPLLs */
	if (cpu_is_omap24xx()) {
		features.dpll_bypass_vals |=
			(1 << OMAP2XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP2XXX_EN_DPLL_FRBYPASS);
	} else if (cpu_is_omap34xx()) {
		features.dpll_bypass_vals |=
			(1 << OMAP3XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP3XXX_EN_DPLL_FRBYPASS);
	} else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx() ||
		   soc_is_omap54xx() || soc_is_dra7xx()) {
		features.dpll_bypass_vals |=
			(1 << OMAP4XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP4XXX_EN_DPLL_FRBYPASS) |
			(1 << OMAP4XXX_EN_DPLL_MNBYPASS);
	}

	/* Jitter correction only available on OMAP343X */
	if (cpu_is_omap343x())
		features.flags |= TI_CLK_DPLL_HAS_FREQSEL;

	if (omap_type() == OMAP2_DEVICE_TYPE_GP)
		features.flags |= TI_CLK_DEVICE_TYPE_GP;

	/* Idlest value for interface clocks.
	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
	 * 34xx reverses this, just to keep us on our toes
	 * AM35xx uses both, depending on the module.
	 */
	if (cpu_is_omap24xx())
		features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL;
	else if (cpu_is_omap34xx())
		features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL;

	/* On OMAP3430 ES1.0, DPLL4 can't be re-programmed */
	if (omap_rev() == OMAP3430_REV_ES1_0)
		features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM;

	/* Errata I810 for omap5 / dra7 */
	if (soc_is_omap54xx() || soc_is_dra7xx())
		features.flags |= TI_CLK_ERRATA_I810;

	/* Hand the populated feature set to the TI clock driver */
	ti_clk_setup_features(&features);
}
linux-master
arch/arm/mach-omap2/clock.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP3 powerdomain definitions * * Copyright (C) 2007-2008, 2011 Texas Instruments, Inc. * Copyright (C) 2007-2011 Nokia Corporation * * Paul Walmsley, Jouni Högander */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/bug.h> #include "soc.h" #include "powerdomain.h" #include "powerdomains2xxx_3xxx_data.h" #include "prcm-common.h" #include "prm2xxx_3xxx.h" #include "prm-regbits-34xx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-34xx.h" /* * 34XX-specific powerdomains, dependencies */ /* * Powerdomains */ static struct powerdomain iva2_pwrdm = { .name = "iva2_pwrdm", .prcm_offs = OMAP3430_IVA2_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 4, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, [1] = PWRSTS_OFF_RET, [2] = PWRSTS_OFF_RET, [3] = PWRSTS_OFF_RET, }, .pwrsts_mem_on = { [0] = PWRSTS_ON, [1] = PWRSTS_ON, [2] = PWRSTS_OFF_ON, [3] = PWRSTS_ON, }, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain mpu_3xxx_pwrdm = { .name = "mpu_pwrdm", .prcm_offs = MPU_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .flags = PWRDM_HAS_MPU_QUIRK, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, }, .pwrsts_mem_on = { [0] = PWRSTS_OFF_ON, }, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain mpu_am35x_pwrdm = { .name = "mpu_pwrdm", .prcm_offs = MPU_MOD, .pwrsts = PWRSTS_ON, .pwrsts_logic_ret = PWRSTS_ON, .flags = PWRDM_HAS_MPU_QUIRK, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_ON, }, .pwrsts_mem_on = { [0] = PWRSTS_ON, }, .voltdm = { .name = "mpu_iva" }, }; /* * The USBTLL Save-and-Restore mechanism is broken on * 3430s up to ES3.0 and 3630ES1.0. Hence this feature * needs to be disabled on these chips. * Refer: 3430 errata ID i459 and 3630 errata ID i579 * * Note: setting the SAR flag could help for errata ID i478 * which applies to 3430 <= ES3.1, but since the SAR feature * is broken, do not use it. 
*/ static struct powerdomain core_3xxx_pre_es3_1_pwrdm = { .name = "core_pwrdm", .prcm_offs = CORE_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 2, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, /* MEM1RETSTATE */ [1] = PWRSTS_OFF_RET, /* MEM2RETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */ [1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain core_3xxx_es3_1_pwrdm = { .name = "core_pwrdm", .prcm_offs = CORE_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, /* * Setting the SAR flag for errata ID i478 which applies * to 3430 <= ES3.1 */ .flags = PWRDM_HAS_HDWR_SAR, /* for USBTLL only */ .banks = 2, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, /* MEM1RETSTATE */ [1] = PWRSTS_OFF_RET, /* MEM2RETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */ [1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain core_am35x_pwrdm = { .name = "core_pwrdm", .prcm_offs = CORE_MOD, .pwrsts = PWRSTS_ON, .pwrsts_logic_ret = PWRSTS_ON, .banks = 2, .pwrsts_mem_ret = { [0] = PWRSTS_ON, /* MEM1RETSTATE */ [1] = PWRSTS_ON, /* MEM2RETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEM1ONSTATE */ [1] = PWRSTS_ON, /* MEM2ONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain dss_pwrdm = { .name = "dss_pwrdm", .prcm_offs = OMAP3430_DSS_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain dss_am35x_pwrdm = { .name = "dss_pwrdm", .prcm_offs = OMAP3430_DSS_MOD, .pwrsts = PWRSTS_ON, .pwrsts_logic_ret = PWRSTS_ON, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_ON, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; /* * Although the 34XX 
TRM Rev K Table 4-371 notes that retention is a * possible SGX powerstate, the SGX device itself does not support * retention. */ static struct powerdomain sgx_pwrdm = { .name = "sgx_pwrdm", .prcm_offs = OMAP3430ES2_SGX_MOD, /* XXX This is accurate for 3430 SGX, but what about GFX? */ .pwrsts = PWRSTS_OFF_ON, .pwrsts_logic_ret = PWRSTS_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain sgx_am35x_pwrdm = { .name = "sgx_pwrdm", .prcm_offs = OMAP3430ES2_SGX_MOD, .pwrsts = PWRSTS_ON, .pwrsts_logic_ret = PWRSTS_ON, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_ON, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain cam_pwrdm = { .name = "cam_pwrdm", .prcm_offs = OMAP3430_CAM_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain per_pwrdm = { .name = "per_pwrdm", .prcm_offs = OMAP3430_PER_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain per_am35x_pwrdm = { .name = "per_pwrdm", .prcm_offs = OMAP3430_PER_MOD, .pwrsts = PWRSTS_ON, .pwrsts_logic_ret = PWRSTS_ON, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_ON, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain emu_pwrdm = { .name = "emu_pwrdm", .prcm_offs = OMAP3430_EMU_MOD, .voltdm = { .name = "core" }, }; static struct powerdomain neon_pwrdm = { .name = "neon_pwrdm", .prcm_offs = OMAP3430_NEON_MOD, .pwrsts = 
PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_RET, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain neon_am35x_pwrdm = { .name = "neon_pwrdm", .prcm_offs = OMAP3430_NEON_MOD, .pwrsts = PWRSTS_ON, .pwrsts_logic_ret = PWRSTS_ON, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain usbhost_pwrdm = { .name = "usbhost_pwrdm", .prcm_offs = OMAP3430ES2_USBHOST_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_RET, /* * REVISIT: Enabling usb host save and restore mechanism seems to * leave the usb host domain permanently in ACTIVE mode after * changing the usb host power domain state from OFF to active once. * Disabling for now. */ /*.flags = PWRDM_HAS_HDWR_SAR,*/ /* for USBHOST ctrlr only */ .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; static struct powerdomain dpll1_pwrdm = { .name = "dpll1_pwrdm", .prcm_offs = MPU_MOD, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain dpll2_pwrdm = { .name = "dpll2_pwrdm", .prcm_offs = OMAP3430_IVA2_MOD, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain dpll3_pwrdm = { .name = "dpll3_pwrdm", .prcm_offs = PLL_MOD, .voltdm = { .name = "core" }, }; static struct powerdomain dpll4_pwrdm = { .name = "dpll4_pwrdm", .prcm_offs = PLL_MOD, .voltdm = { .name = "core" }, }; static struct powerdomain dpll5_pwrdm = { .name = "dpll5_pwrdm", .prcm_offs = PLL_MOD, .voltdm = { .name = "core" }, }; static struct powerdomain alwon_81xx_pwrdm = { .name = "alwon_pwrdm", .prcm_offs = TI81XX_PRM_ALWON_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "core" }, }; static struct powerdomain device_81xx_pwrdm = { .name = "device_pwrdm", .prcm_offs = TI81XX_PRM_DEVICE_MOD, .voltdm = { .name = "core" }, }; static struct powerdomain gem_814x_pwrdm = { .name = "gem_pwrdm", .prcm_offs = TI814X_PRM_DSP_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "dsp" }, }; static struct powerdomain 
ivahd_814x_pwrdm = { .name = "ivahd_pwrdm", .prcm_offs = TI814X_PRM_HDVICP_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "iva" }, }; static struct powerdomain hdvpss_814x_pwrdm = { .name = "hdvpss_pwrdm", .prcm_offs = TI814X_PRM_HDVPSS_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "dsp" }, }; static struct powerdomain sgx_814x_pwrdm = { .name = "sgx_pwrdm", .prcm_offs = TI814X_PRM_GFX_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "core" }, }; static struct powerdomain isp_814x_pwrdm = { .name = "isp_pwrdm", .prcm_offs = TI814X_PRM_ISP_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "core" }, }; static struct powerdomain active_81xx_pwrdm = { .name = "active_pwrdm", .prcm_offs = TI816X_PRM_ACTIVE_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "core" }, }; static struct powerdomain default_81xx_pwrdm = { .name = "default_pwrdm", .prcm_offs = TI81XX_PRM_DEFAULT_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "core" }, }; static struct powerdomain ivahd0_816x_pwrdm = { .name = "ivahd0_pwrdm", .prcm_offs = TI816X_PRM_IVAHD0_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain ivahd1_816x_pwrdm = { .name = "ivahd1_pwrdm", .prcm_offs = TI816X_PRM_IVAHD1_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain ivahd2_816x_pwrdm = { .name = "ivahd2_pwrdm", .prcm_offs = TI816X_PRM_IVAHD2_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain sgx_816x_pwrdm = { .name = "sgx_pwrdm", .prcm_offs = TI816X_PRM_SGX_MOD, .pwrsts = PWRSTS_OFF_ON, .voltdm = { .name = "core" }, }; /* As powerdomains are added or removed above, this list must also be changed */ static struct powerdomain *powerdomains_omap3430_common[] __initdata = { &wkup_omap2_pwrdm, &iva2_pwrdm, &mpu_3xxx_pwrdm, &neon_pwrdm, &cam_pwrdm, &dss_pwrdm, &per_pwrdm, &emu_pwrdm, &dpll1_pwrdm, &dpll2_pwrdm, &dpll3_pwrdm, &dpll4_pwrdm, NULL }; static struct powerdomain *powerdomains_omap3430es1[] 
__initdata = { &gfx_omap2_pwrdm, &core_3xxx_pre_es3_1_pwrdm, NULL }; /* also includes 3630ES1.0 */ static struct powerdomain *powerdomains_omap3430es2_es3_0[] __initdata = { &core_3xxx_pre_es3_1_pwrdm, &sgx_pwrdm, &usbhost_pwrdm, &dpll5_pwrdm, NULL }; /* also includes 3630ES1.1+ */ static struct powerdomain *powerdomains_omap3430es3_1plus[] __initdata = { &core_3xxx_es3_1_pwrdm, &sgx_pwrdm, &usbhost_pwrdm, &dpll5_pwrdm, NULL }; static struct powerdomain *powerdomains_am35x[] __initdata = { &wkup_omap2_pwrdm, &mpu_am35x_pwrdm, &neon_am35x_pwrdm, &core_am35x_pwrdm, &sgx_am35x_pwrdm, &dss_am35x_pwrdm, &per_am35x_pwrdm, &emu_pwrdm, &dpll1_pwrdm, &dpll3_pwrdm, &dpll4_pwrdm, &dpll5_pwrdm, NULL }; static struct powerdomain *powerdomains_ti814x[] __initdata = { &alwon_81xx_pwrdm, &device_81xx_pwrdm, &active_81xx_pwrdm, &default_81xx_pwrdm, &gem_814x_pwrdm, &ivahd_814x_pwrdm, &hdvpss_814x_pwrdm, &sgx_814x_pwrdm, &isp_814x_pwrdm, NULL }; static struct powerdomain *powerdomains_ti816x[] __initdata = { &alwon_81xx_pwrdm, &device_81xx_pwrdm, &active_81xx_pwrdm, &default_81xx_pwrdm, &ivahd0_816x_pwrdm, &ivahd1_816x_pwrdm, &ivahd2_816x_pwrdm, &sgx_816x_pwrdm, NULL }; /* TI81XX specific ops */ #define TI81XX_PM_PWSTCTRL 0x0000 #define TI81XX_RM_RSTCTRL 0x0010 #define TI81XX_PM_PWSTST 0x0004 static int ti81xx_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) { omap2_prm_rmw_mod_reg_bits(OMAP_POWERSTATE_MASK, (pwrst << OMAP_POWERSTATE_SHIFT), pwrdm->prcm_offs, TI81XX_PM_PWSTCTRL); return 0; } static int ti81xx_pwrdm_read_next_pwrst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, TI81XX_PM_PWSTCTRL, OMAP_POWERSTATE_MASK); } static int ti81xx_pwrdm_read_pwrst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, (pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? 
TI81XX_RM_RSTCTRL : TI81XX_PM_PWSTST, OMAP_POWERSTATEST_MASK); } static int ti81xx_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm) { return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, (pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? TI81XX_RM_RSTCTRL : TI81XX_PM_PWSTST, OMAP3430_LOGICSTATEST_MASK); } static int ti81xx_pwrdm_wait_transition(struct powerdomain *pwrdm) { u32 c = 0; while ((omap2_prm_read_mod_reg(pwrdm->prcm_offs, (pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? TI81XX_RM_RSTCTRL : TI81XX_PM_PWSTST) & OMAP_INTRANSITION_MASK) && (c++ < PWRDM_TRANSITION_BAILOUT)) udelay(1); if (c > PWRDM_TRANSITION_BAILOUT) { pr_err("powerdomain: %s timeout waiting for transition\n", pwrdm->name); return -EAGAIN; } pr_debug("powerdomain: completed transition in %d loops\n", c); return 0; } /* For dm814x we need to fix up fix GFX pwstst and rstctrl reg offsets */ static struct pwrdm_ops ti81xx_pwrdm_operations = { .pwrdm_set_next_pwrst = ti81xx_pwrdm_set_next_pwrst, .pwrdm_read_next_pwrst = ti81xx_pwrdm_read_next_pwrst, .pwrdm_read_pwrst = ti81xx_pwrdm_read_pwrst, .pwrdm_read_logic_pwrst = ti81xx_pwrdm_read_logic_pwrst, .pwrdm_wait_transition = ti81xx_pwrdm_wait_transition, }; void __init omap3xxx_powerdomains_init(void) { unsigned int rev; if (!cpu_is_omap34xx() && !cpu_is_ti81xx()) return; /* Only 81xx needs custom pwrdm_operations */ if (!cpu_is_ti81xx()) pwrdm_register_platform_funcs(&omap3_pwrdm_operations); rev = omap_rev(); if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) { pwrdm_register_pwrdms(powerdomains_am35x); } else if (rev == TI8148_REV_ES1_0 || rev == TI8148_REV_ES2_0 || rev == TI8148_REV_ES2_1) { pwrdm_register_platform_funcs(&ti81xx_pwrdm_operations); pwrdm_register_pwrdms(powerdomains_ti814x); } else if (rev == TI8168_REV_ES1_0 || rev == TI8168_REV_ES1_1 || rev == TI8168_REV_ES2_0 || rev == TI8168_REV_ES2_1) { pwrdm_register_platform_funcs(&ti81xx_pwrdm_operations); pwrdm_register_pwrdms(powerdomains_ti816x); } else { 
pwrdm_register_pwrdms(powerdomains_omap3430_common); switch (rev) { case OMAP3430_REV_ES1_0: pwrdm_register_pwrdms(powerdomains_omap3430es1); break; case OMAP3430_REV_ES2_0: case OMAP3430_REV_ES2_1: case OMAP3430_REV_ES3_0: case OMAP3630_REV_ES1_0: pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0); break; case OMAP3430_REV_ES3_1: case OMAP3430_REV_ES3_1_2: case OMAP3630_REV_ES1_1: case OMAP3630_REV_ES1_2: pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus); break; default: WARN(1, "OMAP3 powerdomain init: unknown chip type\n"); } } pwrdm_complete_init(); }
linux-master
arch/arm/mach-omap2/powerdomains3xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-omap2/mcbsp.c * * Copyright (C) 2008 Instituto Nokia de Tecnologia * Contact: Eduardo Valentin <[email protected]> * * Multichannel mode not supported. */ #include <linux/module.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/platform_data/asoc-ti-mcbsp.h> #include <linux/pm_runtime.h> #include <linux/omap-dma.h> #include "soc.h" #include "omap_device.h" #include "clock.h" /* * FIXME: Find a mechanism to enable/disable runtime the McBSP ICLK autoidle. * Sidetone needs non-gated ICLK and sidetone autoidle is broken. */ #include "cm3xxx.h" #include "cm-regbits-34xx.h" static int omap3_mcbsp_force_ick_on(struct clk *clk, bool force_on) { if (!clk) return 0; if (force_on) return omap2_clk_deny_idle(clk); else return omap2_clk_allow_idle(clk); } void __init omap3_mcbsp_init_pdata_callback( struct omap_mcbsp_platform_data *pdata) { if (!pdata) return; pdata->force_ick_on = omap3_mcbsp_force_ick_on; }
linux-master
arch/arm/mach-omap2/mcbsp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Legacy platform_data quirks * * Copyright (C) 2013 Texas Instruments */ #include <linux/clk.h> #include <linux/davinci_emac.h> #include <linux/gpio/machine.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/of_platform.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/power/smartreflex.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <linux/platform_data/pinctrl-single.h> #include <linux/platform_data/hsmmc-omap.h> #include <linux/platform_data/iommu-omap.h> #include <linux/platform_data/ti-sysc.h> #include <linux/platform_data/wkup_m3.h> #include <linux/platform_data/asoc-ti-mcbsp.h> #include <linux/platform_data/ti-prm.h> #include "clockdomain.h" #include "common.h" #include "common-board-devices.h" #include "control.h" #include "omap_device.h" #include "omap-secure.h" #include "soc.h" static struct omap_hsmmc_platform_data __maybe_unused mmc_pdata[2]; struct pdata_init { const char *compatible; void (*fn)(void); }; static struct of_dev_auxdata omap_auxdata_lookup[]; #ifdef CONFIG_MACH_NOKIA_N8X0 static void __init omap2420_n8x0_legacy_init(void) { omap_auxdata_lookup[0].platform_data = n8x0_legacy_init(); } #else #define omap2420_n8x0_legacy_init NULL #endif #ifdef CONFIG_ARCH_OMAP3 /* * Configures GPIOs 126, 127 and 129 to 1.8V mode instead of 3.0V * mode for MMC1 in case bootloader did not configure things. * Note that if the pins are used for MMC1, pbias-regulator * manages the IO voltage. 
*/ static void __init omap3_gpio126_127_129(void) { u32 reg; reg = omap_ctrl_readl(OMAP343X_CONTROL_PBIAS_LITE); reg &= ~OMAP343X_PBIASLITEVMODE1; reg |= OMAP343X_PBIASLITEPWRDNZ1; omap_ctrl_writel(reg, OMAP343X_CONTROL_PBIAS_LITE); if (cpu_is_omap3630()) { reg = omap_ctrl_readl(OMAP34XX_CONTROL_WKUP_CTRL); reg |= OMAP36XX_GPIO_IO_PWRDNZ; omap_ctrl_writel(reg, OMAP34XX_CONTROL_WKUP_CTRL); } } static void __init hsmmc2_internal_input_clk(void) { u32 reg; reg = omap_ctrl_readl(OMAP343X_CONTROL_DEVCONF1); reg |= OMAP2_MMCSDIO2ADPCLKISEL; omap_ctrl_writel(reg, OMAP343X_CONTROL_DEVCONF1); } #ifdef CONFIG_OMAP_HWMOD static struct iommu_platform_data omap3_iommu_pdata = { .reset_name = "mmu", .assert_reset = omap_device_assert_hardreset, .deassert_reset = omap_device_deassert_hardreset, .device_enable = omap_device_enable, .device_idle = omap_device_idle, }; static struct iommu_platform_data omap3_iommu_isp_pdata = { .device_enable = omap_device_enable, .device_idle = omap_device_idle, }; #endif static void __init omap3_sbc_t3x_usb_hub_init(char *hub_name, int idx) { struct gpio_desc *d; /* This asserts the RESET line (reverse polarity) */ d = gpiod_get_index(NULL, "reset", idx, GPIOD_OUT_HIGH); if (IS_ERR(d)) { pr_err("Unable to get T3x USB reset GPIO descriptor\n"); return; } gpiod_set_consumer_name(d, hub_name); gpiod_export(d, 0); udelay(10); /* De-assert RESET */ gpiod_set_value(d, 0); msleep(1); } static struct gpiod_lookup_table omap3_sbc_t3x_usb_gpio_table = { .dev_id = NULL, .table = { GPIO_LOOKUP_IDX("gpio-160-175", 7, "reset", 0, GPIO_ACTIVE_LOW), { } }, }; static void __init omap3_sbc_t3730_legacy_init(void) { gpiod_add_lookup_table(&omap3_sbc_t3x_usb_gpio_table); omap3_sbc_t3x_usb_hub_init("sb-t35 usb hub", 0); } static void __init omap3_sbc_t3530_legacy_init(void) { gpiod_add_lookup_table(&omap3_sbc_t3x_usb_gpio_table); omap3_sbc_t3x_usb_hub_init("sb-t35 usb hub", 0); } static void __init omap3_evm_legacy_init(void) { hsmmc2_internal_input_clk(); } static 
void am35xx_enable_emac_int(void) { u32 v; v = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); v |= (AM35XX_CPGMAC_C0_RX_PULSE_CLR | AM35XX_CPGMAC_C0_TX_PULSE_CLR | AM35XX_CPGMAC_C0_MISC_PULSE_CLR | AM35XX_CPGMAC_C0_RX_THRESH_CLR); omap_ctrl_writel(v, AM35XX_CONTROL_LVL_INTR_CLEAR); omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); /* OCP barrier */ } static void am35xx_disable_emac_int(void) { u32 v; v = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); v |= (AM35XX_CPGMAC_C0_RX_PULSE_CLR | AM35XX_CPGMAC_C0_TX_PULSE_CLR); omap_ctrl_writel(v, AM35XX_CONTROL_LVL_INTR_CLEAR); omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); /* OCP barrier */ } static struct emac_platform_data am35xx_emac_pdata = { .interrupt_enable = am35xx_enable_emac_int, .interrupt_disable = am35xx_disable_emac_int, }; static void __init am35xx_emac_reset(void) { u32 v; v = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); v &= ~AM35XX_CPGMACSS_SW_RST; omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET); omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */ } static struct gpiod_lookup_table cm_t3517_wlan_gpio_table = { .dev_id = NULL, .table = { GPIO_LOOKUP("gpio-48-53", 8, "power", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("gpio-0-15", 4, "noe", GPIO_ACTIVE_HIGH), { } }, }; static void __init omap3_sbc_t3517_wifi_init(void) { struct gpio_desc *d; gpiod_add_lookup_table(&cm_t3517_wlan_gpio_table); /* This asserts the RESET line (reverse polarity) */ d = gpiod_get(NULL, "power", GPIOD_OUT_HIGH); if (IS_ERR(d)) { pr_err("Unable to get CM T3517 WLAN power GPIO descriptor\n"); } else { gpiod_set_consumer_name(d, "wlan pwr"); gpiod_export(d, 0); } d = gpiod_get(NULL, "noe", GPIOD_OUT_HIGH); if (IS_ERR(d)) { pr_err("Unable to get CM T3517 WLAN XCVR NOE GPIO descriptor\n"); } else { gpiod_set_consumer_name(d, "xcvr noe"); gpiod_export(d, 0); } msleep(100); gpiod_set_value(d, 0); } static struct gpiod_lookup_table omap3_sbc_t3517_usb_gpio_table = { .dev_id = NULL, .table = { GPIO_LOOKUP_IDX("gpio-144-159", 8, "reset", 
0, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX("gpio-96-111", 2, "reset", 1, GPIO_ACTIVE_LOW), { } }, }; static void __init omap3_sbc_t3517_legacy_init(void) { gpiod_add_lookup_table(&omap3_sbc_t3517_usb_gpio_table); omap3_sbc_t3x_usb_hub_init("cm-t3517 usb hub", 0); omap3_sbc_t3x_usb_hub_init("sb-t35 usb hub", 1); am35xx_emac_reset(); hsmmc2_internal_input_clk(); omap3_sbc_t3517_wifi_init(); } static void __init am3517_evm_legacy_init(void) { am35xx_emac_reset(); } static void __init nokia_n900_legacy_init(void) { hsmmc2_internal_input_clk(); mmc_pdata[0].name = "external"; mmc_pdata[1].name = "internal"; if (omap_type() != OMAP2_DEVICE_TYPE_GP) { if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) { pr_info("RX-51: Enabling ARM errata 430973 workaround\n"); /* set IBE to 1 */ rx51_secure_update_aux_cr(BIT(6), 0); } else { pr_warn("RX-51: Not enabling ARM errata 430973 workaround\n"); pr_warn("Thumb binaries may crash randomly without this workaround\n"); } } } static void __init omap3_tao3530_legacy_init(void) { hsmmc2_internal_input_clk(); } static void __init omap3_logicpd_torpedo_init(void) { omap3_gpio126_127_129(); } /* omap3pandora legacy devices */ static struct platform_device pandora_backlight = { .name = "pandora-backlight", .id = -1, }; static void __init omap3_pandora_legacy_init(void) { platform_device_register(&pandora_backlight); } #endif /* CONFIG_ARCH_OMAP3 */ #ifdef CONFIG_SOC_DRA7XX static struct iommu_platform_data dra7_ipu1_dsp_iommu_pdata = { .set_pwrdm_constraint = omap_iommu_set_pwrdm_constraint, }; #endif static struct clockdomain *ti_sysc_find_one_clockdomain(struct clk *clk) { struct clk_hw *hw = __clk_get_hw(clk); struct clockdomain *clkdm = NULL; struct clk_hw_omap *hwclk; hwclk = to_clk_hw_omap(hw); if (!omap2_clk_is_hw_omap(hw)) return NULL; if (hwclk && hwclk->clkdm_name) clkdm = clkdm_lookup(hwclk->clkdm_name); return clkdm; } /** * ti_sysc_clkdm_init - find clockdomain based on clock * @fck: device functional clock * @ick: device interface clock * 
@dev: struct device * * Populate clockdomain based on clock. It is needed for * clkdm_deny_idle() and clkdm_allow_idle() for blocking clockdomain * clockdomain idle during reset, enable and idle. * * Note that we assume interconnect driver manages the clocks * and do not need to populate oh->_clk for dynamically * allocated modules. */ static int ti_sysc_clkdm_init(struct device *dev, struct clk *fck, struct clk *ick, struct ti_sysc_cookie *cookie) { if (!IS_ERR(fck)) cookie->clkdm = ti_sysc_find_one_clockdomain(fck); if (cookie->clkdm) return 0; if (!IS_ERR(ick)) cookie->clkdm = ti_sysc_find_one_clockdomain(ick); if (cookie->clkdm) return 0; return -ENODEV; } static void ti_sysc_clkdm_deny_idle(struct device *dev, const struct ti_sysc_cookie *cookie) { if (cookie->clkdm) clkdm_deny_idle(cookie->clkdm); } static void ti_sysc_clkdm_allow_idle(struct device *dev, const struct ti_sysc_cookie *cookie) { if (cookie->clkdm) clkdm_allow_idle(cookie->clkdm); } #ifdef CONFIG_OMAP_HWMOD static int ti_sysc_enable_module(struct device *dev, const struct ti_sysc_cookie *cookie) { if (!cookie->data) return -EINVAL; return omap_hwmod_enable(cookie->data); } static int ti_sysc_idle_module(struct device *dev, const struct ti_sysc_cookie *cookie) { if (!cookie->data) return -EINVAL; return omap_hwmod_idle(cookie->data); } static int ti_sysc_shutdown_module(struct device *dev, const struct ti_sysc_cookie *cookie) { if (!cookie->data) return -EINVAL; return omap_hwmod_shutdown(cookie->data); } #endif /* CONFIG_OMAP_HWMOD */ static bool ti_sysc_soc_type_gp(void) { return omap_type() == OMAP2_DEVICE_TYPE_GP; } static struct of_dev_auxdata omap_auxdata_lookup[]; static struct ti_sysc_platform_data ti_sysc_pdata = { .auxdata = omap_auxdata_lookup, .soc_type_gp = ti_sysc_soc_type_gp, .init_clockdomain = ti_sysc_clkdm_init, .clkdm_deny_idle = ti_sysc_clkdm_deny_idle, .clkdm_allow_idle = ti_sysc_clkdm_allow_idle, #ifdef CONFIG_OMAP_HWMOD .init_module = omap_hwmod_init_module, .enable_module 
= ti_sysc_enable_module, .idle_module = ti_sysc_idle_module, .shutdown_module = ti_sysc_shutdown_module, #endif }; static struct pcs_pdata pcs_pdata; void omap_pcs_legacy_init(int irq, void (*rearm)(void)) { pcs_pdata.irq = irq; pcs_pdata.rearm = rearm; } static struct ti_prm_platform_data ti_prm_pdata = { .clkdm_deny_idle = clkdm_deny_idle, .clkdm_allow_idle = clkdm_allow_idle, .clkdm_lookup = clkdm_lookup, }; #if defined(CONFIG_ARCH_OMAP3) && IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP) static struct omap_mcbsp_platform_data mcbsp_pdata; static void __init omap3_mcbsp_init(void) { omap3_mcbsp_init_pdata_callback(&mcbsp_pdata); } #else static void __init omap3_mcbsp_init(void) {} #endif /* * Few boards still need auxdata populated before we populate * the dev entries in of_platform_populate(). */ static struct pdata_init auxdata_quirks[] __initdata = { #ifdef CONFIG_SOC_OMAP2420 { "nokia,n800", omap2420_n8x0_legacy_init, }, { "nokia,n810", omap2420_n8x0_legacy_init, }, { "nokia,n810-wimax", omap2420_n8x0_legacy_init, }, #endif { /* sentinel */ }, }; struct omap_sr_data __maybe_unused omap_sr_pdata[OMAP_SR_NR]; static struct of_dev_auxdata omap_auxdata_lookup[] = { #ifdef CONFIG_MACH_NOKIA_N8X0 OF_DEV_AUXDATA("ti,omap2420-mmc", 0x4809c000, "mmci-omap.0", NULL), OF_DEV_AUXDATA("menelaus", 0x72, "1-0072", &n8x0_menelaus_platform_data), #endif #ifdef CONFIG_ARCH_OMAP3 OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu", &omap3_iommu_pdata), OF_DEV_AUXDATA("ti,omap2-iommu", 0x480bd400, "480bd400.mmu", &omap3_iommu_isp_pdata), OF_DEV_AUXDATA("ti,omap3-smartreflex-core", 0x480cb000, "480cb000.smartreflex", &omap_sr_pdata[OMAP_SR_CORE]), OF_DEV_AUXDATA("ti,omap3-smartreflex-mpu-iva", 0x480c9000, "480c9000.smartreflex", &omap_sr_pdata[OMAP_SR_MPU]), OF_DEV_AUXDATA("ti,omap3-hsmmc", 0x4809c000, "4809c000.mmc", &mmc_pdata[0]), OF_DEV_AUXDATA("ti,omap3-hsmmc", 0x480b4000, "480b4000.mmc", &mmc_pdata[1]), /* Only on am3517 */ OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, 
"davinci_mdio.0", NULL), OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0", &am35xx_emac_pdata), OF_DEV_AUXDATA("nokia,n900-rom-rng", 0, NULL, rx51_secure_rng_call), /* McBSP modules with sidetone core */ #if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP) OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata), OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49024000, "49024000.mcbsp", &mcbsp_pdata), #endif #endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) OF_DEV_AUXDATA("ti,omap4-smartreflex-iva", 0x4a0db000, "4a0db000.smartreflex", &omap_sr_pdata[OMAP_SR_IVA]), OF_DEV_AUXDATA("ti,omap4-smartreflex-core", 0x4a0dd000, "4a0dd000.smartreflex", &omap_sr_pdata[OMAP_SR_CORE]), OF_DEV_AUXDATA("ti,omap4-smartreflex-mpu", 0x4a0d9000, "4a0d9000.smartreflex", &omap_sr_pdata[OMAP_SR_MPU]), #endif #ifdef CONFIG_SOC_DRA7XX OF_DEV_AUXDATA("ti,dra7-dsp-iommu", 0x40d01000, "40d01000.mmu", &dra7_ipu1_dsp_iommu_pdata), OF_DEV_AUXDATA("ti,dra7-dsp-iommu", 0x41501000, "41501000.mmu", &dra7_ipu1_dsp_iommu_pdata), OF_DEV_AUXDATA("ti,dra7-iommu", 0x58882000, "58882000.mmu", &dra7_ipu1_dsp_iommu_pdata), #endif /* Common auxdata */ OF_DEV_AUXDATA("simple-pm-bus", 0, NULL, omap_auxdata_lookup), OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata), OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata), OF_DEV_AUXDATA("ti,omap-prm-inst", 0, NULL, &ti_prm_pdata), OF_DEV_AUXDATA("ti,omap-sdma", 0, NULL, &dma_plat_info), { /* sentinel */ }, }; /* * Few boards still need to initialize some legacy devices with * platform data until the drivers support device tree. 
*/ static struct pdata_init pdata_quirks[] __initdata = { #ifdef CONFIG_ARCH_OMAP3 { "compulab,omap3-sbc-t3517", omap3_sbc_t3517_legacy_init, }, { "compulab,omap3-sbc-t3530", omap3_sbc_t3530_legacy_init, }, { "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, }, { "nokia,omap3-n900", nokia_n900_legacy_init, }, { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, { "logicpd,dm3730-torpedo-devkit", omap3_logicpd_torpedo_init, }, { "ti,omap3-evm-37xx", omap3_evm_legacy_init, }, { "ti,am3517-evm", am3517_evm_legacy_init, }, { "technexion,omap3-tao3530", omap3_tao3530_legacy_init, }, { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, }, { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, }, #endif { /* sentinel */ }, }; static void pdata_quirks_check(struct pdata_init *quirks) { while (quirks->compatible) { if (of_machine_is_compatible(quirks->compatible)) { if (quirks->fn) quirks->fn(); } quirks++; } } static const char * const pdata_quirks_init_nodes[] = { "prcm", "prm", }; static void __init pdata_quirks_init_clocks(const struct of_device_id *omap_dt_match_table) { struct device_node *np; int i; for (i = 0; i < ARRAY_SIZE(pdata_quirks_init_nodes); i++) { np = of_find_node_by_name(NULL, pdata_quirks_init_nodes[i]); if (!np) continue; of_platform_populate(np, omap_dt_match_table, omap_auxdata_lookup, NULL); of_node_put(np); } } void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table) { /* * We still need this for omap2420 and omap3 PM to work, others are * using drivers/misc/sram.c already. */ if (of_machine_is_compatible("ti,omap2420") || of_machine_is_compatible("ti,omap3")) omap_sdrc_init(NULL, NULL); if (of_machine_is_compatible("ti,omap3")) omap3_mcbsp_init(); pdata_quirks_check(auxdata_quirks); pdata_quirks_init_clocks(omap_dt_match_table); of_platform_populate(NULL, omap_dt_match_table, omap_auxdata_lookup, NULL); pdata_quirks_check(pdata_quirks); }
linux-master
arch/arm/mach-omap2/pdata-quirks.c
// SPDX-License-Identifier: GPL-2.0-only /* * omap2-restart.c - code common to all OMAP2xxx machines. * * Copyright (C) 2012 Texas Instruments * Paul Walmsley */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/io.h> #include "soc.h" #include "common.h" #include "prm.h" /* * reset_virt_prcm_set_ck, reset_sys_ck: pointers to the virt_prcm_set * clock and the sys_ck. Used during the reset process */ static struct clk *reset_virt_prcm_set_ck, *reset_sys_ck; /* Reboot handling */ /** * omap2xxx_restart - Set DPLL to bypass mode for reboot to work * * Set the DPLL to bypass so that reboot completes successfully. No * return value. */ void omap2xxx_restart(enum reboot_mode mode, const char *cmd) { u32 rate; rate = clk_get_rate(reset_sys_ck); clk_set_rate(reset_virt_prcm_set_ck, rate); /* XXX Should save the cmd argument for use after the reboot */ omap_prm_reset_system(); } /** * omap2xxx_common_look_up_clks_for_reset - look up clocks needed for restart * * Some clocks need to be looked up in advance for the SoC restart * operation to work - see omap2xxx_restart(). Returns -EINVAL upon * error or 0 upon success. */ static int __init omap2xxx_common_look_up_clks_for_reset(void) { reset_virt_prcm_set_ck = clk_get(NULL, "virt_prcm_set"); if (IS_ERR(reset_virt_prcm_set_ck)) return -EINVAL; reset_sys_ck = clk_get(NULL, "sys_ck"); if (IS_ERR(reset_sys_ck)) return -EINVAL; return 0; } omap_postcore_initcall(omap2xxx_common_look_up_clks_for_reset);
linux-master
arch/arm/mach-omap2/omap2-restart.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP Power Management debug routines * * Copyright (C) 2005 Texas Instruments, Inc. * Copyright (C) 2006-2008 Nokia Corporation * * Written by: * Richard Woodruff <[email protected]> * Tony Lindgren * Juha Yrjola * Amit Kucheria <[email protected]> * Igor Stoppa <[email protected]> * Jouni Hogander * * Based on pm.c for omap2 */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/slab.h> #include "clock.h" #include "powerdomain.h" #include "clockdomain.h" #include "soc.h" #include "cm2xxx_3xxx.h" #include "prm2xxx_3xxx.h" #include "pm.h" #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> #include <linux/seq_file.h> static int pm_dbg_init_done; static int pm_dbg_init(void); static const char pwrdm_state_names[][PWRDM_MAX_PWRSTS] = { "OFF", "RET", "INA", "ON" }; void pm_dbg_update_time(struct powerdomain *pwrdm, int prev) { s64 t; if (!pm_dbg_init_done) return ; /* Update timer for previous state */ t = sched_clock(); pwrdm->state_timer[prev] += t - pwrdm->timer; pwrdm->timer = t; } static int clkdm_dbg_show_counter(struct clockdomain *clkdm, void *user) { struct seq_file *s = (struct seq_file *)user; if (strcmp(clkdm->name, "emu_clkdm") == 0 || strcmp(clkdm->name, "wkup_clkdm") == 0 || strncmp(clkdm->name, "dpll", 4) == 0) return 0; seq_printf(s, "%s->%s (%d)\n", clkdm->name, clkdm->pwrdm.ptr->name, clkdm->usecount); return 0; } static int pwrdm_dbg_show_counter(struct powerdomain *pwrdm, void *user) { struct seq_file *s = (struct seq_file *)user; int i; if (strcmp(pwrdm->name, "emu_pwrdm") == 0 || strcmp(pwrdm->name, "wkup_pwrdm") == 0 || strncmp(pwrdm->name, "dpll", 4) == 0) return 0; if (pwrdm->state != pwrdm_read_pwrst(pwrdm)) printk(KERN_ERR "pwrdm state mismatch(%s) %d != %d\n", pwrdm->name, pwrdm->state, pwrdm_read_pwrst(pwrdm)); seq_printf(s, "%s (%s)", pwrdm->name, 
pwrdm_state_names[pwrdm->state]); for (i = 0; i < PWRDM_MAX_PWRSTS; i++) seq_printf(s, ",%s:%d", pwrdm_state_names[i], pwrdm->state_counter[i]); seq_printf(s, ",RET-LOGIC-OFF:%d", pwrdm->ret_logic_off_counter); for (i = 0; i < pwrdm->banks; i++) seq_printf(s, ",RET-MEMBANK%d-OFF:%d", i + 1, pwrdm->ret_mem_off_counter[i]); seq_putc(s, '\n'); return 0; } static int pwrdm_dbg_show_timer(struct powerdomain *pwrdm, void *user) { struct seq_file *s = (struct seq_file *)user; int i; if (strcmp(pwrdm->name, "emu_pwrdm") == 0 || strcmp(pwrdm->name, "wkup_pwrdm") == 0 || strncmp(pwrdm->name, "dpll", 4) == 0) return 0; pwrdm_state_switch(pwrdm); seq_printf(s, "%s (%s)", pwrdm->name, pwrdm_state_names[pwrdm->state]); for (i = 0; i < 4; i++) seq_printf(s, ",%s:%lld", pwrdm_state_names[i], pwrdm->state_timer[i]); seq_putc(s, '\n'); return 0; } static int pm_dbg_counters_show(struct seq_file *s, void *unused) { pwrdm_for_each(pwrdm_dbg_show_counter, s); clkdm_for_each(clkdm_dbg_show_counter, s); return 0; } DEFINE_SHOW_ATTRIBUTE(pm_dbg_counters); static int pm_dbg_timers_show(struct seq_file *s, void *unused) { pwrdm_for_each(pwrdm_dbg_show_timer, s); return 0; } DEFINE_SHOW_ATTRIBUTE(pm_dbg_timers); static int pwrdm_suspend_get(void *data, u64 *val) { int ret = -EINVAL; if (cpu_is_omap34xx()) ret = omap3_pm_get_suspend_state((struct powerdomain *)data); *val = ret; if (ret >= 0) return 0; return *val; } static int pwrdm_suspend_set(void *data, u64 val) { if (cpu_is_omap34xx()) return omap3_pm_set_suspend_state( (struct powerdomain *)data, (int)val); return -EINVAL; } DEFINE_DEBUGFS_ATTRIBUTE(pwrdm_suspend_fops, pwrdm_suspend_get, pwrdm_suspend_set, "%llu\n"); static int __init pwrdms_setup(struct powerdomain *pwrdm, void *dir) { int i; s64 t; struct dentry *d; t = sched_clock(); for (i = 0; i < 4; i++) pwrdm->state_timer[i] = 0; pwrdm->timer = t; if (strncmp(pwrdm->name, "dpll", 4) == 0) return 0; d = debugfs_create_dir(pwrdm->name, (struct dentry *)dir); 
debugfs_create_file("suspend", S_IRUGO|S_IWUSR, d, pwrdm, &pwrdm_suspend_fops); return 0; } static int option_get(void *data, u64 *val) { u32 *option = data; *val = *option; return 0; } static int option_set(void *data, u64 val) { u32 *option = data; *option = val; if (option == &enable_off_mode) { if (cpu_is_omap34xx()) omap3_pm_off_mode_enable(val); } return 0; } DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n"); static int __init pm_dbg_init(void) { struct dentry *d; if (pm_dbg_init_done) return 0; d = debugfs_create_dir("pm_debug", NULL); debugfs_create_file("count", 0444, d, NULL, &pm_dbg_counters_fops); debugfs_create_file("time", 0444, d, NULL, &pm_dbg_timers_fops); pwrdm_for_each(pwrdms_setup, (void *)d); debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d, &enable_off_mode, &pm_dbg_option_fops); pm_dbg_init_done = 1; return 0; } omap_arch_initcall(pm_dbg_init); #endif
linux-master
arch/arm/mach-omap2/pm-debug.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP3 Voltage Controller (VC) data * * Copyright (C) 2007, 2010 Texas Instruments, Inc. * Rajendra Nayak <[email protected]> * Lesly A M <[email protected]> * Thara Gopinath <[email protected]> * * Copyright (C) 2008, 2011 Nokia Corporation * Kalle Jokiniemi * Paul Walmsley */ #include <linux/io.h> #include <linux/err.h> #include <linux/init.h> #include "common.h" #include "prm-regbits-34xx.h" #include "voltage.h" #include "vc.h" /* * VC data common to 34xx/36xx chips * XXX This stuff presumably belongs in the vc3xxx.c or vc.c file. */ static struct omap_vc_common omap3_vc_common = { .bypass_val_reg = OMAP3_PRM_VC_BYPASS_VAL_OFFSET, .data_shift = OMAP3430_DATA_SHIFT, .slaveaddr_shift = OMAP3430_SLAVEADDR_SHIFT, .regaddr_shift = OMAP3430_REGADDR_SHIFT, .valid = OMAP3430_VALID_MASK, .cmd_on_shift = OMAP3430_VC_CMD_ON_SHIFT, .cmd_on_mask = OMAP3430_VC_CMD_ON_MASK, .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT, .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT, .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT, .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK, .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK, .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET, .i2c_mcode_mask = OMAP3430_MCODE_MASK, }; struct omap_vc_channel omap3_vc_mpu = { .flags = OMAP_VC_CHANNEL_DEFAULT, .common = &omap3_vc_common, .smps_sa_reg = OMAP3_PRM_VC_SMPS_SA_OFFSET, .smps_volra_reg = OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET, .smps_cmdra_reg = OMAP3_PRM_VC_SMPS_CMD_RA_OFFSET, .cfg_channel_reg = OMAP3_PRM_VC_CH_CONF_OFFSET, .cmdval_reg = OMAP3_PRM_VC_CMD_VAL_0_OFFSET, .smps_sa_mask = OMAP3430_PRM_VC_SMPS_SA_SA0_MASK, .smps_volra_mask = OMAP3430_VOLRA0_MASK, .smps_cmdra_mask = OMAP3430_CMDRA0_MASK, .cfg_channel_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA0_SHIFT, }; struct omap_vc_channel omap3_vc_core = { .common = &omap3_vc_common, .smps_sa_reg = OMAP3_PRM_VC_SMPS_SA_OFFSET, .smps_volra_reg = OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET, .smps_cmdra_reg = OMAP3_PRM_VC_SMPS_CMD_RA_OFFSET, 
.cfg_channel_reg = OMAP3_PRM_VC_CH_CONF_OFFSET, .cmdval_reg = OMAP3_PRM_VC_CMD_VAL_1_OFFSET, .smps_sa_mask = OMAP3430_PRM_VC_SMPS_SA_SA1_MASK, .smps_volra_mask = OMAP3430_VOLRA1_MASK, .smps_cmdra_mask = OMAP3430_CMDRA1_MASK, .cfg_channel_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA1_SHIFT, }; /* * Voltage levels for different operating modes: on, sleep, retention and off */ #define OMAP3_ON_VOLTAGE_UV 1200000 #define OMAP3_ONLP_VOLTAGE_UV 1000000 #define OMAP3_RET_VOLTAGE_UV 975000 #define OMAP3_OFF_VOLTAGE_UV 600000 struct omap_vc_param omap3_mpu_vc_data = { .on = OMAP3_ON_VOLTAGE_UV, .onlp = OMAP3_ONLP_VOLTAGE_UV, .ret = OMAP3_RET_VOLTAGE_UV, .off = OMAP3_OFF_VOLTAGE_UV, }; struct omap_vc_param omap3_core_vc_data = { .on = OMAP3_ON_VOLTAGE_UV, .onlp = OMAP3_ONLP_VOLTAGE_UV, .ret = OMAP3_RET_VOLTAGE_UV, .off = OMAP3_OFF_VOLTAGE_UV, };
linux-master
arch/arm/mach-omap2/vc3xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP3/OMAP4 Voltage Management Routines * * Author: Thara Gopinath <[email protected]> * * Copyright (C) 2007 Texas Instruments, Inc. * Rajendra Nayak <[email protected]> * Lesly A M <[email protected]> * * Copyright (C) 2008, 2011 Nokia Corporation * Kalle Jokiniemi * Paul Walmsley * * Copyright (C) 2010 Texas Instruments, Inc. * Thara Gopinath <[email protected]> */ #include <linux/delay.h> #include <linux/io.h> #include <linux/err.h> #include <linux/export.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/clk.h> #include "common.h" #include "prm-regbits-34xx.h" #include "prm-regbits-44xx.h" #include "prm44xx.h" #include "prcm44xx.h" #include "prminst44xx.h" #include "control.h" #include "voltage.h" #include "powerdomain.h" #include "vc.h" #include "vp.h" static LIST_HEAD(voltdm_list); /* Public functions */ /** * voltdm_get_voltage() - Gets the current non-auto-compensated voltage * @voltdm: pointer to the voltdm for which current voltage info is needed * * API to get the current non-auto-compensated voltage for a voltage domain. * Returns 0 in case of error else returns the current voltage. */ unsigned long voltdm_get_voltage(struct voltagedomain *voltdm) { if (!voltdm || IS_ERR(voltdm)) { pr_warn("%s: VDD specified does not exist!\n", __func__); return 0; } return voltdm->nominal_volt; } /** * voltdm_scale() - API to scale voltage of a particular voltage domain. * @voltdm: pointer to the voltage domain which is to be scaled. * @target_volt: The target voltage of the voltage domain * * This API should be called by the kernel to do the voltage scaling * for a particular voltage domain during DVFS. 
*/ static int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt) { int ret, i; unsigned long volt = 0; if (!voltdm || IS_ERR(voltdm)) { pr_warn("%s: VDD specified does not exist!\n", __func__); return -EINVAL; } if (!voltdm->scale) { pr_err("%s: No voltage scale API registered for vdd_%s\n", __func__, voltdm->name); return -ENODATA; } if (!voltdm->volt_data) { pr_err("%s: No voltage data defined for vdd_%s\n", __func__, voltdm->name); return -ENODATA; } /* Adjust voltage to the exact voltage from the OPP table */ for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) { if (voltdm->volt_data[i].volt_nominal >= target_volt) { volt = voltdm->volt_data[i].volt_nominal; break; } } if (!volt) { pr_warn("%s: not scaling. OPP voltage for %lu, not found.\n", __func__, target_volt); return -EINVAL; } ret = voltdm->scale(voltdm, volt); if (!ret) voltdm->nominal_volt = volt; return ret; } /** * voltdm_reset() - Resets the voltage of a particular voltage domain * to that of the current OPP. * @voltdm: pointer to the voltage domain whose voltage is to be reset. * * This API finds out the correct voltage the voltage domain is supposed * to be at and resets the voltage to that level. Should be used especially * while disabling any voltage compensation modules. */ void voltdm_reset(struct voltagedomain *voltdm) { unsigned long target_volt; if (!voltdm || IS_ERR(voltdm)) { pr_warn("%s: VDD specified does not exist!\n", __func__); return; } target_volt = voltdm_get_voltage(voltdm); if (!target_volt) { pr_err("%s: unable to find current voltage for vdd_%s\n", __func__, voltdm->name); return; } voltdm_scale(voltdm, target_volt); } /** * omap_voltage_get_volttable() - API to get the voltage table associated with a * particular voltage domain. 
* @voltdm: pointer to the VDD for which the voltage table is required * @volt_data: the voltage table for the particular vdd which is to be * populated by this API * * This API populates the voltage table associated with a VDD into the * passed parameter pointer. Returns the count of distinct voltages * supported by this vdd. * */ void omap_voltage_get_volttable(struct voltagedomain *voltdm, struct omap_volt_data **volt_data) { if (!voltdm || IS_ERR(voltdm)) { pr_warn("%s: VDD specified does not exist!\n", __func__); return; } *volt_data = voltdm->volt_data; } /** * omap_voltage_get_voltdata() - API to get the voltage table entry for a * particular voltage * @voltdm: pointer to the VDD whose voltage table has to be searched * @volt: the voltage to be searched in the voltage table * * This API searches through the voltage table for the required voltage * domain and tries to find a matching entry for the passed voltage volt. * If a matching entry is found volt_data is populated with that entry. * This API searches only through the non-compensated voltages int the * voltage table. * Returns pointer to the voltage table entry corresponding to volt on * success. Returns -ENODATA if no voltage table exisits for the passed voltage * domain or if there is no matching entry. 
*/ struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm, unsigned long volt) { int i; if (!voltdm || IS_ERR(voltdm)) { pr_warn("%s: VDD specified does not exist!\n", __func__); return ERR_PTR(-EINVAL); } if (!voltdm->volt_data) { pr_warn("%s: voltage table does not exist for vdd_%s\n", __func__, voltdm->name); return ERR_PTR(-ENODATA); } for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) { if (voltdm->volt_data[i].volt_nominal == volt) return &voltdm->volt_data[i]; } pr_notice("%s: Unable to match the current voltage with the voltage table for vdd_%s\n", __func__, voltdm->name); return ERR_PTR(-ENODATA); } /** * omap_voltage_register_pmic() - API to register PMIC specific data * @voltdm: pointer to the VDD for which the PMIC specific data is * to be registered * @pmic: the structure containing pmic info * * This API is to be called by the SOC/PMIC file to specify the * pmic specific info as present in omap_voltdm_pmic structure. */ int omap_voltage_register_pmic(struct voltagedomain *voltdm, struct omap_voltdm_pmic *pmic) { if (!voltdm || IS_ERR(voltdm)) { pr_warn("%s: VDD specified does not exist!\n", __func__); return -EINVAL; } voltdm->pmic = pmic; return 0; } /** * omap_voltage_late_init() - Init the various voltage parameters * * This API is to be called in the later stages of the * system boot to init the voltage controller and * voltage processors. 
*/ int __init omap_voltage_late_init(void) { struct voltagedomain *voltdm; if (list_empty(&voltdm_list)) { pr_err("%s: Voltage driver support not added\n", __func__); return -EINVAL; } list_for_each_entry(voltdm, &voltdm_list, node) { struct clk *sys_ck; if (!voltdm->scalable) continue; sys_ck = clk_get(NULL, voltdm->sys_clk.name); if (IS_ERR(sys_ck)) { pr_warn("%s: Could not get sys clk.\n", __func__); return -EINVAL; } voltdm->sys_clk.rate = clk_get_rate(sys_ck); WARN_ON(!voltdm->sys_clk.rate); clk_put(sys_ck); if (voltdm->vc) { voltdm->scale = omap_vc_bypass_scale; omap_vc_init_channel(voltdm); } if (voltdm->vp) { voltdm->scale = omap_vp_forceupdate_scale; omap_vp_init(voltdm); } } return 0; } static struct voltagedomain *_voltdm_lookup(const char *name) { struct voltagedomain *voltdm, *temp_voltdm; voltdm = NULL; list_for_each_entry(temp_voltdm, &voltdm_list, node) { if (!strcmp(name, temp_voltdm->name)) { voltdm = temp_voltdm; break; } } return voltdm; } static int _voltdm_register(struct voltagedomain *voltdm) { if (!voltdm || !voltdm->name) return -EINVAL; list_add(&voltdm->node, &voltdm_list); pr_debug("voltagedomain: registered %s\n", voltdm->name); return 0; } /** * voltdm_lookup - look up a voltagedomain by name, return a pointer * @name: name of voltagedomain * * Find a registered voltagedomain by its name @name. Returns a pointer * to the struct voltagedomain if found, or NULL otherwise. */ struct voltagedomain *voltdm_lookup(const char *name) { struct voltagedomain *voltdm ; if (!name) return NULL; voltdm = _voltdm_lookup(name); return voltdm; } /** * voltdm_init - set up the voltagedomain layer * @voltdm_list: array of struct voltagedomain pointers to register * * Loop through the array of voltagedomains @voltdm_list, registering all * that are available on the current CPU. If voltdm_list is supplied * and not null, all of the referenced voltagedomains will be * registered. No return value. 
*/ void voltdm_init(struct voltagedomain **voltdms) { struct voltagedomain **v; if (voltdms) { for (v = voltdms; *v; v++) _voltdm_register(*v); } }
linux-master
arch/arm/mach-omap2/voltage.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP2+ common Power & Reset Management (PRM) IP block functions * * Copyright (C) 2011 Texas Instruments, Inc. * Tero Kristo <[email protected]> * * For historical purposes, the API used to configure the PRM * interrupt handler refers to it as the "PRCM interrupt." The * underlying registers are located in the PRM on OMAP3/4. * * XXX This code should eventually be moved to a PRM driver. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/clk-provider.h> #include <linux/clk/ti.h> #include "soc.h" #include "prm2xxx_3xxx.h" #include "prm2xxx.h" #include "prm3xxx.h" #include "prm33xx.h" #include "prm44xx.h" #include "prm54xx.h" #include "prm7xx.h" #include "prcm43xx.h" #include "common.h" #include "clock.h" #include "cm.h" #include "control.h" /* * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs * XXX this is technically not needed, since * omap_prcm_register_chain_handler() could allocate this based on the * actual amount of memory needed for the SoC */ #define OMAP_PRCM_MAX_NR_PENDING_REG 2 /* * prcm_irq_chips: an array of all of the "generic IRQ chips" in use * by the PRCM interrupt handler code. There will be one 'chip' per * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have * one "chip" and OMAP4 will have two.) */ static struct irq_chip_generic **prcm_irq_chips; /* * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code * is currently running on. Defined and passed by initialization code * that calls omap_prcm_register_chain_handler(). 
 */
static struct omap_prcm_irq_setup *prcm_irq_setup;

/* prm_base: base virtual address of the PRM IP block */
struct omap_domain_base prm_base;

/* prm_features: PRM_HAS_* capability flags for the running SoC */
u16 prm_features;

/*
 * prm_ll_data: function pointers to SoC-specific implementations of
 * common PRM functions; points at the all-NULL null_prm_ll_data until
 * prm_register() installs the real table.
 */
static struct prm_ll_data null_prm_ll_data;
static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;

/* Private functions */

/*
 * Move priority events from events to priority_events array
 *
 * Each priority bit is copied into @priority_events and then XORed
 * out of @events, so the two arrays end up disjoint.
 */
static void omap_prcm_events_filter_priority(unsigned long *events,
					     unsigned long *priority_events)
{
	int i;

	for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
		priority_events[i] =
			events[i] & prcm_irq_setup->priority_mask[i];
		events[i] ^= priority_events[i];
	}
}

/*
 * PRCM Interrupt Handler
 *
 * This is a common handler for the OMAP PRCM interrupts. Pending
 * interrupts are detected by a call to prcm_pending_events and
 * dispatched accordingly. Clearing of the wakeup events should be
 * done by the SoC specific individual handlers.
 */
static void omap_prcm_irq_handler(struct irq_desc *desc)
{
	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virtirq;
	int nr_irq = prcm_irq_setup->nr_regs * 32;

	/*
	 * If we are suspended, mask all interrupts from PRCM level,
	 * this does not ack them, and they will be pending until we
	 * re-enable the interrupts, at which point the
	 * omap_prcm_irq_handler will be executed again.  The
	 * _save_and_clear_irqen() function must ensure that the PRM
	 * write to disable all IRQs has reached the PRM before
	 * returning, or spurious PRCM interrupts may occur during
	 * suspend.
	 */
	if (prcm_irq_setup->suspended) {
		prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
		prcm_irq_setup->suspend_save_flag = true;
	}

	/*
	 * Loop until all pending irqs are handled, since
	 * generic_handle_irq() can cause new irqs to come
	 */
	while (!prcm_irq_setup->suspended) {
		prcm_irq_setup->read_pending_irqs(pending);

		/* No bit set, then all IRQs are handled */
		if (find_first_bit(pending, nr_irq) >= nr_irq)
			break;

		omap_prcm_events_filter_priority(pending, priority_pending);

		/*
		 * Loop on all currently pending irqs so that new irqs
		 * cannot starve previously pending irqs
		 */

		/* Serve priority events first */
		for_each_set_bit(virtirq, priority_pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);

		/* Serve normal events next */
		for_each_set_bit(virtirq, pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
	}

	/* Ack/eoi are optional on the parent chip; unmask is mandatory */
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
	chip->irq_unmask(&desc->irq_data);

	prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
}

/* Public functions */

/**
 * omap_prcm_event_to_irq - given a PRCM event name, returns the
 * corresponding IRQ on which the handler should be registered
 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
 *
 * Returns the Linux internal IRQ ID corresponding to @name upon success,
 * or -ENOENT upon failure.
 */
int omap_prcm_event_to_irq(const char *name)
{
	int i;

	if (!prcm_irq_setup || !name)
		return -ENOENT;

	for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
		if (!strcmp(prcm_irq_setup->irqs[i].name, name))
			return prcm_irq_setup->base_irq +
				prcm_irq_setup->irqs[i].offset;

	return -ENOENT;
}

/**
 * omap_prcm_irq_cleanup - reverses memory allocated and other steps
 * done by omap_prcm_register_chain_handler()
 *
 * No return value.  Also called on the error path of
 * omap_prcm_register_chain_handler(), so it must tolerate
 * partially-initialized state.
 */
static void omap_prcm_irq_cleanup(void)
{
	unsigned int irq;
	int i;

	if (!prcm_irq_setup) {
		pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
		return;
	}

	if (prcm_irq_chips) {
		for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
			if (prcm_irq_chips[i])
				irq_remove_generic_chip(prcm_irq_chips[i],
					0xffffffff, 0, 0);
			prcm_irq_chips[i] = NULL;
		}
		kfree(prcm_irq_chips);
		prcm_irq_chips = NULL;
	}

	kfree(prcm_irq_setup->saved_mask);
	prcm_irq_setup->saved_mask = NULL;

	kfree(prcm_irq_setup->priority_mask);
	prcm_irq_setup->priority_mask = NULL;

	irq = prcm_irq_setup->irq;
	irq_set_chained_handler(irq, NULL);

	if (prcm_irq_setup->base_irq > 0)
		irq_free_descs(prcm_irq_setup->base_irq,
			prcm_irq_setup->nr_regs * 32);
	prcm_irq_setup->base_irq = 0;
}

/**
 * omap_prcm_irq_prepare - tell the PRCM IRQ handler a suspend has started
 *
 * Makes omap_prcm_irq_handler() mask (rather than dispatch) PRCM events.
 */
void omap_prcm_irq_prepare(void)
{
	prcm_irq_setup->suspended = true;
}

/**
 * omap_prcm_irq_complete - resume normal PRCM IRQ handling after suspend
 *
 * Clears the suspended flag and, if the handler saved and masked the
 * IRQ enables during suspend, restores them.
 */
void omap_prcm_irq_complete(void)
{
	prcm_irq_setup->suspended = false;

	/* If we have not saved the masks, do not attempt to restore */
	if (!prcm_irq_setup->suspend_save_flag)
		return;

	prcm_irq_setup->suspend_save_flag = false;

	/*
	 * Re-enable all masked PRCM irq sources, this causes the PRCM
	 * interrupt to fire immediately if the events were masked
	 * previously in the chain handler
	 */
	prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
}

/**
 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
 * handler based on provided parameters
 * @irq_setup: hardware data about the underlying PRM/PRCM
 *
 * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up
 * one generic IRQ chip per PRM interrupt status/enable register pair.
 * Returns 0 upon success, -EINVAL if called twice or if invalid
 * arguments are passed, or -ENOMEM on any other error.
 */
int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
{
	int nr_regs;
	u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
	int offset, i, irq;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	if (!irq_setup)
		return -EINVAL;

	nr_regs = irq_setup->nr_regs;

	if (prcm_irq_setup) {
		pr_err("PRCM: already initialized; won't reinitialize\n");
		return -EINVAL;
	}

	if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
		pr_err("PRCM: nr_regs too large\n");
		return -EINVAL;
	}

	/* Publish the setup early: the err path relies on it for cleanup */
	prcm_irq_setup = irq_setup;

	prcm_irq_chips = kcalloc(nr_regs, sizeof(void *), GFP_KERNEL);
	prcm_irq_setup->saved_mask = kcalloc(nr_regs, sizeof(u32),
		GFP_KERNEL);
	prcm_irq_setup->priority_mask = kcalloc(nr_regs, sizeof(u32),
		GFP_KERNEL);

	if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
	    !prcm_irq_setup->priority_mask)
		goto err;

	memset(mask, 0, sizeof(mask));

	/* Build per-register enable masks; offset is a bit index (32/reg) */
	for (i = 0; i < irq_setup->nr_irqs; i++) {
		offset = irq_setup->irqs[i].offset;
		mask[offset >> 5] |= 1 << (offset & 0x1f);
		if (irq_setup->irqs[i].priority)
			irq_setup->priority_mask[offset >> 5] |=
				1 << (offset & 0x1f);
	}

	irq = irq_setup->irq;
	irq_set_chained_handler(irq, omap_prcm_irq_handler);

	irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
		0);

	if (irq_setup->base_irq < 0) {
		pr_err("PRCM: failed to allocate irq descs: %d\n",
			irq_setup->base_irq);
		goto err;
	}

	/* One generic chip per IRQSTATUS/IRQENABLE register pair */
	for (i = 0; i < irq_setup->nr_regs; i++) {
		gc = irq_alloc_generic_chip("PRCM", 1,
			irq_setup->base_irq + i * 32, prm_base.va,
			handle_level_irq);

		if (!gc) {
			pr_err("PRCM: failed to allocate generic chip\n");
			goto err;
		}
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;

		/* Registers are 32 bits wide, hence the i * 4 stride */
		ct->regs.ack = irq_setup->ack + i * 4;
		ct->regs.mask = irq_setup->mask + i * 4;

		irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
		prcm_irq_chips[i] = gc;
	}

	irq = omap_prcm_event_to_irq("io");
	omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain);

	return 0;

err:
	omap_prcm_irq_cleanup();
	return -ENOMEM;
}

/**
 * prm_was_any_context_lost_old - was device context lost? (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Return 1 if any bits were set in the *_CONTEXT_* register
 * identified by (@part, @inst, @idx), which means that some context
 * was lost for that module; otherwise, return 0.  Also returns true
 * (and warns) when no SoC implementation is registered.  XXX
 * Deprecated; callers need to use a less-SoC-dependent way to
 * identify hardware IP blocks.
 */
bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
{
	bool ret = true;

	if (prm_ll_data->was_any_context_lost_old)
		ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);

	return ret;
}

/**
 * prm_clear_context_loss_flags_old - clear context loss flags (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Clear hardware context loss bits for the module identified by
 * (@part, @inst, @idx).  No return value.  XXX Deprecated; callers
 * need to use a less-SoC-dependent way to identify hardware IP
 * blocks.
 */
void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
{
	if (prm_ll_data->clear_context_loss_flags_old)
		prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
}

/**
 * omap_prm_assert_hardreset - assert hardreset for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 *
 * Asserts a hardware reset line for an IP block.  Delegates to the
 * registered SoC implementation; returns -EINVAL (with a one-time
 * warning) if none is registered.
 */
int omap_prm_assert_hardreset(u8 shift, u8 part, s16 prm_mod, u16 offset)
{
	if (!prm_ll_data->assert_hardreset) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->assert_hardreset(shift, part, prm_mod, offset);
}

/**
 * omap_prm_deassert_hardreset - deassert hardreset for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @st_shift: reset status bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 * @st_offset: status register offset
 *
 * Deasserts a hardware reset line for an IP block.  Delegates to the
 * registered SoC implementation; returns -EINVAL (with a one-time
 * warning) if none is registered.
 */
int omap_prm_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 prm_mod,
				u16 offset, u16 st_offset)
{
	if (!prm_ll_data->deassert_hardreset) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->deassert_hardreset(shift, st_shift, part, prm_mod,
					       offset, st_offset);
}

/**
 * omap_prm_is_hardreset_asserted - check the hardreset status for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 *
 * Checks if a hardware reset line for an IP block is enabled or not.
 * Returns -EINVAL (with a one-time warning) if no SoC implementation
 * is registered.
 */
int omap_prm_is_hardreset_asserted(u8 shift, u8 part, s16 prm_mod, u16 offset)
{
	if (!prm_ll_data->is_hardreset_asserted) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->is_hardreset_asserted(shift, part, prm_mod,
						  offset);
}

/**
 * omap_prm_reset_system - trigger global SW reset
 *
 * Triggers SoC specific global warm reset to reboot the device.
 * Does not return; spins (with cpu_relax()/wfe()) until the reset
 * takes effect.
 */
void omap_prm_reset_system(void)
{
	if (!prm_ll_data->reset_system) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return;
	}

	prm_ll_data->reset_system();

	/* Wait here until the warm reset actually happens */
	while (1) {
		cpu_relax();
		wfe();
	}
}

/**
 * omap_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt
 * @module: PRM module to clear wakeups from
 * @regs: register to clear
 * @wkst_mask: wkst bits to clear
 *
 * Clears any wakeup events for the module and register set defined.
 * Uses SoC specific implementation to do the actual wakeup status
 * clearing.  Returns -EINVAL (with a one-time warning) if no SoC
 * implementation is registered.
 */
int omap_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
{
	if (!prm_ll_data->clear_mod_irqs) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->clear_mod_irqs(module, regs, wkst_mask);
}

/**
 * omap_prm_vp_check_txdone - check voltage processor TX done status
 * @vp_id: voltage processor ID
 *
 * Checks if voltage processor transmission has been completed.
 * Returns non-zero if a transmission has completed, 0 otherwise
 * (including when no SoC implementation is registered).
 */
u32 omap_prm_vp_check_txdone(u8 vp_id)
{
	if (!prm_ll_data->vp_check_txdone) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return 0;
	}

	return prm_ll_data->vp_check_txdone(vp_id);
}

/**
 * omap_prm_vp_clear_txdone - clears voltage processor TX done status
 * @vp_id: voltage processor ID
 *
 * Clears the status bit for completed voltage processor transmission
 * returned by prm_vp_check_txdone.
 */
void omap_prm_vp_clear_txdone(u8 vp_id)
{
	if (!prm_ll_data->vp_clear_txdone) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return;
	}

	prm_ll_data->vp_clear_txdone(vp_id);
}

/**
 * prm_register - register per-SoC low-level data with the PRM
 * @pld: low-level per-SoC OMAP PRM data & function pointers to register
 *
 * Register per-SoC low-level OMAP PRM data and function pointers with
 * the OMAP PRM common interface.  The caller must keep the data
 * pointed to by @pld valid until it calls prm_unregister() and
 * it returns successfully. 
Returns 0 upon success, -EINVAL if @pld * is NULL, or -EEXIST if prm_register() has already been called * without an intervening prm_unregister(). */ int prm_register(struct prm_ll_data *pld) { if (!pld) return -EINVAL; if (prm_ll_data != &null_prm_ll_data) return -EEXIST; prm_ll_data = pld; return 0; } /** * prm_unregister - unregister per-SoC low-level data & function pointers * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister * * Unregister per-SoC low-level OMAP PRM data and function pointers * that were previously registered with prm_register(). The * caller may not destroy any of the data pointed to by @pld until * this function returns successfully. Returns 0 upon success, or * -EINVAL if @pld is NULL or if @pld does not match the struct * prm_ll_data * previously registered by prm_register(). */ int prm_unregister(struct prm_ll_data *pld) { if (!pld || prm_ll_data != pld) return -EINVAL; prm_ll_data = &null_prm_ll_data; return 0; } #ifdef CONFIG_ARCH_OMAP2 static struct omap_prcm_init_data omap2_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap2xxx_prm_init, }; #endif #ifdef CONFIG_ARCH_OMAP3 static struct omap_prcm_init_data omap3_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap3xxx_prm_init, /* * IVA2 offset is a negative value, must offset the prm_base * address by this to get it to positive */ .offset = -OMAP3430_IVA2_MOD, }; #endif #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_TI81XX) static struct omap_prcm_init_data am3_prm_data __initdata = { .index = TI_CLKM_PRM, .init = am33xx_prm_init, }; #endif #ifdef CONFIG_SOC_TI81XX static struct omap_prcm_init_data dm814_pllss_data __initdata = { .index = TI_CLKM_PLLSS, .init = am33xx_prm_init, }; #endif #ifdef CONFIG_ARCH_OMAP4 static struct omap_prcm_init_data omap4_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = OMAP4430_PRM_DEVICE_INST, .flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE, }; #endif #ifdef 
CONFIG_SOC_OMAP5 static struct omap_prcm_init_data omap5_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = OMAP54XX_PRM_DEVICE_INST, .flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE, }; #endif #ifdef CONFIG_SOC_DRA7XX static struct omap_prcm_init_data dra7_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = DRA7XX_PRM_DEVICE_INST, .flags = PRM_HAS_IO_WAKEUP, }; #endif #ifdef CONFIG_SOC_AM43XX static struct omap_prcm_init_data am4_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = AM43XX_PRM_DEVICE_INST, .flags = PRM_HAS_IO_WAKEUP, }; #endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) static struct omap_prcm_init_data scrm_data __initdata = { .index = TI_CLKM_SCRM, }; #endif static const struct of_device_id omap_prcm_dt_match_table[] __initconst = { #ifdef CONFIG_SOC_AM33XX { .compatible = "ti,am3-prcm", .data = &am3_prm_data }, #endif #ifdef CONFIG_SOC_AM43XX { .compatible = "ti,am4-prcm", .data = &am4_prm_data }, #endif #ifdef CONFIG_SOC_TI81XX { .compatible = "ti,dm814-prcm", .data = &am3_prm_data }, { .compatible = "ti,dm814-pllss", .data = &dm814_pllss_data }, { .compatible = "ti,dm816-prcm", .data = &am3_prm_data }, #endif #ifdef CONFIG_ARCH_OMAP2 { .compatible = "ti,omap2-prcm", .data = &omap2_prm_data }, #endif #ifdef CONFIG_ARCH_OMAP3 { .compatible = "ti,omap3-prm", .data = &omap3_prm_data }, #endif #ifdef CONFIG_ARCH_OMAP4 { .compatible = "ti,omap4-prm", .data = &omap4_prm_data }, { .compatible = "ti,omap4-scrm", .data = &scrm_data }, #endif #ifdef CONFIG_SOC_OMAP5 { .compatible = "ti,omap5-prm", .data = &omap5_prm_data }, { .compatible = "ti,omap5-scrm", .data = &scrm_data }, #endif #ifdef CONFIG_SOC_DRA7XX { .compatible = "ti,dra7-prm", .data = &dra7_prm_data }, #endif { } }; /** * omap2_prm_base_init - initialize iomappings for the PRM driver * * Detects and initializes the iomappings for the PRM driver, based * 
on the DT data. Returns 0 in success, negative error value * otherwise. */ static int __init omap2_prm_base_init(void) { struct device_node *np; const struct of_device_id *match; struct omap_prcm_init_data *data; struct resource res; int ret; for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) { data = (struct omap_prcm_init_data *)match->data; ret = of_address_to_resource(np, 0, &res); if (ret) { of_node_put(np); return ret; } data->mem = ioremap(res.start, resource_size(&res)); if (data->index == TI_CLKM_PRM) { prm_base.va = data->mem + data->offset; prm_base.pa = res.start + data->offset; } data->np = np; if (data->init) data->init(data); } return 0; } int __init omap2_prcm_base_init(void) { int ret; ret = omap2_prm_base_init(); if (ret) return ret; return omap2_cm_base_init(); } /** * omap_prcm_init - low level init for the PRCM drivers * * Initializes the low level clock infrastructure for PRCM drivers. * Returns 0 in success, negative error value in failure. */ int __init omap_prcm_init(void) { struct device_node *np; const struct of_device_id *match; const struct omap_prcm_init_data *data; int ret; for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) { data = match->data; ret = omap2_clk_provider_init(np, data->index, NULL, data->mem); if (ret) { of_node_put(np); return ret; } } omap_cm_init(); return 0; } static int __init prm_late_init(void) { if (prm_ll_data->late_init) return prm_ll_data->late_init(); return 0; } subsys_initcall(prm_late_init);
linux-master
arch/arm/mach-omap2/prm_common.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP2/3 System Control Module register access * * Copyright (C) 2007, 2012 Texas Instruments, Inc. * Copyright (C) 2007 Nokia Corporation * * Written by Paul Walmsley */ #undef DEBUG #include <linux/kernel.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #include <linux/cpu_pm.h> #include "soc.h" #include "iomap.h" #include "common.h" #include "cm-regbits-34xx.h" #include "prm-regbits-34xx.h" #include "prm3xxx.h" #include "cm3xxx.h" #include "sdrc.h" #include "pm.h" #include "control.h" #include "clock.h" /* Used by omap3_ctrl_save_padconf() */ #define START_PADCONF_SAVE 0x2 #define PADCONF_SAVE_DONE 0x1 static void __iomem *omap2_ctrl_base; static s16 omap2_ctrl_offset; #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) struct omap3_scratchpad { u32 boot_config_ptr; u32 public_restore_ptr; u32 secure_ram_restore_ptr; u32 sdrc_module_semaphore; u32 prcm_block_offset; u32 sdrc_block_offset; }; struct omap3_scratchpad_prcm_block { u32 prm_contents[2]; u32 cm_contents[11]; u32 prcm_block_size; }; struct omap3_scratchpad_sdrc_block { u16 sysconfig; u16 cs_cfg; u16 sharing; u16 err_type; u32 dll_a_ctrl; u32 dll_b_ctrl; u32 power; u32 cs_0; u32 mcfg_0; u16 mr_0; u16 emr_1_0; u16 emr_2_0; u16 emr_3_0; u32 actim_ctrla_0; u32 actim_ctrlb_0; u32 rfr_ctrl_0; u32 cs_1; u32 mcfg_1; u16 mr_1; u16 emr_1_1; u16 emr_2_1; u16 emr_3_1; u32 actim_ctrla_1; u32 actim_ctrlb_1; u32 rfr_ctrl_1; u16 dcdl_1_ctrl; u16 dcdl_2_ctrl; u32 flags; u32 block_size; }; void *omap3_secure_ram_storage; /* * This is used to store ARM registers in SDRAM before attempting * an MPU OFF. The save and restore happens from the SRAM sleep code. * The address is stored in scratchpad, so that it can be used * during the restore path. 
 */
u32 omap3_arm_context[128];

/*
 * Snapshot of the System Control Module registers saved across an
 * off-mode transition; filled by omap3_control_save_context() and
 * written back by omap3_control_restore_context().
 */
struct omap3_control_regs {
	u32 sysconfig;
	u32 devconf0;
	u32 mem_dftrw0;
	u32 mem_dftrw1;
	u32 msuspendmux_0;
	u32 msuspendmux_1;
	u32 msuspendmux_2;
	u32 msuspendmux_3;
	u32 msuspendmux_4;
	u32 msuspendmux_5;
	u32 sec_ctrl;
	u32 devconf1;
	u32 csirxfe;
	u32 iva2_bootaddr;
	u32 iva2_bootmod;
	u32 wkup_ctrl;
	u32 debobs_0;
	u32 debobs_1;
	u32 debobs_2;
	u32 debobs_3;
	u32 debobs_4;
	u32 debobs_5;
	u32 debobs_6;
	u32 debobs_7;
	u32 debobs_8;
	u32 prog_io0;
	u32 prog_io1;
	u32 dss_dpll_spreading;
	u32 core_dpll_spreading;
	u32 per_dpll_spreading;
	u32 usbhost_dpll_spreading;
	u32 pbias_lite;
	u32 temp_sensor;
	u32 sramldo4;
	u32 sramldo5;
	u32 csi;
	u32 padconf_sys_nirq;
};

static struct omap3_control_regs control_context;
#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */

/*
 * Sub-word SCM register accessors: all accesses go through 32-bit,
 * word-aligned reads/writes, with the byte/halfword lane selected by
 * the low offset bits.
 */
u8 omap_ctrl_readb(u16 offset)
{
	u32 val;
	u8 byte_offset = offset & 0x3;	/* byte lane within the word */

	val = omap_ctrl_readl(offset);

	return (val >> (byte_offset * 8)) & 0xff;
}

u16 omap_ctrl_readw(u16 offset)
{
	u32 val;
	u16 byte_offset = offset & 0x2;	/* halfword lane within the word */

	val = omap_ctrl_readl(offset);

	return (val >> (byte_offset * 8)) & 0xffff;
}

u32 omap_ctrl_readl(u16 offset)
{
	offset &= 0xfffc;	/* force 32-bit alignment */

	return readl_relaxed(omap2_ctrl_base + offset);
}

/* Read-modify-write: replace one byte lane of the containing word */
void omap_ctrl_writeb(u8 val, u16 offset)
{
	u32 tmp;
	u8 byte_offset = offset & 0x3;

	tmp = omap_ctrl_readl(offset);

	tmp &= 0xffffffff ^ (0xff << (byte_offset * 8));
	tmp |= val << (byte_offset * 8);

	omap_ctrl_writel(tmp, offset);
}

/* Read-modify-write: replace one halfword lane of the containing word */
void omap_ctrl_writew(u16 val, u16 offset)
{
	u32 tmp;
	u8 byte_offset = offset & 0x2;

	tmp = omap_ctrl_readl(offset);

	tmp &= 0xffffffff ^ (0xffff << (byte_offset * 8));
	tmp |= val << (byte_offset * 8);

	omap_ctrl_writel(tmp, offset);
}

void omap_ctrl_writel(u32 val, u16 offset)
{
	offset &= 0xfffc;	/* force 32-bit alignment */
	writel_relaxed(val, omap2_ctrl_base + offset);
}

#ifdef CONFIG_ARCH_OMAP3

/**
 * omap3_ctrl_write_boot_mode - set scratchpad boot mode for the next boot
 * @bootmode: 8-bit value to pass to some boot code
 *
 * Set the bootmode in the scratchpad RAM.  This is used after the
 * system restarts. 
Not sure what actually uses this - it may be the * bootloader, rather than the boot ROM - contrary to the preserved * comment below. No return value. */ void omap3_ctrl_write_boot_mode(u8 bootmode) { u32 l; l = ('B' << 24) | ('M' << 16) | bootmode; /* * Reserve the first word in scratchpad for communicating * with the boot ROM. A pointer to a data structure * describing the boot process can be stored there, * cf. OMAP34xx TRM, Initialization / Software Booting * Configuration. * * XXX This should use some omap_ctrl_writel()-type function */ writel_relaxed(l, OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD + 4)); } #endif #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) /* Populate the scratchpad structure with restore structure */ void omap3_save_scratchpad_contents(void) { void __iomem *scratchpad_address; u32 arm_context_addr; struct omap3_scratchpad scratchpad_contents; struct omap3_scratchpad_prcm_block prcm_block_contents; struct omap3_scratchpad_sdrc_block sdrc_block_contents; /* * Populate the Scratchpad contents * * The "get_*restore_pointer" functions are used to provide a * physical restore address where the ROM code jumps while waking * up from MPU OFF/OSWR state. * The restore pointer is stored into the scratchpad. 
*/ scratchpad_contents.boot_config_ptr = 0x0; if (cpu_is_omap3630()) scratchpad_contents.public_restore_ptr = __pa_symbol(omap3_restore_3630); else if (omap_rev() != OMAP3430_REV_ES3_0 && omap_rev() != OMAP3430_REV_ES3_1 && omap_rev() != OMAP3430_REV_ES3_1_2) scratchpad_contents.public_restore_ptr = __pa_symbol(omap3_restore); else scratchpad_contents.public_restore_ptr = __pa_symbol(omap3_restore_es3); if (omap_type() == OMAP2_DEVICE_TYPE_GP) scratchpad_contents.secure_ram_restore_ptr = 0x0; else scratchpad_contents.secure_ram_restore_ptr = (u32) __pa(omap3_secure_ram_storage); scratchpad_contents.sdrc_module_semaphore = 0x0; scratchpad_contents.prcm_block_offset = 0x2C; scratchpad_contents.sdrc_block_offset = 0x64; /* Populate the PRCM block contents */ omap3_prm_save_scratchpad_contents(prcm_block_contents.prm_contents); omap3_cm_save_scratchpad_contents(prcm_block_contents.cm_contents); prcm_block_contents.prcm_block_size = 0x0; /* Populate the SDRC block contents */ sdrc_block_contents.sysconfig = (sdrc_read_reg(SDRC_SYSCONFIG) & 0xFFFF); sdrc_block_contents.cs_cfg = (sdrc_read_reg(SDRC_CS_CFG) & 0xFFFF); sdrc_block_contents.sharing = (sdrc_read_reg(SDRC_SHARING) & 0xFFFF); sdrc_block_contents.err_type = (sdrc_read_reg(SDRC_ERR_TYPE) & 0xFFFF); sdrc_block_contents.dll_a_ctrl = sdrc_read_reg(SDRC_DLLA_CTRL); sdrc_block_contents.dll_b_ctrl = 0x0; /* * Due to a OMAP3 errata (1.142), on EMU/HS devices SRDC should * be programed to issue automatic self refresh on timeout * of AUTO_CNT = 1 prior to any transition to OFF mode. 
*/ if ((omap_type() != OMAP2_DEVICE_TYPE_GP) && (omap_rev() >= OMAP3430_REV_ES3_0)) sdrc_block_contents.power = (sdrc_read_reg(SDRC_POWER) & ~(SDRC_POWER_AUTOCOUNT_MASK| SDRC_POWER_CLKCTRL_MASK)) | (1 << SDRC_POWER_AUTOCOUNT_SHIFT) | SDRC_SELF_REFRESH_ON_AUTOCOUNT; else sdrc_block_contents.power = sdrc_read_reg(SDRC_POWER); sdrc_block_contents.cs_0 = 0x0; sdrc_block_contents.mcfg_0 = sdrc_read_reg(SDRC_MCFG_0); sdrc_block_contents.mr_0 = (sdrc_read_reg(SDRC_MR_0) & 0xFFFF); sdrc_block_contents.emr_1_0 = 0x0; sdrc_block_contents.emr_2_0 = 0x0; sdrc_block_contents.emr_3_0 = 0x0; sdrc_block_contents.actim_ctrla_0 = sdrc_read_reg(SDRC_ACTIM_CTRL_A_0); sdrc_block_contents.actim_ctrlb_0 = sdrc_read_reg(SDRC_ACTIM_CTRL_B_0); sdrc_block_contents.rfr_ctrl_0 = sdrc_read_reg(SDRC_RFR_CTRL_0); sdrc_block_contents.cs_1 = 0x0; sdrc_block_contents.mcfg_1 = sdrc_read_reg(SDRC_MCFG_1); sdrc_block_contents.mr_1 = sdrc_read_reg(SDRC_MR_1) & 0xFFFF; sdrc_block_contents.emr_1_1 = 0x0; sdrc_block_contents.emr_2_1 = 0x0; sdrc_block_contents.emr_3_1 = 0x0; sdrc_block_contents.actim_ctrla_1 = sdrc_read_reg(SDRC_ACTIM_CTRL_A_1); sdrc_block_contents.actim_ctrlb_1 = sdrc_read_reg(SDRC_ACTIM_CTRL_B_1); sdrc_block_contents.rfr_ctrl_1 = sdrc_read_reg(SDRC_RFR_CTRL_1); sdrc_block_contents.dcdl_1_ctrl = 0x0; sdrc_block_contents.dcdl_2_ctrl = 0x0; sdrc_block_contents.flags = 0x0; sdrc_block_contents.block_size = 0x0; arm_context_addr = __pa_symbol(omap3_arm_context); /* Copy all the contents to the scratchpad location */ scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD); memcpy_toio(scratchpad_address, &scratchpad_contents, sizeof(scratchpad_contents)); /* Scratchpad contents being 32 bits, a divide by 4 done here */ memcpy_toio(scratchpad_address + scratchpad_contents.prcm_block_offset, &prcm_block_contents, sizeof(prcm_block_contents)); memcpy_toio(scratchpad_address + scratchpad_contents.sdrc_block_offset, &sdrc_block_contents, sizeof(sdrc_block_contents)); /* * Copies the address of 
the location in SDRAM where ARM
	 * registers get saved during a MPU OFF transition.
	 */
	memcpy_toio(scratchpad_address + scratchpad_contents.sdrc_block_offset +
		    sizeof(sdrc_block_contents), &arm_context_addr, 4);
}

/*
 * omap3_control_save_context - save System Control Module registers
 *
 * Snapshot the SCM register state into control_context so it can be
 * written back by omap3_control_restore_context() after the registers
 * have lost their contents (e.g. across an off-mode transition).
 */
void omap3_control_save_context(void)
{
	control_context.sysconfig = omap_ctrl_readl(OMAP2_CONTROL_SYSCONFIG);
	control_context.devconf0 = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
	control_context.mem_dftrw0 =
		omap_ctrl_readl(OMAP343X_CONTROL_MEM_DFTRW0);
	control_context.mem_dftrw1 =
		omap_ctrl_readl(OMAP343X_CONTROL_MEM_DFTRW1);
	control_context.msuspendmux_0 =
		omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_0);
	control_context.msuspendmux_1 =
		omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_1);
	control_context.msuspendmux_2 =
		omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_2);
	control_context.msuspendmux_3 =
		omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_3);
	control_context.msuspendmux_4 =
		omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_4);
	control_context.msuspendmux_5 =
		omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_5);
	control_context.sec_ctrl = omap_ctrl_readl(OMAP2_CONTROL_SEC_CTRL);
	control_context.devconf1 = omap_ctrl_readl(OMAP343X_CONTROL_DEVCONF1);
	control_context.csirxfe = omap_ctrl_readl(OMAP343X_CONTROL_CSIRXFE);
	control_context.iva2_bootaddr =
		omap_ctrl_readl(OMAP343X_CONTROL_IVA2_BOOTADDR);
	control_context.iva2_bootmod =
		omap_ctrl_readl(OMAP343X_CONTROL_IVA2_BOOTMOD);
	control_context.wkup_ctrl = omap_ctrl_readl(OMAP34XX_CONTROL_WKUP_CTRL);
	control_context.debobs_0 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(0));
	control_context.debobs_1 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(1));
	control_context.debobs_2 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(2));
	control_context.debobs_3 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(3));
	control_context.debobs_4 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(4));
	control_context.debobs_5 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(5));
	control_context.debobs_6 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(6));
	control_context.debobs_7 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(7));
	control_context.debobs_8 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(8));
	control_context.prog_io0 = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO0);
	control_context.prog_io1 = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO1);
	control_context.dss_dpll_spreading =
		omap_ctrl_readl(OMAP343X_CONTROL_DSS_DPLL_SPREADING);
	control_context.core_dpll_spreading =
		omap_ctrl_readl(OMAP343X_CONTROL_CORE_DPLL_SPREADING);
	control_context.per_dpll_spreading =
		omap_ctrl_readl(OMAP343X_CONTROL_PER_DPLL_SPREADING);
	control_context.usbhost_dpll_spreading =
		omap_ctrl_readl(OMAP343X_CONTROL_USBHOST_DPLL_SPREADING);
	control_context.pbias_lite =
		omap_ctrl_readl(OMAP343X_CONTROL_PBIAS_LITE);
	control_context.temp_sensor =
		omap_ctrl_readl(OMAP343X_CONTROL_TEMP_SENSOR);
	control_context.sramldo4 = omap_ctrl_readl(OMAP343X_CONTROL_SRAMLDO4);
	control_context.sramldo5 = omap_ctrl_readl(OMAP343X_CONTROL_SRAMLDO5);
	control_context.csi = omap_ctrl_readl(OMAP343X_CONTROL_CSI);
	control_context.padconf_sys_nirq =
		omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_SYSNIRQ);
}

/*
 * omap3_control_restore_context - restore System Control Module registers
 *
 * Write back the register values captured by omap3_control_save_context().
 * The write order mirrors the save order above.
 */
void omap3_control_restore_context(void)
{
	omap_ctrl_writel(control_context.sysconfig, OMAP2_CONTROL_SYSCONFIG);
	omap_ctrl_writel(control_context.devconf0, OMAP2_CONTROL_DEVCONF0);
	omap_ctrl_writel(control_context.mem_dftrw0,
			 OMAP343X_CONTROL_MEM_DFTRW0);
	omap_ctrl_writel(control_context.mem_dftrw1,
			 OMAP343X_CONTROL_MEM_DFTRW1);
	omap_ctrl_writel(control_context.msuspendmux_0,
			 OMAP2_CONTROL_MSUSPENDMUX_0);
	omap_ctrl_writel(control_context.msuspendmux_1,
			 OMAP2_CONTROL_MSUSPENDMUX_1);
	omap_ctrl_writel(control_context.msuspendmux_2,
			 OMAP2_CONTROL_MSUSPENDMUX_2);
	omap_ctrl_writel(control_context.msuspendmux_3,
			 OMAP2_CONTROL_MSUSPENDMUX_3);
	omap_ctrl_writel(control_context.msuspendmux_4,
			 OMAP2_CONTROL_MSUSPENDMUX_4);
	omap_ctrl_writel(control_context.msuspendmux_5,
			 OMAP2_CONTROL_MSUSPENDMUX_5);
	omap_ctrl_writel(control_context.sec_ctrl, OMAP2_CONTROL_SEC_CTRL);
	omap_ctrl_writel(control_context.devconf1, OMAP343X_CONTROL_DEVCONF1);
	omap_ctrl_writel(control_context.csirxfe, OMAP343X_CONTROL_CSIRXFE);
	omap_ctrl_writel(control_context.iva2_bootaddr,
			 OMAP343X_CONTROL_IVA2_BOOTADDR);
	omap_ctrl_writel(control_context.iva2_bootmod,
			 OMAP343X_CONTROL_IVA2_BOOTMOD);
	omap_ctrl_writel(control_context.wkup_ctrl, OMAP34XX_CONTROL_WKUP_CTRL);
	omap_ctrl_writel(control_context.debobs_0, OMAP343X_CONTROL_DEBOBS(0));
	omap_ctrl_writel(control_context.debobs_1, OMAP343X_CONTROL_DEBOBS(1));
	omap_ctrl_writel(control_context.debobs_2, OMAP343X_CONTROL_DEBOBS(2));
	omap_ctrl_writel(control_context.debobs_3, OMAP343X_CONTROL_DEBOBS(3));
	omap_ctrl_writel(control_context.debobs_4, OMAP343X_CONTROL_DEBOBS(4));
	omap_ctrl_writel(control_context.debobs_5, OMAP343X_CONTROL_DEBOBS(5));
	omap_ctrl_writel(control_context.debobs_6, OMAP343X_CONTROL_DEBOBS(6));
	omap_ctrl_writel(control_context.debobs_7, OMAP343X_CONTROL_DEBOBS(7));
	omap_ctrl_writel(control_context.debobs_8, OMAP343X_CONTROL_DEBOBS(8));
	omap_ctrl_writel(control_context.prog_io0, OMAP343X_CONTROL_PROG_IO0);
	omap_ctrl_writel(control_context.prog_io1, OMAP343X_CONTROL_PROG_IO1);
	omap_ctrl_writel(control_context.dss_dpll_spreading,
			 OMAP343X_CONTROL_DSS_DPLL_SPREADING);
	omap_ctrl_writel(control_context.core_dpll_spreading,
			 OMAP343X_CONTROL_CORE_DPLL_SPREADING);
	omap_ctrl_writel(control_context.per_dpll_spreading,
			 OMAP343X_CONTROL_PER_DPLL_SPREADING);
	omap_ctrl_writel(control_context.usbhost_dpll_spreading,
			 OMAP343X_CONTROL_USBHOST_DPLL_SPREADING);
	omap_ctrl_writel(control_context.pbias_lite,
			 OMAP343X_CONTROL_PBIAS_LITE);
	omap_ctrl_writel(control_context.temp_sensor,
			 OMAP343X_CONTROL_TEMP_SENSOR);
	omap_ctrl_writel(control_context.sramldo4, OMAP343X_CONTROL_SRAMLDO4);
	omap_ctrl_writel(control_context.sramldo5, OMAP343X_CONTROL_SRAMLDO5);
	omap_ctrl_writel(control_context.csi, OMAP343X_CONTROL_CSI);
	omap_ctrl_writel(control_context.padconf_sys_nirq,
			 OMAP343X_CONTROL_PADCONF_SYSNIRQ);
}

/*
 * omap3630_ctrl_disable_rta - write the RTA disable key on OMAP3630
 *
 * Writes OMAP36XX_RTA_DISABLE into the MEM_RTA_CTRL register; no-op on
 * any SoC other than OMAP3630.
 */
void omap3630_ctrl_disable_rta(void)
{
	if (!cpu_is_omap3630())
		return;
	omap_ctrl_writel(OMAP36XX_RTA_DISABLE, OMAP36XX_CONTROL_MEM_RTA_CTRL);
}

/**
 * omap3_ctrl_save_padconf - save padconf registers to scratchpad RAM
 *
 * Tell the SCM to start saving the padconf registers, then wait for
 * the process to complete.  Returns 0 unconditionally, although it
 * should also eventually be able to return -ETIMEDOUT, if the save
 * does not complete.
 *
 * XXX This function is missing a timeout. What should it be?
 */
int omap3_ctrl_save_padconf(void)
{
	u32 cpo;

	/* Save the padconf registers */
	cpo = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
	cpo |= START_PADCONF_SAVE;
	omap_ctrl_writel(cpo, OMAP343X_CONTROL_PADCONF_OFF);

	/* wait for the save to complete */
	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
		 & PADCONF_SAVE_DONE))
		udelay(1);

	return 0;
}

/**
 * omap3_ctrl_set_iva_bootmode_idle - sets the IVA2 bootmode to idle
 *
 * Sets the bootmode for IVA2 to idle. This is needed by the PM code to
 * force disable IVA2 so that it does not prevent any low-power states.
 */
static void __init omap3_ctrl_set_iva_bootmode_idle(void)
{
	omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
			 OMAP343X_CONTROL_IVA2_BOOTMOD);
}

/**
 * omap3_ctrl_setup_d2d_padconf - setup stacked modem pads for idle
 *
 * Sets up the pads controlling the stacked modem in such way that the
 * device can enter idle.
 */
static void __init omap3_ctrl_setup_d2d_padconf(void)
{
	u16 mask, padconf;

	/*
	 * In a stand alone OMAP3430 where there is not a stacked
	 * modem, the D2D Idle Ack and D2D MStandby pads must be pulled
	 * high.  Set CONTROL_PADCONF_SAD2D_IDLEACK and
	 * CONTROL_PADCONF_SAD2D_MSTDBY to have a pull up.
	 */
	mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);

	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
}

/**
 * omap3_ctrl_init - does static initializations for control module
 *
 * Initializes system control module. This sets up the sysconfig autoidle,
 * and sets up modem and iva2 so that they can be idled properly.
 */
void __init omap3_ctrl_init(void)
{
	omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

	omap3_ctrl_set_iva_bootmode_idle();

	omap3_ctrl_setup_d2d_padconf();
}
#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */

/*
 * Control-module register offsets saved/restored around AM43xx
 * rtc-ddr suspend (see the CPU PM notifier below).
 */
static unsigned long am43xx_control_reg_offsets[] = {
	AM33XX_CONTROL_SYSCONFIG_OFFSET,
	AM33XX_CONTROL_STATUS_OFFSET,
	AM43XX_CONTROL_MPU_L2_CTRL_OFFSET,
	AM33XX_CONTROL_CORE_SLDO_CTRL_OFFSET,
	AM33XX_CONTROL_MPU_SLDO_CTRL_OFFSET,
	AM33XX_CONTROL_CLK32KDIVRATIO_CTRL_OFFSET,
	AM33XX_CONTROL_BANDGAP_CTRL_OFFSET,
	AM33XX_CONTROL_BANDGAP_TRIM_OFFSET,
	AM33XX_CONTROL_PLL_CLKINPULOW_CTRL_OFFSET,
	AM33XX_CONTROL_MOSC_CTRL_OFFSET,
	AM33XX_CONTROL_DEEPSLEEP_CTRL_OFFSET,
	AM43XX_CONTROL_DISPLAY_PLL_SEL_OFFSET,
	AM33XX_CONTROL_INIT_PRIORITY_0_OFFSET,
	AM33XX_CONTROL_INIT_PRIORITY_1_OFFSET,
	AM33XX_CONTROL_TPTC_CFG_OFFSET,
	AM33XX_CONTROL_USB_CTRL0_OFFSET,
	AM33XX_CONTROL_USB_CTRL1_OFFSET,
	AM43XX_CONTROL_USB_CTRL2_OFFSET,
	AM43XX_CONTROL_GMII_SEL_OFFSET,
	AM43XX_CONTROL_MPUSS_CTRL_OFFSET,
	AM43XX_CONTROL_TIMER_CASCADE_CTRL_OFFSET,
	AM43XX_CONTROL_PWMSS_CTRL_OFFSET,
	AM33XX_CONTROL_MREQPRIO_0_OFFSET,
	AM33XX_CONTROL_MREQPRIO_1_OFFSET,
	AM33XX_CONTROL_HW_EVENT_SEL_GRP1_OFFSET,
	AM33XX_CONTROL_HW_EVENT_SEL_GRP2_OFFSET,
	AM33XX_CONTROL_HW_EVENT_SEL_GRP3_OFFSET,
	AM33XX_CONTROL_HW_EVENT_SEL_GRP4_OFFSET,
	AM33XX_CONTROL_SMRT_CTRL_OFFSET,
	AM33XX_CONTROL_MPUSS_HW_DEBUG_SEL_OFFSET,
	AM43XX_CONTROL_CQDETECT_STS_OFFSET,
	AM43XX_CONTROL_CQDETECT_STS2_OFFSET,
	AM43XX_CONTROL_VTP_CTRL_OFFSET,
	AM33XX_CONTROL_VREF_CTRL_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_0_3_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_4_7_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_8_11_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_12_15_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_16_19_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_20_23_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_24_27_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_28_31_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_32_35_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_36_39_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_40_43_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_44_47_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_48_51_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_52_55_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_56_59_OFFSET,
	AM33XX_CONTROL_TPCC_EVT_MUX_60_63_OFFSET,
	AM33XX_CONTROL_TIMER_EVT_CAPT_OFFSET,
	AM33XX_CONTROL_ECAP_EVT_CAPT_OFFSET,
	AM33XX_CONTROL_ADC_EVT_CAPT_OFFSET,
	AM43XX_CONTROL_ADC1_EVT_CAPT_OFFSET,
	AM33XX_CONTROL_RESET_ISO_OFFSET,
};

/* Saved values, parallel to am43xx_control_reg_offsets[] above */
static u32 am33xx_control_vals[ARRAY_SIZE(am43xx_control_reg_offsets)];

/**
 * am43xx_control_save_context - Save the wakeup domain registers
 *
 * Save the wkup domain registers
 */
static void am43xx_control_save_context(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(am43xx_control_reg_offsets); i++)
		am33xx_control_vals[i] =
			omap_ctrl_readl(am43xx_control_reg_offsets[i]);
}

/**
 * am43xx_control_restore_context - Restore the wakeup domain registers
 *
 * Restore the wkup domain registers
 */
static void am43xx_control_restore_context(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(am43xx_control_reg_offsets); i++)
		omap_ctrl_writel(am33xx_control_vals[i],
				 am43xx_control_reg_offsets[i]);
}

/*
 * CPU PM notifier: save the control registers when the cluster powers
 * down and restore them on the way back up, but only when off-mode has
 * been enabled (enable_off_mode is set elsewhere, e.g. by the PM code).
 */
static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (enable_off_mode)
			am43xx_control_save_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (enable_off_mode)
			am43xx_control_restore_context();
		break;
	}

	return NOTIFY_OK;
}

struct control_init_data {
	int index;		/* TI_CLKM_* identifier for the clk provider */
	void __iomem *mem;	/* ioremapped register base, filled at init */
	s16 offset;		/* offset applied to omap2_ctrl_base accesses */
};

static struct control_init_data ctrl_data = {
	.index = TI_CLKM_CTRL,
};

static const struct control_init_data omap2_ctrl_data = {
	.index = TI_CLKM_CTRL,
	.offset = -OMAP2_CONTROL_GENERAL,
};

static const struct control_init_data ctrl_aux_data = {
	.index = TI_CLKM_CTRL_AUX,
};

static const struct of_device_id omap_scrm_dt_match_table[] = {
	{ .compatible = "ti,am3-scm", .data = &ctrl_data },
	{ .compatible = "ti,am4-scm", .data = &ctrl_data },
	{ .compatible = "ti,omap2-scm", .data = &omap2_ctrl_data },
	{ .compatible = "ti,omap3-scm", .data = &omap2_ctrl_data },
	{ .compatible = "ti,dm814-scm", .data = &ctrl_data },
	{ .compatible = "ti,dm816-scrm", .data = &ctrl_data },
	{ .compatible = "ti,omap4-scm-core", .data = &ctrl_data },
	{ .compatible = "ti,omap5-scm-core", .data = &ctrl_data },
	{ .compatible = "ti,omap5-scm-wkup-pad-conf", .data = &ctrl_aux_data },
	{ .compatible = "ti,dra7-scm-core", .data = &ctrl_data },
	{ }
};

/**
 * omap2_control_base_init - initialize iomappings for the control driver
 *
 * Detects and initializes the iomappings for the control driver, based
 * on the DT data. Returns 0 in success, negative error value
 * otherwise.
 */
int __init omap2_control_base_init(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	struct control_init_data *data;
	void __iomem *mem;

	for_each_matching_node_and_match(np, omap_scrm_dt_match_table, &match) {
		/*
		 * NOTE(review): the cast drops const; for the omap2/omap3
		 * entries match->data points at a const object which is then
		 * written through data->mem below -- confirm intentional.
		 */
		data = (struct control_init_data *)match->data;

		mem = of_iomap(np, 0);
		if (!mem) {
			of_node_put(np);
			return -ENOMEM;
		}

		if (data->index == TI_CLKM_CTRL) {
			omap2_ctrl_base = mem;
			omap2_ctrl_offset = data->offset;
		}

		data->mem = mem;
	}

	return 0;
}

/**
 * omap_control_init - low level init for the control driver
 *
 * Initializes the low level clock infrastructure for control driver.
 * Returns 0 in success, negative error value in failure.
 */
int __init omap_control_init(void)
{
	struct device_node *np, *scm_conf;
	const struct of_device_id *match;
	/*
	 * NOTE(review): declared as omap_prcm_init_data although match->data
	 * points at struct control_init_data; only ->index and ->mem are
	 * read, so the layouts must agree on those fields -- confirm.
	 */
	const struct omap_prcm_init_data *data;
	int ret;
	struct regmap *syscon;
	static struct notifier_block nb;

	for_each_matching_node_and_match(np, omap_scrm_dt_match_table, &match) {
		data = match->data;

		/*
		 * Check if we have scm_conf node, if yes, use this to
		 * access clock registers.
		 */
		scm_conf = of_get_child_by_name(np, "scm_conf");

		if (scm_conf) {
			syscon = syscon_node_to_regmap(scm_conf);

			if (IS_ERR(syscon)) {
				ret = PTR_ERR(syscon);
				goto of_node_put;
			}

			if (of_get_child_by_name(scm_conf, "clocks")) {
				ret = omap2_clk_provider_init(scm_conf,
							      data->index,
							      syscon, NULL);
				if (ret)
					goto of_node_put;
			}
		} else {
			/* No scm_conf found, direct access */
			ret = omap2_clk_provider_init(np, data->index, NULL,
						      data->mem);
			if (ret)
				goto of_node_put;
		}
	}

	/* Only AM43XX can lose ctrl registers context during rtc-ddr suspend */
	if (soc_is_am43xx()) {
		nb.notifier_call = cpu_notifier;
		cpu_pm_register_notifier(&nb);
	}

	return 0;

of_node_put:
	of_node_put(np);
	return ret;
}
linux-master
arch/arm/mach-omap2/control.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP2/3 PRM module functions
 *
 * Copyright (C) 2010-2011 Texas Instruments, Inc.
 * Copyright (C) 2010 Nokia Corporation
 * Benoît Cousson
 * Paul Walmsley
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>

#include "powerdomain.h"
#include "prm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"
#include "clockdomain.h"

/**
 * omap2_prm_is_hardreset_asserted - read the HW reset line state of
 * submodules contained in the hwmod module
 * @shift: register bit shift corresponding to the reset line to check
 * @part: PRM partition, ignored for OMAP2
 * @prm_mod: PRM submodule base (e.g. CORE_MOD)
 * @offset: register offset, ignored for OMAP2
 *
 * Returns 1 if the (sub)module hardreset line is currently asserted,
 * 0 if the (sub)module hardreset line is not currently asserted, or
 * -EINVAL if called while running on a non-OMAP2/3 chip.
 */
int omap2_prm_is_hardreset_asserted(u8 shift, u8 part, s16 prm_mod, u16 offset)
{
	return omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL,
					     (1 << shift));
}

/**
 * omap2_prm_assert_hardreset - assert the HW reset line of a submodule
 * @shift: register bit shift corresponding to the reset line to assert
 * @part: PRM partition, ignored for OMAP2
 * @prm_mod: PRM submodule base (e.g. CORE_MOD)
 * @offset: register offset, ignored for OMAP2
 *
 * Some IPs like dsp or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * place the submodule into reset.  Returns 0 upon success or -EINVAL
 * upon an argument error.
 */
int omap2_prm_assert_hardreset(u8 shift, u8 part, s16 prm_mod, u16 offset)
{
	u32 mask;

	mask = 1 << shift;
	omap2_prm_rmw_mod_reg_bits(mask, mask, prm_mod, OMAP2_RM_RSTCTRL);

	return 0;
}

/**
 * omap2_prm_deassert_hardreset - deassert a submodule hardreset line and wait
 * @rst_shift: register bit shift corresponding to the reset line to deassert
 * @st_shift: register bit shift for the status of the deasserted submodule
 * @part: PRM partition, not used for OMAP2
 * @prm_mod: PRM submodule base (e.g. CORE_MOD)
 * @rst_offset: reset register offset, not used for OMAP2
 * @st_offset: reset status register offset, not used for OMAP2
 *
 * Some IPs like dsp or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * take the submodule out of reset and wait until the PRCM indicates
 * that the reset has completed before returning.  Returns 0 upon success or
 * -EINVAL upon an argument error, -EEXIST if the submodule was already out
 * of reset, or -EBUSY if the submodule did not exit reset promptly.
 */
int omap2_prm_deassert_hardreset(u8 rst_shift, u8 st_shift, u8 part,
				 s16 prm_mod, u16 rst_offset, u16 st_offset)
{
	u32 rst, st;
	int c;

	rst = 1 << rst_shift;
	st = 1 << st_shift;

	/* Check the current status to avoid de-asserting the line twice */
	if (omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL, rst) == 0)
		return -EEXIST;

	/* Clear the reset status by writing 1 to the status bit */
	omap2_prm_rmw_mod_reg_bits(0xffffffff, st, prm_mod, OMAP2_RM_RSTST);
	/* de-assert the reset control line */
	omap2_prm_rmw_mod_reg_bits(rst, 0, prm_mod, OMAP2_RM_RSTCTRL);
	/* wait the status to be set */
	omap_test_timeout(omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTST,
							st),
			  MAX_MODULE_HARDRESET_WAIT, c);

	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
}

/* Powerdomain low-level functions */

/* Common functions across OMAP2 and OMAP3 */

/* Set the ON-state power level of memory bank @bank of @pwrdm */
int omap2_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank,
			     u8 pwrst)
{
	u32 m;

	m = omap2_pwrdm_get_mem_bank_onstate_mask(bank);

	omap2_prm_rmw_mod_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_offs,
				   OMAP2_PM_PWSTCTRL);

	return 0;
}

/* Set the retention-state power level of memory bank @bank of @pwrdm */
int omap2_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank,
			      u8 pwrst)
{
	u32 m;

	m = omap2_pwrdm_get_mem_bank_retst_mask(bank);

	omap2_prm_rmw_mod_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_offs,
				   OMAP2_PM_PWSTCTRL);

	return 0;
}

/* Read the current power state of memory bank @bank of @pwrdm */
int omap2_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
{
	u32 m;

	m = omap2_pwrdm_get_mem_bank_stst_mask(bank);

	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP2_PM_PWSTST,
					     m);
}

/* Read the programmed retention state of memory bank @bank of @pwrdm */
int omap2_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank)
{
	u32 m;

	m = omap2_pwrdm_get_mem_bank_retst_mask(bank);

	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
					     OMAP2_PM_PWSTCTRL, m);
}

/* Program the logic retention state of @pwrdm to @pwrst */
int omap2_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
{
	u32 v;

	v = pwrst << __ffs(OMAP_LOGICRETSTATE_MASK);
	omap2_prm_rmw_mod_reg_bits(OMAP_LOGICRETSTATE_MASK, v,
				   pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL);

	return 0;
}

/*
 * Busy-wait until @pwrdm finishes its in-progress power transition.
 * Returns 0 on completion or -EAGAIN if the bailout count was reached.
 */
int omap2_pwrdm_wait_transition(struct powerdomain *pwrdm)
{
	u32 c = 0;

	/*
	 * REVISIT: pwrdm_wait_transition() may be better implemented
	 * via a callback and a periodic timer check -- how long do we expect
	 * powerdomain transitions to take?
	 */

	/* XXX Is this udelay() value meaningful? */
	while ((omap2_prm_read_mod_reg(pwrdm->prcm_offs, OMAP2_PM_PWSTST) &
		OMAP_INTRANSITION_MASK) &&
	       (c++ < PWRDM_TRANSITION_BAILOUT))
		udelay(1);

	/* c is post-incremented: on bailout it equals BAILOUT + 1 */
	if (c > PWRDM_TRANSITION_BAILOUT) {
		pr_err("powerdomain: %s: waited too long to complete transition\n",
		       pwrdm->name);
		return -EAGAIN;
	}

	pr_debug("powerdomain: completed transition in %d loops\n", c);

	return 0;
}

/* Add a wakeup dependency: @clkdm1 wakes when @clkdm2 wakes */
int omap2_clkdm_add_wkdep(struct clockdomain *clkdm1,
			  struct clockdomain *clkdm2)
{
	omap2_prm_set_mod_reg_bits((1 << clkdm2->dep_bit),
				   clkdm1->pwrdm.ptr->prcm_offs, PM_WKDEP);
	return 0;
}

/* Remove the wakeup dependency of @clkdm1 on @clkdm2 */
int omap2_clkdm_del_wkdep(struct clockdomain *clkdm1,
			  struct clockdomain *clkdm2)
{
	omap2_prm_clear_mod_reg_bits((1 << clkdm2->dep_bit),
				     clkdm1->pwrdm.ptr->prcm_offs, PM_WKDEP);
	return 0;
}

/* Read whether @clkdm1 currently has a wakeup dependency on @clkdm2 */
int omap2_clkdm_read_wkdep(struct clockdomain *clkdm1,
			   struct clockdomain *clkdm2)
{
	return omap2_prm_read_mod_bits_shift(clkdm1->pwrdm.ptr->prcm_offs,
					     PM_WKDEP,
					     (1 << clkdm2->dep_bit));
}

/* XXX Caller must hold the clkdm's powerdomain lock */
int omap2_clkdm_clear_all_wkdeps(struct clockdomain *clkdm)
{
	struct clkdm_dep *cd;
	u32 mask = 0;

	for (cd = clkdm->wkdep_srcs; cd && cd->clkdm_name; cd++) {
		if (!cd->clkdm)
			continue; /* only happens if data is erroneous */

		/* PRM accesses are slow, so minimize them */
		mask |= 1 << cd->clkdm->dep_bit;
		cd->wkdep_usecount = 0;
	}

	omap2_prm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,
				     PM_WKDEP);

	return 0;
}
linux-master
arch/arm/mach-omap2/prm2xxx_3xxx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AM43xx Clock domains framework
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "clockdomain.h"
#include "prcm44xx.h"
#include "prcm43xx.h"

/*
 * Static clockdomain descriptors for the AM43xx family.  Each entry
 * names its powerdomain and gives the CM instance/offset used to reach
 * its clockdomain control register.
 */

static struct clockdomain l4_cefuse_43xx_clkdm = {
	.name		= "l4_cefuse_clkdm",
	.pwrdm		= { .name = "cefuse_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_CEFUSE_INST,
	.clkdm_offs	= AM43XX_CM_CEFUSE_CEFUSE_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain mpu_43xx_clkdm = {
	.name		= "mpu_clkdm",
	.pwrdm		= { .name = "mpu_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_MPU_INST,
	.clkdm_offs	= AM43XX_CM_MPU_MPU_CDOFFS,
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain l4ls_43xx_clkdm = {
	.name		= "l4ls_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_L4LS_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain tamper_43xx_clkdm = {
	.name		= "tamper_clkdm",
	.pwrdm		= { .name = "tamper_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_TAMPER_INST,
	.clkdm_offs	= AM43XX_CM_TAMPER_TAMPER_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l4_rtc_43xx_clkdm = {
	.name		= "l4_rtc_clkdm",
	.pwrdm		= { .name = "rtc_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_RTC_INST,
	.clkdm_offs	= AM43XX_CM_RTC_RTC_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain pruss_ocp_43xx_clkdm = {
	.name		= "pruss_ocp_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_ICSS_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain ocpwp_l3_43xx_clkdm = {
	.name		= "ocpwp_l3_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_OCPWP_L3_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l3s_tsc_43xx_clkdm = {
	.name		= "l3s_tsc_clkdm",
	.pwrdm		= { .name = "wkup_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_WKUP_INST,
	.clkdm_offs	= AM43XX_CM_WKUP_L3S_TSC_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain lcdc_43xx_clkdm = {
	.name		= "lcdc_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_LCDC_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain dss_43xx_clkdm = {
	.name		= "dss_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_DSS_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l3_aon_43xx_clkdm = {
	.name		= "l3_aon_clkdm",
	.pwrdm		= { .name = "wkup_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_WKUP_INST,
	.clkdm_offs	= AM43XX_CM_WKUP_L3_AON_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain emif_43xx_clkdm = {
	.name		= "emif_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_EMIF_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

/* NOTE: the only domain here with no CLKDM_CAN_* flags set */
static struct clockdomain l4_wkup_aon_43xx_clkdm = {
	.name		= "l4_wkup_aon_clkdm",
	.pwrdm		= { .name = "wkup_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_WKUP_INST,
	.clkdm_offs	= AM43XX_CM_WKUP_L4_WKUP_AON_CDOFFS,
};

static struct clockdomain l3_43xx_clkdm = {
	.name		= "l3_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_L3_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l4_wkup_43xx_clkdm = {
	.name		= "l4_wkup_clkdm",
	.pwrdm		= { .name = "wkup_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_WKUP_INST,
	.clkdm_offs	= AM43XX_CM_WKUP_WKUP_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain cpsw_125mhz_43xx_clkdm = {
	.name		= "cpsw_125mhz_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_CPSW_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain gfx_l3_43xx_clkdm = {
	.name		= "gfx_l3_clkdm",
	.pwrdm		= { .name = "gfx_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_GFX_INST,
	.clkdm_offs	= AM43XX_CM_GFX_GFX_L3_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l3s_43xx_clkdm = {
	.name		= "l3s_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.prcm_partition	= AM43XX_CM_PARTITION,
	.cm_inst	= AM43XX_CM_PER_INST,
	.clkdm_offs	= AM43XX_CM_PER_L3S_CDOFFS,
	.flags		= CLKDM_CAN_SWSUP,
};

/* NULL-terminated list registered with the clockdomain core below */
static struct clockdomain *clockdomains_am43xx[] __initdata = {
	&l4_cefuse_43xx_clkdm,
	&mpu_43xx_clkdm,
	&l4ls_43xx_clkdm,
	&tamper_43xx_clkdm,
	&l4_rtc_43xx_clkdm,
	&pruss_ocp_43xx_clkdm,
	&ocpwp_l3_43xx_clkdm,
	&l3s_tsc_43xx_clkdm,
	&lcdc_43xx_clkdm,
	&dss_43xx_clkdm,
	&l3_aon_43xx_clkdm,
	&emif_43xx_clkdm,
	&l4_wkup_aon_43xx_clkdm,
	&l3_43xx_clkdm,
	&l4_wkup_43xx_clkdm,
	&cpsw_125mhz_43xx_clkdm,
	&gfx_l3_43xx_clkdm,
	&l3s_43xx_clkdm,
	NULL
};

/* Register the AM43xx clockdomain operations and descriptors */
void __init am43xx_clockdomains_init(void)
{
	clkdm_register_platform_funcs(&am43xx_clkdm_operations);
	clkdm_register_clkdms(clockdomains_am43xx);
	clkdm_complete_init();
}
linux-master
arch/arm/mach-omap2/clockdomains43xx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * am33xx-restart.c - Code common to all AM33xx machines. */ #include <linux/kernel.h> #include <linux/reboot.h> #include "common.h" #include "prm.h" /** * am3xx_restart - trigger a software restart of the SoC * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c * @cmd: passed from the userspace program rebooting the system (if provided) * * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ void am33xx_restart(enum reboot_mode mode, const char *cmd) { /* TODO: Handle mode and cmd if necessary */ omap_prm_reset_system(); }
linux-master
arch/arm/mach-omap2/am33xx-restart.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP2+ DMA driver
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <[email protected]>
 * DMA channel linking for 1610 by Samuel Ortiz <[email protected]>
 * Graphics DMA and LCD DMA graphics transformations
 * by Imre Deak <[email protected]>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <[email protected]>
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
 * Converted DMA library into platform driver
 *	- G, Manjunath Kondaiah <[email protected]>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/omap-dma.h>

#include "soc.h"
#include "common.h"

/*
 * sDMA register map: { offset, per-channel stride, access width }.
 * Entries with a 0x00 stride are global registers; 0x60-stride entries
 * are replicated per logical channel.
 */
static const struct omap_dma_reg reg_map[] = {
	[REVISION]	= { 0x0000, 0x00, OMAP_DMA_REG_32BIT },
	[GCR]		= { 0x0078, 0x00, OMAP_DMA_REG_32BIT },
	[IRQSTATUS_L0]	= { 0x0008, 0x00, OMAP_DMA_REG_32BIT },
	[IRQSTATUS_L1]	= { 0x000c, 0x00, OMAP_DMA_REG_32BIT },
	[IRQSTATUS_L2]	= { 0x0010, 0x00, OMAP_DMA_REG_32BIT },
	[IRQSTATUS_L3]	= { 0x0014, 0x00, OMAP_DMA_REG_32BIT },
	[IRQENABLE_L0]	= { 0x0018, 0x00, OMAP_DMA_REG_32BIT },
	[IRQENABLE_L1]	= { 0x001c, 0x00, OMAP_DMA_REG_32BIT },
	[IRQENABLE_L2]	= { 0x0020, 0x00, OMAP_DMA_REG_32BIT },
	[IRQENABLE_L3]	= { 0x0024, 0x00, OMAP_DMA_REG_32BIT },
	[SYSSTATUS]	= { 0x0028, 0x00, OMAP_DMA_REG_32BIT },
	[OCP_SYSCONFIG]	= { 0x002c, 0x00, OMAP_DMA_REG_32BIT },
	[CAPS_0]	= { 0x0064, 0x00, OMAP_DMA_REG_32BIT },
	[CAPS_2]	= { 0x006c, 0x00, OMAP_DMA_REG_32BIT },
	[CAPS_3]	= { 0x0070, 0x00, OMAP_DMA_REG_32BIT },
	[CAPS_4]	= { 0x0074, 0x00, OMAP_DMA_REG_32BIT },

	/* Common register offsets */
	[CCR]		= { 0x0080, 0x60, OMAP_DMA_REG_32BIT },
	[CLNK_CTRL]	= { 0x0084, 0x60, OMAP_DMA_REG_32BIT },
	[CICR]		= { 0x0088, 0x60, OMAP_DMA_REG_32BIT },
	[CSR]		= { 0x008c, 0x60, OMAP_DMA_REG_32BIT },
	[CSDP]		= { 0x0090, 0x60, OMAP_DMA_REG_32BIT },
	[CEN]		= { 0x0094, 0x60, OMAP_DMA_REG_32BIT },
	[CFN]		= { 0x0098, 0x60, OMAP_DMA_REG_32BIT },
	[CSEI]		= { 0x00a4, 0x60, OMAP_DMA_REG_32BIT },
	[CSFI]		= { 0x00a8, 0x60, OMAP_DMA_REG_32BIT },
	[CDEI]		= { 0x00ac, 0x60, OMAP_DMA_REG_32BIT },
	[CDFI]		= { 0x00b0, 0x60, OMAP_DMA_REG_32BIT },
	[CSAC]		= { 0x00b4, 0x60, OMAP_DMA_REG_32BIT },
	[CDAC]		= { 0x00b8, 0x60, OMAP_DMA_REG_32BIT },

	/* Channel specific register offsets */
	[CSSA]		= { 0x009c, 0x60, OMAP_DMA_REG_32BIT },
	[CDSA]		= { 0x00a0, 0x60, OMAP_DMA_REG_32BIT },
	[CCEN]		= { 0x00bc, 0x60, OMAP_DMA_REG_32BIT },
	[CCFN]		= { 0x00c0, 0x60, OMAP_DMA_REG_32BIT },
	[COLOR]		= { 0x00c4, 0x60, OMAP_DMA_REG_32BIT },

	/* OMAP4 specific registers */
	[CDP]		= { 0x00d0, 0x60, OMAP_DMA_REG_32BIT },
	[CNDP]		= { 0x00d4, 0x60, OMAP_DMA_REG_32BIT },
	[CCDN]		= { 0x00d8, 0x60, OMAP_DMA_REG_32BIT },
};

/*
 * configure_dma_errata - build the errata bitmask for the running SoC
 *
 * Returns a bitmask of DMA_ERRATA_* / DMA_ROMCODE_BUG flags selected by
 * runtime SoC/revision checks; the mask is stored in dma_plat_info.errata.
 */
static unsigned configure_dma_errata(void)
{
	unsigned errata = 0;

	/*
	 * Errata applicable for OMAP2430ES1.0 and all omap2420
	 *
	 * I.
	 * Erratum ID: Not Available
	 * Inter Frame DMA buffering issue DMA will wrongly
	 * buffer elements if packing and bursting is enabled. This might
	 * result in data gets stalled in FIFO at the end of the block.
	 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
	 * guarantee no data will stay in the DMA FIFO in case inter frame
	 * buffering occurs
	 *
	 * II.
	 * Erratum ID: Not Available
	 * DMA may hang when several channels are used in parallel
	 * In the following configuration, DMA channel hanging can occur:
	 * a.  Channel i, hardware synchronized, is enabled
	 * b.  Another channel (Channel x), software synchronized, is enabled.
	 * c.  Channel i is disabled before end of transfer
	 * d.  Channel i is reenabled.
	 * e.  Steps 1 to 4 are repeated a certain number of times.
	 * f.  A third channel (Channel y), software synchronized, is enabled.
	 * Channel x and Channel y may hang immediately after step 'f'.
	 * Workaround:
	 * For any channel used - make sure NextLCH_ID is set to the value j.
	 */
	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
				  (omap_type() == OMAP2430_REV_ES1_0))) {
		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
	}

	/*
	 * Erratum ID: i378: OMAP2+: sDMA Channel is not disabled
	 * after a transaction error.
	 * Workaround: SW should explicitly disable the channel.
	 */
	if (cpu_class_is_omap2())
		SET_DMA_ERRATA(DMA_ERRATA_i378);

	/*
	 * Erratum ID: i541: sDMA FIFO draining does not finish
	 * If sDMA channel is disabled on the fly, sDMA enters standby even
	 * through FIFO Drain is still in progress
	 * Workaround: Put sDMA in NoStandby more before a logical channel is
	 * disabled, then put it back to SmartStandby right after the channel
	 * finishes FIFO draining.
	 */
	if (cpu_is_omap34xx())
		SET_DMA_ERRATA(DMA_ERRATA_i541);

	/*
	 * Erratum ID: i88 : Special programming model needed to disable DMA
	 * before end of block.
	 * Workaround: software must ensure that the DMA is configured in No
	 * Standby mode(DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
	 */
	if (omap_type() == OMAP3430_REV_ES1_0)
		SET_DMA_ERRATA(DMA_ERRATA_i88);

	/*
	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	SET_DMA_ERRATA(DMA_ERRATA_3_3);

	/*
	 * Erratum ID: Not Available
	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
	 * after secure sram context save and restore.
	 * Work around: Hence we need to manually clear those IRQs to avoid
	 * spurious interrupts. This affects only secure devices.
	 */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		SET_DMA_ERRATA(DMA_ROMCODE_BUG);

	return errata;
}

static const struct dma_slave_map omap24xx_sdma_dt_map[] = {
	/* external DMA requests when tusb6010 is used */
	{ "musb-hdrc.1.auto", "dmareq0", SDMA_FILTER_PARAM(2) },
	{ "musb-hdrc.1.auto", "dmareq1", SDMA_FILTER_PARAM(3) },
	{ "musb-hdrc.1.auto", "dmareq2", SDMA_FILTER_PARAM(14) }, /* OMAP2420 only */
	{ "musb-hdrc.1.auto", "dmareq3", SDMA_FILTER_PARAM(15) }, /* OMAP2420 only */
	{ "musb-hdrc.1.auto", "dmareq4", SDMA_FILTER_PARAM(16) }, /* OMAP2420 only */
	{ "musb-hdrc.1.auto", "dmareq5", SDMA_FILTER_PARAM(64) }, /* OMAP2420 only */
};

static struct omap_dma_dev_attr dma_attr = {
	.dev_caps = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
		    IS_CSSA_32 | IS_CDSA_32,
	.lch_count = 32,
};

/* Platform data consumed by the omap-dma dmaengine driver */
struct omap_system_dma_plat_info dma_plat_info = {
	.reg_map	= reg_map,
	.channel_stride	= 0x60,
	.dma_attr	= &dma_attr,
};

/* One time initializations */
static int __init omap2_system_dma_init(void)
{
	dma_plat_info.errata = configure_dma_errata();

	if (soc_is_omap24xx()) {
		/* DMA slave map for drivers not yet converted to DT */
		dma_plat_info.slave_map = omap24xx_sdma_dt_map;
		dma_plat_info.slavecnt = ARRAY_SIZE(omap24xx_sdma_dt_map);
	}

	if (!soc_is_omap242x())
		dma_attr.dev_caps |= IS_RW_PRIORITY;

	if (soc_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		dma_attr.dev_caps |= HS_CHANNELS_RESERVED;

	return 0;
}
omap_arch_initcall(omap2_system_dma_init);
linux-master
arch/arm/mach-omap2/dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * pm.c - Common OMAP2+ power management-related code
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 * Copyright (C) 2010 Nokia Corporation
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/clk.h>
#include <linux/cpu.h>

#include <asm/system_misc.h>

#include "omap_device.h"
#include "common.h"

#include "soc.h"
#include "prcm-common.h"
#include "voltage.h"
#include "powerdomain.h"
#include "clockdomain.h"
#include "pm.h"

u32 enable_off_mode;

#ifdef CONFIG_SUSPEND
/*
 * omap_pm_suspend: points to a function that does the SoC-specific
 * suspend work
 */
static int (*omap_pm_suspend)(void);
#endif

#ifdef CONFIG_PM
/**
 * struct omap2_oscillator - Describe the board main oscillator latencies
 * @startup_time: oscillator startup latency
 * @shutdown_time: oscillator shutdown latency
 */
struct omap2_oscillator {
	u32 startup_time;
	u32 shutdown_time;
};

/* Defaults to "unknown": ULONG_MAX until a board provides real latencies */
static struct omap2_oscillator oscillator = {
	.startup_time = ULONG_MAX,
	.shutdown_time = ULONG_MAX,
};

/*
 * omap_pm_get_oscillator - report the board oscillator latencies
 * @tstart: out, startup latency; @tshut: out, shutdown latency.
 * Silently returns if either pointer is NULL.
 */
void omap_pm_get_oscillator(u32 *tstart, u32 *tshut)
{
	if (!tstart || !tshut)
		return;

	*tstart = oscillator.startup_time;
	*tshut = oscillator.shutdown_time;
}
#endif

/* Clockdomain iterator callback: let each clockdomain enter idle */
int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	clkdm_allow_idle(clkdm);
	return 0;
}

#ifdef CONFIG_SUSPEND
/*
 * omap_pm_enter - platform_suspend_ops .enter hook
 * Dispatches to the SoC-specific suspend routine registered via
 * omap_common_suspend_init(); only PM_SUSPEND_MEM is supported.
 */
static int omap_pm_enter(suspend_state_t suspend_state)
{
	int ret = 0;

	if (!omap_pm_suspend)
		return -ENOENT; /* XXX doublecheck */

	switch (suspend_state) {
	case PM_SUSPEND_MEM:
		ret = omap_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/* .begin hook: force polling idle and prepare PRCM IRQ handling (34xx) */
static int omap_pm_begin(suspend_state_t state)
{
	cpu_idle_poll_ctrl(true);
	if (soc_is_omap34xx())
		omap_prcm_irq_prepare();
	return 0;
}

/* .end hook: undo the polling-idle override set in omap_pm_begin() */
static void omap_pm_end(void)
{
	cpu_idle_poll_ctrl(false);
}

/* .wake hook: finish PRCM IRQ handling started in omap_pm_begin() (34xx) */
static void omap_pm_wake(void)
{
	if (soc_is_omap34xx())
		omap_prcm_irq_complete();
}

static const struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap_pm_begin,
	.end		= omap_pm_end,
	.enter		= omap_pm_enter,
	.wake		= omap_pm_wake,
	.valid		= suspend_valid_only_mem,
};

/**
 * omap_common_suspend_init - Set common suspend routines for OMAP SoCs
 * @pm_suspend: function pointer to SoC specific suspend function
 */
void omap_common_suspend_init(void *pm_suspend)
{
	omap_pm_suspend = pm_suspend;
	suspend_set_ops(&omap_pm_ops);
}
#endif /* CONFIG_SUSPEND */

/* No-op SoC init, used when a SoC needs no extra PM setup */
int __maybe_unused omap_pm_nop_init(void)
{
	return 0;
}

/* Set by SoC-specific code before the late initcall below runs */
int (*omap_pm_soc_init)(void);

/*
 * Late initcall: bring up the voltage layer, SmartReflex and the
 * SoC-specific PM code, then enable clock autoidle everywhere.
 * Returns 0 even if the SoC init fails (failure is only logged).
 */
static int __init omap2_common_pm_late_init(void)
{
	int error;

	if (!omap_pm_soc_init)
		return 0;

	/* Init the voltage layer */
	omap3_twl_init();
	omap4_twl_init();
	omap4_cpcap_init();
	omap_voltage_late_init();

	/* Smartreflex device init */
	omap_devinit_smartreflex();

	error = omap_pm_soc_init();
	if (error)
		pr_warn("%s: pm soc init failed: %i\n", __func__, error);

	omap2_clk_enable_autoidle_all();

	return 0;
}
omap_late_initcall(omap2_common_pm_late_init);
linux-master
arch/arm/mach-omap2/pm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3/OMAP4 smartreflex device file
 *
 * Author: Thara Gopinath <[email protected]>
 *
 * Based originally on code from smartreflex.c
 * Copyright (C) 2010 Texas Instruments, Inc.
 * Thara Gopinath <[email protected]>
 *
 * Copyright (C) 2008 Nokia Corporation
 * Kalle Jokiniemi
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Lesly A M <[email protected]>
 */

#include <linux/power/smartreflex.h>

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/io.h>

#include "soc.h"
#include "omap_device.h"
#include "voltage.h"
#include "control.h"
#include "pm.h"

/* Read EFUSE values from control registers for OMAP3430 */
/*
 * sr_set_nvalues - build the SmartReflex N-value table from eFuse data
 * @volt_data: zero-terminated (volt_nominal == 0) voltage table to scan
 * @sr_data: SR instance whose nvalue_table/nvalue_count are filled in
 *
 * Entries whose eFuse value reads back as 0 (unprogrammed) are skipped,
 * so nvalue_count may end up smaller than the voltage table length.
 * On allocation failure the table is simply left NULL/empty.
 */
static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
				struct omap_sr_data *sr_data)
{
	struct omap_sr_nvalue_table *nvalue_table;
	int i, j, count = 0;

	sr_data->nvalue_count = 0;
	sr_data->nvalue_table = NULL;

	while (volt_data[count].volt_nominal)
		count++;

	nvalue_table = kcalloc(count, sizeof(*nvalue_table), GFP_KERNEL);

	if (!nvalue_table)
		return;

	for (i = 0, j = 0; i < count; i++) {
		u32 v;

		/*
		 * In OMAP4 the efuse registers are 24 bit aligned.
		 * A readl_relaxed will fail for non-32 bit aligned address
		 * and hence the 8-bit read and shift.
		 */
		if (cpu_is_omap44xx()) {
			u16 offset = volt_data[i].sr_efuse_offs;

			v = omap_ctrl_readb(offset) |
				omap_ctrl_readb(offset + 1) << 8 |
				omap_ctrl_readb(offset + 2) << 16;
		} else {
			v = omap_ctrl_readl(volt_data[i].sr_efuse_offs);
		}

		/*
		 * Many OMAP SoCs don't have the eFuse values set.
		 * For example, pretty much all OMAP3xxx before
		 * ES3.something.
		 *
		 * XXX There needs to be some way for board files or
		 * userspace to add these in.
		 */
		if (v == 0)
			continue;

		nvalue_table[j].nvalue = v;
		nvalue_table[j].efuse_offs = volt_data[i].sr_efuse_offs;
		nvalue_table[j].errminlimit = volt_data[i].sr_errminlimit;
		nvalue_table[j].volt_nominal = volt_data[i].volt_nominal;

		j++;
	}

	sr_data->nvalue_table = nvalue_table;
	sr_data->nvalue_count = j;
}

/* Platform data slots, defined by the SoC-specific pdata code */
extern struct omap_sr_data omap_sr_pdata[];

/*
 * sr_init_by_name - populate the omap_sr_pdata slot for one SR instance
 * @name: instance name ("smartreflex_mpu", "smartreflex_core", ...)
 * @voltdm: name of the voltage domain the sensor belongs to
 *
 * Returns -EINVAL for an unknown instance name.  NOTE(review): the
 * voltdm-lookup and volt-table error paths fall through to "exit" and
 * still return 0 — presumably deliberate best-effort behavior; confirm
 * before changing.  The static counter 'i' only numbers VDDs in the
 * error message across successive calls.
 */
static int __init sr_init_by_name(const char *name, const char *voltdm)
{
	struct omap_sr_data *sr_data = NULL;
	struct omap_volt_data *volt_data;
	static int i;

	/* Lengths include the NUL, so these behave like exact matches */
	if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
	    !strncmp(name, "smartreflex_mpu", 16))
		sr_data = &omap_sr_pdata[OMAP_SR_MPU];
	else if (!strncmp(name, "smartreflex_core", 17))
		sr_data = &omap_sr_pdata[OMAP_SR_CORE];
	else if (!strncmp(name, "smartreflex_iva", 16))
		sr_data = &omap_sr_pdata[OMAP_SR_IVA];

	if (!sr_data) {
		pr_err("%s: Unknown instance %s\n", __func__, name);

		return -EINVAL;
	}

	sr_data->name = name;
	/* ip_type 1 = OMAP343x-era SR IP, 2 = later revisions */
	if (cpu_is_omap343x())
		sr_data->ip_type = 1;
	else
		sr_data->ip_type = 2;

	sr_data->senn_mod = 0x1;
	sr_data->senp_mod = 0x1;

	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
		sr_data->err_weight = OMAP3430_SR_ERRWEIGHT;
		sr_data->err_maxlimit = OMAP3430_SR_ERRMAXLIMIT;
		sr_data->accum_data = OMAP3430_SR_ACCUMDATA;

		/* SR1 (MPU) and SR2 (others) use different averaging weights */
		if (!(strcmp(sr_data->name, "smartreflex_mpu"))) {
			sr_data->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT;
			sr_data->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT;
		} else {
			sr_data->senn_avgweight = OMAP3430_SR2_SENNAVGWEIGHT;
			sr_data->senp_avgweight = OMAP3430_SR2_SENPAVGWEIGHT;
		}
	}

	sr_data->voltdm = voltdm_lookup(voltdm);
	if (!sr_data->voltdm) {
		pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
			__func__, voltdm);
		goto exit;
	}

	omap_voltage_get_volttable(sr_data->voltdm, &volt_data);
	if (!volt_data) {
		pr_err("%s: No Voltage table registered for VDD%d\n",
		       __func__, i + 1);
		goto exit;
	}

	sr_set_nvalues(volt_data, sr_data);

exit:
	i++;

	return 0;
}

#ifdef CONFIG_OMAP_HWMOD
/*
 * sr_dev_init - omap_hwmod_for_each_by_class() callback
 * Initializes one SR instance from its hwmod dev_attr.  Returns 0 (not
 * an error) when no voltage domain is attached, so iteration continues.
 */
static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
{
	struct omap_smartreflex_dev_attr *sr_dev_attr;

	sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
	if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
		pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
		       __func__, oh->name);
		return 0;
	}

	return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
}
#else
static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
{
	return -EINVAL;
}
#endif

/* SR instance suffixes for SoCs initialized by name (no hwmod data) */
static const char * const omap4_sr_instances[] = {
	"mpu",
	"iva",
	"core",
};

static const char * const dra7_sr_instances[] = {
	"mpu",
	"core",
};

/*
 * omap_devinit_smartreflex - initialize all SmartReflex instances
 *
 * OMAP4 and DRA7 use a fixed instance list (name-based init); all other
 * SoCs iterate the "smartreflex" hwmod class.  Names built here with
 * kasprintf() are intentionally kept alive: sr_data->name keeps a
 * reference to them for the lifetime of the system.
 */
int __init omap_devinit_smartreflex(void)
{
	const char * const *sr_inst = NULL;
	int i, nr_sr = 0;

	if (soc_is_omap44xx()) {
		sr_inst = omap4_sr_instances;
		nr_sr = ARRAY_SIZE(omap4_sr_instances);

	} else if (soc_is_dra7xx()) {
		sr_inst = dra7_sr_instances;
		nr_sr = ARRAY_SIZE(dra7_sr_instances);
	}

	if (nr_sr) {
		const char *name, *voltdm;

		for (i = 0; i < nr_sr; i++) {
			name = kasprintf(GFP_KERNEL, "smartreflex_%s",
					 sr_inst[i]);
			voltdm = sr_inst[i];
			sr_init_by_name(name, voltdm);
		}

		return 0;
	}

	return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
}
linux-master
arch/arm/mach-omap2/sr_device.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap_hwmod_2420_data.c - hardware modules present on the OMAP2420 chips
 *
 * Copyright (C) 2009-2011 Nokia Corporation
 * Copyright (C) 2012 Texas Instruments, Inc.
 * Paul Walmsley
 *
 * XXX handle crossbar/shared link difference for L3?
 * XXX these should be marked initdata for multi-OMAP kernels
 */

#include <linux/platform_data/i2c-omap.h>

#include "omap_hwmod.h"
#include "l3_2xxx.h"
#include "l4_2xxx.h"

#include "omap_hwmod_common_data.h"

#include "cm-regbits-24xx.h"
#include "prm-regbits-24xx.h"
#include "i2c.h"
#include "mmc.h"
#include "wd_timer.h"

/*
 * OMAP2420 hardware module integration data
 *
 * All of the data in this section should be autogeneratable from the
 * TI hardware database or other technical documentation.  Data that
 * is driver-specific or driver-kernel integration-specific belongs
 * elsewhere.
 */

/*
 * IP blocks
 */

/* IVA1 (IVA1) */
static struct omap_hwmod_class iva1_hwmod_class = {
	.name		= "iva1",
};

static struct omap_hwmod_rst_info omap2420_iva_resets[] = {
	{ .name = "iva", .rst_shift = 8 },
};

static struct omap_hwmod omap2420_iva_hwmod = {
	.name		= "iva",
	.class		= &iva1_hwmod_class,
	.clkdm_name	= "iva1_clkdm",
	.rst_lines	= omap2420_iva_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap2420_iva_resets),
	.main_clk	= "iva1_ifck",
};

/* DSP */
static struct omap_hwmod_class dsp_hwmod_class = {
	.name		= "dsp",
};

static struct omap_hwmod_rst_info omap2420_dsp_resets[] = {
	{ .name = "logic", .rst_shift = 0 },
	{ .name = "mmu", .rst_shift = 1 },
};

static struct omap_hwmod omap2420_dsp_hwmod = {
	.name		= "dsp",
	.class		= &dsp_hwmod_class,
	.clkdm_name	= "dsp_clkdm",
	.rst_lines	= omap2420_dsp_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap2420_dsp_resets),
	.main_clk	= "dsp_fck",
};

/* I2C common */
static struct omap_hwmod_class_sysconfig i2c_sysc = {
	.rev_offs	= 0x00,
	.sysc_offs	= 0x20,
	.syss_offs	= 0x10,
	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class i2c_class = {
	.name		= "i2c",
	.sysc		= &i2c_sysc,
	.reset		= &omap_i2c_reset,
};

/* I2C1 */
static struct omap_hwmod omap2420_i2c1_hwmod = {
	.name		= "i2c1",
	.main_clk	= "i2c1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP2420_ST_I2C1_SHIFT,
		},
	},
	.class		= &i2c_class,
	/*
	 * From mach-omap2/pm24xx.c: "Putting MPU into the WFI state
	 * while a transfer is active seems to cause the I2C block to
	 * timeout. Why? Good question."
	 */
	.flags		= (HWMOD_16BIT_REG | HWMOD_BLOCK_WFI),
};

/* I2C2 */
static struct omap_hwmod omap2420_i2c2_hwmod = {
	.name		= "i2c2",
	.main_clk	= "i2c2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP2420_ST_I2C2_SHIFT,
		},
	},
	.class		= &i2c_class,
	.flags		= HWMOD_16BIT_REG,
};

/* mailbox */
static struct omap_hwmod omap2420_mailbox_hwmod = {
	.name		= "mailbox",
	.class		= &omap2xxx_mailbox_hwmod_class,
	.main_clk	= "mailboxes_ick",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MAILBOXES_SHIFT,
		},
	},
};

/*
 * 'mcbsp' class
 * multi channel buffered serial port controller
 */
static struct omap_hwmod_class omap2420_mcbsp_hwmod_class = {
	.name = "mcbsp",
};

static struct omap_hwmod_opt_clk mcbsp_opt_clks[] = {
	{ .role = "pad_fck", .clk = "mcbsp_clks" },
	{ .role = "prcm_fck", .clk = "func_96m_ck" },
};

/* mcbsp1 */
static struct omap_hwmod omap2420_mcbsp1_hwmod = {
	.name		= "mcbsp1",
	.class		= &omap2420_mcbsp_hwmod_class,
	.main_clk	= "mcbsp1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MCBSP1_SHIFT,
		},
	},
	.opt_clks	= mcbsp_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp_opt_clks),
};

/* mcbsp2 */
static struct omap_hwmod omap2420_mcbsp2_hwmod = {
	.name		= "mcbsp2",
	.class		= &omap2420_mcbsp_hwmod_class,
	.main_clk	= "mcbsp2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MCBSP2_SHIFT,
		},
	},
	.opt_clks	= mcbsp_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp_opt_clks),
};

/* msdi (MMC/SD interface) class */
static struct omap_hwmod_class_sysconfig omap2420_msdi_sysc = {
	.rev_offs	= 0x3c,
	.sysc_offs	= 0x64,
	.syss_offs	= 0x68,
	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2420_msdi_hwmod_class = {
	.name	= "msdi",
	.sysc	= &omap2420_msdi_sysc,
	.reset	= &omap_msdi_reset,
};

/* msdi1 */
static struct omap_hwmod omap2420_msdi1_hwmod = {
	.name		= "msdi1",
	.class		= &omap2420_msdi_hwmod_class,
	.main_clk	= "mmc_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP2420_ST_MMC_SHIFT,
		},
	},
	.flags		= HWMOD_16BIT_REG,
};

/* HDQ1W/1-wire */
static struct omap_hwmod omap2420_hdq1w_hwmod = {
	.name		= "hdq1w",
	.main_clk	= "hdq_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_HDQ_SHIFT,
		},
	},
	.class		= &omap2_hdq1w_class,
};

/*
 * interfaces
 */

/* L4 CORE -> I2C1 interface */
static struct omap_hwmod_ocp_if omap2420_l4_core__i2c1 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2420_i2c1_hwmod,
	.clk		= "i2c1_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> I2C2 interface */
static struct omap_hwmod_ocp_if omap2420_l4_core__i2c2 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2420_i2c2_hwmod,
	.clk		= "i2c2_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* IVA <- L3 interface */
static struct omap_hwmod_ocp_if omap2420_l3__iva = {
	.master		= &omap2xxx_l3_main_hwmod,
	.slave		= &omap2420_iva_hwmod,
	.clk		= "core_l3_ck",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* DSP <- L3 interface */
static struct omap_hwmod_ocp_if omap2420_l3__dsp = {
	.master		= &omap2xxx_l3_main_hwmod,
	.slave		= &omap2420_dsp_hwmod,
	.clk		= "dsp_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_ocp_if omap2420_l4_wkup__wd_timer2 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_wd_timer2_hwmod,
	.clk		= "mpu_wdt_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio1 */
static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio1 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_gpio1_hwmod,
	.clk		= "gpios_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio2 */
static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio2 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_gpio2_hwmod,
	.clk		= "gpios_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio3 */
static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio3 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_gpio3_hwmod,
	.clk		= "gpios_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio4 */
static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio4 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_gpio4_hwmod,
	.clk		= "gpios_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mailbox */
static struct omap_hwmod_ocp_if omap2420_l4_core__mailbox = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2420_mailbox_hwmod,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp1 */
static struct omap_hwmod_ocp_if omap2420_l4_core__mcbsp1 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2420_mcbsp1_hwmod,
	.clk		= "mcbsp1_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp2 */
static struct omap_hwmod_ocp_if omap2420_l4_core__mcbsp2 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2420_mcbsp2_hwmod,
	.clk		= "mcbsp2_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> msdi1 */
static struct omap_hwmod_ocp_if omap2420_l4_core__msdi1 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2420_msdi1_hwmod,
	.clk		= "mmc_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> hdq1w interface */
static struct omap_hwmod_ocp_if omap2420_l4_core__hdq1w = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2420_hdq1w_hwmod,
	.clk		= "hdq_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
	.flags		= OMAP_FIREWALL_L4 | OCPIF_SWSUP_IDLE,
};

/* l3 -> gpmc */
static struct omap_hwmod_ocp_if omap2420_l3__gpmc = {
	.master		= &omap2xxx_l3_main_hwmod,
	.slave		= &omap2xxx_gpmc_hwmod,
	.clk		= "core_l3_ck",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* All OMAP2420 interconnect links, registered by omap2420_hwmod_init() */
static struct omap_hwmod_ocp_if *omap2420_hwmod_ocp_ifs[] __initdata = {
	&omap2xxx_l3_main__l4_core,
	&omap2xxx_mpu__l3_main,
	&omap2xxx_dss__l3,
	&omap2xxx_l4_core__mcspi1,
	&omap2xxx_l4_core__mcspi2,
	&omap2xxx_l4_core__l4_wkup,
	&omap2_l4_core__uart1,
	&omap2_l4_core__uart2,
	&omap2_l4_core__uart3,
	&omap2420_l4_core__i2c1,
	&omap2420_l4_core__i2c2,
	&omap2420_l3__iva,
	&omap2420_l3__dsp,
	&omap2xxx_l4_core__timer3,
	&omap2xxx_l4_core__timer4,
	&omap2xxx_l4_core__timer5,
	&omap2xxx_l4_core__timer6,
	&omap2xxx_l4_core__timer7,
	&omap2xxx_l4_core__timer8,
	&omap2xxx_l4_core__timer9,
	&omap2xxx_l4_core__timer10,
	&omap2xxx_l4_core__timer11,
	&omap2xxx_l4_core__timer12,
	&omap2420_l4_wkup__wd_timer2,
	&omap2xxx_l4_core__dss,
	&omap2xxx_l4_core__dss_dispc,
	&omap2xxx_l4_core__dss_rfbi,
	&omap2xxx_l4_core__dss_venc,
	&omap2420_l4_wkup__gpio1,
	&omap2420_l4_wkup__gpio2,
	&omap2420_l4_wkup__gpio3,
	&omap2420_l4_wkup__gpio4,
	&omap2420_l4_core__mailbox,
	&omap2420_l4_core__mcbsp1,
	&omap2420_l4_core__mcbsp2,
	&omap2420_l4_core__msdi1,
	&omap2xxx_l4_core__rng,
	&omap2xxx_l4_core__sham,
	&omap2xxx_l4_core__aes,
	&omap2420_l4_core__hdq1w,
	&omap2420_l3__gpmc,
	NULL,
};

/* Register all OMAP2420 hwmods with the hwmod core */
int __init omap2420_hwmod_init(void)
{
	omap_hwmod_init();
	return omap_hwmod_register_links(omap2420_hwmod_ocp_ifs);
}
linux-master
arch/arm/mach-omap2/omap_hwmod_2420_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap_hwmod_2xxx_interconnect_data.c - common interconnect data for OMAP2xxx
 *
 * Copyright (C) 2009-2011 Nokia Corporation
 * Paul Walmsley
 *
 * XXX handle crossbar/shared link difference for L3?
 * XXX these should be marked initdata for multi-OMAP kernels
 */

#include <linux/sizes.h>

#include "omap_hwmod.h"
#include "l3_2xxx.h"
#include "l4_2xxx.h"

#include "omap_hwmod_common_data.h"

/*
 * Common interconnect data
 */

/* L3 -> L4_CORE interface */
struct omap_hwmod_ocp_if omap2xxx_l3_main__l4_core = {
	.master	= &omap2xxx_l3_main_hwmod,
	.slave	= &omap2xxx_l4_core_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* MPU -> L3 interface */
struct omap_hwmod_ocp_if omap2xxx_mpu__l3_main = {
	.master	= &omap2xxx_mpu_hwmod,
	.slave	= &omap2xxx_l3_main_hwmod,
	.user	= OCP_USER_MPU,
};

/* DSS -> l3 */
struct omap_hwmod_ocp_if omap2xxx_dss__l3 = {
	.master		= &omap2xxx_dss_core_hwmod,
	.slave		= &omap2xxx_l3_main_hwmod,
	.fw = {
		.omap2 = {
			.l3_perm_bit  = OMAP2_L3_CORE_FW_CONNID_DSS,
			.flags	= OMAP_FIREWALL_L3,
		},
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4_CORE -> L4_WKUP interface */
struct omap_hwmod_ocp_if omap2xxx_l4_core__l4_wkup = {
	.master	= &omap2xxx_l4_core_hwmod,
	.slave	= &omap2xxx_l4_wkup_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> UART1 interface */
struct omap_hwmod_ocp_if omap2_l4_core__uart1 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_uart1_hwmod,
	.clk		= "uart1_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> UART2 interface */
struct omap_hwmod_ocp_if omap2_l4_core__uart2 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_uart2_hwmod,
	.clk		= "uart2_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> UART3 interface */
struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_uart3_hwmod,
	.clk		= "uart3_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi1 interface */
struct omap_hwmod_ocp_if omap2xxx_l4_core__mcspi1 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_mcspi1_hwmod,
	.clk		= "mcspi1_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi2 interface */
struct omap_hwmod_ocp_if omap2xxx_l4_core__mcspi2 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_mcspi2_hwmod,
	.clk		= "mcspi2_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer3 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer3 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer3_hwmod,
	.clk		= "gpt3_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer4 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer4 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer4_hwmod,
	.clk		= "gpt4_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer5 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer5 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer5_hwmod,
	.clk		= "gpt5_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer6 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer6 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer6_hwmod,
	.clk		= "gpt6_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer7 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer7 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer7_hwmod,
	.clk		= "gpt7_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer8 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer8 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer8_hwmod,
	.clk		= "gpt8_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer9 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer9 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer9_hwmod,
	.clk		= "gpt9_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer10 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer10 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer10_hwmod,
	.clk		= "gpt10_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer11 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer11 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer11_hwmod,
	.clk		= "gpt11_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> timer12 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer12 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_timer12_hwmod,
	.clk		= "gpt12_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss */
struct omap_hwmod_ocp_if omap2xxx_l4_core__dss = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_dss_core_hwmod,
	.clk		= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss_dispc */
struct omap_hwmod_ocp_if omap2xxx_l4_core__dss_dispc = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_dss_dispc_hwmod,
	.clk		= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_DISPC_REGION,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss_rfbi */
struct omap_hwmod_ocp_if omap2xxx_l4_core__dss_rfbi = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_dss_rfbi_hwmod,
	.clk		= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss_venc */
struct omap_hwmod_ocp_if omap2xxx_l4_core__dss_venc = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_dss_venc_hwmod,
	.clk		= "dss_ick",
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_VENC_REGION,
			.flags	= OMAP_FIREWALL_L4,
		},
	},
	.flags		= OCPIF_SWSUP_IDLE,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> rng */
struct omap_hwmod_ocp_if omap2xxx_l4_core__rng = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_rng_hwmod,
	.clk		= "rng_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> sham interface */
struct omap_hwmod_ocp_if omap2xxx_l4_core__sham = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_sham_hwmod,
	.clk		= "sha_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> aes interface */
struct omap_hwmod_ocp_if omap2xxx_l4_core__aes = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2xxx_aes_hwmod,
	.clk		= "aes_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};
linux-master
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP3 voltage domain data * * Copyright (C) 2011 Texas Instruments, Inc. */ #include <linux/kernel.h> #include <linux/init.h> #include "voltage.h" static struct voltagedomain omap2_voltdm_core = { .name = "core", }; static struct voltagedomain omap2_voltdm_wkup = { .name = "wakeup", }; static struct voltagedomain *voltagedomains_omap2[] __initdata = { &omap2_voltdm_core, &omap2_voltdm_wkup, NULL, }; void __init omap2xxx_voltagedomains_init(void) { voltdm_init(voltagedomains_omap2); }
linux-master
arch/arm/mach-omap2/voltagedomains2xxx_data.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * linux/arch/arm/mach-omap2/devices.c * * OMAP2 platform device setup/initialization */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/of.h> #include <asm/mach-types.h> #include <asm/mach/map.h> #include <linux/omap-dma.h> #include "iomap.h" #include "omap_hwmod.h" #include "omap_device.h" #include "soc.h" #include "common.h" #include "control.h" #include "display.h" #define L3_MODULES_MAX_LEN 12 #define L3_MODULES 3 /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_VIDEO_OMAP2_VOUT) #if IS_ENABLED(CONFIG_FB_OMAP2) static struct resource omap_vout_resource[3 - CONFIG_FB_OMAP2_NUM_FBS] = { }; #else static struct resource omap_vout_resource[2] = { }; #endif static u64 omap_vout_dma_mask = DMA_BIT_MASK(32); static struct platform_device omap_vout_device = { .name = "omap_vout", .num_resources = ARRAY_SIZE(omap_vout_resource), .resource = &omap_vout_resource[0], .id = -1, .dev = { .dma_mask = &omap_vout_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; int __init omap_init_vout(void) { return platform_device_register(&omap_vout_device); } #else int __init omap_init_vout(void) { return 0; } #endif
linux-master
arch/arm/mach-omap2/devices.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap_hwmod common data structures
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 * Thara Gopinath <[email protected]>
 * Benoît Cousson
 *
 * Copyright (C) 2010 Nokia Corporation
 * Paul Walmsley
 *
 * This data/structures are to be used while defining OMAP on-chip module
 * data and their integration with other OMAP modules and Linux.
 */

#include <linux/types.h>
#include <linux/platform_data/ti-sysc.h>

#include "omap_hwmod.h"

#include "omap_hwmod_common_data.h"

/**
 * omap_hwmod_sysc_type1 - TYPE1 sysconfig scheme.
 *
 * To be used by hwmod structure to specify the sysconfig offsets
 * if the device ip is compliant with the original PRCM protocol
 * defined for OMAP2420.
 */
struct sysc_regbits omap_hwmod_sysc_type1 = {
	.midle_shift	= SYSC_TYPE1_MIDLEMODE_SHIFT,
	.clkact_shift	= SYSC_TYPE1_CLOCKACTIVITY_SHIFT,
	.sidle_shift	= SYSC_TYPE1_SIDLEMODE_SHIFT,
	.enwkup_shift	= SYSC_TYPE1_ENAWAKEUP_SHIFT,
	.srst_shift	= SYSC_TYPE1_SOFTRESET_SHIFT,
	.autoidle_shift	= SYSC_TYPE1_AUTOIDLE_SHIFT,
};

/**
 * omap_hwmod_sysc_type2 - TYPE2 sysconfig scheme.
 *
 * To be used by hwmod structure to specify the sysconfig offsets if the
 * device ip is compliant with the new PRCM protocol defined for new
 * OMAP4 IPs.
 */
struct sysc_regbits omap_hwmod_sysc_type2 = {
	.midle_shift	= SYSC_TYPE2_MIDLEMODE_SHIFT,
	.sidle_shift	= SYSC_TYPE2_SIDLEMODE_SHIFT,
	.srst_shift	= SYSC_TYPE2_SOFTRESET_SHIFT,
	.dmadisable_shift = SYSC_TYPE2_DMADISABLE_SHIFT,
};

/**
 * omap_hwmod_sysc_type3 - TYPE3 sysconfig scheme.
 * Used by some IPs on AM33xx
 */
struct sysc_regbits omap_hwmod_sysc_type3 = {
	.midle_shift	= SYSC_TYPE3_MIDLEMODE_SHIFT,
	.sidle_shift	= SYSC_TYPE3_SIDLEMODE_SHIFT,
};

/* Shared DISPC attributes for OMAP2/3: two managers, no framedone-TV IRQ */
struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
	.manager_count		= 2,
	.has_framedonetv_irq	= 0
};

/* SmartReflex sysconfig bitfields, OMAP34xx variant */
struct sysc_regbits omap34xx_sr_sysc_fields = {
	.clkact_shift	= 20,
};

/* SmartReflex sysconfig bitfields, OMAP36xx variant */
struct sysc_regbits omap36xx_sr_sysc_fields = {
	.sidle_shift	= 24,
	.enwkup_shift	= 26,
};

/* SHA/MD5 accelerator sysconfig bitfields (OMAP3) */
struct sysc_regbits omap3_sham_sysc_fields = {
	.sidle_shift	= 4,
	.srst_shift	= 1,
	.autoidle_shift	= 0,
};

/* AES accelerator sysconfig bitfields (OMAP3xxx) */
struct sysc_regbits omap3xxx_aes_sysc_fields = {
	.sidle_shift	= 6,
	.srst_shift	= 1,
	.autoidle_shift	= 0,
};

/* McASP sysconfig bitfields */
struct sysc_regbits omap_hwmod_sysc_type_mcasp = {
	.sidle_shift	= 0,
};

/* Full-speed USB host sysconfig bitfields */
struct sysc_regbits omap_hwmod_sysc_type_usb_host_fs = {
	.midle_shift	= 4,
	.sidle_shift	= 2,
	.srst_shift	= 1,
};
linux-master
arch/arm/mach-omap2/omap_hwmod_common_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap_hwmod_2430_data.c - hardware modules present on the OMAP2430 chips
 *
 * Copyright (C) 2009-2011 Nokia Corporation
 * Copyright (C) 2012 Texas Instruments, Inc.
 * Paul Walmsley
 *
 * XXX handle crossbar/shared link difference for L3?
 * XXX these should be marked initdata for multi-OMAP kernels
 */

#include <linux/platform_data/i2c-omap.h>
#include <linux/platform_data/hsmmc-omap.h>

#include "omap_hwmod.h"
#include "l3_2xxx.h"

#include "soc.h"
#include "omap_hwmod_common_data.h"
#include "prm-regbits-24xx.h"
#include "cm-regbits-24xx.h"
#include "i2c.h"
#include "wd_timer.h"

/*
 * OMAP2430 hardware module integration data
 *
 * All of the data in this section should be autogeneratable from the
 * TI hardware database or other technical documentation.  Data that
 * is driver-specific or driver-kernel integration-specific belongs
 * elsewhere.
 */

/*
 * IP blocks
 */

/* IVA2 (IVA2) */
static struct omap_hwmod_rst_info omap2430_iva_resets[] = {
	{ .name = "logic", .rst_shift = 0 },
	{ .name = "mmu", .rst_shift = 1 },
};

static struct omap_hwmod omap2430_iva_hwmod = {
	.name		= "iva",
	.class		= &iva_hwmod_class,
	.clkdm_name	= "dsp_clkdm",
	.rst_lines	= omap2430_iva_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap2430_iva_resets),
	.main_clk	= "dsp_fck",
};

/* I2C common */
static struct omap_hwmod_class_sysconfig i2c_sysc = {
	.rev_offs	= 0x00,
	.sysc_offs	= 0x20,
	.syss_offs	= 0x10,
	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
			   SYSS_HAS_RESET_STATUS),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class i2c_class = {
	.name		= "i2c",
	.sysc		= &i2c_sysc,
	.reset		= &omap_i2c_reset,
};

/* I2C1 */
static struct omap_hwmod omap2430_i2c1_hwmod = {
	.name		= "i2c1",
	.flags		= HWMOD_16BIT_REG,
	.main_clk	= "i2chs1_fck",
	.prcm		= {
		.omap2 = {
			/*
			 * NOTE: The CM_FCLKEN* and CM_ICLKEN* for
			 * I2CHS IP's do not follow the usual pattern.
			 * prcm_reg_id alone cannot be used to program
			 * the iclk and fclk.  Needs to be handled using
			 * additional flags when clk handling is moved
			 * to hwmod framework.
			 */
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP2430_ST_I2CHS1_SHIFT,
		},
	},
	.class		= &i2c_class,
};

/* I2C2 */
static struct omap_hwmod omap2430_i2c2_hwmod = {
	.name		= "i2c2",
	.flags		= HWMOD_16BIT_REG,
	.main_clk	= "i2chs2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP2430_ST_I2CHS2_SHIFT,
		},
	},
	.class		= &i2c_class,
};

/* gpio5 */
static struct omap_hwmod omap2430_gpio5_hwmod = {
	.name		= "gpio5",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.main_clk	= "gpio5_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 2,
			.idlest_idle_bit = OMAP2430_ST_GPIO5_SHIFT,
		},
	},
	.class		= &omap2xxx_gpio_hwmod_class,
};

/* mailbox */
static struct omap_hwmod omap2430_mailbox_hwmod = {
	.name		= "mailbox",
	.class		= &omap2xxx_mailbox_hwmod_class,
	.main_clk	= "mailboxes_ick",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MAILBOXES_SHIFT,
		},
	},
};

/* mcspi3 */
static struct omap_hwmod omap2430_mcspi3_hwmod = {
	.name		= "mcspi3",
	.main_clk	= "mcspi3_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 2,
			.idlest_idle_bit = OMAP2430_ST_MCSPI3_SHIFT,
		},
	},
	.class		= &omap2xxx_mcspi_class,
};

/* usbhsotg */
static struct omap_hwmod_class_sysconfig omap2430_usbhsotg_sysc = {
	.rev_offs	= 0x0400,
	.sysc_offs	= 0x0404,
	.syss_offs	= 0x0408,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class usbotg_class = {
	.name		= "usbotg",
	.sysc		= &omap2430_usbhsotg_sysc,
};

/* usb_otg_hs */
static struct omap_hwmod omap2430_usbhsotg_hwmod = {
	.name		= "usb_otg_hs",
	.main_clk	= "usbhs_ick",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP2430_ST_USBHS_SHIFT,
		},
	},
	.class		= &usbotg_class,
	/*
	 * Erratum ID: i479  idle_req / idle_ack mechanism potentially
	 * broken when autoidle is enabled
	 * workaround is to disable the autoidle bit at module level.
	 */
	.flags		= HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE |
			  HWMOD_SWSUP_MSTANDBY,
};

/*
 * 'mcbsp' class
 * multi channel buffered serial port controller
 */

static struct omap_hwmod_class_sysconfig omap2430_mcbsp_sysc = {
	.rev_offs	= 0x007C,
	.sysc_offs	= 0x008C,
	.sysc_flags	= (SYSC_HAS_SOFTRESET),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2430_mcbsp_hwmod_class = {
	.name		= "mcbsp",
	.sysc		= &omap2430_mcbsp_sysc,
};

/* Optional functional clocks shared by all five McBSP instances */
static struct omap_hwmod_opt_clk mcbsp_opt_clks[] = {
	{ .role = "pad_fck", .clk = "mcbsp_clks" },
	{ .role = "prcm_fck", .clk = "func_96m_ck" },
};

/* mcbsp1 */
static struct omap_hwmod omap2430_mcbsp1_hwmod = {
	.name		= "mcbsp1",
	.class		= &omap2430_mcbsp_hwmod_class,
	.main_clk	= "mcbsp1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MCBSP1_SHIFT,
		},
	},
	.opt_clks	= mcbsp_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp_opt_clks),
};

/* mcbsp2 */
static struct omap_hwmod omap2430_mcbsp2_hwmod = {
	.name		= "mcbsp2",
	.class		= &omap2430_mcbsp_hwmod_class,
	.main_clk	= "mcbsp2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MCBSP2_SHIFT,
		},
	},
	.opt_clks	= mcbsp_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp_opt_clks),
};

/* mcbsp3 */
static struct omap_hwmod omap2430_mcbsp3_hwmod = {
	.name		= "mcbsp3",
	.class		= &omap2430_mcbsp_hwmod_class,
	.main_clk	= "mcbsp3_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 2,
			.idlest_idle_bit = OMAP2430_ST_MCBSP3_SHIFT,
		},
	},
	.opt_clks	= mcbsp_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp_opt_clks),
};

/* mcbsp4 */
static struct omap_hwmod omap2430_mcbsp4_hwmod = {
	.name		= "mcbsp4",
	.class		= &omap2430_mcbsp_hwmod_class,
	.main_clk	= "mcbsp4_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 2,
			.idlest_idle_bit = OMAP2430_ST_MCBSP4_SHIFT,
		},
	},
	.opt_clks	= mcbsp_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp_opt_clks),
};

/* mcbsp5 */
static struct omap_hwmod omap2430_mcbsp5_hwmod = {
	.name		= "mcbsp5",
	.class		= &omap2430_mcbsp_hwmod_class,
	.main_clk	= "mcbsp5_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 2,
			.idlest_idle_bit = OMAP2430_ST_MCBSP5_SHIFT,
		},
	},
	.opt_clks	= mcbsp_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp_opt_clks),
};

/* MMC/SD/SDIO common */
static struct omap_hwmod_class_sysconfig omap2430_mmc_sysc = {
	.rev_offs	= 0x1fc,
	.sysc_offs	= 0x10,
	.syss_offs	= 0x14,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2430_mmc_class = {
	.name		= "mmc",
	.sysc		= &omap2430_mmc_sysc,
};

/* MMC/SD/SDIO1 */

static struct omap_hwmod_opt_clk omap2430_mmc1_opt_clks[] = {
	{ .role = "dbck", .clk = "mmchsdb1_fck" },
};

static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
	.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};

static struct omap_hwmod omap2430_mmc1_hwmod = {
	.name		= "mmc1",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.opt_clks	= omap2430_mmc1_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(omap2430_mmc1_opt_clks),
	.main_clk	= "mmchs1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 2,
			.idlest_idle_bit = OMAP2430_ST_MMCHS1_SHIFT,
		},
	},
	.dev_attr	= &mmc1_dev_attr,
	.class		= &omap2430_mmc_class,
};

/* MMC/SD/SDIO2 */

static struct omap_hwmod_opt_clk omap2430_mmc2_opt_clks[] = {
	{ .role = "dbck", .clk = "mmchsdb2_fck" },
};

static struct omap_hwmod omap2430_mmc2_hwmod = {
	.name		= "mmc2",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.opt_clks	= omap2430_mmc2_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(omap2430_mmc2_opt_clks),
	.main_clk	= "mmchs2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 2,
			.idlest_idle_bit = OMAP2430_ST_MMCHS2_SHIFT,
		},
	},
	.class		= &omap2430_mmc_class,
};

/* HDQ1W/1-wire */
static struct omap_hwmod omap2430_hdq1w_hwmod = {
	.name		= "hdq1w",
	.main_clk	= "hdq_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_HDQ_SHIFT,
		},
	},
	.class		= &omap2_hdq1w_class,
};

/*
 * interfaces
 */

/* L3 -> L4_CORE interface */
/* l3_core -> usbhsotg interface */
static struct omap_hwmod_ocp_if omap2430_usbhsotg__l3 = {
	.master		= &omap2430_usbhsotg_hwmod,
	.slave		= &omap2xxx_l3_main_hwmod,
	.clk		= "core_l3_ck",
	.user		= OCP_USER_MPU,
};

/* L4 CORE -> I2C1 interface */
static struct omap_hwmod_ocp_if omap2430_l4_core__i2c1 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_i2c1_hwmod,
	.clk		= "i2c1_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> I2C2 interface */
static struct omap_hwmod_ocp_if omap2430_l4_core__i2c2 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_i2c2_hwmod,
	.clk		= "i2c2_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core ->usbhsotg interface */
static struct omap_hwmod_ocp_if omap2430_l4_core__usbhsotg = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_usbhsotg_hwmod,
	.clk		= "usb_l4_ick",
	.user		= OCP_USER_MPU,
};

/* L4 CORE -> MMC1 interface */
static struct omap_hwmod_ocp_if omap2430_l4_core__mmc1 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mmc1_hwmod,
	.clk		= "mmchs1_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> MMC2 interface */
static struct omap_hwmod_ocp_if omap2430_l4_core__mmc2 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mmc2_hwmod,
	.clk		= "mmchs2_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi3 interface */
static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi3 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mcspi3_hwmod,
	.clk		= "mcspi3_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* IVA2 <- L3 interface */
static struct omap_hwmod_ocp_if omap2430_l3__iva = {
	.master		= &omap2xxx_l3_main_hwmod,
	.slave		= &omap2430_iva_hwmod,
	.clk		= "core_l3_ck",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_ocp_if omap2430_l4_wkup__wd_timer2 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_wd_timer2_hwmod,
	.clk		= "mpu_wdt_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio1 */
static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio1 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_gpio1_hwmod,
	.clk		= "gpios_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio2 */
static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio2 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_gpio2_hwmod,
	.clk		= "gpios_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio3 */
static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio3 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_gpio3_hwmod,
	.clk		= "gpios_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_wkup -> gpio4 */
static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio4 = {
	.master		= &omap2xxx_l4_wkup_hwmod,
	.slave		= &omap2xxx_gpio4_hwmod,
	.clk		= "gpios_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> gpio5 */
static struct omap_hwmod_ocp_if omap2430_l4_core__gpio5 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_gpio5_hwmod,
	.clk		= "gpio5_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mailbox */
static struct omap_hwmod_ocp_if omap2430_l4_core__mailbox = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mailbox_hwmod,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp1 */
static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp1 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mcbsp1_hwmod,
	.clk		= "mcbsp1_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp2 */
static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp2 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mcbsp2_hwmod,
	.clk		= "mcbsp2_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp3 */
static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp3 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mcbsp3_hwmod,
	.clk		= "mcbsp3_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp4 */
static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp4 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mcbsp4_hwmod,
	.clk		= "mcbsp4_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> mcbsp5 */
static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp5 = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_mcbsp5_hwmod,
	.clk		= "mcbsp5_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> hdq1w */
static struct omap_hwmod_ocp_if omap2430_l4_core__hdq1w = {
	.master		= &omap2xxx_l4_core_hwmod,
	.slave		= &omap2430_hdq1w_hwmod,
	.clk		= "hdq_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
	.flags		= OMAP_FIREWALL_L4 | OCPIF_SWSUP_IDLE,
};

/* l3 -> gpmc */
static struct omap_hwmod_ocp_if omap2430_l3__gpmc = {
	.master		= &omap2xxx_l3_main_hwmod,
	.slave		= &omap2xxx_gpmc_hwmod,
	.clk		= "core_l3_ck",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* All interconnect links registered for OMAP2430; NULL-terminated */
static struct omap_hwmod_ocp_if *omap2430_hwmod_ocp_ifs[] __initdata = {
	&omap2xxx_l3_main__l4_core,
	&omap2xxx_mpu__l3_main,
	&omap2xxx_dss__l3,
	&omap2430_usbhsotg__l3,
	&omap2430_l4_core__i2c1,
	&omap2430_l4_core__i2c2,
	&omap2xxx_l4_core__l4_wkup,
	&omap2_l4_core__uart1,
	&omap2_l4_core__uart2,
	&omap2_l4_core__uart3,
	&omap2430_l4_core__usbhsotg,
	&omap2430_l4_core__mmc1,
	&omap2430_l4_core__mmc2,
	&omap2xxx_l4_core__mcspi1,
	&omap2xxx_l4_core__mcspi2,
	&omap2430_l4_core__mcspi3,
	&omap2430_l3__iva,
	&omap2xxx_l4_core__timer3,
	&omap2xxx_l4_core__timer4,
	&omap2xxx_l4_core__timer5,
	&omap2xxx_l4_core__timer6,
	&omap2xxx_l4_core__timer7,
	&omap2xxx_l4_core__timer8,
	&omap2xxx_l4_core__timer9,
	&omap2xxx_l4_core__timer10,
	&omap2xxx_l4_core__timer11,
	&omap2xxx_l4_core__timer12,
	&omap2430_l4_wkup__wd_timer2,
	&omap2xxx_l4_core__dss,
	&omap2xxx_l4_core__dss_dispc,
	&omap2xxx_l4_core__dss_rfbi,
	&omap2xxx_l4_core__dss_venc,
	&omap2430_l4_wkup__gpio1,
	&omap2430_l4_wkup__gpio2,
	&omap2430_l4_wkup__gpio3,
	&omap2430_l4_wkup__gpio4,
	&omap2430_l4_core__gpio5,
	&omap2430_l4_core__mailbox,
	&omap2430_l4_core__mcbsp1,
	&omap2430_l4_core__mcbsp2,
	&omap2430_l4_core__mcbsp3,
	&omap2430_l4_core__mcbsp4,
	&omap2430_l4_core__mcbsp5,
	&omap2430_l4_core__hdq1w,
	&omap2xxx_l4_core__rng,
	&omap2xxx_l4_core__sham,
	&omap2xxx_l4_core__aes,
	&omap2430_l3__gpmc,
	NULL,
};

/* Initialize the hwmod core and register the OMAP2430 interconnect links */
int __init omap2430_hwmod_init(void)
{
	omap_hwmod_init();
	return omap_hwmod_register_links(omap2430_hwmod_ocp_ifs);
}
/* end of arch/arm/mach-omap2/omap_hwmod_2430_data.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP and TWL PMIC specific initializations.
 *
 * Copyright (C) 2010 Texas Instruments Incorporated.
 * Thara Gopinath
 * Copyright (C) 2009 Texas Instruments Incorporated.
 * Nishanth Menon
 * Copyright (C) 2009 Nokia Corporation
 * Paul Walmsley
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/twl.h>

#include "soc.h"
#include "voltage.h"

#include "pm.h"

#define OMAP3_SRI2C_SLAVE_ADDR		0x12
#define OMAP3_VDD_MPU_SR_CONTROL_REG	0x00
#define OMAP3_VDD_CORE_SR_CONTROL_REG	0x01
#define OMAP3_VP_CONFIG_ERROROFFSET	0x00
#define OMAP3_VP_VSTEPMIN_VSTEPMIN	0x1
#define OMAP3_VP_VSTEPMAX_VSTEPMAX	0x04
#define OMAP3_VP_VLIMITTO_TIMEOUT_US	200

#define OMAP4_SRI2C_SLAVE_ADDR		0x12
#define OMAP4_VDD_MPU_SR_VOLT_REG	0x55
#define OMAP4_VDD_MPU_SR_CMD_REG	0x56
#define OMAP4_VDD_IVA_SR_VOLT_REG	0x5B
#define OMAP4_VDD_IVA_SR_CMD_REG	0x5C
#define OMAP4_VDD_CORE_SR_VOLT_REG	0x61
#define OMAP4_VDD_CORE_SR_CMD_REG	0x62

/* Cached TWL6030 SMPS_OFFSET efuse value; read once over I2C on first use */
static bool is_offset_valid;
static u8 smps_offset;

#define REG_SMPS_OFFSET		0xE0

/*
 * twl4030_vsel_to_uv - convert a TWL4030 VSEL code to microvolts
 *
 * Linear scale: 600 mV base plus 12.5 mV per step
 * ((vsel * 125 + 6000) * 100 == 600000 + vsel * 12500).
 */
static unsigned long twl4030_vsel_to_uv(const u8 vsel)
{
	return (((vsel * 125) + 6000)) * 100;
}

/*
 * twl4030_uv_to_vsel - convert microvolts to a TWL4030 VSEL code
 *
 * Inverse of twl4030_vsel_to_uv, rounding up so the selected step
 * is never below the requested voltage.
 */
static u8 twl4030_uv_to_vsel(unsigned long uv)
{
	return DIV_ROUND_UP(uv - 600000, 12500);
}

static unsigned long twl6030_vsel_to_uv(const u8 vsel)
{
	/*
	 * In TWL6030 depending on the value of SMPS_OFFSET
	 * efuse register the voltage range supported in
	 * standard mode can be either between 0.6V - 1.3V or
	 * 0.7V - 1.4V. In TWL6030 ES1.0 SMPS_OFFSET efuse
	 * is programmed to all 0's where as starting from
	 * TWL6030 ES1.1 the efuse is programmed to 1
	 */
	if (!is_offset_valid) {
		twl_i2c_read_u8(TWL6030_MODULE_ID0, &smps_offset,
				REG_SMPS_OFFSET);
		is_offset_valid = true;
	}

	/* vsel 0 means the rail is off */
	if (!vsel)
		return 0;
	/*
	 * There is no specific formula for voltage to vsel
	 * conversion above 1.3V. There are special hardcoded
	 * values for voltages above 1.3V. Currently we are
	 * hardcoding only for 1.35 V which is used for the 1 GHz OPP for
	 * OMAP4430.
	 */
	if (vsel == 0x3A)
		return 1350000;

	/*
	 * 12.66 mV per step; the base depends on the efuse offset bit:
	 * 709 mV when SMPS_OFFSET bit 3 is set, 607.7 mV otherwise.
	 */
	if (smps_offset & 0x8)
		return ((((vsel - 1) * 1266) + 70900)) * 10;
	else
		return ((((vsel - 1) * 1266) + 60770)) * 10;
}

static u8 twl6030_uv_to_vsel(unsigned long uv)
{
	/*
	 * In TWL6030 depending on the value of SMPS_OFFSET
	 * efuse register the voltage range supported in
	 * standard mode can be either between 0.6V - 1.3V or
	 * 0.7V - 1.4V. In TWL6030 ES1.0 SMPS_OFFSET efuse
	 * is programmed to all 0's where as starting from
	 * TWL6030 ES1.1 the efuse is programmed to 1
	 */
	if (!is_offset_valid) {
		twl_i2c_read_u8(TWL6030_MODULE_ID0, &smps_offset,
				REG_SMPS_OFFSET);
		is_offset_valid = true;
	}

	/* 0 uV maps to vsel 0 (rail off) */
	if (!uv)
		return 0x00;
	/*
	 * There is no specific formula for voltage to vsel
	 * conversion above 1.3V. There are special hardcoded
	 * values for voltages above 1.3V. Currently we are
	 * hardcoding only for 1.35 V which is used for the 1 GHz OPP for
	 * OMAP4430.
	 */
	if (uv > twl6030_vsel_to_uv(0x39)) {
		if (uv == 1350000)
			return 0x3A;
		/* Out-of-range request: warn and clamp to the 1.35 V code */
		pr_err("%s:OUT OF RANGE! non mapped vsel for %ld Vs max %ld\n",
			__func__, uv, twl6030_vsel_to_uv(0x39));
		return 0x3A;
	}

	/* Inverse of twl6030_vsel_to_uv, rounding up to the next step */
	if (smps_offset & 0x8)
		return DIV_ROUND_UP(uv - 709000, 12660) + 1;
	else
		return DIV_ROUND_UP(uv - 607700, 12660) + 1;
}

/* OMAP3 MPU voltage domain PMIC description (TWL4030-class PMIC) */
static struct omap_voltdm_pmic omap3_mpu_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12500,
	.vp_erroroffset		= OMAP3_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP3_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP3_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 600000,
	.vddmax			= 1450000,
	.vp_timeout_us		= OMAP3_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP3_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP3_VDD_MPU_SR_CONTROL_REG,
	.i2c_high_speed		= true,
	.vsel_to_uv		= twl4030_vsel_to_uv,
	.uv_to_vsel		= twl4030_uv_to_vsel,
};

/* OMAP3 CORE voltage domain PMIC description */
static struct omap_voltdm_pmic omap3_core_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12500,
	.vp_erroroffset		= OMAP3_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP3_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP3_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 600000,
	.vddmax			= 1450000,
	.vp_timeout_us		= OMAP3_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP3_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP3_VDD_CORE_SR_CONTROL_REG,
	.i2c_high_speed		= true,
	.vsel_to_uv		= twl4030_vsel_to_uv,
	.uv_to_vsel		= twl4030_uv_to_vsel,
};

/* OMAP4 MPU voltage domain PMIC description (TWL6030-class PMIC) */
static struct omap_voltdm_pmic omap4_mpu_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12660,
	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 0,
	.vddmax			= 2100000,
	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP4_VDD_MPU_SR_VOLT_REG,
	.cmd_reg_addr		= OMAP4_VDD_MPU_SR_CMD_REG,
	.i2c_high_speed		= true,
	.i2c_pad_load		= 3,
	.vsel_to_uv		= twl6030_vsel_to_uv,
	.uv_to_vsel		= twl6030_uv_to_vsel,
};

/* OMAP4 IVA voltage domain PMIC description */
static struct omap_voltdm_pmic omap4_iva_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12660,
	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 0,
	.vddmax			= 2100000,
	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP4_VDD_IVA_SR_VOLT_REG,
	.cmd_reg_addr		= OMAP4_VDD_IVA_SR_CMD_REG,
	.i2c_high_speed		= true,
	.i2c_pad_load		= 3,
	.vsel_to_uv		= twl6030_vsel_to_uv,
	.uv_to_vsel		= twl6030_uv_to_vsel,
};

/* OMAP4 CORE voltage domain PMIC description */
static struct omap_voltdm_pmic omap4_core_pmic = {
	.slew_rate		= 4000,
	.step_size		= 12660,
	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
	.vddmin			= 0,
	.vddmax			= 2100000,
	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
	.volt_reg_addr		= OMAP4_VDD_CORE_SR_VOLT_REG,
	.cmd_reg_addr		= OMAP4_VDD_CORE_SR_CMD_REG,
	.i2c_high_speed		= true,
	.i2c_pad_load		= 3,
	.vsel_to_uv		= twl6030_vsel_to_uv,
	.uv_to_vsel		= twl6030_uv_to_vsel,
};

/*
 * omap4_twl_init - register TWL6030 PMIC data with the mpu/iva/core
 * voltage domains.  Skipped when not running on OMAP44xx, or when the
 * devicetree describes a Motorola CPCAP PMIC instead of a TWL.
 */
int __init omap4_twl_init(void)
{
	struct voltagedomain *voltdm;

	if (!cpu_is_omap44xx() ||
	    of_find_compatible_node(NULL, NULL, "motorola,cpcap"))
		return -ENODEV;

	voltdm = voltdm_lookup("mpu");
	omap_voltage_register_pmic(voltdm, &omap4_mpu_pmic);

	voltdm = voltdm_lookup("iva");
	omap_voltage_register_pmic(voltdm, &omap4_iva_pmic);

	voltdm = voltdm_lookup("core");
	omap_voltage_register_pmic(voltdm, &omap4_core_pmic);

	return 0;
}

/*
 * omap3_twl_init - register TWL4030 PMIC data with the mpu_iva and core
 * voltage domains.  Skipped when not running on OMAP34xx.
 */
int __init omap3_twl_init(void)
{
	struct voltagedomain *voltdm;

	if (!cpu_is_omap34xx())
		return -ENODEV;

	voltdm = voltdm_lookup("mpu_iva");
	omap_voltage_register_pmic(voltdm, &omap3_mpu_pmic);

	voltdm = voltdm_lookup("core");
	omap_voltage_register_pmic(voltdm, &omap3_core_pmic);

	return 0;
}
/* end of arch/arm/mach-omap2/omap_twl.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP54XX Clock domains framework
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Abhijit Pagare ([email protected])
 * Benoit Cousson ([email protected])
 * Paul Walmsley ([email protected])
 *
 * This file is automatically generated from the OMAP hardware databases.
 * We respectfully ask that any modifications to this file be coordinated
 * with the public [email protected] mailing list and the
 * authors above to ensure that the autogeneration scripts are kept
 * up-to-date with the file contents.
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "clockdomain.h"
#include "cm1_54xx.h"
#include "cm2_54xx.h"

#include "cm-regbits-54xx.h"
#include "prm54xx.h"
#include "prcm44xx.h"
#include "prcm_mpu54xx.h"

/* Static Dependencies for OMAP54xx Clock Domains */

static struct clkdm_dep c2c_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" },
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3init_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ .clkdm_name = "l3main2_clkdm" },
	{ .clkdm_name = "l4cfg_clkdm" },
	{ .clkdm_name = "l4per_clkdm" },
	{ NULL },
};

static struct clkdm_dep cam_wkup_sleep_deps[] = {
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ NULL },
};

static struct clkdm_dep dma_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "ipu_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3init_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ .clkdm_name = "l4cfg_clkdm" },
	{ .clkdm_name = "l4per_clkdm" },
	{ .clkdm_name = "l4sec_clkdm" },
	{ .clkdm_name = "wkupaon_clkdm" },
	{ NULL },
};

static struct clkdm_dep dsp_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" },
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3init_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ .clkdm_name = "l3main2_clkdm" },
	{ .clkdm_name = "l4cfg_clkdm" },
	{ .clkdm_name = "l4per_clkdm" },
	{ .clkdm_name = "wkupaon_clkdm" },
	{ NULL },
};

static struct clkdm_dep dss_wkup_sleep_deps[] = {
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3main2_clkdm" },
	{ NULL },
};

static struct clkdm_dep gpu_wkup_sleep_deps[] = {
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ NULL },
};

static struct clkdm_dep ipu_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" },
	{ .clkdm_name = "dsp_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "gpu_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3init_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ .clkdm_name = "l3main2_clkdm" },
	{ .clkdm_name = "l4cfg_clkdm" },
	{ .clkdm_name = "l4per_clkdm" },
	{ .clkdm_name = "l4sec_clkdm" },
	{ .clkdm_name = "wkupaon_clkdm" },
	{ NULL },
};

static struct clkdm_dep iva_wkup_sleep_deps[] = {
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ NULL },
};

static struct clkdm_dep l3init_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" },
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l4cfg_clkdm" },
	{ .clkdm_name = "l4per_clkdm" },
	{ .clkdm_name = "l4sec_clkdm" },
	{ .clkdm_name = "wkupaon_clkdm" },
	{ NULL },
};

static struct clkdm_dep l4sec_wkup_sleep_deps[] = {
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ .clkdm_name = "l4per_clkdm" },
	{ NULL },
};

static struct clkdm_dep mipiext_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" },
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3init_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ .clkdm_name = "l3main2_clkdm" },
	{ .clkdm_name = "l4cfg_clkdm" },
	{ .clkdm_name = "l4per_clkdm" },
	{ NULL },
};

static struct clkdm_dep mpu_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" },
	{ .clkdm_name = "dsp_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "emif_clkdm" },
	{ .clkdm_name = "gpu_clkdm" },
	{ .clkdm_name = "ipu_clkdm" },
	{ .clkdm_name = "iva_clkdm" },
	{ .clkdm_name = "l3init_clkdm" },
	{ .clkdm_name = "l3main1_clkdm" },
	{ .clkdm_name = "l3main2_clkdm" },
	{ .clkdm_name = "l4cfg_clkdm" },
	{ .clkdm_name = "l4per_clkdm" },
	{ .clkdm_name = "l4sec_clkdm" },
	{ .clkdm_name = "wkupaon_clkdm" },
	{ NULL },
};

static struct clockdomain l4sec_54xx_clkdm = {
	.name		  = "l4sec_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_L4SEC_CDOFFS,
	.dep_bit	  = OMAP54XX_L4SEC_STATDEP_SHIFT,
	.wkdep_srcs	  = l4sec_wkup_sleep_deps,
	.sleepdep_srcs	  = l4sec_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_SWSUP,
};

static struct clockdomain iva_54xx_clkdm = {
	.name		  = "iva_clkdm",
	.pwrdm		  = { .name = "iva_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_IVA_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_IVA_IVA_CDOFFS,
	.dep_bit	  = OMAP54XX_IVA_STATDEP_SHIFT,
	.wkdep_srcs	  = iva_wkup_sleep_deps,
	.sleepdep_srcs	  = iva_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain mipiext_54xx_clkdm = {
	.name		  = "mipiext_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_MIPIEXT_CDOFFS,
	.wkdep_srcs	  = mipiext_wkup_sleep_deps,
	.sleepdep_srcs	  = mipiext_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain l3main2_54xx_clkdm = {
	.name		  = "l3main2_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_L3MAIN2_CDOFFS,
	.dep_bit	  = OMAP54XX_L3MAIN2_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP,
};

static struct clockdomain l3main1_54xx_clkdm = {
	.name		  = "l3main1_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_L3MAIN1_CDOFFS,
	.dep_bit	  = OMAP54XX_L3MAIN1_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP,
};

static struct clockdomain custefuse_54xx_clkdm = {
	.name		  = "custefuse_clkdm",
	.pwrdm		  = { .name = "custefuse_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CUSTEFUSE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CUSTEFUSE_CUSTEFUSE_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain ipu_54xx_clkdm = {
	.name		  = "ipu_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_IPU_CDOFFS,
	.dep_bit	  = OMAP54XX_IPU_STATDEP_SHIFT,
	.wkdep_srcs	  = ipu_wkup_sleep_deps,
	.sleepdep_srcs	  = ipu_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain l4cfg_54xx_clkdm = {
	.name		  = "l4cfg_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_L4CFG_CDOFFS,
	.dep_bit	  = OMAP54XX_L4CFG_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP,
};

static struct clockdomain abe_54xx_clkdm = {
	.name		  = "abe_clkdm",
	.pwrdm		  = { .name = "abe_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_AON_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_AON_ABE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_AON_ABE_ABE_CDOFFS,
	.dep_bit	  = OMAP54XX_ABE_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain dss_54xx_clkdm = {
	.name		  = "dss_clkdm",
	.pwrdm		  = { .name = "dss_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_DSS_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_DSS_DSS_CDOFFS,
	.dep_bit	  = OMAP54XX_DSS_STATDEP_SHIFT,
	.wkdep_srcs	  = dss_wkup_sleep_deps,
	.sleepdep_srcs	  = dss_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain dsp_54xx_clkdm = {
	.name		  = "dsp_clkdm",
	.pwrdm		  = { .name = "dsp_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_AON_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_AON_DSP_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_AON_DSP_DSP_CDOFFS,
	.dep_bit	  = OMAP54XX_DSP_STATDEP_SHIFT,
	.wkdep_srcs	  = dsp_wkup_sleep_deps,
	.sleepdep_srcs	  = dsp_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain c2c_54xx_clkdm = {
	.name		  = "c2c_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_C2C_CDOFFS,
	.wkdep_srcs	  = c2c_wkup_sleep_deps,
	.sleepdep_srcs	  = c2c_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain l4per_54xx_clkdm = {
	.name		  = "l4per_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_L4PER_CDOFFS,
	.dep_bit	  = OMAP54XX_L4PER_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain gpu_54xx_clkdm = {
	.name		  = "gpu_clkdm",
	.pwrdm		  = { .name = "gpu_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_GPU_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_GPU_GPU_CDOFFS,
	.dep_bit	  = OMAP54XX_GPU_STATDEP_SHIFT,
	.wkdep_srcs	  = gpu_wkup_sleep_deps,
	.sleepdep_srcs	  = gpu_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain wkupaon_54xx_clkdm = {
	.name		  = "wkupaon_clkdm",
	.pwrdm		  = { .name = "wkupaon_pwrdm" },
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.cm_inst	  = OMAP54XX_PRM_WKUPAON_CM_INST,
	.clkdm_offs	  = OMAP54XX_PRM_WKUPAON_CM_WKUPAON_CDOFFS,
	.dep_bit	  = OMAP54XX_WKUPAON_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain mpu0_54xx_clkdm = {
	.name		  = "mpu0_clkdm",
	.pwrdm		  = { .name = "cpu0_pwrdm" },
	.prcm_partition	  = OMAP54XX_PRCM_MPU_PARTITION,
	.cm_inst	  = OMAP54XX_PRCM_MPU_CM_C0_INST,
	.clkdm_offs	  = OMAP54XX_PRCM_MPU_CM_C0_CPU0_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain mpu1_54xx_clkdm = {
	.name		  = "mpu1_clkdm",
	.pwrdm		  = { .name = "cpu1_pwrdm" },
	.prcm_partition	  = OMAP54XX_PRCM_MPU_PARTITION,
	.cm_inst	  = OMAP54XX_PRCM_MPU_CM_C1_INST,
	.clkdm_offs	  = OMAP54XX_PRCM_MPU_CM_C1_CPU1_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain coreaon_54xx_clkdm = {
	.name		  = "coreaon_clkdm",
	.pwrdm		  = { .name = "coreaon_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_COREAON_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_COREAON_COREAON_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain mpu_54xx_clkdm = {
	.name		  = "mpu_clkdm",
	.pwrdm		  = { .name = "mpu_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_AON_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_AON_MPU_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_AON_MPU_MPU_CDOFFS,
	.wkdep_srcs	  = mpu_wkup_sleep_deps,
	.sleepdep_srcs	  = mpu_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain l3init_54xx_clkdm = {
	.name		  = "l3init_clkdm",
	.pwrdm		  = { .name = "l3init_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_L3INIT_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_L3INIT_L3INIT_CDOFFS,
	.dep_bit	  = OMAP54XX_L3INIT_STATDEP_SHIFT,
	.wkdep_srcs	  = l3init_wkup_sleep_deps,
	.sleepdep_srcs	  = l3init_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain dma_54xx_clkdm = {
	.name		  = "dma_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_DMA_CDOFFS,
	.wkdep_srcs	  = dma_wkup_sleep_deps,
	.sleepdep_srcs	  = dma_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain l3instr_54xx_clkdm = {
	.name		  = "l3instr_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_L3INSTR_CDOFFS,
};

static struct clockdomain emif_54xx_clkdm = {
	.name		  = "emif_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CORE_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CORE_EMIF_CDOFFS,
	.dep_bit	  = OMAP54XX_EMIF_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain emu_54xx_clkdm = {
	.name		  = "emu_clkdm",
	.pwrdm		  = { .name = "emu_pwrdm" },
	.prcm_partition	  = OMAP54XX_PRM_PARTITION,
	.cm_inst	  = OMAP54XX_PRM_EMU_CM_INST,
	.clkdm_offs	  = OMAP54XX_PRM_EMU_CM_EMU_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain cam_54xx_clkdm = {
	.name		  = "cam_clkdm",
	.pwrdm		  = { .name = "cam_pwrdm" },
	.prcm_partition	  = OMAP54XX_CM_CORE_PARTITION,
	.cm_inst	  = OMAP54XX_CM_CORE_CAM_INST,
	.clkdm_offs	  = OMAP54XX_CM_CORE_CAM_CAM_CDOFFS,
	.wkdep_srcs	  = cam_wkup_sleep_deps,
	.sleepdep_srcs	  = cam_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

/* As clockdomains are added or removed above, this list must also be changed */
static struct clockdomain *clockdomains_omap54xx[] __initdata = {
	&l4sec_54xx_clkdm,
	&iva_54xx_clkdm,
	&mipiext_54xx_clkdm,
	&l3main2_54xx_clkdm,
	&l3main1_54xx_clkdm,
	&custefuse_54xx_clkdm,
	&ipu_54xx_clkdm,
	&l4cfg_54xx_clkdm,
	&abe_54xx_clkdm,
	&dss_54xx_clkdm,
	&dsp_54xx_clkdm,
	&c2c_54xx_clkdm,
	&l4per_54xx_clkdm,
	&gpu_54xx_clkdm,
	&wkupaon_54xx_clkdm,
	&mpu0_54xx_clkdm,
	&mpu1_54xx_clkdm,
	&coreaon_54xx_clkdm,
	&mpu_54xx_clkdm,
	&l3init_54xx_clkdm,
	&dma_54xx_clkdm,
	&l3instr_54xx_clkdm,
	&emif_54xx_clkdm,
	&emu_54xx_clkdm,
	&cam_54xx_clkdm,
	NULL
};

/* Register the OMAP54xx clockdomain data with the clockdomain framework */
void __init omap54xx_clockdomains_init(void)
{
	clkdm_register_platform_funcs(&omap4_clkdm_operations);
	clkdm_register_clkdms(clockdomains_omap54xx);
	clkdm_complete_init();
}
/* end of arch/arm/mach-omap2/clockdomains54xx_data.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP MPUSS low power code
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <[email protected]>
 *
 * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
 * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
 * CPU0 and CPU1 LPRM modules.
 * CPU0, CPU1 and MPUSS each have their own power domain and
 * hence multiple low power combinations of MPUSS are possible.
 *
 * The CPU0 and CPU1 can't support Closed switch Retention (CSWR)
 * because the mode is not supported by hw constraints of dormant
 * mode. While waking up from the dormant mode, a reset signal
 * to the Cortex-A9 processor must be asserted by the external
 * power controller.
 *
 * With architectural inputs and hardware recommendations, only
 * below modes are supported from power gain vs latency point of view.
 *
 *	CPU0		CPU1		MPUSS
 *	----------------------------------------------
 *	ON		ON		ON
 *	ON(Inactive)	OFF		ON(Inactive)
 *	OFF		OFF		CSWR
 *	OFF		OFF		OSWR
 *	OFF		OFF		OFF(Device OFF *TBD)
 *	----------------------------------------------
 *
 * Note: CPU0 is the master core and it is the last CPU to go down
 * and first to wake-up when MPUSS low power states are exercised
 */

#include <linux/cpuidle.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <asm/virt.h>
#include <asm/hardware/cache-l2x0.h>

#include "soc.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"
#include "pm.h"
#include "prcm_mpu44xx.h"
#include "prcm_mpu54xx.h"
#include "prminst44xx.h"
#include "prcm44xx.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"

/* Base of the SAR RAM scratchpad used to pass state to the low level code */
static void __iomem *sar_base;
/* CPU1 wakeup address found in SAR RAM at early init; kept for kexec checks */
static u32 old_cpu1_ns_pa_addr;

#if defined(CONFIG_PM) && defined(CONFIG_SMP)

/*
 * Per-CPU PM bookkeeping: the CPU's power domain plus the SAR RAM
 * slots where SCU state, wakeup address and L2X0 state are stashed
 * for the boot/resume code.
 */
struct omap4_cpu_pm_info {
	struct powerdomain *pwrdm;
	void __iomem *scu_sar_addr;
	void __iomem *wkup_sar_addr;
	void __iomem *l2x0_sar_addr;
};

/**
 * struct cpu_pm_ops - CPU pm operations
 * @finish_suspend:	CPU suspend finisher function pointer
 * @resume:		CPU resume function pointer
 * @scu_prepare:	CPU Snoop Control program function pointer
 * @hotplug_restart:	CPU restart function pointer
 *
 * Structure holds functions pointer for CPU low power operations like
 * suspend, resume and scu programming.
 */
struct cpu_pm_ops {
	int (*finish_suspend)(unsigned long cpu_state);
	void (*resume)(void);
	void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
	void (*hotplug_restart)(void);
};

static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
static struct powerdomain *mpuss_pd;
/* SoC-specific CPUx context register offset, set in omap4_mpuss_init() */
static u32 cpu_context_offset;

/* Default suspend finisher: just WFI, no state is saved */
static int default_finish_suspend(unsigned long cpu_state)
{
	omap_do_wfi();
	return 0;
}

static void dummy_cpu_resume(void)
{}

static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
{}

/*
 * Dummy defaults; omap4_mpuss_init() installs the real SoC-specific
 * callbacks for OMAP44xx. OMAP5/DRA7 keep the defaults.
 */
static struct cpu_pm_ops omap_pm_ops = {
	.finish_suspend		= default_finish_suspend,
	.resume			= dummy_cpu_resume,
	.scu_prepare		= dummy_scu_prepare,
	.hotplug_restart	= dummy_cpu_resume,
};

/*
 * Program the wakeup routine address for the CPU0 and CPU1
 * used for OFF or DORMANT wakeup.
 */
static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

	if (pm_info->wkup_sar_addr)
		writel_relaxed(addr, pm_info->wkup_sar_addr);
}

/*
 * Store the SCU power status value to scratchpad memory
 */
static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
	u32 scu_pwr_st;

	/* Map the powerdomain target state to the matching SCU power mode */
	switch (cpu_state) {
	case PWRDM_POWER_RET:
		scu_pwr_st = SCU_PM_DORMANT;
		break;
	case PWRDM_POWER_OFF:
		scu_pwr_st = SCU_PM_POWEROFF;
		break;
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
	default:
		scu_pwr_st = SCU_PM_NORMAL;
		break;
	}

	if (pm_info->scu_sar_addr)
		writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr);
}

/* Helper functions for MPUSS OSWR */

/* Write back the MPUSS context register to clear the previous-state bits */
static inline void mpuss_clear_prev_logic_pwrst(void)
{
	u32 reg;

	reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
	omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
}

/* Write back the CPUx context register to clear the previous-state bits */
static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
{
	u32 reg;

	if (cpu_id) {
		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
					cpu_context_offset);
		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
					cpu_context_offset);
	} else {
		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
					cpu_context_offset);
		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
					cpu_context_offset);
	}
}

/*
 * Store the CPU cluster state for L2X0 low power operations.
 */
static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

	if (pm_info->l2x0_sar_addr)
		writel_relaxed(save_state, pm_info->l2x0_sar_addr);
}

/*
 * Save the L2X0 AUXCTRL and POR values to SAR memory. They are used
 * in every MPUSS OFF restore path.
 */
#ifdef CONFIG_CACHE_L2X0
static void __init save_l2x0_context(void)
{
	void __iomem *l2x0_base = omap4_get_l2cache_base();

	if (l2x0_base && sar_base) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			       sar_base + L2X0_AUXCTRL_OFFSET);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       sar_base + L2X0_PREFETCH_CTRL_OFFSET);
	}
}
#else
static void __init save_l2x0_context(void)
{}
#endif

/**
 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
 * The purpose of this function is to manage low power programming
 * of OMAP4 MPUSS subsystem
 * @cpu : CPU ID
 * @power_state: Low power state.
 * @rcuidle: RCU needs to be idled
 *
 * MPUSS states for the context save:
 * save_state =
 *	0 - Nothing lost and no need to save: MPUSS INACTIVE
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
 */
__cpuidle int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
				   bool rcuidle)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET;

	/* OMAP4430 ES1.0 has no usable PM support */
	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	switch (power_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		cpu_logic_state = PWRDM_POWER_OFF;
		save_state = 1;
		break;
	case PWRDM_POWER_RET:
		if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
			save_state = 0;
		break;
	default:
		/*
		 * CPUx CSWR is invalid hardware state. Also CPUx OSWR
		 * doesn't make much sense, since logic is lost and $L1
		 * needs to be cleaned because of coherency. This makes
		 * CPUx OSWR equivalent to CPUx OFF and hence not supported
		 */
		WARN_ON(1);
		return -ENXIO;
	}

	pwrdm_pre_transition(NULL);

	/*
	 * Check MPUSS next state and save interrupt controller if needed.
	 * In MPUSS OSWR or device OFF, interrupt controller context is lost.
	 */
	mpuss_clear_prev_logic_pwrst();
	if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
	    (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
		save_state = 2;

	cpu_clear_prev_logic_pwrst(cpu);
	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
	pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);

	if (rcuidle)
		ct_cpuidle_enter();

	/* Publish resume address and target states via SAR RAM scratchpad */
	set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
	omap_pm_ops.scu_prepare(cpu, power_state);
	l2x0_pwrst_prepare(cpu, save_state);

	/*
	 * Call low level function with targeted low power state.
	 */
	if (save_state)
		cpu_suspend(save_state, omap_pm_ops.finish_suspend);
	else
		omap_pm_ops.finish_suspend(save_state);

	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
		gic_dist_enable();

	if (rcuidle)
		ct_cpuidle_exit();

	/*
	 * Restore the CPUx power state to ON otherwise CPUx
	 * power domain can transition to the programmed low power
	 * state while doing WFI outside the low power code. On
	 * secure devices, CPUx does WFI which can result in
	 * domain transition
	 */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pwrdm_post_transition(NULL);

	return 0;
}

/**
 * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
 * @cpu : CPU ID
 * @power_state: CPU low power state.
 */
int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int cpu_state = 0;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	/* Use the achievable power state for the domain */
	power_state = pwrdm_get_valid_lp_state(pm_info->pwrdm,
					       false, power_state);

	if (power_state == PWRDM_POWER_OFF)
		cpu_state = 1;

	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
	set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
	omap_pm_ops.scu_prepare(cpu, power_state);

	/*
	 * CPU never returns back if targeted power state is OFF mode.
	 * CPU ONLINE follows normal CPU ONLINE path via
	 * omap4_secondary_startup().
	 */
	omap_pm_ops.finish_suspend(cpu_state);

	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
	return 0;
}

/*
 * Enable Mercury Fast HG retention mode by default.
 */
static void enable_mercury_retention_mode(void)
{
	u32 reg;

	reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
				  OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
	/* Enable HG_EN, HG_RAMPUP = fast mode */
	reg |= BIT(24) | BIT(25);
	omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
				  OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
}

/*
 * Initialise OMAP4 MPUSS: per-CPU SAR RAM slots and power domains,
 * the MPUSS power domain, and the SoC-specific PM callbacks.
 * Returns 0 on success, -ENODEV when PM is unsupported or a power
 * domain lookup fails.
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	/* Initialise per CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
		if (cpu_is_omap44xx())
			pm_info->wkup_sar_addr = sar_base +
				CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
		else
			pm_info->wkup_sar_addr = sar_base +
				OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	}
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pm_info = &per_cpu(omap4_pm_info, 0x1);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
		if (cpu_is_omap44xx())
			pm_info->wkup_sar_addr = sar_base +
				CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
		else
			pm_info->wkup_sar_addr = sar_base +
				OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	}
	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	if (sar_base) {
		/* Save device type on scratchpad for low level code to use */
		writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0,
			       sar_base + OMAP_TYPE_OFFSET);
		save_l2x0_context();
	}

	/* Hook up the SoC-specific low power callbacks and context offset */
	if (cpu_is_omap44xx()) {
		omap_pm_ops.finish_suspend = omap4_finish_suspend;
		omap_pm_ops.resume = omap4_cpu_resume;
		omap_pm_ops.scu_prepare = scu_pwrst_prepare;
		omap_pm_ops.hotplug_restart = omap4_secondary_startup;
		cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
		enable_mercury_retention_mode();
	}

	/* OMAP4460 needs its own restart entry due to the ROM code errata */
	if (cpu_is_omap446x())
		omap_pm_ops.hotplug_restart = omap4460_secondary_startup;

	return 0;
}

#endif

/* Return the CPU1 wakeup address that was in SAR RAM before we overwrote it */
u32 omap4_get_cpu1_ns_pa_addr(void)
{
	return old_cpu1_ns_pa_addr;
}

/*
 * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to
 * current kernel's secondary_startup() early before
 * clockdomains_init(). Otherwise clockdomain_init() can
 * wake CPU1 and cause a hang.
 */
void __init omap4_mpuss_early_init(void)
{
	unsigned long startup_pa;
	void __iomem *ns_pa_addr;

	if (!(soc_is_omap44xx() || soc_is_omap54xx()))
		return;

	sar_base = omap4_get_sar_ram_base();

	/* Save old NS_PA_ADDR for validity checks later on */
	if (soc_is_omap44xx())
		ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
	else
		ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
	old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);

	/* Pick the secondary startup entry matching SoC revision / boot mode */
	if (soc_is_omap443x())
		startup_pa = __pa_symbol(omap4_secondary_startup);
	else if (soc_is_omap446x())
		startup_pa = __pa_symbol(omap4460_secondary_startup);
	else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
		startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
	else
		startup_pa = __pa_symbol(omap5_secondary_startup);

	if (soc_is_omap44xx())
		writel_relaxed(startup_pa,
			       sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
	else
		writel_relaxed(startup_pa,
			       sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
}
/* ==== source: linux-master, arch/arm/mach-omap2/omap-mpuss-lowpower.c ==== */
// SPDX-License-Identifier: GPL-2.0-only /* * DRA7xx Clock domains framework * * Copyright (C) 2009-2013 Texas Instruments, Inc. * Copyright (C) 2009-2011 Nokia Corporation * * Generated by code originally written by: * Abhijit Pagare ([email protected]) * Benoit Cousson ([email protected]) * Paul Walmsley ([email protected]) * * This file is automatically generated from the OMAP hardware databases. * We respectfully ask that any modifications to this file be coordinated * with the public [email protected] mailing list and the * authors above to ensure that the autogeneration scripts are kept * up-to-date with the file contents. */ #include <linux/kernel.h> #include <linux/io.h> #include "clockdomain.h" #include "cm1_7xx.h" #include "cm2_7xx.h" #include "cm-regbits-7xx.h" #include "prm7xx.h" #include "prcm44xx.h" #include "prcm_mpu7xx.h" /* Static Dependencies for DRA7xx Clock Domains */ static struct clkdm_dep cam_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { NULL }, }; static struct clkdm_dep dma_wkup_sleep_deps[] = { { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep dsp1_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "cam_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = 
"ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep dsp2_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "cam_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep dss_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep eve1_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep eve2_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep eve3_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep eve4_wkup_sleep_deps[] = { { .clkdm_name = 
"emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep gmac_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { NULL }, }; static struct clkdm_dep gpu_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep ipu1_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep ipu2_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = 
"pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep iva_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { NULL }, }; static struct clkdm_dep l3init_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep l4per2_wkup_sleep_deps[] = { { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { NULL }, }; static struct clkdm_dep l4sec_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { NULL }, }; static struct clkdm_dep mpu_wkup_sleep_deps[] = { { .clkdm_name = "cam_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep pcie_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "cam_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { 
.clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { NULL }, }; static struct clkdm_dep vpe_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { NULL }, }; static struct clockdomain l4per3_7xx_clkdm = { .name = "l4per3_clkdm", .pwrdm = { .name = "l4per_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L4PER_INST, .clkdm_offs = DRA7XX_CM_CORE_L4PER_L4PER3_CDOFFS, .dep_bit = DRA7XX_L4PER3_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l4per2_7xx_clkdm = { .name = "l4per2_clkdm", .pwrdm = { .name = "l4per_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L4PER_INST, .clkdm_offs = DRA7XX_CM_CORE_L4PER_L4PER2_CDOFFS, .dep_bit = DRA7XX_L4PER2_STATDEP_SHIFT, .wkdep_srcs = l4per2_wkup_sleep_deps, .sleepdep_srcs = l4per2_wkup_sleep_deps, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain mpu0_7xx_clkdm = { .name = "mpu0_clkdm", .pwrdm = { .name = "cpu0_pwrdm" }, .prcm_partition = DRA7XX_MPU_PRCM_PARTITION, .cm_inst = DRA7XX_MPU_PRCM_CM_C0_INST, .clkdm_offs = DRA7XX_MPU_PRCM_CM_C0_CPU0_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain iva_7xx_clkdm = { .name = "iva_clkdm", .pwrdm = { .name = "iva_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_IVA_INST, .clkdm_offs = DRA7XX_CM_CORE_IVA_IVA_CDOFFS, .dep_bit = DRA7XX_IVA_STATDEP_SHIFT, .wkdep_srcs = iva_wkup_sleep_deps, .sleepdep_srcs = iva_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain coreaon_7xx_clkdm = { 
.name = "coreaon_clkdm", .pwrdm = { .name = "coreaon_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_COREAON_INST, .clkdm_offs = DRA7XX_CM_CORE_COREAON_COREAON_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain ipu1_7xx_clkdm = { .name = "ipu1_clkdm", .pwrdm = { .name = "ipu_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU1_CDOFFS, .dep_bit = DRA7XX_IPU1_STATDEP_SHIFT, .wkdep_srcs = ipu1_wkup_sleep_deps, .sleepdep_srcs = ipu1_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain ipu2_7xx_clkdm = { .name = "ipu2_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_IPU2_CDOFFS, .dep_bit = DRA7XX_IPU2_STATDEP_SHIFT, .wkdep_srcs = ipu2_wkup_sleep_deps, .sleepdep_srcs = ipu2_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l3init_7xx_clkdm = { .name = "l3init_clkdm", .pwrdm = { .name = "l3init_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L3INIT_INST, .clkdm_offs = DRA7XX_CM_CORE_L3INIT_L3INIT_CDOFFS, .dep_bit = DRA7XX_L3INIT_STATDEP_SHIFT, .wkdep_srcs = l3init_wkup_sleep_deps, .sleepdep_srcs = l3init_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l4sec_7xx_clkdm = { .name = "l4sec_clkdm", .pwrdm = { .name = "l4per_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L4PER_INST, .clkdm_offs = DRA7XX_CM_CORE_L4PER_L4SEC_CDOFFS, .dep_bit = DRA7XX_L4SEC_STATDEP_SHIFT, .wkdep_srcs = l4sec_wkup_sleep_deps, .sleepdep_srcs = l4sec_wkup_sleep_deps, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l3main1_7xx_clkdm = { .name = "l3main1_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = 
DRA7XX_CM_CORE_CORE_L3MAIN1_CDOFFS, .dep_bit = DRA7XX_L3MAIN1_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP, }; static struct clockdomain vpe_7xx_clkdm = { .name = "vpe_clkdm", .pwrdm = { .name = "vpe_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_VPE_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_VPE_VPE_CDOFFS, .dep_bit = DRA7XX_VPE_STATDEP_SHIFT, .wkdep_srcs = vpe_wkup_sleep_deps, .sleepdep_srcs = vpe_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain mpu_7xx_clkdm = { .name = "mpu_clkdm", .pwrdm = { .name = "mpu_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_MPU_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_MPU_MPU_CDOFFS, .wkdep_srcs = mpu_wkup_sleep_deps, .sleepdep_srcs = mpu_wkup_sleep_deps, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain custefuse_7xx_clkdm = { .name = "custefuse_clkdm", .pwrdm = { .name = "custefuse_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CUSTEFUSE_INST, .clkdm_offs = DRA7XX_CM_CORE_CUSTEFUSE_CUSTEFUSE_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain ipu_7xx_clkdm = { .name = "ipu_clkdm", .pwrdm = { .name = "ipu_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS, .dep_bit = DRA7XX_IPU_STATDEP_SHIFT, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain mpu1_7xx_clkdm = { .name = "mpu1_clkdm", .pwrdm = { .name = "cpu1_pwrdm" }, .prcm_partition = DRA7XX_MPU_PRCM_PARTITION, .cm_inst = DRA7XX_MPU_PRCM_CM_C1_INST, .clkdm_offs = DRA7XX_MPU_PRCM_CM_C1_CPU1_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain gmac_7xx_clkdm = { .name = "gmac_clkdm", .pwrdm = { .name = "l3init_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L3INIT_INST, .clkdm_offs = DRA7XX_CM_CORE_L3INIT_GMAC_CDOFFS, .dep_bit 
= DRA7XX_GMAC_STATDEP_SHIFT, .wkdep_srcs = gmac_wkup_sleep_deps, .sleepdep_srcs = gmac_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l4cfg_7xx_clkdm = { .name = "l4cfg_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_L4CFG_CDOFFS, .dep_bit = DRA7XX_L4CFG_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP, }; static struct clockdomain dma_7xx_clkdm = { .name = "dma_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_DMA_CDOFFS, .wkdep_srcs = dma_wkup_sleep_deps, .sleepdep_srcs = dma_wkup_sleep_deps, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain rtc_7xx_clkdm = { .name = "rtc_clkdm", .pwrdm = { .name = "rtc_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_RTC_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_RTC_RTC_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain pcie_7xx_clkdm = { .name = "pcie_clkdm", .pwrdm = { .name = "l3init_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L3INIT_INST, .clkdm_offs = DRA7XX_CM_CORE_L3INIT_PCIE_CDOFFS, .dep_bit = DRA7XX_PCIE_STATDEP_SHIFT, .wkdep_srcs = pcie_wkup_sleep_deps, .sleepdep_srcs = pcie_wkup_sleep_deps, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain atl_7xx_clkdm = { .name = "atl_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_ATL_CDOFFS, .dep_bit = DRA7XX_ATL_STATDEP_SHIFT, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain l3instr_7xx_clkdm = { .name = "l3instr_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = 
DRA7XX_CM_CORE_CORE_L3INSTR_CDOFFS, }; static struct clockdomain dss_7xx_clkdm = { .name = "dss_clkdm", .pwrdm = { .name = "dss_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_DSS_INST, .clkdm_offs = DRA7XX_CM_CORE_DSS_DSS_CDOFFS, .dep_bit = DRA7XX_DSS_STATDEP_SHIFT, .wkdep_srcs = dss_wkup_sleep_deps, .sleepdep_srcs = dss_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain emif_7xx_clkdm = { .name = "emif_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_EMIF_CDOFFS, .dep_bit = DRA7XX_EMIF_STATDEP_SHIFT, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain emu_7xx_clkdm = { .name = "emu_clkdm", .pwrdm = { .name = "emu_pwrdm" }, .prcm_partition = DRA7XX_PRM_PARTITION, .cm_inst = DRA7XX_PRM_EMU_CM_INST, .clkdm_offs = DRA7XX_PRM_EMU_CM_EMU_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain dsp2_7xx_clkdm = { .name = "dsp2_clkdm", .pwrdm = { .name = "dsp2_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_DSP2_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_DSP2_DSP2_CDOFFS, .dep_bit = DRA7XX_DSP2_STATDEP_SHIFT, .wkdep_srcs = dsp2_wkup_sleep_deps, .sleepdep_srcs = dsp2_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain dsp1_7xx_clkdm = { .name = "dsp1_clkdm", .pwrdm = { .name = "dsp1_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_DSP1_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_DSP1_DSP1_CDOFFS, .dep_bit = DRA7XX_DSP1_STATDEP_SHIFT, .wkdep_srcs = dsp1_wkup_sleep_deps, .sleepdep_srcs = dsp1_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain cam_7xx_clkdm = { .name = "cam_clkdm", .pwrdm = { .name = "cam_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CAM_INST, .clkdm_offs = 
DRA7XX_CM_CORE_CAM_CAM_CDOFFS, .dep_bit = DRA7XX_CAM_STATDEP_SHIFT, .wkdep_srcs = cam_wkup_sleep_deps, .sleepdep_srcs = cam_wkup_sleep_deps, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4per_7xx_clkdm = { .name = "l4per_clkdm", .pwrdm = { .name = "l4per_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L4PER_INST, .clkdm_offs = DRA7XX_CM_CORE_L4PER_L4PER_CDOFFS, .dep_bit = DRA7XX_L4PER_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain gpu_7xx_clkdm = { .name = "gpu_clkdm", .pwrdm = { .name = "gpu_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_GPU_INST, .clkdm_offs = DRA7XX_CM_CORE_GPU_GPU_CDOFFS, .dep_bit = DRA7XX_GPU_STATDEP_SHIFT, .wkdep_srcs = gpu_wkup_sleep_deps, .sleepdep_srcs = gpu_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain eve4_7xx_clkdm = { .name = "eve4_clkdm", .pwrdm = { .name = "eve4_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_EVE4_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_EVE4_EVE4_CDOFFS, .dep_bit = DRA7XX_EVE4_STATDEP_SHIFT, .wkdep_srcs = eve4_wkup_sleep_deps, .sleepdep_srcs = eve4_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain eve2_7xx_clkdm = { .name = "eve2_clkdm", .pwrdm = { .name = "eve2_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_EVE2_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_EVE2_EVE2_CDOFFS, .dep_bit = DRA7XX_EVE2_STATDEP_SHIFT, .wkdep_srcs = eve2_wkup_sleep_deps, .sleepdep_srcs = eve2_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain eve3_7xx_clkdm = { .name = "eve3_clkdm", .pwrdm = { .name = "eve3_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_EVE3_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_EVE3_EVE3_CDOFFS, .dep_bit = DRA7XX_EVE3_STATDEP_SHIFT, .wkdep_srcs = eve3_wkup_sleep_deps, .sleepdep_srcs = eve3_wkup_sleep_deps, .flags = 
CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain wkupaon_7xx_clkdm = { .name = "wkupaon_clkdm", .pwrdm = { .name = "wkupaon_pwrdm" }, .prcm_partition = DRA7XX_PRM_PARTITION, .cm_inst = DRA7XX_PRM_WKUPAON_CM_INST, .clkdm_offs = DRA7XX_PRM_WKUPAON_CM_WKUPAON_CDOFFS, .dep_bit = DRA7XX_WKUPAON_STATDEP_SHIFT, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain eve1_7xx_clkdm = { .name = "eve1_clkdm", .pwrdm = { .name = "eve1_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_EVE1_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_EVE1_EVE1_CDOFFS, .dep_bit = DRA7XX_EVE1_STATDEP_SHIFT, .wkdep_srcs = eve1_wkup_sleep_deps, .sleepdep_srcs = eve1_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; /* As clockdomains are added or removed above, this list must also be changed */ static struct clockdomain *clockdomains_dra7xx[] __initdata = { &l4per3_7xx_clkdm, &l4per2_7xx_clkdm, &mpu0_7xx_clkdm, &iva_7xx_clkdm, &coreaon_7xx_clkdm, &ipu1_7xx_clkdm, &ipu2_7xx_clkdm, &l3init_7xx_clkdm, &l4sec_7xx_clkdm, &l3main1_7xx_clkdm, &vpe_7xx_clkdm, &mpu_7xx_clkdm, &custefuse_7xx_clkdm, &ipu_7xx_clkdm, &mpu1_7xx_clkdm, &gmac_7xx_clkdm, &l4cfg_7xx_clkdm, &dma_7xx_clkdm, &rtc_7xx_clkdm, &pcie_7xx_clkdm, &atl_7xx_clkdm, &l3instr_7xx_clkdm, &dss_7xx_clkdm, &emif_7xx_clkdm, &emu_7xx_clkdm, &dsp2_7xx_clkdm, &dsp1_7xx_clkdm, &cam_7xx_clkdm, &l4per_7xx_clkdm, &gpu_7xx_clkdm, &eve4_7xx_clkdm, &eve2_7xx_clkdm, &eve3_7xx_clkdm, &wkupaon_7xx_clkdm, &eve1_7xx_clkdm, NULL }; void __init dra7xx_clockdomains_init(void) { clkdm_register_platform_funcs(&omap4_clkdm_operations); clkdm_register_clkdms(clockdomains_dra7xx); clkdm_complete_init(); }
/* ==== source: linux-master, arch/arm/mach-omap2/clockdomains7xx_data.c ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4+ Power Management Routines
 *
 * Copyright (C) 2010-2013 Texas Instruments, Inc.
 * Rajendra Nayak <[email protected]>
 * Santosh Shilimkar <[email protected]>
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/system_misc.h>

#include "soc.h"
#include "common.h"
#include "clockdomain.h"
#include "powerdomain.h"
#include "pm.h"

u16 pm44xx_errata;

/*
 * Per-powerdomain bookkeeping: the low-power target states chosen at boot,
 * plus (under CONFIG_SUSPEND) the states saved across a suspend cycle so
 * they can be restored on resume.
 */
struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
	u32 next_logic_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
	u32 saved_logic_state;
#endif
	struct list_head node;
};

/**
 * struct static_dep_map - Static dependency map
 * @from: from clockdomain
 * @to: to clockdomain
 */
struct static_dep_map {
	const char *from;
	const char *to;
};

/* CPU power state used in suspend; downgraded to RET on OSWR-errata SoCs */
static u32 cpu_suspend_state = PWRDM_POWER_OFF;

static LIST_HEAD(pwrst_list);

#ifdef CONFIG_SUSPEND
/*
 * System-wide suspend entry: save each powerdomain's programmed next
 * state, program the suspend targets, enter low power on the master CPU,
 * then verify what was actually reached and restore the saved states.
 * Order matters: states must be saved before they are overwritten.
 */
static int omap4_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;
	u32 cpu_id = smp_processor_id();

	/* Save current powerdomain state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
		pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
	}

	/* Set targeted power domain states by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
		pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->next_logic_state);
	}

	/*
	 * For MPUSS to hit power domain retention(CSWR or OSWR),
	 * CPU0 and CPU1 power domains need to be in OFF or DORMANT state,
	 * since CPU power domain CSWR is not supported by hardware.
	 * Only master CPU follows suspend path. All other CPUs follow
	 * CPU hotplug path in system wide suspend. On OMAP4, CPU power
	 * domain CSWR is not supported by hardware.
	 * More details can be found in OMAP4430 TRM section 4.3.4.2.
	 */
	omap4_enter_lowpower(cpu_id, cpu_suspend_state, false);

	/* Restore next powerdomain state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			/* Numerically higher state == shallower than target */
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
		pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
	}
	if (ret) {
		pr_crit("Could not enter target state in pm_suspend\n");
		/*
		 * OMAP4 chip PM currently works only with certain (newer)
		 * versions of bootloaders. This is due to missing code in the
		 * kernel to properly reset and initialize some devices.
		 * Warn the user about the bootloader version being one of the
		 * possible causes.
		 * http://www.spinics.net/lists/arm-kernel/msg218641.html
		 */
		pr_warn("A possible cause could be an old bootloader - try u-boot >= v2012.07\n");
	} else {
		pr_info("Successfully put all powerdomains to target state\n");
	}

	/* Missing the target state is reported but deliberately non-fatal */
	return 0;
}
#else
#define omap4_pm_suspend NULL
#endif /* CONFIG_SUSPEND */

/*
 * Boot-time callback run for each registered powerdomain: choose its
 * low-power target states, record them on pwrst_list, and program the
 * initial state.  Returns 0 to continue iteration, negative errno on
 * allocation or programming failure.
 */
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	/*
	 * Skip CPU0 and CPU1 power domains. CPU1 is programmed
	 * through hotplug path and CPU0 explicitly programmed
	 * further down in the code path
	 */
	if (!strncmp(pwrdm->name, "cpu", 3)) {
		if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
			cpu_suspend_state = PWRDM_POWER_RET;
		return 0;
	}

	if (!strncmp(pwrdm->name, "core", 4) ||
	    !strncmp(pwrdm->name, "l4per", 5))
		pwrdm_set_logic_retst(pwrdm, PWRDM_POWER_OFF);

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;

	pwrst->pwrdm = pwrdm;
	pwrst->next_state = pwrdm_get_valid_lp_state(pwrdm, false,
						     PWRDM_POWER_RET);
	pwrst->next_logic_state = pwrdm_get_valid_lp_state(pwrdm, true,
							   PWRDM_POWER_OFF);

	list_add(&pwrst->node, &pwrst_list);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/**
 * omap_default_idle - OMAP4 default idle routine
 *
 * Implements OMAP4 memory, IO ordering requirements which can't be addressed
 * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPU_IDLE and
 * by secondary CPU with CONFIG_CPU_IDLE.
 */
static void omap_default_idle(void)
{
	omap_do_wfi();
}

/*
 * The dynamic dependency between MPUSS -> MEMIF and
 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
 * expected. The hardware recommendation is to enable static
 * dependencies for these to avoid system lock ups or random crashes.
 * The L4 wakeup dependency is added to workaround the OCP sync hardware
 * BUG with 32K synctimer which leads to incorrect timer value read
 * from the 32K counter. The BUG applies for GPTIMER1 and WDT2 which
 * are part of L4 wakeup clockdomain.
 */
static const struct static_dep_map omap4_static_dep_map[] = {
	{.from = "mpuss_clkdm", .to = "l3_emif_clkdm"},
	{.from = "mpuss_clkdm", .to = "l3_1_clkdm"},
	{.from = "mpuss_clkdm", .to = "l3_2_clkdm"},
	{.from = "ducati_clkdm", .to = "l3_1_clkdm"},
	{.from = "ducati_clkdm", .to = "l3_2_clkdm"},
	{.from  = NULL} /* TERMINATION */
};

static const struct static_dep_map omap5_dra7_static_dep_map[] = {
	{.from = "mpu_clkdm", .to = "emif_clkdm"},
	{.from = NULL} /* TERMINATION */
};

/**
 * omap4plus_init_static_deps() - Initialize a static dependency map
 * @map: Mapping of clock domains
 *
 * Walks the NULL-terminated @map and adds a wakeup dependency for each
 * from/to pair.  Returns 0 on success, -EINVAL if a clockdomain lookup
 * fails, or the error from clkdm_add_wkdep().
 */
static inline int omap4plus_init_static_deps(const struct static_dep_map *map)
{
	int ret;
	struct clockdomain *from, *to;

	if (!map)
		return 0;

	while (map->from) {
		from = clkdm_lookup(map->from);
		to = clkdm_lookup(map->to);
		if (!from || !to) {
			pr_err("Failed lookup %s or %s for wakeup dependency\n",
			       map->from, map->to);
			return -EINVAL;
		}
		ret = clkdm_add_wkdep(from, to);
		if (ret) {
			pr_err("Failed to add %s -> %s wakeup dependency(%d)\n",
			       map->from, map->to, ret);
			return ret;
		}

		map++;
	}

	return 0;
}

/**
 * omap4_pm_init_early - Does early initialization necessary for OMAP4+ devices
 *
 * Initializes basic stuff for power management functionality.
 */
int __init omap4_pm_init_early(void)
{
	if (cpu_is_omap446x())
		pm44xx_errata |= PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD;

	if (soc_is_omap54xx() || soc_is_dra7xx())
		pm44xx_errata |= PM_OMAP4_CPU_OSWR_DISABLE;

	return 0;
}

/**
 * omap4_pm_init - Init routine for OMAP4+ devices
 *
 * Initializes all powerdomain and clockdomain target states
 * and all PRCM settings.
 * Return: Returns the error code returned by called functions.
 */
int __init omap4_pm_init(void)
{
	int ret = 0;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	pr_info("Power Management for TI OMAP4+ devices.\n");

	/*
	 * OMAP4 chip PM currently works only with certain (newer)
	 * versions of bootloaders. This is due to missing code in the
	 * kernel to properly reset and initialize some devices.
	 * http://www.spinics.net/lists/arm-kernel/msg218641.html
	 */
	if (cpu_is_omap44xx())
		pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains.\n");
		goto err2;
	}

	if (cpu_is_omap44xx())
		ret = omap4plus_init_static_deps(omap4_static_dep_map);
	else if (soc_is_omap54xx() || soc_is_dra7xx())
		ret = omap4plus_init_static_deps(omap5_dra7_static_dep_map);

	if (ret) {
		pr_err("Failed to initialise static dependencies.\n");
		goto err2;
	}

	ret = omap4_mpuss_init();
	if (ret) {
		pr_err("Failed to initialise OMAP4 MPUSS\n");
		goto err2;
	}

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

	omap_common_suspend_init(omap4_pm_suspend);

	/* Overwrite the default cpu_do_idle() */
	arm_pm_idle = omap_default_idle;

	if (cpu_is_omap44xx() || soc_is_omap54xx())
		omap4_idle_init();

err2:
	return ret;
}
linux-master
arch/arm/mach-omap2/pm44xx.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP5 Voltage Management Routines * * Based on voltagedomains44xx_data.c * * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/init.h> #include "common.h" #include "prm54xx.h" #include "voltage.h" #include "omap_opp_data.h" #include "vc.h" #include "vp.h" static const struct omap_vfsm_instance omap5_vdd_mpu_vfsm = { .voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET, }; static const struct omap_vfsm_instance omap5_vdd_mm_vfsm = { .voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_MM_RET_SLEEP_OFFSET, }; static const struct omap_vfsm_instance omap5_vdd_core_vfsm = { .voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET, }; static struct voltagedomain omap5_voltdm_mpu = { .name = "mpu", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_mpu, .vfsm = &omap5_vdd_mpu_vfsm, .vp = &omap4_vp_mpu, }; static struct voltagedomain omap5_voltdm_mm = { .name = "mm", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_iva, .vfsm = &omap5_vdd_mm_vfsm, .vp = &omap4_vp_iva, }; static struct voltagedomain omap5_voltdm_core = { .name = "core", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_core, .vfsm = &omap5_vdd_core_vfsm, .vp = &omap4_vp_core, }; static struct voltagedomain omap5_voltdm_wkup = { .name = "wkup", }; static struct voltagedomain *voltagedomains_omap5[] __initdata = { &omap5_voltdm_mpu, &omap5_voltdm_mm, &omap5_voltdm_core, &omap5_voltdm_wkup, NULL, }; static const char *const sys_clk_name __initconst = "sys_clkin"; void __init omap54xx_voltagedomains_init(void) { struct voltagedomain *voltdm; int i; for (i = 0; voltdm = voltagedomains_omap5[i], voltdm; i++) voltdm->sys_clk.name = sys_clk_name; 
voltdm_init(voltagedomains_omap5); };
linux-master
arch/arm/mach-omap2/voltagedomains54xx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP4 PRCM_MPU module functions * * Copyright (C) 2009 Nokia Corporation * Paul Walmsley */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include "iomap.h" #include "common.h" #include "prcm_mpu44xx.h" #include "cm-regbits-44xx.h" /* * prcm_mpu_base: the virtual address of the start of the PRCM_MPU IP * block registers */ struct omap_domain_base prcm_mpu_base; /* PRCM_MPU low-level functions */ u32 omap4_prcm_mpu_read_inst_reg(s16 inst, u16 reg) { return readl_relaxed(OMAP44XX_PRCM_MPU_REGADDR(inst, reg)); } void omap4_prcm_mpu_write_inst_reg(u32 val, s16 inst, u16 reg) { writel_relaxed(val, OMAP44XX_PRCM_MPU_REGADDR(inst, reg)); } /** * omap2_set_globals_prcm_mpu - set the MPU PRCM base address (for early use) * @prcm_mpu: PRCM_MPU base virtual address * * XXX Will be replaced when the PRM/CM drivers are completed. */ void __init omap2_set_globals_prcm_mpu(void __iomem *prcm_mpu) { prcm_mpu_base.va = prcm_mpu; }
linux-master
arch/arm/mach-omap2/prcm_mpu44xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DM81xx hwmod data.
 *
 * Copyright (C) 2010 Texas Instruments, Inc. - https://www.ti.com/
 * Copyright (C) 2013 SKTB SKiT, http://www.skitlab.ru/
 */

#include <linux/types.h>
#include <linux/platform_data/hsmmc-omap.h>

#include "omap_hwmod_common_data.h"
#include "cm81xx.h"
#include "ti81xx.h"
#include "wd_timer.h"

/*
 * DM816X hardware modules integration data
 *
 * Note: This is incomplete and at present, not generated from h/w database.
 */

/*
 * Common alwon .clkctrl_offs from dm814x TRM "Table 2-278. CM_ALWON REGISTERS"
 * also dm816x TRM 18.7.17 CM_ALWON device register values minus 0x1400.
 */
#define DM81XX_CM_ALWON_MCASP0_CLKCTRL		0x140
#define DM81XX_CM_ALWON_MCASP1_CLKCTRL		0x144
#define DM81XX_CM_ALWON_MCASP2_CLKCTRL		0x148
#define DM81XX_CM_ALWON_MCBSP_CLKCTRL		0x14c
#define DM81XX_CM_ALWON_UART_0_CLKCTRL		0x150
#define DM81XX_CM_ALWON_UART_1_CLKCTRL		0x154
#define DM81XX_CM_ALWON_UART_2_CLKCTRL		0x158
#define DM81XX_CM_ALWON_GPIO_0_CLKCTRL		0x15c
#define DM81XX_CM_ALWON_GPIO_1_CLKCTRL		0x160
#define DM81XX_CM_ALWON_I2C_0_CLKCTRL		0x164
#define DM81XX_CM_ALWON_I2C_1_CLKCTRL		0x168
#define DM81XX_CM_ALWON_WDTIMER_CLKCTRL		0x18c
#define DM81XX_CM_ALWON_SPI_CLKCTRL		0x190
#define DM81XX_CM_ALWON_MAILBOX_CLKCTRL		0x194
#define DM81XX_CM_ALWON_SPINBOX_CLKCTRL		0x198
#define DM81XX_CM_ALWON_MMUDATA_CLKCTRL		0x19c
#define DM81XX_CM_ALWON_MMUCFG_CLKCTRL		0x1a8
#define DM81XX_CM_ALWON_CONTROL_CLKCTRL		0x1c4
#define DM81XX_CM_ALWON_GPMC_CLKCTRL		0x1d0
#define DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL	0x1d4
#define DM81XX_CM_ALWON_L3_CLKCTRL		0x1e4
#define DM81XX_CM_ALWON_L4HS_CLKCTRL		0x1e8
#define DM81XX_CM_ALWON_L4LS_CLKCTRL		0x1ec
#define DM81XX_CM_ALWON_RTC_CLKCTRL		0x1f0
#define DM81XX_CM_ALWON_TPCC_CLKCTRL		0x1f4
#define DM81XX_CM_ALWON_TPTC0_CLKCTRL		0x1f8
#define DM81XX_CM_ALWON_TPTC1_CLKCTRL		0x1fc
#define DM81XX_CM_ALWON_TPTC2_CLKCTRL		0x200
#define DM81XX_CM_ALWON_TPTC3_CLKCTRL		0x204

/* Registers specific to dm814x */
#define DM814X_CM_ALWON_MCASP_3_4_5_CLKCTRL	0x16c
#define DM814X_CM_ALWON_ATL_CLKCTRL		0x170
#define DM814X_CM_ALWON_MLB_CLKCTRL		0x174
#define DM814X_CM_ALWON_PATA_CLKCTRL		0x178
#define DM814X_CM_ALWON_UART_3_CLKCTRL		0x180
#define DM814X_CM_ALWON_UART_4_CLKCTRL		0x184
#define DM814X_CM_ALWON_UART_5_CLKCTRL		0x188
#define DM814X_CM_ALWON_OCM_0_CLKCTRL		0x1b4
#define DM814X_CM_ALWON_VCP_CLKCTRL		0x1b8
#define DM814X_CM_ALWON_MPU_CLKCTRL		0x1dc
#define DM814X_CM_ALWON_DEBUGSS_CLKCTRL		0x1e0
#define DM814X_CM_ALWON_DCAN_0_1_CLKCTRL	0x218
#define DM814X_CM_ALWON_MMCHS_0_CLKCTRL		0x21c
#define DM814X_CM_ALWON_MMCHS_1_CLKCTRL		0x220
#define DM814X_CM_ALWON_MMCHS_2_CLKCTRL		0x224
#define DM814X_CM_ALWON_CUST_EFUSE_CLKCTRL	0x228

/* Registers specific to dm816x */
#define DM816X_DM_ALWON_BASE		0x1400
#define DM816X_CM_ALWON_TIMER_1_CLKCTRL	(0x1570 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_2_CLKCTRL	(0x1574 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_3_CLKCTRL	(0x1578 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_4_CLKCTRL	(0x157c - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_5_CLKCTRL	(0x1580 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_6_CLKCTRL	(0x1584 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_7_CLKCTRL	(0x1588 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_SDIO_CLKCTRL	(0x15b0 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_OCMC_0_CLKCTRL	(0x15b4 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_OCMC_1_CLKCTRL	(0x15b8 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_ETHERNET_1_CLKCTRL	(0x15d8 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_MPU_CLKCTRL	(0x15dc - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_SR_0_CLKCTRL	(0x1608 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_SR_1_CLKCTRL	(0x160c - DM816X_DM_ALWON_BASE)

/*
 * The default .clkctrl_offs field is offset from CM_DEFAULT, that's
 * TRM 18.7.6 CM_DEFAULT device register values minus 0x500
 */
#define DM81XX_CM_DEFAULT_OFFSET	0x500
#define DM81XX_CM_DEFAULT_USB_CLKCTRL	(0x558 - DM81XX_CM_DEFAULT_OFFSET)
#define DM81XX_CM_DEFAULT_SATA_CLKCTRL	(0x560 - DM81XX_CM_DEFAULT_OFFSET)

/* L3 Interconnect entries clocked at 125, 250 and 500MHz */
static struct omap_hwmod dm81xx_alwon_l3_slow_hwmod = {
	.name		= "alwon_l3_slow",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &l3_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

static struct omap_hwmod dm81xx_default_l3_slow_hwmod = {
	.name		= "default_l3_slow",
	.clkdm_name	= "default_l3_slow_clkdm",
	.class		= &l3_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

static struct omap_hwmod dm81xx_alwon_l3_med_hwmod = {
	.name		= "l3_med",
	.clkdm_name	= "alwon_l3_med_clkdm",
	.class		= &l3_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

/*
 * L4 standard peripherals, see TRM table 1-12 for devices using this.
 * See TRM table 1-73 for devices using the 125MHz SYSCLK6 clock.
 */
static struct omap_hwmod dm81xx_l4_ls_hwmod = {
	.name		= "l4_ls",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &l4_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

/*
 * L4 high-speed peripherals. For devices using this, please see the TRM
 * table 1-13. On dm816x, only EMAC, MDIO and SATA use this. See also TRM
 * table 1-73 for devices using 250MHz SYSCLK5 clock.
 */
static struct omap_hwmod dm81xx_l4_hs_hwmod = {
	.name		= "l4_hs",
	.clkdm_name	= "alwon_l3_med_clkdm",
	.class		= &l4_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

/* L3 slow -> L4 ls peripheral interface running at 125MHz */
static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__l4_ls = {
	.master	= &dm81xx_alwon_l3_slow_hwmod,
	.slave	= &dm81xx_l4_ls_hwmod,
	.user	= OCP_USER_MPU,
};

/* L3 med -> L4 fast peripheral interface running at 250MHz */
/* NOTE(review): identifier says l3_slow but the master is l3_med -- matches
 * the comment above; confirm the name is just historical. */
static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__l4_hs = {
	.master = &dm81xx_alwon_l3_med_hwmod,
	.slave	= &dm81xx_l4_hs_hwmod,
	.user	= OCP_USER_MPU,
};

/* MPU */
static struct omap_hwmod dm814x_mpu_hwmod = {
	.name		= "mpu",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &mpu_hwmod_class,
	.flags		= HWMOD_INIT_NO_IDLE,
	.main_clk	= "mpu_ck",
	.prcm		= {
		.omap4 = {
			.clkctrl_offs = DM814X_CM_ALWON_MPU_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

static struct omap_hwmod_ocp_if dm814x_mpu__alwon_l3_slow = {
	.master		= &dm814x_mpu_hwmod,
	.slave		= &dm81xx_alwon_l3_slow_hwmod,
	.user		= OCP_USER_MPU,
};

/* L3 med peripheral interface running at 200MHz */
static struct omap_hwmod_ocp_if dm814x_mpu__alwon_l3_med = {
	.master		= &dm814x_mpu_hwmod,
	.slave		= &dm81xx_alwon_l3_med_hwmod,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_mpu_hwmod = {
	.name		= "mpu",
	.clkdm_name	= "alwon_mpu_clkdm",
	.class		= &mpu_hwmod_class,
	.flags		= HWMOD_INIT_NO_IDLE,
	.main_clk	= "mpu_ck",
	.prcm		= {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_MPU_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

static struct omap_hwmod_ocp_if dm816x_mpu__alwon_l3_slow = {
	.master		= &dm816x_mpu_hwmod,
	.slave		= &dm81xx_alwon_l3_slow_hwmod,
	.user		= OCP_USER_MPU,
};

/* L3 med peripheral interface running at 250MHz */
static struct omap_hwmod_ocp_if dm816x_mpu__alwon_l3_med = {
	.master		= &dm816x_mpu_hwmod,
	.slave		= &dm81xx_alwon_l3_med_hwmod,
	.user		= OCP_USER_MPU,
};

/* RTC */
static struct omap_hwmod_class_sysconfig ti81xx_rtc_sysc = {
	.rev_offs	= 0x74,
	.sysc_offs	= 0x78,
	.sysc_flags	= SYSC_HAS_SIDLEMODE,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO |
			  SIDLE_SMART | SIDLE_SMART_WKUP,
	.sysc_fields	= &omap_hwmod_sysc_type3,
};

static struct omap_hwmod_class ti81xx_rtc_hwmod_class = {
	.name		= "rtc",
	.sysc		= &ti81xx_rtc_sysc,
};

static struct omap_hwmod ti81xx_rtc_hwmod = {
	.name		= "rtc",
	.class		= &ti81xx_rtc_hwmod_class,
	.clkdm_name	= "alwon_l3s_clkdm",
	.flags		= HWMOD_NO_IDLEST,
	.main_clk	= "sysclk18_ck",
	.prcm		= {
		.omap4	= {
			.clkctrl_offs = DM81XX_CM_ALWON_RTC_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

static struct omap_hwmod_ocp_if ti81xx_l4_ls__rtc = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &ti81xx_rtc_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

/* UART common */
/* NOTE(review): MSTANDBY_SMART_WKUP among otherwise SIDLE_* idlemodes looks
 * odd for a slave-only UART -- confirm SIDLE_SMART_WKUP was not intended. */
static struct omap_hwmod_class_sysconfig uart_sysc = {
	.rev_offs	= 0x50,
	.sysc_offs	= 0x54,
	.syss_offs	= 0x58,
	.sysc_flags	= SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
			  SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
			  SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			  MSTANDBY_SMART_WKUP,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class uart_class = {
	.name		= "uart",
	.sysc		= &uart_sysc,
};

static struct omap_hwmod dm81xx_uart1_hwmod = {
	.name		= "uart1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm		= {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_UART_0_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &uart_class,
	.flags		= DEBUG_TI81XXUART1_FLAGS,
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart1 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_uart1_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm81xx_uart2_hwmod = {
	.name		= "uart2",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm		= {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_UART_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &uart_class,
	.flags		= DEBUG_TI81XXUART2_FLAGS,
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart2 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_uart2_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm81xx_uart3_hwmod = {
	.name		= "uart3",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm		= {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_UART_2_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &uart_class,
	.flags		= DEBUG_TI81XXUART3_FLAGS,
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart3 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_uart3_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_class_sysconfig wd_timer_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x10,
	.syss_offs	= 0x14,
	.sysc_flags	= SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
			  SYSS_HAS_RESET_STATUS,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class wd_timer_class = {
	.name		= "wd_timer",
	.sysc		= &wd_timer_sysc,
	.pre_shutdown	= &omap2_wd_timer_disable,
	.reset		= &omap2_wd_timer_reset,
};

static struct omap_hwmod dm81xx_wd_timer_hwmod = {
	.name		= "wd_timer",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk18_ck",
	.flags		= HWMOD_NO_IDLEST,
	.prcm		= {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_WDTIMER_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &wd_timer_class,
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__wd_timer1 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_wd_timer_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

/* I2C common */
static struct omap_hwmod_class_sysconfig i2c_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x10,
	.syss_offs	= 0x90,
	.sysc_flags	= SYSC_HAS_SIDLEMODE |
			  SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			  SYSC_HAS_AUTOIDLE,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class i2c_class = {
	.name		= "i2c",
	.sysc		= &i2c_sysc,
};

static struct omap_hwmod dm81xx_i2c1_hwmod = {
	.name		= "i2c1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm		= {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_I2C_0_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &i2c_class,
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c1 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_i2c1_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm81xx_i2c2_hwmod = {
	.name		= "i2c2",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm		= {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_I2C_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &i2c_class,
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c2 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_i2c2_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_class_sysconfig dm81xx_elm_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
			  SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class dm81xx_elm_hwmod_class = {
	.name		= "elm",
	.sysc		= &dm81xx_elm_sysc,
};

/* ELM has no PRCM entry here: no .prcm block, framework skips modulemode */
static struct omap_hwmod dm81xx_elm_hwmod = {
	.name		= "elm",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_elm_hwmod_class,
	.main_clk	= "sysclk6_ck",
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__elm = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_elm_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_class_sysconfig dm81xx_gpio_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0114,
	.sysc_flags	= SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
			  SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			  SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			  SIDLE_SMART_WKUP,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class dm81xx_gpio_hwmod_class = {
	.name	= "gpio",
	.sysc	= &dm81xx_gpio_sysc,
};

static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
	{ .role = "dbclk", .clk = "sysclk18_ck" },
};

static struct omap_hwmod dm81xx_gpio1_hwmod = {
	.name		= "gpio1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_gpio_hwmod_class,
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_GPIO_0_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.opt_clks	= gpio1_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio1_opt_clks),
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio1 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_gpio1_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
	{ .role = "dbclk", .clk = "sysclk18_ck" },
};

static struct omap_hwmod dm81xx_gpio2_hwmod = {
	.name		= "gpio2",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_gpio_hwmod_class,
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.opt_clks	= gpio2_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio2_opt_clks),
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio2 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_gpio2_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
	{ .role = "dbclk", .clk = "sysclk18_ck" },
};

/* NOTE(review): gpio3 and gpio4 both use the GPIO_1 clkctrl offset, same as
 * gpio2 -- confirm against the dm814x TRM that this sharing is intended. */
static struct omap_hwmod dm81xx_gpio3_hwmod = {
	.name		= "gpio3",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_gpio_hwmod_class,
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.opt_clks	= gpio3_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio3_opt_clks),
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio3 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_gpio3_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
	{ .role = "dbclk", .clk = "sysclk18_ck" },
};

static struct omap_hwmod dm81xx_gpio4_hwmod = {
	.name		= "gpio4",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_gpio_hwmod_class,
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.opt_clks	= gpio4_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio4_opt_clks),
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio4 = {
	.master		= &dm81xx_l4_ls_hwmod,
	.slave		= &dm81xx_gpio4_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_class_sysconfig dm81xx_gpmc_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x10,
	.syss_offs	= 0x14,
	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			  SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class dm81xx_gpmc_hwmod_class = {
	.name	= "gpmc",
	.sysc	= &dm81xx_gpmc_sysc,
};

static struct omap_hwmod dm81xx_gpmc_hwmod = {
	.name		= "gpmc",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_gpmc_hwmod_class,
	.main_clk	= "sysclk6_ck",
	/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
	.flags		= DEBUG_OMAP_GPMC_HWMOD_FLAGS,
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_ALWON_GPMC_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = {
	.master		= &dm81xx_alwon_l3_slow_hwmod,
	.slave		= &dm81xx_gpmc_hwmod,
	.user		= OCP_USER_MPU,
};

/* USB needs udelay 1 after reset at least on hp t410, use 2 for margin */
static struct omap_hwmod_class_sysconfig dm81xx_usbhsotg_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x10,
	.srst_udelay	= 2,
	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
				SYSC_HAS_SOFTRESET,
	.idlemodes	= SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class dm81xx_usbotg_class = {
	.name = "usbotg",
	.sysc = &dm81xx_usbhsotg_sysc,
};

static struct omap_hwmod dm814x_usbss_hwmod = {
	.name		= "usb_otg_hs",
	.clkdm_name	= "default_l3_slow_clkdm",
	.main_clk	= "pll260dcoclkldo",	/* 481c5260.adpll.dcoclkldo */
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM81XX_CM_DEFAULT_USB_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &dm81xx_usbotg_class,
};
static struct omap_hwmod_ocp_if dm814x_default_l3_slow__usbss = { .master = &dm81xx_default_l3_slow_hwmod, .slave = &dm814x_usbss_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod dm816x_usbss_hwmod = { .name = "usb_otg_hs", .clkdm_name = "default_l3_slow_clkdm", .main_clk = "sysclk6_ck", .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_DEFAULT_USB_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm81xx_usbotg_class, }; static struct omap_hwmod_ocp_if dm816x_default_l3_slow__usbss = { .master = &dm81xx_default_l3_slow_hwmod, .slave = &dm816x_usbss_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_class_sysconfig dm816x_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET, .idlemodes = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP, .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class dm816x_timer_hwmod_class = { .name = "timer", .sysc = &dm816x_timer_sysc, }; static struct omap_hwmod dm816x_timer3_hwmod = { .name = "timer3", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "timer3_fck", .prcm = { .omap4 = { .clkctrl_offs = DM816X_CM_ALWON_TIMER_3_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_timer_hwmod_class, }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer3 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer3_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod dm816x_timer4_hwmod = { .name = "timer4", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "timer4_fck", .prcm = { .omap4 = { .clkctrl_offs = DM816X_CM_ALWON_TIMER_4_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_timer_hwmod_class, }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer4 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer4_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod dm816x_timer5_hwmod = { .name = "timer5", .clkdm_name 
= "alwon_l3s_clkdm", .main_clk = "timer5_fck", .prcm = { .omap4 = { .clkctrl_offs = DM816X_CM_ALWON_TIMER_5_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_timer_hwmod_class, }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer5 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer5_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod dm816x_timer6_hwmod = { .name = "timer6", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "timer6_fck", .prcm = { .omap4 = { .clkctrl_offs = DM816X_CM_ALWON_TIMER_6_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_timer_hwmod_class, }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer6 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer6_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod dm816x_timer7_hwmod = { .name = "timer7", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "timer7_fck", .prcm = { .omap4 = { .clkctrl_offs = DM816X_CM_ALWON_TIMER_7_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_timer_hwmod_class, }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer7 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer7_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; /* EMAC Ethernet */ static struct omap_hwmod_class_sysconfig dm816x_emac_sysc = { .rev_offs = 0x0, .sysc_offs = 0x4, .sysc_flags = SYSC_HAS_SOFTRESET, .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class dm816x_emac_hwmod_class = { .name = "emac", .sysc = &dm816x_emac_sysc, }; /* * On dm816x the MDIO is within EMAC0. As the MDIO driver is a separate * driver probed before EMAC0, we let MDIO do the clock idling. 
*/ static struct omap_hwmod dm816x_emac0_hwmod = { .name = "emac0", .clkdm_name = "alwon_ethernet_clkdm", .class = &dm816x_emac_hwmod_class, .flags = HWMOD_NO_IDLEST, }; static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = { .master = &dm81xx_l4_hs_hwmod, .slave = &dm816x_emac0_hwmod, .clk = "sysclk5_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_class dm81xx_mdio_hwmod_class = { .name = "davinci_mdio", .sysc = &dm816x_emac_sysc, }; static struct omap_hwmod dm81xx_emac0_mdio_hwmod = { .name = "davinci_mdio", .class = &dm81xx_mdio_hwmod_class, .clkdm_name = "alwon_ethernet_clkdm", .main_clk = "sysclk24_ck", .flags = HWMOD_NO_IDLEST, /* * REVISIT: This should be moved to the emac0_hwmod * once we have a better way to handle device slaves. */ .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; static struct omap_hwmod_ocp_if dm81xx_emac0__mdio = { .master = &dm81xx_l4_hs_hwmod, .slave = &dm81xx_emac0_mdio_hwmod, .user = OCP_USER_MPU, }; static struct omap_hwmod dm816x_emac1_hwmod = { .name = "emac1", .clkdm_name = "alwon_ethernet_clkdm", .main_clk = "sysclk24_ck", .flags = HWMOD_NO_IDLEST, .prcm = { .omap4 = { .clkctrl_offs = DM816X_CM_ALWON_ETHERNET_1_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_emac_hwmod_class, }; static struct omap_hwmod_ocp_if dm816x_l4_hs__emac1 = { .master = &dm81xx_l4_hs_hwmod, .slave = &dm816x_emac1_hwmod, .clk = "sysclk5_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_class_sysconfig dm81xx_sata_sysc = { .rev_offs = 0x00fc, .sysc_offs = 0x1100, .sysc_flags = SYSC_HAS_SIDLEMODE, .idlemodes = SIDLE_FORCE, .sysc_fields = &omap_hwmod_sysc_type3, }; static struct omap_hwmod_class dm81xx_sata_hwmod_class = { .name = "sata", .sysc = &dm81xx_sata_sysc, }; static struct omap_hwmod dm81xx_sata_hwmod = { .name = "sata", .clkdm_name = "default_clkdm", .flags = HWMOD_NO_IDLEST, .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_DEFAULT_SATA_CLKCTRL, .modulemode 
= MODULEMODE_SWCTRL, }, }, .class = &dm81xx_sata_hwmod_class, }; static struct omap_hwmod_ocp_if dm81xx_l4_hs__sata = { .master = &dm81xx_l4_hs_hwmod, .slave = &dm81xx_sata_hwmod, .clk = "sysclk5_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_class_sysconfig dm81xx_mmc_sysc = { .rev_offs = 0x0, .sysc_offs = 0x110, .syss_offs = 0x114, .sysc_flags = SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS, .idlemodes = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class dm81xx_mmc_class = { .name = "mmc", .sysc = &dm81xx_mmc_sysc, }; static struct omap_hwmod_opt_clk dm81xx_mmc_opt_clks[] = { { .role = "dbck", .clk = "sysclk18_ck", }, }; static struct omap_hsmmc_dev_attr mmc_dev_attr = { }; static struct omap_hwmod dm814x_mmc1_hwmod = { .name = "mmc1", .clkdm_name = "alwon_l3s_clkdm", .opt_clks = dm81xx_mmc_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dm81xx_mmc_opt_clks), .main_clk = "sysclk8_ck", .prcm = { .omap4 = { .clkctrl_offs = DM814X_CM_ALWON_MMCHS_0_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mmc_dev_attr, .class = &dm81xx_mmc_class, }; static struct omap_hwmod_ocp_if dm814x_l4_ls__mmc1 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm814x_mmc1_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, .flags = OMAP_FIREWALL_L4 }; static struct omap_hwmod dm814x_mmc2_hwmod = { .name = "mmc2", .clkdm_name = "alwon_l3s_clkdm", .opt_clks = dm81xx_mmc_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dm81xx_mmc_opt_clks), .main_clk = "sysclk8_ck", .prcm = { .omap4 = { .clkctrl_offs = DM814X_CM_ALWON_MMCHS_1_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mmc_dev_attr, .class = &dm81xx_mmc_class, }; static struct omap_hwmod_ocp_if dm814x_l4_ls__mmc2 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm814x_mmc2_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, .flags = OMAP_FIREWALL_L4 }; static struct omap_hwmod dm814x_mmc3_hwmod = { 
.name = "mmc3", .clkdm_name = "alwon_l3_med_clkdm", .opt_clks = dm81xx_mmc_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dm81xx_mmc_opt_clks), .main_clk = "sysclk8_ck", .prcm = { .omap4 = { .clkctrl_offs = DM814X_CM_ALWON_MMCHS_2_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mmc_dev_attr, .class = &dm81xx_mmc_class, }; static struct omap_hwmod_ocp_if dm814x_alwon_l3_med__mmc3 = { .master = &dm81xx_alwon_l3_med_hwmod, .slave = &dm814x_mmc3_hwmod, .clk = "sysclk4_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod dm816x_mmc1_hwmod = { .name = "mmc1", .clkdm_name = "alwon_l3s_clkdm", .opt_clks = dm81xx_mmc_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dm81xx_mmc_opt_clks), .main_clk = "sysclk10_ck", .prcm = { .omap4 = { .clkctrl_offs = DM816X_CM_ALWON_SDIO_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mmc_dev_attr, .class = &dm81xx_mmc_class, }; static struct omap_hwmod_ocp_if dm816x_l4_ls__mmc1 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_mmc1_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, .flags = OMAP_FIREWALL_L4 }; static struct omap_hwmod_class_sysconfig dm816x_mcspi_sysc = { .rev_offs = 0x0, .sysc_offs = 0x110, .syss_offs = 0x114, .sysc_flags = SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS, .idlemodes = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class dm816x_mcspi_class = { .name = "mcspi", .sysc = &dm816x_mcspi_sysc, }; static struct omap_hwmod dm81xx_mcspi1_hwmod = { .name = "mcspi1", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_mcspi_class, }; static struct omap_hwmod dm81xx_mcspi2_hwmod = { .name = "mcspi2", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL, .modulemode = 
MODULEMODE_SWCTRL, }, }, .class = &dm816x_mcspi_class, }; static struct omap_hwmod dm81xx_mcspi3_hwmod = { .name = "mcspi3", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_mcspi_class, }; static struct omap_hwmod dm81xx_mcspi4_hwmod = { .name = "mcspi4", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &dm816x_mcspi_class, }; static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi1 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_mcspi1_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi2 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_mcspi2_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi3 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_mcspi3_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi4 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_mcspi4_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_class_sysconfig dm81xx_mailbox_sysc = { .rev_offs = 0x000, .sysc_offs = 0x010, .syss_offs = 0x014, .sysc_flags = SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE, .idlemodes = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class dm81xx_mailbox_hwmod_class = { .name = "mailbox", .sysc = &dm81xx_mailbox_sysc, }; static struct omap_hwmod dm81xx_mailbox_hwmod = { .name = "mailbox", .clkdm_name = "alwon_l3s_clkdm", .class = &dm81xx_mailbox_hwmod_class, .main_clk = "sysclk6_ck", .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_ALWON_MAILBOX_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; static struct omap_hwmod_ocp_if 
dm81xx_l4_ls__mailbox = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_mailbox_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; static struct omap_hwmod_class_sysconfig dm81xx_spinbox_sysc = { .rev_offs = 0x000, .sysc_offs = 0x010, .syss_offs = 0x014, .sysc_flags = SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE, .idlemodes = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class dm81xx_spinbox_hwmod_class = { .name = "spinbox", .sysc = &dm81xx_spinbox_sysc, }; static struct omap_hwmod dm81xx_spinbox_hwmod = { .name = "spinbox", .clkdm_name = "alwon_l3s_clkdm", .class = &dm81xx_spinbox_hwmod_class, .main_clk = "sysclk6_ck", .prcm = { .omap4 = { .clkctrl_offs = DM81XX_CM_ALWON_SPINBOX_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; static struct omap_hwmod_ocp_if dm81xx_l4_ls__spinbox = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_spinbox_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; /* * REVISIT: Test and enable the following once clocks work: * dm81xx_l4_ls__mailbox * * Also note that some devices share a single clkctrl_offs.. * For example, i2c1 and 3 share one, and i2c2 and 4 share one. 
*/ static struct omap_hwmod_ocp_if *dm814x_hwmod_ocp_ifs[] __initdata = { &dm814x_mpu__alwon_l3_slow, &dm814x_mpu__alwon_l3_med, &dm81xx_alwon_l3_slow__l4_ls, &dm81xx_alwon_l3_slow__l4_hs, &dm81xx_l4_ls__uart1, &dm81xx_l4_ls__uart2, &dm81xx_l4_ls__uart3, &dm81xx_l4_ls__wd_timer1, &dm81xx_l4_ls__i2c1, &dm81xx_l4_ls__i2c2, &dm81xx_l4_ls__gpio1, &dm81xx_l4_ls__gpio2, &dm81xx_l4_ls__gpio3, &dm81xx_l4_ls__gpio4, &dm81xx_l4_ls__elm, &dm81xx_l4_ls__mcspi1, &dm81xx_l4_ls__mcspi2, &dm81xx_l4_ls__mcspi3, &dm81xx_l4_ls__mcspi4, &dm814x_l4_ls__mmc1, &dm814x_l4_ls__mmc2, &ti81xx_l4_ls__rtc, &dm81xx_alwon_l3_slow__gpmc, &dm814x_default_l3_slow__usbss, &dm814x_alwon_l3_med__mmc3, NULL, }; int __init dm814x_hwmod_init(void) { omap_hwmod_init(); return omap_hwmod_register_links(dm814x_hwmod_ocp_ifs); } static struct omap_hwmod_ocp_if *dm816x_hwmod_ocp_ifs[] __initdata = { &dm816x_mpu__alwon_l3_slow, &dm816x_mpu__alwon_l3_med, &dm81xx_alwon_l3_slow__l4_ls, &dm81xx_alwon_l3_slow__l4_hs, &dm81xx_l4_ls__uart1, &dm81xx_l4_ls__uart2, &dm81xx_l4_ls__uart3, &dm81xx_l4_ls__wd_timer1, &dm81xx_l4_ls__i2c1, &dm81xx_l4_ls__i2c2, &dm81xx_l4_ls__gpio1, &dm81xx_l4_ls__gpio2, &dm81xx_l4_ls__elm, &ti81xx_l4_ls__rtc, &dm816x_l4_ls__mmc1, &dm816x_l4_ls__timer3, &dm816x_l4_ls__timer4, &dm816x_l4_ls__timer5, &dm816x_l4_ls__timer6, &dm816x_l4_ls__timer7, &dm81xx_l4_ls__mcspi1, &dm81xx_l4_ls__mailbox, &dm81xx_l4_ls__spinbox, &dm81xx_l4_hs__emac0, &dm81xx_emac0__mdio, &dm816x_l4_hs__emac1, &dm81xx_l4_hs__sata, &dm81xx_alwon_l3_slow__gpmc, &dm816x_default_l3_slow__usbss, NULL, }; int __init dm816x_hwmod_init(void) { omap_hwmod_init(); return omap_hwmod_register_links(dm816x_hwmod_ocp_ifs); }
linux-master
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * IP block integration code for the HDQ1W/1-wire IP block * * Copyright (C) 2012 Texas Instruments, Inc. * Paul Walmsley * * Based on the I2C reset code in arch/arm/mach-omap2/i2c.c by * Avinash.H.M <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include "soc.h" #include "omap_hwmod.h" #include "omap_device.h" #include "hdq1w.h" #include "prm.h" #include "common.h" /** * omap_hdq1w_reset - reset the OMAP HDQ1W module * @oh: struct omap_hwmod * * * OCP soft reset the HDQ1W IP block. Section 20.6.1.4 "HDQ1W/1-Wire * Software Reset" of the OMAP34xx Technical Reference Manual Revision * ZR (SWPU223R) does not include the rather important fact that, for * the reset to succeed, the HDQ1W module's internal clock gate must be * programmed to allow the clock to propagate to the rest of the * module. In this sense, it's rather similar to the I2C custom reset * function. Returns 0. */ int omap_hdq1w_reset(struct omap_hwmod *oh) { u32 v; int c = 0; /* Write to the SOFTRESET bit */ omap_hwmod_softreset(oh); /* Enable the module's internal clocks */ v = omap_hwmod_read(oh, HDQ_CTRL_STATUS_OFFSET); v |= 1 << HDQ_CTRL_STATUS_CLOCKENABLE_SHIFT; omap_hwmod_write(v, oh, HDQ_CTRL_STATUS_OFFSET); /* Poll on RESETDONE bit */ omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs) & SYSS_RESETDONE_MASK), MAX_MODULE_SOFTRESET_WAIT, c); if (c == MAX_MODULE_SOFTRESET_WAIT) pr_warn("%s: %s: softreset failed (waited %d usec)\n", __func__, oh->name, MAX_MODULE_SOFTRESET_WAIT); else pr_debug("%s: %s: softreset in %d usec\n", __func__, oh->name, c); return 0; }
linux-master
arch/arm/mach-omap2/hdq1w.c