
Merge changes from topic "tegra-downstream-02182020" into integration

* changes:
  Tegra186: store TZDRAM base/size to scratch registers
  Tegra186: add SE support to generate SHA256 of TZRAM
  Tegra186: add support for bpmp_ipc driver
  Tegra210: disable ERRATA_A57_829520
  Tegra194: memctrl: add support for MIU4 and MIU5
  Tegra194: memctrl: remove support to reconfigure MSS
  Tegra: fiq_glue: remove bakery locks from interrupt handler
  Tegra210: SE: add context save support
  Tegra210: update the PMC blacklisted registers
  Tegra: disable CPUACTLR access from lower exception levels
  cpus: denver: fixup register used to store return address
pull/1938/head
Olivier Deprez, 5 years ago
committed by TrustedFirmware Code Review
commit 65012c0892
Changed files (number of changed lines in parentheses):
  1. lib/cpus/aarch64/denver.S (8)
  2. plat/nvidia/tegra/common/aarch64/tegra_helpers.S (34)
  3. plat/nvidia/tegra/common/tegra_fiq_glue.c (8)
  4. plat/nvidia/tegra/include/drivers/pmc.h (33)
  5. plat/nvidia/tegra/include/drivers/security_engine.h (3)
  6. plat/nvidia/tegra/include/t186/tegra_def.h (25)
  7. plat/nvidia/tegra/include/t210/tegra_def.h (3)
  8. plat/nvidia/tegra/soc/t186/drivers/se/se.c (278)
  9. plat/nvidia/tegra/soc/t186/drivers/se/se_private.h (100)
  10. plat/nvidia/tegra/soc/t186/plat_memctrl.c (21)
  11. plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (35)
  12. plat/nvidia/tegra/soc/t186/plat_setup.c (6)
  13. plat/nvidia/tegra/soc/t186/platform_t186.mk (7)
  14. plat/nvidia/tegra/soc/t194/plat_memctrl.c (422)
  15. plat/nvidia/tegra/soc/t210/drivers/se/se_private.h (6)
  16. plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c (124)
  17. plat/nvidia/tegra/soc/t210/plat_psci_handlers.c (9)
  18. plat/nvidia/tegra/soc/t210/plat_setup.c (4)
  19. plat/nvidia/tegra/soc/t210/plat_sip_calls.c (39)
  20. plat/nvidia/tegra/soc/t210/platform_t210.mk (1)

lib/cpus/aarch64/denver.S (8)

@@ -156,12 +156,12 @@ endfunc denver_disable_ext_debug
* ----------------------------------------------------
*/
func denver_enable_dco
mov x3, x30
mov x18, x30
bl plat_my_core_pos
mov x1, #1
lsl x1, x1, x0
msr s3_0_c15_c0_2, x1
mov x30, x3
mov x30, x18
ret
endfunc denver_enable_dco
@@ -171,7 +171,7 @@ endfunc denver_enable_dco
*/
func denver_disable_dco
mov x3, x30
mov x18, x30
/* turn off background work */
bl plat_my_core_pos
@@ -188,7 +188,7 @@ func denver_disable_dco
and x2, x2, x1
cbnz x2, 1b
mov x30, x3
mov x30, x18
ret
endfunc denver_disable_dco

plat/nvidia/tegra/common/aarch64/tegra_helpers.S (34)

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,21 +19,16 @@
/*******************************************************************************
* Implementation defined ACTLR_EL3 bit definitions
******************************************************************************/
#define ACTLR_EL3_L2ACTLR_BIT (U(1) << 6)
#define ACTLR_EL3_L2ECTLR_BIT (U(1) << 5)
#define ACTLR_EL3_L2CTLR_BIT (U(1) << 4)
#define ACTLR_EL3_CPUECTLR_BIT (U(1) << 1)
#define ACTLR_EL3_CPUACTLR_BIT (U(1) << 0)
#define ACTLR_EL3_ENABLE_ALL_MASK (ACTLR_EL3_L2ACTLR_BIT | \
ACTLR_EL3_L2ECTLR_BIT | \
ACTLR_EL3_L2CTLR_BIT | \
ACTLR_EL3_CPUECTLR_BIT | \
ACTLR_EL3_CPUACTLR_BIT)
#define ACTLR_EL3_ENABLE_ALL_ACCESS (ACTLR_EL3_L2ACTLR_BIT | \
ACTLR_EL3_L2ECTLR_BIT | \
ACTLR_EL3_L2CTLR_BIT | \
ACTLR_EL3_CPUECTLR_BIT | \
ACTLR_EL3_CPUACTLR_BIT)
#define ACTLR_ELx_L2ACTLR_BIT (U(1) << 6)
#define ACTLR_ELx_L2ECTLR_BIT (U(1) << 5)
#define ACTLR_ELx_L2CTLR_BIT (U(1) << 4)
#define ACTLR_ELx_CPUECTLR_BIT (U(1) << 1)
#define ACTLR_ELx_CPUACTLR_BIT (U(1) << 0)
#define ACTLR_ELx_ENABLE_ALL_ACCESS (ACTLR_ELx_L2ACTLR_BIT | \
ACTLR_ELx_L2ECTLR_BIT | \
ACTLR_ELx_L2CTLR_BIT | \
ACTLR_ELx_CPUECTLR_BIT | \
ACTLR_ELx_CPUACTLR_BIT)
/* Global functions */
.globl plat_is_my_cpu_primary
@@ -93,15 +89,11 @@
* -------------------------------------------------------
*/
mrs x0, actlr_el3
mov x1, #ACTLR_EL3_ENABLE_ALL_MASK
bic x0, x0, x1
mov x1, #ACTLR_EL3_ENABLE_ALL_ACCESS
mov x1, #ACTLR_ELx_ENABLE_ALL_ACCESS
orr x0, x0, x1
msr actlr_el3, x0
mrs x0, actlr_el2
mov x1, #ACTLR_EL3_ENABLE_ALL_MASK
bic x0, x0, x1
mov x1, #ACTLR_EL3_ENABLE_ALL_ACCESS
mov x1, #ACTLR_ELx_ENABLE_ALL_ACCESS
orr x0, x0, x1
msr actlr_el2, x0
isb
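
The five implementation-defined bits OR together to 0x73; a C restatement of the constant the assembly moves into x1 (a sketch, using the shifts defined above):

#include <stdint.h>

/* ACTLR_ELx_ENABLE_ALL_ACCESS recomputed: bits 6, 5, 4, 1 and 0 */
static uint32_t actlr_elx_enable_all_access(void)
{
	return (1U << 6) | (1U << 5) | (1U << 4) |
	       (1U << 1) | (1U << 0);	/* == 0x73 */
}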

plat/nvidia/tegra/common/tegra_fiq_glue.c (8)

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,7 +13,6 @@
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <lib/bakery_lock.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>
@@ -25,8 +25,6 @@
/* Legacy FIQ used by earlier Tegra platforms */
#define LEGACY_FIQ_PPI_WDT 28U
static DEFINE_BAKERY_LOCK(tegra_fiq_lock);
/*******************************************************************************
* Static variables
******************************************************************************/
@@ -57,8 +55,6 @@ static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
*/
irq = plat_ic_get_pending_interrupt_id();
bakery_lock_get(&tegra_fiq_lock);
/*
* Jump to NS world only if the NS world's FIQ handler has
* been registered
@@ -107,8 +103,6 @@ static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
plat_ic_end_of_interrupt(irq);
}
bakery_lock_release(&tegra_fiq_lock);
return 0;
}

plat/nvidia/tegra/include/drivers/pmc.h (33)

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,27 +19,37 @@
#define PMC_DPD_ENABLE_0 U(0x24)
#define PMC_PWRGATE_STATUS U(0x38)
#define PMC_PWRGATE_TOGGLE U(0x30)
#define PMC_SECURE_SCRATCH0 U(0xb0)
#define PMC_SECURE_SCRATCH5 U(0xc4)
#define PMC_SCRATCH1 U(0x54)
#define PMC_CRYPTO_OP_0 U(0xf4)
#define PMC_TOGGLE_START U(0x100)
#define PMC_SCRATCH31 U(0x118)
#define PMC_SCRATCH32 U(0x11C)
#define PMC_SCRATCH33 U(0x120)
#define PMC_SCRATCH39 U(0x138)
#define PMC_SCRATCH40 U(0x13C)
#define PMC_SCRATCH41 U(0x140)
#define PMC_SECURE_SCRATCH6 U(0x224)
#define PMC_SECURE_SCRATCH7 U(0x228)
#define PMC_SECURE_DISABLE2 U(0x2c4)
#define PMC_SCRATCH42 U(0x144)
#define PMC_SCRATCH43 U(0x22C)
#define PMC_SCRATCH44 U(0x230)
#define PMC_SCRATCH45 U(0x234)
#define PMC_SCRATCH46 U(0x238)
#define PMC_SCRATCH47 U(0x23C)
#define PMC_SCRATCH48 U(0x240)
#define PMC_SCRATCH50 U(0x248)
#define PMC_SCRATCH51 U(0x24C)
#define PMC_TSC_MULT_0 U(0x2B4)
#define PMC_STICKY_BIT U(0x2C0)
#define PMC_SECURE_DISABLE2 U(0x2C4)
#define PMC_SECURE_DISABLE2_WRITE22_ON (U(1) << 28)
#define PMC_SECURE_SCRATCH8 U(0x300)
#define PMC_SECURE_SCRATCH79 U(0x41c)
#define PMC_FUSE_CONTROL_0 U(0x450)
#define PMC_SECURE_SCRATCH22 U(0x338)
#define PMC_SECURE_DISABLE3 U(0x2d8)
#define PMC_SECURE_DISABLE3 U(0x2D8)
#define PMC_SECURE_DISABLE3_WRITE34_ON (U(1) << 20)
#define PMC_SECURE_DISABLE3_WRITE35_ON (U(1) << 22)
#define PMC_SECURE_SCRATCH22 U(0x338)
#define PMC_SECURE_SCRATCH34 U(0x368)
#define PMC_SECURE_SCRATCH35 U(0x36c)
#define PMC_SECURE_SCRATCH80 U(0xa98)
#define PMC_SECURE_SCRATCH119 U(0xb34)
#define PMC_SCRATCH56 U(0x600)
#define PMC_SCRATCH57 U(0x604)
#define PMC_SCRATCH201 U(0x844)
static inline uint32_t tegra_pmc_read_32(uint32_t off)

plat/nvidia/tegra/include/drivers/security_engine.h (3)

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -55,5 +55,6 @@ void tegra_se_init(void);
int tegra_se_suspend(void);
void tegra_se_resume(void);
int tegra_se_save_tzram(void);
int32_t tegra_se_save_sha256_hash(uint64_t bl31_base, uint32_t src_len_inbyte);
#endif /* SECURITY_ENGINE_H */
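
A minimal caller sketch for the new interface, assuming the SE clock is already enabled; BL31_BASE/BL31_END are the BL31 image bounds, as used by the T186 PSCI handler further down:

#include <common/debug.h>
#include <security_engine.h>

static void checkpoint_bl31_hash(void)
{
	uint32_t len = (uint32_t)((uintptr_t)BL31_END - (uintptr_t)BL31_BASE);

	/* Hash the BL31 image and stash the digest in scratch registers */
	if (tegra_se_save_sha256_hash(BL31_BASE, len) != 0) {
		ERROR("BL31 SHA256 save failed\n");
	}
}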

plat/nvidia/tegra/include/t186/tegra_def.h (25)

@@ -212,6 +212,14 @@
#define TEGRA_RNG1_BASE U(0x03AE0000)
#define RNG_MUTEX_WATCHDOG_NS_LIMIT U(0xFE0)
/*******************************************************************************
* Tegra HSP doorbell #0 constants
******************************************************************************/
#define TEGRA_HSP_DBELL_BASE U(0x03C90000)
#define HSP_DBELL_1_ENABLE U(0x104)
#define HSP_DBELL_3_TRIGGER U(0x300)
#define HSP_DBELL_3_ENABLE U(0x304)
/*******************************************************************************
* Tegra Clock and Reset Controller constants
******************************************************************************/
@@ -238,6 +246,7 @@
* Tegra scratch registers constants
******************************************************************************/
#define TEGRA_SCRATCH_BASE U(0x0C390000)
#define SECURE_SCRATCH_RSV0_HI U(0x654)
#define SECURE_SCRATCH_RSV1_LO U(0x658)
#define SECURE_SCRATCH_RSV1_HI U(0x65C)
#define SECURE_SCRATCH_RSV6 U(0x680)
@@ -247,6 +256,15 @@
#define SECURE_SCRATCH_RSV53_HI U(0x7FC)
#define SECURE_SCRATCH_RSV55_LO U(0x808)
#define SECURE_SCRATCH_RSV55_HI U(0x80C)
#define SECURE_SCRATCH_RSV63_LO U(0x848)
#define SECURE_SCRATCH_RSV63_HI U(0x84C)
#define SECURE_SCRATCH_RSV64_LO U(0x850)
#define SECURE_SCRATCH_RSV64_HI U(0x854)
#define SECURE_SCRATCH_RSV65_LO U(0x858)
#define SECURE_SCRATCH_RSV65_HI U(0x85c)
#define SECURE_SCRATCH_RSV66_LO U(0x860)
#define SECURE_SCRATCH_RSV66_HI U(0x864)
#define SECURE_SCRATCH_RSV68_LO U(0x870)
#define SCRATCH_RESET_VECTOR_LO SECURE_SCRATCH_RSV1_LO
#define SCRATCH_RESET_VECTOR_HI SECURE_SCRATCH_RSV1_HI
@@ -280,6 +298,13 @@
#define TEGRA_TZRAM_BASE U(0x30000000)
#define TEGRA_TZRAM_SIZE U(0x40000)
/*******************************************************************************
* Tegra CCPLEX-BPMP IPC constants
******************************************************************************/
#define TEGRA_BPMP_IPC_TX_PHYS_BASE U(0x3004C000)
#define TEGRA_BPMP_IPC_RX_PHYS_BASE U(0x3004D000)
#define TEGRA_BPMP_IPC_CH_MAP_SIZE U(0x1000) /* 4KB */
/*******************************************************************************
* Tegra DRAM memory base address
******************************************************************************/

plat/nvidia/tegra/include/t210/tegra_def.h (3)

@@ -144,6 +144,9 @@
#define SE_CLK_ENB_BIT (U(1) << 31)
#define TEGRA_CLK_OUT_ENB_W U(0x364)
#define ENTROPY_RESET_BIT (U(1) << 21)
#define TEGRA_CLK_RST_CTL_CLK_SRC_SE U(0x42C)
#define SE_CLK_SRC_MASK (U(7) << 29)
#define SE_CLK_SRC_CLK_M (U(6) << 29)
#define TEGRA_RST_DEV_SET_V U(0x430)
#define SE_RESET_BIT (U(1) << 31)
#define HDA_RESET_BIT (U(1) << 29)

plat/nvidia/tegra/soc/t186/drivers/se/se.c (278)

@@ -0,0 +1,278 @@
/*
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <drivers/delay_timer.h>
#include <errno.h>
#include <string.h>
#include <bpmp_ipc.h>
#include <pmc.h>
#include <security_engine.h>
#include <tegra186_private.h>
#include <tegra_private.h>
#include "se_private.h"
/*******************************************************************************
* Constants and Macros
******************************************************************************/
#define SE0_MAX_BUSY_TIMEOUT_MS U(100) /* 100ms */
#define BYTES_IN_WORD U(4)
#define SHA256_MAX_HASH_RESULT U(7)
#define SHA256_DST_SIZE U(32)
#define SHA_FIRST_OP U(1)
#define MAX_SHA_ENGINE_CHUNK_SIZE U(0xFFFFFF)
#define SHA256_MSG_LENGTH_ONETIME U(0xffff)
/*
* Check that SE operation has completed after kickoff
* This function is invoked after an SE operation has been started,
* and it checks the following conditions:
* 1. SE0_INT_STATUS = SE0_OP_DONE
* 2. SE0_STATUS = IDLE
* 3. SE0_ERR_STATUS is clean.
*/
static int32_t tegra_se_operation_complete(void)
{
uint32_t val = 0U;
/* Read SE0 interrupt register to ensure H/W operation complete */
val = tegra_se_read_32(SE0_INT_STATUS_REG_OFFSET);
if (SE0_INT_OP_DONE(val) == SE0_INT_OP_DONE_CLEAR) {
ERROR("%s: Engine busy state too many times! val = 0x%x\n",
__func__, val);
return -ETIMEDOUT;
}
/* Read SE0 status idle to ensure H/W operation complete */
val = tegra_se_read_32(SE0_SHA_STATUS_0);
if (val != SE0_SHA_STATUS_IDLE) {
ERROR("%s: Idle state timeout! val = 0x%x\n", __func__,
val);
return -ETIMEDOUT;
}
/* Ensure that no errors are thrown during operation */
val = tegra_se_read_32(SE0_ERR_STATUS_REG_OFFSET);
if (val != SE0_ERR_STATUS_CLEAR) {
ERROR("%s: Error during SE operation! val = 0x%x",
__func__, val);
return -ENOTSUP;
}
return 0;
}
/*
* Security engine primitive normal operations
*/
static int32_t tegra_se_start_normal_operation(uint64_t src_addr,
uint32_t nbytes, uint32_t last_buf, uint32_t src_len_inbytes)
{
int32_t ret = 0;
uint32_t val = 0U;
uint32_t src_in_lo;
uint32_t src_in_msb;
uint32_t src_in_hi;
if ((src_addr == 0UL) || (nbytes == 0U))
return -EINVAL;
src_in_lo = (uint32_t)src_addr;
src_in_msb = ((uint32_t)(src_addr >> 32U) & 0xffU);
src_in_hi = ((src_in_msb << SE0_IN_HI_ADDR_HI_0_MSB_SHIFT) |
(nbytes & 0xffffffU));
/* set SRC_IN_ADDR_LO and SRC_IN_ADDR_HI*/
tegra_se_write_32(SE0_IN_ADDR, src_in_lo);
tegra_se_write_32(SE0_IN_HI_ADDR_HI, src_in_hi);
val = tegra_se_read_32(SE0_INT_STATUS_REG_OFFSET);
if (val > 0U) {
tegra_se_write_32(SE0_INT_STATUS_REG_OFFSET, 0x00000U);
}
/* Enable SHA interrupt for SE0 Operation */
tegra_se_write_32(SE0_SHA_INT_ENABLE, 0x1aU);
/* flush to DRAM for SE to use the updated contents */
flush_dcache_range(src_addr, src_len_inbytes);
/* Start SHA256 operation */
if (last_buf == 1U) {
tegra_se_write_32(SE0_OPERATION_REG_OFFSET, SE0_OP_START |
SE0_UNIT_OPERATION_PKT_LASTBUF_FIELD);
} else {
tegra_se_write_32(SE0_OPERATION_REG_OFFSET, SE0_OP_START);
}
/* Wait for SE-operation to finish */
udelay(SE0_MAX_BUSY_TIMEOUT_MS * 100U);
/* Check SE0 operation status */
ret = tegra_se_operation_complete();
if (ret != 0) {
ERROR("SE operation complete Failed! 0x%x", ret);
return ret;
}
return 0;
}
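
The two address writes above split a 40-bit DMA address: the low word goes to SE0_IN_ADDR, while SE0_IN_HI_ADDR_HI carries address bits 39:32 in its top byte and the 24-bit byte count below them. The packing, as a standalone sketch:

#include <stdint.h>

static void se0_pack_input(uint64_t src_addr, uint32_t nbytes,
			   uint32_t *addr_lo, uint32_t *addr_hi)
{
	uint32_t msb = (uint32_t)(src_addr >> 32) & 0xffU;

	*addr_lo = (uint32_t)src_addr;			/* SE0_IN_ADDR */
	*addr_hi = (msb << 24) | (nbytes & 0xffffffU);	/* SE0_IN_HI_ADDR_HI */
}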
static int32_t tegra_se_calculate_sha256_hash(uint64_t src_addr,
uint32_t src_len_inbyte)
{
uint32_t val, last_buf, i;
int32_t ret = 0;
uint32_t operations;
uint64_t src_len_inbits;
uint32_t len_bits_msb;
uint32_t len_bits_lsb;
uint32_t number_of_operations, max_bytes, bytes_left, remaining_bytes;
if (src_len_inbyte > MAX_SHA_ENGINE_CHUNK_SIZE) {
ERROR("SHA input chunk size too big: 0x%x\n", src_len_inbyte);
return -EINVAL;
}
if (src_addr == 0UL) {
return -EINVAL;
}
/* number of bytes per operation */
max_bytes = SHA256_HASH_SIZE_BYTES * SHA256_MSG_LENGTH_ONETIME;
src_len_inbits = src_len_inbyte * 8U;
len_bits_msb = (uint32_t)(src_len_inbits >> 32U);
len_bits_lsb = (uint32_t)(src_len_inbits & 0xFFFFFFFF);
/* program SE0_CONFIG for SHA256 operation */
val = SE0_CONFIG_ENC_ALG_SHA | SE0_CONFIG_ENC_MODE_SHA256 |
SE0_CONFIG_DEC_ALG_NOP | SE0_CONFIG_DST_HASHREG;
tegra_se_write_32(SE0_SHA_CONFIG, val);
/* set SE0_SHA_MSG_LENGTH registers */
tegra_se_write_32(SE0_SHA_MSG_LENGTH_0, len_bits_lsb);
tegra_se_write_32(SE0_SHA_MSG_LEFT_0, len_bits_lsb);
tegra_se_write_32(SE0_SHA_MSG_LENGTH_1, len_bits_msb);
/* zero out unused SE0_SHA_MSG_LENGTH and SE0_SHA_MSG_LEFT */
tegra_se_write_32(SE0_SHA_MSG_LENGTH_2, 0U);
tegra_se_write_32(SE0_SHA_MSG_LENGTH_3, 0U);
tegra_se_write_32(SE0_SHA_MSG_LEFT_1, 0U);
tegra_se_write_32(SE0_SHA_MSG_LEFT_2, 0U);
tegra_se_write_32(SE0_SHA_MSG_LEFT_3, 0U);
number_of_operations = src_len_inbyte / max_bytes;
remaining_bytes = src_len_inbyte % max_bytes;
if (remaining_bytes > 0U) {
number_of_operations += 1U;
}
/*
* 1. Operations == 1: program SE0_SHA_TASK register to initiate SHA256
* hash generation by setting
* 1(SE0_SHA_CONFIG_HW_INIT_HASH) to SE0_SHA_TASK
* and start SHA256-normal operation.
* 2. 1 < Operations < number_of_operations: program SE0_SHA_TASK to
* 0(SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE) to load
* intermediate SHA256 digest result from
* HASH_RESULT register to continue SHA256
* generation and start SHA256-normal operation.
* 3. Operations == number_of_operations: continue with step 2 and set
* max_bytes to bytes_left to process final
* hash-result generation and
* start SHA256-normal operation.
*/
bytes_left = src_len_inbyte;
for (operations = 1U; operations <= number_of_operations;
operations++) {
if (operations == SHA_FIRST_OP) {
val = SE0_SHA_CONFIG_HW_INIT_HASH;
} else {
/* Load intermediate SHA digest result to
* SHA:HASH_RESULT(0..7) to continue the SHA
* calculation and tell the SHA engine to use it.
*/
for (i = 0U; (i / BYTES_IN_WORD) <=
SHA256_MAX_HASH_RESULT; i += BYTES_IN_WORD) {
val = tegra_se_read_32(SE0_SHA_HASH_RESULT_0 +
i);
tegra_se_write_32(SE0_SHA_HASH_RESULT_0 + i,
val);
}
val = SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE;
if (len_bits_lsb <= (max_bytes * 8U)) {
len_bits_lsb = (remaining_bytes * 8U);
} else {
len_bits_lsb -= (max_bytes * 8U);
}
tegra_se_write_32(SE0_SHA_MSG_LEFT_0, len_bits_lsb);
}
tegra_se_write_32(SE0_SHA_TASK_CONFIG, val);
max_bytes = (SHA256_HASH_SIZE_BYTES *
SHA256_MSG_LENGTH_ONETIME);
if (bytes_left < max_bytes) {
max_bytes = bytes_left;
last_buf = 1U;
} else {
bytes_left = bytes_left - max_bytes;
last_buf = 0U;
}
/* start operation */
ret = tegra_se_start_normal_operation(src_addr, max_bytes,
last_buf, src_len_inbyte);
if (ret != 0) {
ERROR("Error during SE operation! 0x%x", ret);
return -EINVAL;
}
}
return ret;
}
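
The loop above is plain ceiling division: with SHA256_HASH_SIZE_BYTES = 256 and SHA256_MSG_LENGTH_ONETIME = 0xffff, each operation covers 256 * 0xffff bytes and any remainder rides in the final pass. The same bookkeeping in isolation:

#include <stdint.h>

#define MAX_BYTES_PER_OP	(256U * 0xffffU)

static void sha_chunk_plan(uint32_t len, uint32_t *n_ops, uint32_t *last_chunk)
{
	*n_ops = len / MAX_BYTES_PER_OP;
	*last_chunk = len % MAX_BYTES_PER_OP;

	if (*last_chunk != 0U) {
		(*n_ops)++;			/* partial final chunk */
	} else {
		*last_chunk = MAX_BYTES_PER_OP;	/* len is an exact multiple */
	}
}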
/*
* Handler to generate SHA256 and save SHA256 hash to PMC-Scratch register.
*/
int32_t tegra_se_save_sha256_hash(uint64_t bl31_base, uint32_t src_len_inbyte)
{
int32_t ret = 0;
uint32_t val = 0U, hash_offset = 0U, scratch_offset = 0U, security;
/*
* Set SE_SOFT_SETTINGS=SE_SECURE to prevent the NS world from changing
* SE registers.
*/
security = tegra_se_read_32(SE0_SECURITY);
tegra_se_write_32(SE0_SECURITY, security | SE0_SECURITY_SE_SOFT_SETTING);
ret = tegra_se_calculate_sha256_hash(bl31_base, src_len_inbyte);
if (ret != 0L) {
ERROR("%s: SHA256 generation failed\n", __func__);
return ret;
}
/*
* Reset SE_SECURE to previous value.
*/
tegra_se_write_32(SE0_SECURITY, security);
/* read SHA256_HASH_RESULT and save to PMC Scratch registers */
scratch_offset = SECURE_SCRATCH_TZDRAM_SHA256_HASH_START;
while (scratch_offset <= SECURE_SCRATCH_TZDRAM_SHA256_HASH_END) {
val = tegra_se_read_32(SE0_SHA_HASH_RESULT_0 + hash_offset);
mmio_write_32(TEGRA_SCRATCH_BASE + scratch_offset, val);
hash_offset += BYTES_IN_WORD;
scratch_offset += BYTES_IN_WORD;
}
return ret;
}
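
The copy loop relies on SECURE_SCRATCH_RSV63_LO through SECURE_SCRATCH_RSV66_HI being eight consecutive 32-bit registers (offsets 0x848 through 0x864), exactly one SHA256 digest. That invariant can be pinned at compile time:

#include <assert.h>

/* 8 words x 4 bytes == 32-byte SHA256 digest (offsets from tegra_def.h) */
static_assert(((0x864U - 0x848U) / 4U) + 1U == 8U,
	      "scratch window must hold a full SHA256 digest");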

plat/nvidia/tegra/soc/t186/drivers/se/se_private.h (100)

@@ -0,0 +1,100 @@
/*
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SE_PRIVATE_H
#define SE_PRIVATE_H
#include <lib/utils_def.h>
/* SE0 security register */
#define SE0_SECURITY U(0x18)
#define SE0_SECURITY_SE_SOFT_SETTING (((uint32_t)1) << 16U)
/* SE0 config register */
#define SE0_SHA_CONFIG U(0x104)
#define SE0_SHA_TASK_CONFIG U(0x108)
#define SE0_SHA_CONFIG_HW_INIT_HASH ((1U) << 0U)
#define SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE U(0)
#define SE0_CONFIG_ENC_ALG_SHIFT U(12)
#define SE0_CONFIG_ENC_ALG_SHA \
(((uint32_t)3) << SE0_CONFIG_ENC_ALG_SHIFT)
#define SE0_CONFIG_DEC_ALG_SHIFT U(8)
#define SE0_CONFIG_DEC_ALG_NOP \
(((uint32_t)0) << SE0_CONFIG_DEC_ALG_SHIFT)
#define SE0_CONFIG_DST_SHIFT U(2)
#define SE0_CONFIG_DST_HASHREG \
(((uint32_t)1) << SE0_CONFIG_DST_SHIFT)
#define SHA256_HASH_SIZE_BYTES U(256)
#define SE0_CONFIG_ENC_MODE_SHIFT U(24)
#define SE0_CONFIG_ENC_MODE_SHA256 \
(((uint32_t)5) << SE0_CONFIG_ENC_MODE_SHIFT)
/* SHA input message length */
#define SE0_SHA_MSG_LENGTH_0 U(0x11c)
#define SE0_SHA_MSG_LENGTH_1 U(0x120)
#define SE0_SHA_MSG_LENGTH_2 U(0x124)
#define SE0_SHA_MSG_LENGTH_3 U(0x128)
/* SHA input message left */
#define SE0_SHA_MSG_LEFT_0 U(0x12c)
#define SE0_SHA_MSG_LEFT_1 U(0x130)
#define SE0_SHA_MSG_LEFT_2 U(0x134)
#define SE0_SHA_MSG_LEFT_3 U(0x138)
/* SE Hash Result */
#define SE0_SHA_HASH_RESULT_0 U(0x13c)
/* SE OPERATION */
#define SE0_OPERATION_REG_OFFSET U(0x17c)
#define SE0_UNIT_OPERATION_PKT_LASTBUF_SHIFT U(16)
#define SE0_UNIT_OPERATION_PKT_LASTBUF_FIELD \
(((uint32_t)0x1) << SE0_UNIT_OPERATION_PKT_LASTBUF_SHIFT)
#define SE0_OPERATION_SHIFT U(0)
#define SE0_OP_START \
(((uint32_t)0x1) << SE0_OPERATION_SHIFT)
/* SE Interrupt */
#define SE0_SHA_INT_ENABLE U(0x180)
#define SE0_INT_STATUS_REG_OFFSET U(0x184)
#define SE0_INT_OP_DONE_SHIFT U(4)
#define SE0_INT_OP_DONE_CLEAR \
(((uint32_t)0) << SE0_INT_OP_DONE_SHIFT)
#define SE0_INT_OP_DONE(x) \
((x) & (((uint32_t)0x1) << SE0_INT_OP_DONE_SHIFT))
/* SE SHA status */
#define SE0_SHA_STATUS_0 U(0x188)
#define SE0_SHA_STATUS_IDLE U(0)
/* SE error status */
#define SE0_ERR_STATUS_REG_OFFSET U(0x18c)
#define SE0_ERR_STATUS_CLEAR U(0)
#define SE0_IN_ADDR U(0x10c)
#define SE0_IN_HI_ADDR_HI U(0x110)
#define SE0_IN_HI_ADDR_HI_0_MSB_SHIFT U(24)
/* SE error status */
#define SECURE_SCRATCH_TZDRAM_SHA256_HASH_START SECURE_SCRATCH_RSV63_LO
#define SECURE_SCRATCH_TZDRAM_SHA256_HASH_END SECURE_SCRATCH_RSV66_HI
/*******************************************************************************
* Inline functions definition
******************************************************************************/
static inline uint32_t tegra_se_read_32(uint32_t offset)
{
return mmio_read_32((uint32_t)(TEGRA_SE0_BASE + offset));
}
static inline void tegra_se_write_32(uint32_t offset, uint32_t val)
{
mmio_write_32(((uint32_t)(TEGRA_SE0_BASE + offset)), val);
}
#endif /* SE_PRIVATE_H */
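
Putting the field definitions together, the SHA256 configuration word written to SE0_SHA_CONFIG works out to 0x05003004; a sketch of the composition:

#include <stdint.h>

static uint32_t se0_sha256_config_word(void)
{
	uint32_t val = 0U;

	val |= 3U << 12;	/* SE0_CONFIG_ENC_ALG = SHA */
	val |= 5U << 24;	/* SE0_CONFIG_ENC_MODE = SHA256 */
	val |= 0U << 8;		/* SE0_CONFIG_DEC_ALG = NOP */
	val |= 1U << 2;		/* SE0_CONFIG_DST = HASH_REG */

	return val;		/* == 0x05003004 */
}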

plat/nvidia/tegra/soc/t186/plat_memctrl.c (21)

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,7 +11,11 @@
#include <mce.h>
#include <memctrl_v2.h>
#include <tegra_mc_def.h>
#include <tegra186_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>
extern uint64_t tegra_bl31_phys_base;
/*******************************************************************************
* Array to hold stream_id override config register offsets
@@ -540,6 +545,13 @@ tegra_mc_settings_t *tegra_get_mc_settings(void)
void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes)
{
uint32_t val;
uint64_t src_base_tzdram;
const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
uint64_t src_len_in_bytes = BL31_END - BL31_START;
/* base address of BL3-1 source in TZDRAM */
src_base_tzdram = params_from_bl2->tzdram_base +
tegra186_get_cpu_reset_handler_size();
/*
* Setup the Memory controller to allow only secure accesses to
@@ -568,6 +580,15 @@ void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes)
val = tegra_mc_read_32(MC_SECURITY_CFG3_0) & MC_SECURITY_BOM_HI_MASK;
mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_TZDRAM_ADDR_HI, val);
/*
* Save tzdram_addr_lo and the ATF size; these are used by the SC7
* resume firmware (SC7-RF) to generate the SHA256 hash.
*/
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV68_LO,
(uint32_t)src_base_tzdram);
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV0_HI,
(uint32_t)src_len_in_bytes);
/*
* MCE propagates the security configuration values across the
* CCPLEX.
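
The two new scratch writes, condensed into a standalone sketch (the helper name is hypothetical; the register usage matches the diff). Only the low 32 address bits are stored here because the high bits already went to SCRATCH_TZDRAM_ADDR_HI above:

#include <lib/mmio.h>
#include <tegra_def.h>

static void record_bl31_location(uint64_t src_base_tzdram, uint64_t len)
{
	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV68_LO,
		      (uint32_t)src_base_tzdram);
	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV0_HI,
		      (uint32_t)len);
}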

plat/nvidia/tegra/soc/t186/plat_psci_handlers.c (35)

@@ -6,6 +6,7 @@
*/
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <arch.h>
@@ -19,9 +20,10 @@
#include <lib/psci/psci.h>
#include <plat/common/platform.h>
#include <bpmp_ipc.h>
#include <mce.h>
#include <security_engine.h>
#include <smmu.h>
#include <stdbool.h>
#include <t18x_ari.h>
#include <tegra186_private.h>
#include <tegra_private.h>
@@ -280,8 +282,33 @@ int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_sta
uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
TEGRA186_STATE_ID_MASK;
uint64_t val;
uint64_t src_len_in_bytes = (uint64_t)(((uintptr_t)(&__BL31_END__) -
(uintptr_t)BL31_BASE));
int32_t ret;
if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
val = params_from_bl2->tzdram_base +
tegra186_get_cpu_reset_handler_size();
/* Initialise communication channel with BPMP */
assert(tegra_bpmp_ipc_init() == 0);
/* Enable SE clock */
ret = tegra_bpmp_ipc_enable_clock(TEGRA_CLK_SE);
if (ret != 0) {
ERROR("Failed to enable clock\n");
return ret;
}
/*
* Generate/save SHA256 of ATF during SC7 entry
*/
if (tegra_se_save_sha256_hash(BL31_BASE,
(uint32_t)src_len_in_bytes) != 0) {
ERROR("Hash calculation failed. Reboot\n");
(void)tegra_soc_prepare_system_reset();
}
/*
* The TZRAM loses power when we enter system suspend. To
* allow graceful exit from system suspend, we need to copy
@@ -291,6 +318,12 @@ int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_sta
tegra186_get_cpu_reset_handler_size();
memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
(uintptr_t)BL31_END - (uintptr_t)BL31_BASE);
ret = tegra_bpmp_ipc_disable_clock(TEGRA_CLK_SE);
if (ret != 0) {
ERROR("Failed to disable clock\n");
return ret;
}
}
return PSCI_E_SUCCESS;
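
Taken together, the SC7-entry additions order the steps as: bring up BPMP IPC, ungate the SE clock, hash BL31 into PMC scratch, copy BL31 to TZDRAM, then gate the clock again. A hedged condensation (the TZDRAM copy and the reboot-on-failure path are omitted):

#include <bpmp_ipc.h>
#include <security_engine.h>

static int32_t sc7_entry_checkpoint(void)
{
	uint32_t len = (uint32_t)((uintptr_t)BL31_END - (uintptr_t)BL31_BASE);
	int32_t ret = tegra_bpmp_ipc_init();

	if (ret == 0) {
		ret = tegra_bpmp_ipc_enable_clock(TEGRA_CLK_SE);
	}
	if (ret == 0) {
		ret = tegra_se_save_sha256_hash(BL31_BASE, len);
	}
	if (ret == 0) {
		ret = tegra_bpmp_ipc_disable_clock(TEGRA_CLK_SE);
	}
	return ret;
}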

plat/nvidia/tegra/soc/t186/plat_setup.c (6)

@@ -106,6 +106,12 @@ static const mmap_region_t tegra_mmap[] = {
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_HSP_DBELL_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_BPMP_IPC_TX_PHYS_BASE, TEGRA_BPMP_IPC_CH_MAP_SIZE, /* 4KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_BPMP_IPC_RX_PHYS_BASE, TEGRA_BPMP_IPC_CH_MAP_SIZE, /* 4KB */
MT_DEVICE | MT_RW | MT_SECURE),
{0}
};

plat/nvidia/tegra/soc/t186/platform_t186.mk (7)

@@ -30,10 +30,10 @@ $(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
PLATFORM_MAX_CPUS_PER_CLUSTER := 4
$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
MAX_XLAT_TABLES := 24
MAX_XLAT_TABLES := 25
$(eval $(call add_define,MAX_XLAT_TABLES))
MAX_MMAP_REGIONS := 25
MAX_MMAP_REGIONS := 27
$(eval $(call add_define,MAX_MMAP_REGIONS))
# platform files
@@ -42,6 +42,8 @@ PLAT_INCLUDES += -I${SOC_DIR}/drivers/include
BL31_SOURCES += drivers/ti/uart/aarch64/16550_console.S \
lib/cpus/aarch64/denver.S \
lib/cpus/aarch64/cortex_a57.S \
${COMMON_DIR}/drivers/bpmp_ipc/intf.c \
${COMMON_DIR}/drivers/bpmp_ipc/ivc.c \
${COMMON_DIR}/drivers/gpcdma/gpcdma.c \
${COMMON_DIR}/drivers/memctrl/memctrl_v2.c \
${COMMON_DIR}/drivers/smmu/smmu.c \
@@ -49,6 +51,7 @@ BL31_SOURCES += drivers/ti/uart/aarch64/16550_console.S \
${SOC_DIR}/drivers/mce/ari.c \
${SOC_DIR}/drivers/mce/nvg.c \
${SOC_DIR}/drivers/mce/aarch64/nvg_helpers.S \
$(SOC_DIR)/drivers/se/se.c \
${SOC_DIR}/plat_memctrl.c \
${SOC_DIR}/plat_psci_handlers.c \
${SOC_DIR}/plat_setup.c \

plat/nvidia/tegra/soc/t194/plat_memctrl.c (422)

@@ -138,7 +138,11 @@ const static uint32_t tegra194_streamid_override_regs[] = {
MC_STREAMID_OVERRIDE_CFG_MIU2R,
MC_STREAMID_OVERRIDE_CFG_MIU2W,
MC_STREAMID_OVERRIDE_CFG_MIU3R,
MC_STREAMID_OVERRIDE_CFG_MIU3W
MC_STREAMID_OVERRIDE_CFG_MIU3W,
MC_STREAMID_OVERRIDE_CFG_MIU4R,
MC_STREAMID_OVERRIDE_CFG_MIU4W,
MC_STREAMID_OVERRIDE_CFG_MIU5R,
MC_STREAMID_OVERRIDE_CFG_MIU5W
};
/*******************************************************************************
@@ -268,416 +272,13 @@ const static mc_streamid_security_cfg_t tegra194_streamid_sec_cfgs[] = {
mc_make_sec_cfg(MIU2R, NON_SECURE, OVERRIDE, DISABLE),
mc_make_sec_cfg(MIU2W, NON_SECURE, OVERRIDE, DISABLE),
mc_make_sec_cfg(MIU3R, NON_SECURE, OVERRIDE, DISABLE),
mc_make_sec_cfg(MIU3W, NON_SECURE, OVERRIDE, DISABLE)
mc_make_sec_cfg(MIU3W, NON_SECURE, OVERRIDE, DISABLE),
mc_make_sec_cfg(MIU4R, NON_SECURE, OVERRIDE, DISABLE),
mc_make_sec_cfg(MIU4W, NON_SECURE, OVERRIDE, DISABLE),
mc_make_sec_cfg(MIU5R, NON_SECURE, OVERRIDE, DISABLE),
mc_make_sec_cfg(MIU5W, NON_SECURE, OVERRIDE, DISABLE)
};
/* To be called by common memctrl_v2.c */
static void tegra194_memctrl_reconfig_mss_clients(void)
{
uint32_t reg_val, wdata_0, wdata_1, wdata_2;
wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB;
if (tegra_platform_is_silicon()) {
wdata_0 |= MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB;
}
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
do {
reg_val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
} while ((reg_val & wdata_0) != wdata_0);
wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB|
MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_VIFAL_FLUSH_ENB;
if (tegra_platform_is_silicon()) {
wdata_1 |= MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_RCE_FLUSH_ENB;
}
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
do {
reg_val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
} while ((reg_val & wdata_1) != wdata_1);
wdata_2 = MC_CLIENT_HOTRESET_CTRL2_PCIE_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_AONDMA_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_BPMPDMA_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_SCEDMA_FLUSH_ENB;
if (tegra_platform_is_silicon()) {
wdata_2 |= MC_CLIENT_HOTRESET_CTRL2_RCEDMA_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_PCIE_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_PCIE5A_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_PCIE3A_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_PCIE3_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_PCIE0A_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_PCIE0A2_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL2_PCIE4A_FLUSH_ENB;
}
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL2, wdata_2);
/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
do {
reg_val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS2);
} while ((reg_val & wdata_2) != wdata_2);
/*
* Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and
* strongly ordered MSS clients.
*
* MC clients with default SO_DEV override still enabled at TSA:
* EQOSW, SATAW, XUSB_DEVW, XUSB_HOSTW, PCIe0w, PCIe1w, PCIe2w,
* PCIe3w, PCIe4w and PCIe5w.
*/
mc_set_tsa_w_passthrough(AONDMAW);
mc_set_tsa_w_passthrough(AONW);
mc_set_tsa_w_passthrough(APEDMAW);
mc_set_tsa_w_passthrough(APEW);
mc_set_tsa_w_passthrough(AXISW);
mc_set_tsa_w_passthrough(BPMPDMAW);
mc_set_tsa_w_passthrough(BPMPW);
mc_set_tsa_w_passthrough(EQOSW);
mc_set_tsa_w_passthrough(ETRW);
mc_set_tsa_w_passthrough(RCEDMAW);
mc_set_tsa_w_passthrough(RCEW);
mc_set_tsa_w_passthrough(SCEDMAW);
mc_set_tsa_w_passthrough(SCEW);
mc_set_tsa_w_passthrough(SDMMCW);
mc_set_tsa_w_passthrough(SDMMCWA);
mc_set_tsa_w_passthrough(SDMMCWAB);
mc_set_tsa_w_passthrough(SESWR);
mc_set_tsa_w_passthrough(TSECSWR);
mc_set_tsa_w_passthrough(TSECSWRB);
mc_set_tsa_w_passthrough(UFSHCW);
mc_set_tsa_w_passthrough(VICSWR);
mc_set_tsa_w_passthrough(VIFALW);
/*
* set HUB2 as SO_DEV_HUBID
*/
reg_val = tsa_read_32(PCIE0W);
mc_set_tsa_hub2(reg_val, PCIE0W);
reg_val = tsa_read_32(PCIE1W);
mc_set_tsa_hub2(reg_val, PCIE1W);
reg_val = tsa_read_32(PCIE2AW);
mc_set_tsa_hub2(reg_val, PCIE2AW);
reg_val = tsa_read_32(PCIE3W);
mc_set_tsa_hub2(reg_val, PCIE3W);
reg_val = tsa_read_32(PCIE4W);
mc_set_tsa_hub2(reg_val, PCIE4W);
reg_val = tsa_read_32(SATAW);
mc_set_tsa_hub2(reg_val, SATAW);
reg_val = tsa_read_32(XUSB_DEVW);
mc_set_tsa_hub2(reg_val, XUSB_DEVW);
reg_val = tsa_read_32(XUSB_HOSTW);
mc_set_tsa_hub2(reg_val, XUSB_HOSTW);
/*
* Hw Bug: 200385660, 200394107
* PCIE datapath hangs when there are more than 28 outstanding
* requests on data backbone for x1 controller. This is seen
* on third party PCIE IP, C1 - PCIE1W, C2 - PCIE2AW and C3 - PCIE3W.
*
* Setting Reorder depth limit, 16 which is < 28.
*/
mc_set_tsa_depth_limit(REORDER_DEPTH_LIMIT, PCIE1W);
mc_set_tsa_depth_limit(REORDER_DEPTH_LIMIT, PCIE2AW);
mc_set_tsa_depth_limit(REORDER_DEPTH_LIMIT, PCIE3W);
/* Ordered MC Clients on Xavier are EQOS, SATA, XUSB, PCIe1 and PCIe3
* ISO clients(DISP, VI, EQOS) should never snoop caches and
* don't need ROC/PCFIFO ordering.
* ISO clients(EQOS) that need ordering should use PCFIFO ordering
* and bypass ROC ordering by using FORCE_NON_COHERENT path.
* FORCE_NON_COHERENT/FORCE_COHERENT config take precedence
* over SMMU attributes.
* Force all Normal memory transactions from ISO and non-ISO to be
* non-coherent(bypass ROC, avoid cache snoop to avoid perf hit).
* Force the SO_DEV transactions from ordered ISO clients(EQOS) to
* non-coherent path and enable MC PCFIFO interlock for ordering.
* Force the SO_DEV transactions from ordered non-ISO clients (PCIe,
* XUSB, SATA) to coherent so that the transactions are
* ordered by ROC.
* PCFIFO ensure write ordering.
* Read after Write ordering is maintained/enforced by MC clients.
* Clients that need PCIe type write ordering must
* go through ROC ordering.
* Ordering enable for Read clients is not necessary.
* R5's and A9 would get necessary ordering from AXI and
* don't need ROC ordering enable:
* - MMIO ordering is through dev mapping and MMIO
* accesses bypass SMMU.
* - Normal memory is accessed through SMMU and ordering is
* ensured by client and AXI.
* - Ack point for Normal memory is WCAM in MC.
* - MMIO's can be early acked and AXI ensures dev memory ordering,
* Client ensures read/write direction change ordering.
* - See Bug 200312466 for more details.
*/
mc_set_txn_override(AONDMAR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(AONDMAW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(AONR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(AONW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(APEDMAR, CGID_TAG_ADR, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(APEDMAW, CGID_TAG_ADR, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(APER, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(APEW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(AXISR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(AXISW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(BPMPDMAR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(BPMPDMAW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(BPMPR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(BPMPW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(EQOSR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(EQOSW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(ETRR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(ETRW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(HOST1XDMAR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(MPCORER, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MPCOREW, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(NVDISPLAYR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVDISPLAYR1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(PCIE0R, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE0R1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE0W, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE1R, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE1W, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
if (tegra_platform_is_silicon()) {
mc_set_txn_override(PCIE2AR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE2AW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE3R, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE3W, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE4R, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE4W, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE5R, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE5W, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(PCIE5R1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
}
mc_set_txn_override(PTCR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(RCEDMAR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(RCEDMAW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(RCER, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(RCEW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SATAR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(SATAW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(SCEDMAR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SCEDMAW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SCER, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SCEW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SDMMCR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SDMMCRAB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SDMMCRA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SDMMCW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SDMMCWA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SDMMCWAB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
/*
* TO DO: make SESRD/WR FORCE_COHERENT once SE+TZ with SMMU is enabled.
*/
mc_set_txn_override(SESRD, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(SESWR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(TSECSRD, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(TSECSRDB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(TSECSWR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(TSECSWRB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(UFSHCR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(UFSHCW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(VICSRD, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(VICSRD1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(VICSWR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(VIFALR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(VIFALW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(XUSB_DEVR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(XUSB_DEVW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(XUSB_HOSTR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(XUSB_HOSTW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT_SNOOP, FORCE_COHERENT_SNOOP);
mc_set_txn_override(AXIAPR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(AXIAPW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA0FALRDB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA0FALWRB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA0RDA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA0RDA1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA0WRA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA1FALRDB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA1FALWRB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA1RDA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA1RDA1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(DLA1WRA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(HDAR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(HDAW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(ISPFALR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(ISPFALW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(ISPRA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(ISPRA1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(ISPWA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(ISPWB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVDEC1SRD, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVDEC1SRD1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVDEC1SWR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVDECSRD, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVDECSRD1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVDECSWR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVENC1SRD, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVENC1SRD1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVENC1SWR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVENCSRD, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVENCSRD1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVENCSWR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVJPGSRD, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(NVJPGSWR, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA0RDA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA0RDA1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA0RDB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA0RDB1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA0RDC, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA0WRA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA0WRB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA0WRC, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA1RDA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA1RDA1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA1RDB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA1RDB1, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA1RDC, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA1WRA, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA1WRB, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(PVA1WRC, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_COHERENT, FORCE_COHERENT);
mc_set_txn_override(VIW, CGID_TAG_ADR, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
if (tegra_platform_is_silicon()) {
mc_set_txn_override(MIU0R, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU0W, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU1R, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU1W, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU2R, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU2W, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU3R, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU3W, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU4R, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU4W, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU5R, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU5W, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU6R, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU6W, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU7R, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(MIU7W, CGID_TAG_ADR, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
}
/*
* At this point, ordering can occur at SCF. So, remove PCFIFO's
* control over ordering requests.
*
* Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for
* boot and strongly ordered MSS clients
*/
reg_val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) &
mc_set_pcfifo_unordered_boot_so_mss(2, TSECSWR);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, reg_val);
reg_val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWA) &
mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCW) &
mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB) &
mc_set_pcfifo_unordered_boot_so_mss(3, VICSWR) &
mc_set_pcfifo_unordered_boot_so_mss(3, APEW);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, reg_val);
reg_val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
mc_set_pcfifo_unordered_boot_so_mss(4, TSECSWRB) &
mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
mc_set_pcfifo_unordered_boot_so_mss(4, BPMPW) &
mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
mc_set_pcfifo_unordered_boot_so_mss(4, AONW) &
mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
mc_set_pcfifo_unordered_boot_so_mss(4, SCEW) &
mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
/* EQOSW has PCFIFO order enabled. */
reg_val |= mc_set_pcfifo_unordered_boot_so_mss(4, EQOSW);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, reg_val);
reg_val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW) &
mc_set_pcfifo_unordered_boot_so_mss(5, VIFALW);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, reg_val);
reg_val = MC_PCFIFO_CLIENT_CONFIG6_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(6, RCEW) &
mc_set_pcfifo_unordered_boot_so_mss(6, RCEDMAW) &
mc_set_pcfifo_unordered_boot_so_mss(6, PCIE0W);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG6, reg_val);
reg_val = MC_PCFIFO_CLIENT_CONFIG7_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(7, PCIE4W) &
mc_set_pcfifo_unordered_boot_so_mss(7, PCIE5W);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG7, reg_val);
/* Set Order Id only for the clients having a non-zero order id */
reg_val = mc_client_order_id(MC_CLIENT_ORDER_ID_9_RESET_VAL, 9, XUSB_HOSTW);
tegra_mc_write_32(MC_CLIENT_ORDER_ID_9, reg_val);
reg_val = mc_client_order_id(MC_CLIENT_ORDER_ID_27_RESET_VAL, 27, PCIE0W);
tegra_mc_write_32(MC_CLIENT_ORDER_ID_27, reg_val);
reg_val = mc_client_order_id(MC_CLIENT_ORDER_ID_28_RESET_VAL, 28, PCIE4W);
reg_val = mc_client_order_id(reg_val, 28, PCIE5W);
tegra_mc_write_32(MC_CLIENT_ORDER_ID_28, reg_val);
/*
* Set VC Id only for the clients having different reset values like
* SDMMCRAB, SDMMCWAB, SESRD, SESWR, TSECSRD,TSECSRDB, TSECSWR and
* TSECSWRB clients
*/
reg_val = mc_hub_vc_id(MC_HUB_PC_VC_ID_0_RESET_VAL, 0, APB);
tegra_mc_write_32(MC_HUB_PC_VC_ID_0, reg_val);
/* SDMMCRAB and SDMMCWAB clients */
reg_val = mc_hub_vc_id(MC_HUB_PC_VC_ID_2_RESET_VAL, 2, SD);
tegra_mc_write_32(MC_HUB_PC_VC_ID_2, reg_val);
reg_val = mc_hub_vc_id(MC_HUB_PC_VC_ID_4_RESET_VAL, 4, NIC);
tegra_mc_write_32(MC_HUB_PC_VC_ID_4, reg_val);
reg_val = mc_hub_vc_id(MC_HUB_PC_VC_ID_12_RESET_VAL, 12, UFSHCPC2);
tegra_mc_write_32(MC_HUB_PC_VC_ID_12, reg_val);
wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
wdata_2 = MC_CLIENT_HOTRESET_CTRL2_RESET_VAL;
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL2, wdata_2);
reg_val = MC_COALESCE_CTRL_COALESCER_ENABLE;
tegra_mc_write_32(MC_COALESCE_CTRL, reg_val);
/*
* WAR to hardware bug 1953865: Coalescer must be disabled
* for PVA0RDC and PVA1RDC interfaces.
*/
reg_val = tegra_mc_read_32(MC_COALESCE_CONFIG_6_0);
reg_val &= ~(MC_COALESCE_CONFIG_6_0_PVA0RDC_COALESCER_ENABLED |
MC_COALESCE_CONFIG_6_0_PVA1RDC_COALESCER_ENABLED);
tegra_mc_write_32(MC_COALESCE_CONFIG_6_0, reg_val);
}
/*******************************************************************************
* Struct to hold the memory controller settings
******************************************************************************/
@@ -685,8 +286,7 @@ static tegra_mc_settings_t tegra194_mc_settings = {
.streamid_override_cfg = tegra194_streamid_override_regs,
.num_streamid_override_cfgs = (uint32_t)ARRAY_SIZE(tegra194_streamid_override_regs),
.streamid_security_cfg = tegra194_streamid_sec_cfgs,
.num_streamid_security_cfgs = (uint32_t)ARRAY_SIZE(tegra194_streamid_sec_cfgs),
.reconfig_mss_clients = tegra194_memctrl_reconfig_mss_clients
.num_streamid_security_cfgs = (uint32_t)ARRAY_SIZE(tegra194_streamid_sec_cfgs)
};
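
With .reconfig_mss_clients dropped, the designated initializer leaves that member NULL, so the common memctrl_v2 consumer presumably has to treat a missing hook as "nothing to reconfigure". A sketch of such a guard; this is an assumption about the common code, not a quote of it:

#include <stddef.h>
#include <memctrl_v2.h>

static void maybe_reconfig_mss(const tegra_mc_settings_t *settings)
{
	/* Assumption: platforms may leave this hook unset */
	if (settings->reconfig_mss_clients != NULL) {
		settings->reconfig_mss_clients();
	}
}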
/*******************************************************************************

plat/nvidia/tegra/soc/t210/drivers/se/se_private.h (6)

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -15,6 +15,9 @@
* PMC registers
*/
/* SC7 context save scratch register for T210 */
#define PMC_SCRATCH43_REG_OFFSET U(0x22C)
/* Secure scratch registers */
#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U
#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U
@@ -435,6 +438,7 @@
((x) & ((0x1U) << SE_TZRAM_OP_REQ_SHIFT))
/* SE Interrupt */
#define SE_INT_ENABLE_REG_OFFSET U(0xC)
#define SE_INT_STATUS_REG_OFFSET 0x10U
#define SE_INT_OP_DONE_SHIFT 4
#define SE_INT_OP_DONE_CLEAR \

plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c (124)

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -20,8 +20,8 @@
* Constants and Macros
******************************************************************************/
#define TIMEOUT_100MS 100U // Timeout in 100ms
#define RNG_AES_KEY_INDEX 1
#define TIMEOUT_100MS 100U /* Timeout in 100ms */
#define RNG_AES_KEY_INDEX 1
/*******************************************************************************
* Data structure and global variables
@@ -68,14 +68,12 @@
* #--------------------------------#
*/
/* Known pattern data */
static const uint32_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE_WORDS] = {
/* Known pattern data for T210 */
static const uint8_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE] = {
/* 128 bit AES block */
0x0C0D0E0F,
0x08090A0B,
0x04050607,
0x00010203,
};
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
};
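
The switch from uint32_t words to an explicit byte array is not cosmetic on a little-endian core: 0x0C0D0E0F stores as bytes 0f 0e 0d 0c, so the two encodings describe different memory contents, and the byte array pins the intended 00..0f sequence unambiguously. A quick check:

#include <stdint.h>
#include <string.h>

static int encodings_equal(void)
{
	const uint32_t words[4] = { 0x0C0D0E0F, 0x08090A0B,
				    0x04050607, 0x00010203 };
	const uint8_t bytes[16] = { 0x00, 0x01, 0x02, 0x03,
				    0x04, 0x05, 0x06, 0x07,
				    0x08, 0x09, 0x0a, 0x0b,
				    0x0c, 0x0d, 0x0e, 0x0f };

	/* Evaluates to 0 (not equal) on AArch64 */
	return memcmp(words, bytes, sizeof(bytes)) == 0;
}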
/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
@@ -85,6 +83,9 @@ static tegra_se_io_lst_t se1_dst_ll_buf;
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;
/* SE1 context buffer, 132 blocks */
static __aligned(64) uint8_t se1_ctx_buf[SE_CTX_DRBG_BUFER_SIZE];
/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
.se_num = 1,
@@ -97,10 +98,10 @@ static tegra_se_dev_t se_dev_1 = {
/* Setup DST buffers for SE operations */
.dst_ll_buf = &se1_dst_ll_buf,
/* Setup context save destination */
.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE),
.ctx_save_buf = (uint32_t *)&se1_ctx_buf
};
/* SE2 security engine device handle */
/* SE2 security engine device handle (T210B01 only) */
static tegra_se_dev_t se_dev_2 = {
.se_num = 2,
/* Setup base address for se */
@@ -112,7 +113,7 @@ static tegra_se_dev_t se_dev_2 = {
/* Setup DST buffers for SE operations */
.dst_ll_buf = &se2_dst_ll_buf,
/* Setup context save destination */
.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000),
.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000)
};
static bool ecid_valid;
@@ -201,18 +202,6 @@ static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
return ret;
}
/*
* Returns true if the SE engine is configured to perform SE context save in
* hardware.
*/
static inline bool tegra_se_atomic_save_enabled(const tegra_se_dev_t *se_dev)
{
uint32_t val;
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
return (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_EN);
}
/*
* Wait for SE engine to be idle and clear pending interrupts before
* starting the next SE operation.
@@ -223,6 +212,9 @@ static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
uint32_t val = 0;
uint32_t timeout;
/* disable SE interrupt to prevent interrupt issued by SE operation */
tegra_se_write_32(se_dev, SE_INT_ENABLE_REG_OFFSET, 0U);
/* Wait for previous operation to finish */
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
@@ -629,19 +621,19 @@ static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int ret = 0;
/* First the modulus and then the exponent must be
/* For T210, first the modulus and then the exponent must be
* encrypted and saved. This is repeated for SLOT 0
* and SLOT 1. Hence the order:
* SLOT 0 exponent : RSA_KEY_INDEX : 0
* SLOT 0 modulus : RSA_KEY_INDEX : 1
* SLOT 1 exponent : RSA_KEY_INDEX : 2
* SLOT 0 exponent : RSA_KEY_INDEX : 0
* SLOT 1 modulus : RSA_KEY_INDEX : 3
* SLOT 1 exponent : RSA_KEY_INDEX : 2
*/
const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
/* RSA key slot 0 */
{SE_RSA_KEY_INDEX_SLOT0_EXP, SE_RSA_KEY_INDEX_SLOT0_MOD},
{SE_RSA_KEY_INDEX_SLOT0_MOD, SE_RSA_KEY_INDEX_SLOT0_EXP},
/* RSA key slot 1 */
{SE_RSA_KEY_INDEX_SLOT1_EXP, SE_RSA_KEY_INDEX_SLOT1_MOD},
{SE_RSA_KEY_INDEX_SLOT1_MOD, SE_RSA_KEY_INDEX_SLOT1_EXP},
};
se_dev->dst_ll_buf->last_buff_num = 0;
@@ -876,8 +868,8 @@ static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
/* Write lp context buffer address into PMC scratch register */
if (se_dev->se_num == 1) {
/* SE context address */
mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH117_OFFSET,
/* SE context address, support T210 only */
mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SCRATCH43_REG_OFFSET,
((uint64_t)(se_dev->ctx_save_buf)));
} else if (se_dev->se_num == 2) {
/* SE2 & PKA1 context address */
@@ -909,7 +901,10 @@ void tegra_se_init(void)
/* Generate random SRK to initialize DRBG */
tegra_se_generate_srk(&se_dev_1);
tegra_se_generate_srk(&se_dev_2);
if (tegra_chipid_is_t210_b01()) {
tegra_se_generate_srk(&se_dev_2);
}
/* determine if ECID is valid */
val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);
@@ -932,6 +927,18 @@ static void tegra_se_enable_clocks(void)
val &= ~ENTROPY_RESET_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);
if (!tegra_chipid_is_t210_b01()) {
/*
* The T210 SE clock source is turned off by the kernel. To
* simplify the clock setup, switch the SE clock source to
* CLK_M with SE_CLK_DIVISOR = 0. The T210 B01 SE clock source
* is always on, so it does not need this setting.
*/
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_RST_CTL_CLK_SRC_SE,
SE_CLK_SRC_CLK_M);
}
/* Enable SE clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
val |= SE_CLK_ENB_BIT;
@@ -975,43 +982,25 @@ int32_t tegra_se_suspend(void)
tegra_se_enable_clocks();
if (tegra_se_atomic_save_enabled(&se_dev_2) &&
tegra_se_atomic_save_enabled(&se_dev_1)) {
/* Atomic context save se2 and pka1 */
if (tegra_chipid_is_t210_b01()) {
/* T210 B01: atomic context save of SE2 and PKA1 */
INFO("%s: SE2/PKA1 atomic context save\n", __func__);
if (ret == 0) {
ret = tegra_se_context_save_atomic(&se_dev_2);
}
/* Atomic context save se */
if (ret == 0) {
INFO("%s: SE1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_1);
ret = tegra_se_context_save_atomic(&se_dev_2);
if (ret != 0) {
ERROR("%s: SE2 ctx save failed (%d)\n", __func__, ret);
}
if (ret == 0) {
INFO("%s: SE atomic context save done\n", __func__);
}
} else if (!tegra_se_atomic_save_enabled(&se_dev_2) &&
!tegra_se_atomic_save_enabled(&se_dev_1)) {
/* SW context save se2 and pka1 */
INFO("%s: SE2/PKA1 legacy(SW) context save\n", __func__);
if (ret == 0) {
ret = tegra_se_context_save_sw(&se_dev_2);
}
/* SW context save se */
if (ret == 0) {
INFO("%s: SE1 legacy(SW) context save\n", __func__);
ret = tegra_se_context_save_sw(&se_dev_1);
}
if (ret == 0) {
INFO("%s: SE SW context save done\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_1);
if (ret != 0) {
ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
}
} else {
ERROR("%s: One SE set for atomic CTX save, the other is not\n",
__func__);
/* T210: SW (legacy) context save of SE1 */
INFO("%s: SE1 legacy(SW) context save\n", __func__);
ret = tegra_se_context_save_sw(&se_dev_1);
if (ret != 0) {
ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
}
}
tegra_se_disable_clocks();
@@ -1080,5 +1069,8 @@ static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
void tegra_se_resume(void)
{
tegra_se_warm_boot_resume(&se_dev_1);
tegra_se_warm_boot_resume(&se_dev_2);
if (tegra_chipid_is_t210_b01()) {
tegra_se_warm_boot_resume(&se_dev_2);
}
}

plat/nvidia/tegra/soc/t210/plat_psci_handlers.c (9)

@@ -210,12 +210,9 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) ||
(stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
if (tegra_chipid_is_t210_b01()) {
/* Suspend se/se2 and pka1 */
if (tegra_se_suspend() != 0) {
ret = PSCI_E_INTERN_FAIL;
}
/* Suspend se/se2 and pka1 for T210 B01 and se for T210 */
if (tegra_se_suspend() != 0) {
ret = PSCI_E_INTERN_FAIL;
}
} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) {

plat/nvidia/tegra/soc/t210/plat_setup.c (4)

@@ -174,9 +174,7 @@ void plat_early_platform_setup(void)
}
/* Initialize security engine driver */
if (tegra_chipid_is_t210_b01()) {
tegra_se_init();
}
tegra_se_init();
}
/* Secure IRQs for Tegra186 */

plat/nvidia/tegra/soc/t210/plat_sip_calls.c (39)

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -50,24 +51,32 @@ int plat_sip_handler(uint32_t smc_fid,
if (!ns)
SMC_RET1(handle, SMC_UNK);
switch (smc_fid) {
case TEGRA_SIP_PMC_COMMANDS:
if (smc_fid == TEGRA_SIP_PMC_COMMANDS) {
/* check the address is within PMC range and is 4byte aligned */
if ((x2 >= TEGRA_PMC_SIZE) || (x2 & 0x3))
return -EINVAL;
/* pmc_secure_scratch registers are not accessible */
if (((x2 >= PMC_SECURE_SCRATCH0) && (x2 <= PMC_SECURE_SCRATCH5)) ||
((x2 >= PMC_SECURE_SCRATCH6) && (x2 <= PMC_SECURE_SCRATCH7)) ||
((x2 >= PMC_SECURE_SCRATCH8) && (x2 <= PMC_SECURE_SCRATCH79)) ||
((x2 >= PMC_SECURE_SCRATCH80) && (x2 <= PMC_SECURE_SCRATCH119)))
return -EFAULT;
switch (x2) {
/* Black listed PMC registers */
case PMC_SCRATCH1:
case PMC_SCRATCH31 ... PMC_SCRATCH33:
case PMC_SCRATCH40:
case PMC_SCRATCH42:
case PMC_SCRATCH43 ... PMC_SCRATCH48:
case PMC_SCRATCH50 ... PMC_SCRATCH51:
case PMC_SCRATCH56 ... PMC_SCRATCH57:
/* PMC secure-only registers are not accessible */
if ((x2 == PMC_DPD_ENABLE_0) || (x2 == PMC_FUSE_CONTROL_0) ||
(x2 == PMC_CRYPTO_OP_0))
case PMC_DPD_ENABLE_0:
case PMC_FUSE_CONTROL_0:
case PMC_CRYPTO_OP_0:
case PMC_TSC_MULT_0:
case PMC_STICKY_BIT:
ERROR("%s: error offset=0x%llx\n", __func__, x2);
return -EFAULT;
default:
/* Valid register */
break;
}
/* Perform PMC read/write */
if (x1 == PMC_READ) {
@@ -78,13 +87,9 @@ int plat_sip_handler(uint32_t smc_fid,
} else {
return -EINVAL;
}
break;
default:
} else {
ERROR("%s: unsupported function ID\n", __func__);
return -ENOTSUP;
}
return 0;
}
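
The "case PMC_SCRATCH43 ... PMC_SCRATCH48:" labels use the GNU C case-range extension, which the toolchains TF-A builds with (GCC, Clang) both accept. A portable equivalent for one of the ranges, as a sketch:

#include <stdbool.h>
#include <stdint.h>
#include <pmc.h>

static bool is_blacklisted_range(uint64_t off)
{
	/* Same test as "case PMC_SCRATCH43 ... PMC_SCRATCH48:" */
	return (off >= PMC_SCRATCH43) && (off <= PMC_SCRATCH48);
}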

plat/nvidia/tegra/soc/t210/platform_t210.mk (1)

@@ -46,7 +46,6 @@ A57_DISABLE_NON_TEMPORAL_HINT := 1
ERRATA_A57_826974 := 1
ERRATA_A57_826977 := 1
ERRATA_A57_828024 := 1
ERRATA_A57_829520 := 1
ERRATA_A57_833471 := 1
# Enable workarounds for selected Cortex-A53 erratas.
