Commit 4f6ad66a authored by Achin Gupta, committed by James Morrissey

ARMv8 Trusted Firmware release v0.2

/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Portions copyright (c) 2009-2013, ARM Ltd. All rights reserved.
* ---------------------------------------------------------------
* File: lib/strtoull.c
*/
#include "types.h"
#include "ctype.h"
#include "limits.h"
/*
* Convert a string to an unsigned long long integer.
*
* Assumes that the upper and lower case
* alphabets and digits are each contiguous.
*/
static unsigned long long
bsd_strtoull(const char *nptr, char **endptr, int base)
{
const char *s;
unsigned long long acc;
char c;
unsigned long long cutoff;
int neg, any, cutlim;
/*
* See strtoq for comments as to the logic used.
*/
s = nptr;
do {
c = *s++;
} while (isspace((unsigned char)c));
if (c == '-') {
neg = 1;
c = *s++;
} else {
neg = 0;
if (c == '+')
c = *s++;
}
if ((base == 0 || base == 16) &&
c == '0' && (*s == 'x' || *s == 'X') &&
((s[1] >= '0' && s[1] <= '9') ||
(s[1] >= 'A' && s[1] <= 'F') ||
(s[1] >= 'a' && s[1] <= 'f'))) {
c = s[1];
s += 2;
base = 16;
}
if (base == 0)
base = c == '0' ? 8 : 10;
acc = any = 0;
cutoff = ULLONG_MAX / base;
cutlim = ULLONG_MAX % base;
for ( ; ; c = *s++) {
if (c >= '0' && c <= '9')
c -= '0';
else if (c >= 'A' && c <= 'Z')
c -= 'A' - 10;
else if (c >= 'a' && c <= 'z')
c -= 'a' - 10;
else
break;
if (c >= base)
break;
if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
any = -1;
else {
any = 1;
acc *= base;
acc += c;
}
}
if (any < 0) {
acc = ULLONG_MAX;
} else if (neg)
acc = -acc;
if (endptr != NULL)
*endptr = (char *)(any ? s - 1 : nptr);
return (acc);
}
int strict_strtoull(const char *str, unsigned int base, long long *result)
{
*result = bsd_strtoull(str, NULL, base);
return 0;
}
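/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): strict_strtoull() parses with a caller-supplied base and always
 * reports success. The caller below is hypothetical.
 */
static void example_parse_size(void)
{
	long long size;

	/* "0x200" parsed with base 16 yields 512. */
	strict_strtoull("0x200", 16, &size);
}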
/*-
* Copyright (c) 1986, 1988, 1991, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)subr_prf.c 8.3 (Berkeley) 1/21/94
*/
/*
* Portions copyright (c) 2009-2013, ARM Ltd. All rights reserved.
* ---------------------------------------------------------------
* File: lib/subr_prf.c
*/
/*
#include "types.h"
#include "varargs.h"
#include "ctype.h"
#include "string.h"
*/
#include <stddef.h>
#include <sys/types.h> /* For ssize_t */
#include <stdint.h>
#include <string.h>
#include "ctype.h"
typedef uint64_t uintmax_t;
typedef int64_t intmax_t;
typedef unsigned char u_char;
typedef unsigned int u_int;
typedef int64_t quad_t;
typedef uint64_t u_quad_t;
typedef unsigned long u_long;
typedef unsigned short u_short;
static inline int imax(int a, int b) { return (a > b ? a : b); }
/*
* Note that stdarg.h and the ANSI style va_start macro is used for both
* ANSI and traditional C compilers.
*/
#define TOCONS 0x01
#define TOTTY 0x02
#define TOLOG 0x04
/* Max number conversion buffer length: a u_quad_t in base 2, plus NUL byte. */
#define MAXNBUF (sizeof(intmax_t) * 8 + 1)
struct putchar_arg {
int flags;
int pri;
struct tty *tty;
char *p_bufr;
size_t n_bufr;
char *p_next;
size_t remain;
};
struct snprintf_arg {
char *str;
size_t remain;
};
extern int log_open;
static char *ksprintn(char *nbuf, uintmax_t num, int base, int *len, int upper);
static void snprintf_func(int ch, void *arg);
static int kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap);
int vsnprintf(char *str, size_t size, const char *format, va_list ap);
static char const hex2ascii_data[] = "0123456789abcdefghijklmnopqrstuvwxyz";
#define hex2ascii(hex) (hex2ascii_data[hex])
/*
* Scaled down version of sprintf(3).
*/
int
sprintf(char *buf, const char *cfmt, ...)
{
int retval;
va_list ap;
va_start(ap, cfmt);
retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap);
buf[retval] = '\0';
va_end(ap);
return (retval);
}
/*
* Scaled down version of vsprintf(3).
*/
int
vsprintf(char *buf, const char *cfmt, va_list ap)
{
int retval;
retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap);
buf[retval] = '\0';
return (retval);
}
/*
* Scaled down version of snprintf(3).
*/
int
snprintf(char *str, size_t size, const char *format, ...)
{
int retval;
va_list ap;
va_start(ap, format);
retval = vsnprintf(str, size, format, ap);
va_end(ap);
return(retval);
}
/*
* Scaled down version of vsnprintf(3).
*/
int
vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
struct snprintf_arg info;
int retval;
info.str = str;
info.remain = size;
retval = kvprintf(format, snprintf_func, &info, 10, ap);
if (info.remain >= 1)
*info.str++ = '\0';
return (retval);
}
static void
snprintf_func(int ch, void *arg)
{
struct snprintf_arg *const info = arg;
if (info->remain >= 2) {
*info->str++ = ch;
info->remain--;
}
}
/*
* Kernel version which takes radix argument vsnprintf(3).
*/
int
vsnrprintf(char *str, size_t size, int radix, const char *format, va_list ap)
{
struct snprintf_arg info;
int retval;
info.str = str;
info.remain = size;
retval = kvprintf(format, snprintf_func, &info, radix, ap);
if (info.remain >= 1)
*info.str++ = '\0';
return (retval);
}
/*
* Put a NUL-terminated ASCII number (base <= 36) in a buffer in reverse
* order; return an optional length and a pointer to the last character
* written in the buffer (i.e., the first character of the string).
* The buffer pointed to by `nbuf' must have length >= MAXNBUF.
*/
static char *
ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper)
{
char *p, c;
p = nbuf;
*p = '\0';
do {
c = hex2ascii(num % base);
*++p = upper ? toupper(c) : c;
} while (num /= base);
if (lenp)
*lenp = p - nbuf;
return (p);
}
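/*
 * Worked example (editor's addition, not part of the original file): for
 * num = 255 and base = 16, the buffer is filled in reverse and a caller
 * emits the digits by walking the returned pointer backwards.
 */
static void example_ksprintn(void)
{
	char nbuf[MAXNBUF], out[MAXNBUF];
	char *p;
	int len, i = 0;

	p = ksprintn(nbuf, 255, 16, &len, 0);	/* len == 2 */
	while (*p)
		out[i++] = *p--;		/* out becomes "ff" */
	out[i] = '\0';
}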
/*
* Scaled down version of printf(3).
*
* Two additional formats:
*
* The format %b is supported to decode error registers.
* Its usage is:
*
* printf("reg=%b\n", regval, "<base><arg>*");
*
* where <base> is the output base expressed as a control character, e.g.
* \10 gives octal; \20 gives hex. Each arg is a sequence of characters,
* the first of which gives the bit number to be inspected (origin 1), and
* the next characters (up to a control character, i.e. a character <= 32),
* give the name of the register. Thus:
*
* kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE\n");
*
* would produce output:
*
* reg=3<BITTWO,BITONE>
*
* XXX: %D -- Hexdump, takes pointer and separator string:
* ("%6D", ptr, ":") -> XX:XX:XX:XX:XX:XX
* ("%*D", len, ptr, " " -> XX XX XX XX ...
*/
int
kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap)
{
#define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = cc; retval++; }
char nbuf[MAXNBUF];
char *d;
const char *p, *percent, *q;
u_char *up;
int ch, n;
uintmax_t num;
int base, lflag, qflag, tmp, width, ladjust, sharpflag, neg, sign, dot;
int cflag, hflag, jflag, tflag, zflag;
int dwidth, upper;
char padc;
int stop = 0, retval = 0;
num = 0;
if (!func)
d = (char *) arg;
else
d = NULL;
if (fmt == NULL)
fmt = "(fmt null)\n";
if (radix < 2 || radix > 36)
radix = 10;
for (;;) {
padc = ' ';
width = 0;
while ((ch = (u_char)*fmt++) != '%' || stop) {
if (ch == '\0')
return (retval);
PCHAR(ch);
}
percent = fmt - 1;
qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0; neg = 0;
sign = 0; dot = 0; dwidth = 0; upper = 0;
cflag = 0; hflag = 0; jflag = 0; tflag = 0; zflag = 0;
reswitch: switch (ch = (u_char)*fmt++) {
case '.':
dot = 1;
goto reswitch;
case '#':
sharpflag = 1;
goto reswitch;
case '+':
sign = 1;
goto reswitch;
case '-':
ladjust = 1;
goto reswitch;
case '%':
PCHAR(ch);
break;
case '*':
if (!dot) {
width = va_arg(ap, int);
if (width < 0) {
ladjust = !ladjust;
width = -width;
}
} else {
dwidth = va_arg(ap, int);
}
goto reswitch;
case '0':
if (!dot) {
padc = '0';
goto reswitch;
}
case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
for (n = 0;; ++fmt) {
n = n * 10 + ch - '0';
ch = *fmt;
if (ch < '0' || ch > '9')
break;
}
if (dot)
dwidth = n;
else
width = n;
goto reswitch;
case 'b':
num = (u_int)va_arg(ap, int);
p = va_arg(ap, char *);
for (q = ksprintn(nbuf, num, *p++, NULL, 0); *q;)
PCHAR(*q--);
if (num == 0)
break;
for (tmp = 0; *p;) {
n = *p++;
if (num & (1 << (n - 1))) {
PCHAR(tmp ? ',' : '<');
for (; (n = *p) > ' '; ++p)
PCHAR(n);
tmp = 1;
} else
for (; *p > ' '; ++p)
continue;
}
if (tmp)
PCHAR('>');
break;
case 'c':
PCHAR(va_arg(ap, int));
break;
case 'D':
up = va_arg(ap, u_char *);
p = va_arg(ap, char *);
if (!width)
width = 16;
while(width--) {
PCHAR(hex2ascii(*up >> 4));
PCHAR(hex2ascii(*up & 0x0f));
up++;
if (width)
for (q=p;*q;q++)
PCHAR(*q);
}
break;
case 'd':
case 'i':
base = 10;
sign = 1;
goto handle_sign;
case 'h':
if (hflag) {
hflag = 0;
cflag = 1;
} else
hflag = 1;
goto reswitch;
case 'j':
jflag = 1;
goto reswitch;
case 'l':
if (lflag) {
lflag = 0;
qflag = 1;
} else
lflag = 1;
goto reswitch;
case 'n':
if (jflag)
*(va_arg(ap, intmax_t *)) = retval;
else if (qflag)
*(va_arg(ap, quad_t *)) = retval;
else if (lflag)
*(va_arg(ap, long *)) = retval;
else if (zflag)
*(va_arg(ap, size_t *)) = retval;
else if (hflag)
*(va_arg(ap, short *)) = retval;
else if (cflag)
*(va_arg(ap, char *)) = retval;
else
*(va_arg(ap, int *)) = retval;
break;
case 'o':
base = 8;
goto handle_nosign;
case 'p':
base = 16;
sharpflag = (width == 0);
sign = 0;
num = (uintptr_t)va_arg(ap, void *);
goto number;
case 'q':
qflag = 1;
goto reswitch;
case 'r':
base = radix;
if (sign)
goto handle_sign;
goto handle_nosign;
case 's':
p = va_arg(ap, char *);
if (p == NULL)
p = "(null)";
if (!dot)
n = strlen (p);
else
for (n = 0; n < dwidth && p[n]; n++)
continue;
width -= n;
if (!ladjust && width > 0)
while (width--)
PCHAR(padc);
while (n--)
PCHAR(*p++);
if (ladjust && width > 0)
while (width--)
PCHAR(padc);
break;
case 't':
tflag = 1;
goto reswitch;
case 'u':
base = 10;
goto handle_nosign;
case 'X':
upper = 1;
case 'x':
base = 16;
goto handle_nosign;
case 'y':
base = 16;
sign = 1;
goto handle_sign;
case 'z':
zflag = 1;
goto reswitch;
handle_nosign:
sign = 0;
if (jflag)
num = va_arg(ap, uintmax_t);
else if (qflag)
num = va_arg(ap, u_quad_t);
else if (tflag)
num = va_arg(ap, ptrdiff_t);
else if (lflag)
num = va_arg(ap, u_long);
else if (zflag)
num = va_arg(ap, size_t);
else if (hflag)
num = (u_short)va_arg(ap, int);
else if (cflag)
num = (u_char)va_arg(ap, int);
else
num = va_arg(ap, u_int);
goto number;
handle_sign:
if (jflag)
num = va_arg(ap, intmax_t);
else if (qflag)
num = va_arg(ap, quad_t);
else if (tflag)
num = va_arg(ap, ptrdiff_t);
else if (lflag)
num = va_arg(ap, long);
else if (zflag)
num = va_arg(ap, ssize_t);
else if (hflag)
num = (short)va_arg(ap, int);
else if (cflag)
num = (char)va_arg(ap, int);
else
num = va_arg(ap, int);
number:
if (sign && (intmax_t)num < 0) {
neg = 1;
num = -(intmax_t)num;
}
p = ksprintn(nbuf, num, base, &n, upper);
tmp = 0;
if (sharpflag && num != 0) {
if (base == 8)
tmp++;
else if (base == 16)
tmp += 2;
}
if (neg)
tmp++;
if (!ladjust && padc == '0')
dwidth = width - tmp;
width -= tmp + imax(dwidth, n);
dwidth -= n;
if (!ladjust)
while (width-- > 0)
PCHAR(' ');
if (neg)
PCHAR('-');
if (sharpflag && num != 0) {
if (base == 8) {
PCHAR('0');
} else if (base == 16) {
PCHAR('0');
PCHAR('x');
}
}
while (dwidth-- > 0)
PCHAR('0');
while (*p)
PCHAR(*p--);
if (ladjust)
while (width-- > 0)
PCHAR(' ');
break;
default:
while (percent < fmt)
PCHAR(*percent++);
/*
* Since we ignore a formatting argument it is no
* longer safe to obey the remaining formatting
* arguments as the arguments will no longer match
* the format specs.
*/
stop = 1;
break;
}
}
#undef PCHAR
}
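/*
 * Illustrative sketch (editor's addition, not part of the original file)
 * of the %b extension documented above, routed through the local
 * snprintf() defined in this file.
 */
static void example_format_reg(void)
{
	char msg[32];

	/* "\20" selects hex output; bit 1 is named BITONE, bit 2 BITTWO. */
	snprintf(msg, sizeof(msg), "reg=%b\n", 3, "\20\2BITTWO\1BITONE");
	/* msg now holds "reg=3<BITTWO,BITONE>\n" */
}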
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
.globl semihosting_call
.section .text, "ax"
semihosting_call:; .type semihosting_call, %function
hlt #0xf000
ret
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <semihosting.h>
#ifndef SEMIHOSTING_SUPPORTED
#define SEMIHOSTING_SUPPORTED 1
#endif
extern int semihosting_call(unsigned int operation,
void *system_block_address);
typedef struct {
const char *file_name;
unsigned int mode;
unsigned int name_length;
} smh_file_open_block;
typedef struct {
int handle;
void *buffer;
unsigned int length;
} smh_file_read_write_block;
typedef struct {
int handle;
unsigned int location;
} smh_file_seek_block;
typedef struct {
char *command_line;
unsigned int command_length;
} smh_system_block;
int semihosting_connection_supported(void)
{
return SEMIHOSTING_SUPPORTED;
}
int semihosting_file_open(const char *file_name, unsigned int mode)
{
smh_file_open_block open_block;
open_block.file_name = file_name;
open_block.mode = mode;
open_block.name_length = strlen(file_name);
return semihosting_call(SEMIHOSTING_SYS_OPEN,
(void *) &open_block);
}
int semihosting_file_seek(int file_handle, unsigned int offset)
{
smh_file_seek_block seek_block;
int result;
seek_block.handle = file_handle;
seek_block.location = offset;
result = semihosting_call(SEMIHOSTING_SYS_SEEK,
(void *) &seek_block);
if (result)
result = semihosting_call(SEMIHOSTING_SYS_ERRNO, 0);
return result;
}
int semihosting_file_read(int file_handle, int *length, void *buffer)
{
smh_file_read_write_block read_block;
int result = -EINVAL;
if ((length == NULL) || (buffer == NULL))
return result;
read_block.handle = file_handle;
read_block.buffer = buffer;
read_block.length = *length;
result = semihosting_call(SEMIHOSTING_SYS_READ,
(void *) &read_block);
if (result == *length) {
return -EINVAL;
} else if (result < *length) {
*length -= result;
return 0;
} else
return result;
}
int semihosting_file_write(int file_handle, int *length, void *buffer)
{
smh_file_read_write_block write_block;
if ((length == NULL) || (buffer == NULL))
return -EINVAL;
write_block.handle = file_handle;
write_block.buffer = buffer;
write_block.length = *length;
*length = semihosting_call(SEMIHOSTING_SYS_WRITE,
(void *) &write_block);
return *length;
}
int semihosting_file_close(int file_handle)
{
return semihosting_call(SEMIHOSTING_SYS_CLOSE,
(void *) &file_handle);
}
int semihosting_file_length(int file_handle)
{
return semihosting_call(SEMIHOSTING_SYS_FLEN,
(void *) &file_handle);
}
char semihosting_read_char(void)
{
return semihosting_call(SEMIHOSTING_SYS_READC, NULL);
}
void semihosting_write_char(char character)
{
semihosting_call(SEMIHOSTING_SYS_WRITEC, (void *) &character);
}
void semihosting_write_string(char *string)
{
semihosting_call(SEMIHOSTING_SYS_WRITE0, (void *) string);
}
int semihosting_system(char *command_line)
{
smh_system_block system_block;
system_block.command_line = command_line;
system_block.command_length = strlen(command_line);
return semihosting_call(SEMIHOSTING_SYS_SYSTEM,
(void *) &system_block);
}
int semihosting_get_flen(const char *file_name)
{
int file_handle, length;
assert(semihosting_connection_supported());
file_handle = semihosting_file_open(file_name, FOPEN_MODE_RB);
if (file_handle == -1)
return file_handle;
/* Find the length of the file */
length = semihosting_file_length(file_handle);
return semihosting_file_close(file_handle) ? -1 : length;
}
int semihosting_download_file(const char *file_name,
int buf_size,
void *buf)
{
int ret = -EINVAL, file_handle, length;
/* Null pointer check */
if (!buf)
return ret;
assert(semihosting_connection_supported());
file_handle = semihosting_file_open(file_name, FOPEN_MODE_RB);
if (file_handle == -1)
return ret;
/* Find the actual length of the file */
length = semihosting_file_length(file_handle);
if (length == -1)
goto semihosting_fail;
/* Signal error if we do not have enough space for the file */
if (length > buf_size)
goto semihosting_fail;
/*
* A successful read will return 0 in which case we pass back
* the actual number of bytes read. Else we pass a negative
* value indicating an error.
*/
ret = semihosting_file_read(file_handle, &length, buf);
if (ret)
goto semihosting_fail;
else
ret = length;
semihosting_fail:
semihosting_file_close(file_handle);
return ret;
}
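/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): query the size of a host file and pull it into a caller-provided
 * buffer. The file name is hypothetical.
 */
static int example_load_from_host(void *dest, int dest_size)
{
	int length;

	length = semihosting_get_flen("payload.bin");
	if (length < 0 || length > dest_size)
		return -EINVAL;

	/* Returns the number of bytes read, or a negative error code. */
	return semihosting_download_file("payload.bin", length, dest);
}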
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <string.h>
#include <bakery_lock.h>
#define assert_bakery_entry_valid(entry, bakery) do { \
assert(bakery); \
assert(entry < BAKERY_LOCK_MAX_CPUS); \
} while(0)
void bakery_lock_init(bakery_lock * bakery)
{
assert(bakery);
memset(bakery, 0, sizeof(*bakery));
bakery->owner = NO_OWNER;
}
void bakery_lock_get(unsigned long mpidr, bakery_lock * bakery)
{
unsigned int i, max = 0, my_full_number, his_full_number, entry;
entry = platform_get_core_pos(mpidr);
assert_bakery_entry_valid(entry, bakery);
// Catch recursive attempts to take the lock under the same entry:
assert(bakery->owner != entry);
// Get a ticket
bakery->entering[entry] = 1;
for (i = 0; i < BAKERY_LOCK_MAX_CPUS; ++i) {
if (bakery->number[i] > max) {
max = bakery->number[i];
}
}
++max;
bakery->number[entry] = max;
bakery->entering[entry] = 0;
// Wait for our turn
my_full_number = (max << 8) + entry;
for (i = 0; i < BAKERY_LOCK_MAX_CPUS; ++i) {
while (bakery->entering[i]) ; /* Wait */
do {
his_full_number = bakery->number[i];
if (his_full_number) {
his_full_number = (his_full_number << 8) + i;
}
}
while (his_full_number && (his_full_number < my_full_number));
}
bakery->owner = entry;
}
void bakery_lock_release(unsigned long mpidr, bakery_lock * bakery)
{
unsigned int entry = platform_get_core_pos(mpidr);
assert_bakery_entry_valid(entry, bakery);
assert(bakery_lock_held(entry, bakery));
bakery->owner = NO_OWNER;
bakery->number[entry] = 0;
}
int bakery_lock_held(unsigned long mpidr, const bakery_lock * bakery)
{
unsigned int entry = platform_get_core_pos(mpidr);
assert_bakery_entry_valid(entry, bakery);
return bakery->owner == entry;
}
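/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): one statically allocated lock shared by all cpus. read_mpidr()
 * is assumed to be the usual MPIDR helper in this tree, and
 * bakery_lock_init() must run once before any cpu contends for the lock.
 */
static bakery_lock example_lock;

static void example_critical_section(void)
{
	unsigned long mpidr = read_mpidr();

	bakery_lock_get(mpidr, &example_lock);
	/* ... touch data shared between cpus here ... */
	bakery_lock_release(mpidr, &example_lock);
}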
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
.globl spin_lock
.globl spin_unlock
.section .text, "ax";
spin_lock:; .type spin_lock, %function
mov w2, #1
sevl
l1: wfe
l2: ldaxr w1, [x0]
cbnz w1, l1
stxr w1, w2, [x0]
cbnz w1, l2
ret
spin_unlock:; .type spin_unlock, %function
stlr wzr, [x0]
ret
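/*
 * Editor's note (not part of the original file): the routines above are
 * intended to be called from C with the 32-bit lock word's address in x0;
 * prototypes of roughly the following (assumed) shape fit the code:
 *
 *	extern void spin_lock(unsigned int *lock);
 *	extern void spin_unlock(unsigned int *lock);
 */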
Copyright (c) 2013, ARM Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of ARM nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <platform.h>
.globl pcpu_dv_mem_stack
.weak platform_get_core_pos
.weak platform_set_stack
.weak platform_is_primary_cpu
.weak platform_set_coherent_stack
.weak platform_check_mpidr
.weak plat_report_exception
/* -----------------------------------------------------
* 512 bytes of coherent stack for each cpu
* -----------------------------------------------------
*/
#define PCPU_DV_MEM_STACK_SIZE 0x200
.section .text, "ax"; .align 3
/* -----------------------------------------------------
* unsigned long long platform_set_coherent_stack
* (unsigned mpidr);
* For a given mpidr, this function returns the stack
* pointer allocated in device memory. This stack can
* be used by C code which enables/disables the SCTLR.M
* and SCTLR.C bits, e.g. while powering down a cpu
* -----------------------------------------------------
*/
platform_set_coherent_stack:; .type platform_set_coherent_stack, %function
mov x5, x30 // lr
bl platform_get_core_pos
add x0, x0, #1
mov x1, #PCPU_DV_MEM_STACK_SIZE
mul x0, x0, x1
ldr x1, =pcpu_dv_mem_stack
add sp, x1, x0
ret x5
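/* -----------------------------------------------------
 * Worked example (editor's addition): with the 0x200
 * stack size above, a cpu at core position 2 ends up
 * with sp = pcpu_dv_mem_stack + (2 + 1) * 0x200, i.e.
 * just past the top of its own 512-byte slot, so its
 * stack grows downwards into that slot only.
 * -----------------------------------------------------
 */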
/* -----------------------------------------------------
* int platform_get_core_pos(int mpidr);
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
platform_get_core_pos:; .type platform_get_core_pos, %function
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
ret
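/* -----------------------------------------------------
 * Worked example (editor's addition, assuming the usual
 * MPIDR layout with the cluster id at bits [15:8]): for
 * mpidr = 0x101 the cluster field 0x100 shifted right
 * by 6 gives 4, and adding the cpu id 1 yields core
 * position 5, i.e. (1 * 4) + 1.
 * -----------------------------------------------------
 */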
/* -----------------------------------------------------
* unsigned int platform_is_primary_cpu (unsigned int mpid);
*
* Given the mpidr say whether this cpu is the primary
* cpu (applicable only after a cold boot)
* -----------------------------------------------------
*/
platform_is_primary_cpu:; .type platform_is_primary_cpu, %function
and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
cmp x0, #PRIMARY_CPU
cset x0, eq
ret
/* -----------------------------------------------------
* void platform_set_stack (int mpidr)
* -----------------------------------------------------
*/
platform_set_stack:; .type platform_set_stack, %function
mov x9, x30 // lr
bl platform_get_core_pos
add x0, x0, #1
mov x1, #PLATFORM_STACK_SIZE
mul x0, x0, x1
ldr x1, =platform_normal_stacks
add sp, x1, x0
ret x9
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
*/
platform_check_mpidr:; .type platform_check_mpidr, %function
mov x0, xzr
ret
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
*/
plat_report_exception:
ret
/* -----------------------------------------------------
* Per-cpu stacks in device memory.
* Used for C code just before power down or right after
* power up when the MMU or caches need to be turned on
* or off. Each cpu gets a stack of 512 bytes.
* -----------------------------------------------------
*/
.section tzfw_coherent_mem, "aw", %nobits; .align 6
pcpu_dv_mem_stack:
/* Zero fill */
.space (PLATFORM_CORE_COUNT * PCPU_DV_MEM_STACK_SIZE), 0
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <platform.h>
#include <fvp_pwrc.h>
#include <gic.h>
.globl platform_get_entrypoint
.globl platform_cold_boot_init
.globl plat_secondary_cold_boot_setup
.section platform_code, "ax"; .align 3
.macro platform_choose_gicmmap param1, param2, x_tmp, w_tmp, res
ldr \x_tmp, =VE_SYSREGS_BASE + V2M_SYS_ID
ldr \w_tmp, [\x_tmp]
ubfx \w_tmp, \w_tmp, #SYS_ID_BLD_SHIFT, #SYS_ID_BLD_LENGTH
cmp \w_tmp, #BLD_GIC_VE_MMAP
csel \res, \param1, \param2, eq
.endm
/* -----------------------------------------------------
* void plat_secondary_cold_boot_setup (void);
*
* This function performs any platform specific actions
* needed for a secondary cpu after a cold reset e.g
* mark the cpu's presence, mechanism to place it in a
* holding pen etc.
* TODO: Should we read the PSYS register to make sure
* that the request has gone through?
* -----------------------------------------------------
*/
plat_secondary_cold_boot_setup:; .type plat_secondary_cold_boot_setup, %function
bl read_mpidr
mov x19, x0
bl platform_get_core_pos
mov x20, x0
/* ---------------------------------------------
* Mark this cpu as being present. This is a
* SO write. This array will be read using
* normal memory so invalidate any prefetched
* stale copies first.
* ---------------------------------------------
*/
ldr x1, =TZDRAM_BASE
mov x0, #AFFMAP_OFF
add x1, x0, x1
mov x2, #PLATFORM_CACHE_LINE_SIZE
mul x2, x2, x20
add x0, x1, x2
bl dcivac
str x19, [x1, x2]
/* ---------------------------------------------
* Power down this cpu.
* TODO: Do we need to worry about powering the
* cluster down as well here. That will need
* locks which we won't have unless an elf-
* loader zeroes out the zi section.
* ---------------------------------------------
*/
ldr x1, =PWRC_BASE
str w19, [x1, #PPOFFR_OFF]
/* ---------------------------------------------
* Deactivate the gic cpu interface as well
* ---------------------------------------------
*/
ldr x0, =VE_GICC_BASE
ldr x1, =BASE_GICC_BASE
platform_choose_gicmmap x0, x1, x2, w2, x1
mov w0, #(IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1)
orr w0, w0, #(IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0)
str w0, [x1, #GICC_CTLR]
/* ---------------------------------------------
* There is no sane reason to come out of this
* wfi so panic if we do. This cpu will be pow-
* ered on and reset by the cpu_on pm api
* ---------------------------------------------
*/
dsb sy
wfi
cb_panic:
b cb_panic
/* -----------------------------------------------------
* void platform_get_entrypoint (unsigned int mpid);
*
* Main job of this routine is to distinguish between
* a cold and warm boot.
* On a cold boot the secondaries first wait for the
* platform to be initialized after which they are
* hotplugged in. The primary proceeds to perform the
* platform initialization.
* On a warm boot, each cpu jumps to the address in its
* mailbox.
*
* TODO: Not a good idea to save lr in a temp reg
* TODO: PSYSR is a common register and should be
* accessed using locks. Since it's not possible
* to use locks immediately after a cold reset
* we are relying on the fact that after a cold
* reset all cpus will read the same WK field
* -----------------------------------------------------
*/
platform_get_entrypoint:; .type platform_get_entrypoint, %function
mov x9, x30 // lr
mov x2, x0
ldr x1, =PWRC_BASE
str w2, [x1, #PSYSR_OFF]
ldr w2, [x1, #PSYSR_OFF]
ubfx w2, w2, #PSYSR_WK_SHIFT, #PSYSR_WK_MASK
cbnz w2, warm_reset
mov x0, x2
b exit
warm_reset:
/* ---------------------------------------------
* A per-cpu mailbox is maintained in the tru-
* sted DRAM. It's flushed out of the caches
* after every update using normal memory so
* it's safe to read it here with SO attributes
* ---------------------------------------------
*/
ldr x10, =TZDRAM_BASE + MBOX_OFF
bl platform_get_core_pos
lsl x0, x0, #CACHE_WRITEBACK_SHIFT
ldr x0, [x10, x0]
cbz x0, _panic
exit:
ret x9
_panic: b _panic
/* -----------------------------------------------------
* void platform_mem_init (void);
*
* Zero out the mailbox registers in the TZDRAM. The
* mmu is turned off right now and only the primary can
* ever execute this code. Secondaries will read the
* mailboxes using SO accesses. In short, BL31 will
* update the mailboxes after mapping the tzdram as
* normal memory. It will flush its copy after update.
* BL1 will always read the mailboxes with the MMU off
* -----------------------------------------------------
*/
platform_mem_init:; .type platform_mem_init, %function
ldr x0, =TZDRAM_BASE + MBOX_OFF
stp xzr, xzr, [x0, #0]
stp xzr, xzr, [x0, #0x10]
stp xzr, xzr, [x0, #0x20]
stp xzr, xzr, [x0, #0x30]
ret
/* -----------------------------------------------------
* void platform_cold_boot_init (bl1_main function);
*
* Routine called only by the primary cpu after a cold
* boot to perform early platform initialization
* -----------------------------------------------------
*/
platform_cold_boot_init:; .type platform_cold_boot_init, %function
mov x20, x0
bl platform_mem_init
bl read_mpidr
mov x19, x0
/* ---------------------------------------------
* Give ourselves a small coherent stack to
* ease the pain of initializing the MMU and
* CCI in assembler
* ---------------------------------------------
*/
bl platform_set_coherent_stack
/* ---------------------------------------------
* Mark this cpu as being present. This is a
* SO write. Invalidate any stale copies out of
* paranoia as there is no one else around.
* ---------------------------------------------
*/
mov x0, x19
bl platform_get_core_pos
mov x21, x0
ldr x1, =TZDRAM_BASE
mov x0, #AFFMAP_OFF
add x1, x0, x1
mov x2, #PLATFORM_CACHE_LINE_SIZE
mul x2, x2, x21
add x0, x1, x2
bl dcivac
str x19, [x1, x2]
/* ---------------------------------------------
* Enable CCI-400 for this cluster. No need
* for locks as no other cpu is active at the
* moment
* ---------------------------------------------
*/
mov x0, x19
bl cci_enable_coherency
/* ---------------------------------------------
* Architectural init. can be generic e.g.
* enabling stack alignment and platform spec-
* ific e.g. MMU & page table setup as per the
* platform memory map. Perform the latter here
* and the former in bl1_main.
* ---------------------------------------------
*/
bl bl1_early_platform_setup
bl bl1_plat_arch_setup
/* ---------------------------------------------
* Give ourselves a stack allocated in Normal
* -IS-WBWA memory
* ---------------------------------------------
*/
mov x0, x19
bl platform_set_stack
/* ---------------------------------------------
* Jump to the main function. Returning from it
* is a terminal error.
* ---------------------------------------------
*/
blr x20
cb_init_panic:
b cb_init_panic
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl_common.h>
/* Included only for error codes */
#include <psci.h>
unsigned char platform_normal_stacks[PLATFORM_STACK_SIZE][PLATFORM_CORE_COUNT]
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
section("tzfw_normal_stacks")));
/*******************************************************************************
* This array holds the characteristics of the differences between the three
* FVP platforms (Base, A53_A57 & Foundation). It will be populated during cold
* boot at each boot stage by the primary before enabling the MMU (to allow cci
* configuration) & used thereafter. Each BL will have its own copy to allow
* independent operation.
******************************************************************************/
static unsigned long platform_config[CONFIG_LIMIT];
/*******************************************************************************
* TODO: Check page table alignment to avoid space wastage
******************************************************************************/
/*******************************************************************************
* Level 1 translation tables need 4 entries for the 4GB address space accessib-
* le by the secure firmware. Input address space will be restricted using the
* T0SZ settings in the TCR.
******************************************************************************/
static unsigned long l1_xlation_table[ADDR_SPACE_SIZE >> 30]
__attribute__ ((aligned((ADDR_SPACE_SIZE >> 30) << 3)));
/*******************************************************************************
* Level 2 translation tables describe the first & second gb of the address
* space needed to address secure peripherals e.g. trusted ROM and RAM.
******************************************************************************/
static unsigned long l2_xlation_table[NUM_L2_PAGETABLES][NUM_2MB_IN_GB]
__attribute__ ((aligned(NUM_2MB_IN_GB << 3)));
/*******************************************************************************
* Level 3 translation tables (2 sets) describe the trusted & non-trusted RAM
* regions at a granularity of 4K.
******************************************************************************/
static unsigned long l3_xlation_table[NUM_L3_PAGETABLES][NUM_4K_IN_2MB]
__attribute__ ((aligned(NUM_4K_IN_2MB << 3)));
/*******************************************************************************
* Helper to create a level 1/2 table descriptor which points to a level 2/3
* table.
******************************************************************************/
static unsigned long create_table_desc(unsigned long *next_table_ptr)
{
unsigned long desc = (unsigned long) next_table_ptr;
/* Clear the last 12 bits */
desc >>= FOUR_KB_SHIFT;
desc <<= FOUR_KB_SHIFT;
desc |= TABLE_DESC;
return desc;
}
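/*
 * Worked example (editor's addition, for a hypothetical next-level table
 * at 0x04001234): the two shifts by FOUR_KB_SHIFT clear the low 12 bits,
 * so the resulting descriptor is 0x04001000 | TABLE_DESC, i.e. the
 * 4KB-aligned table address with the table-descriptor type bits set.
 */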
/*******************************************************************************
* Helper to create a level 1/2/3 block descriptor which maps the va to addr
******************************************************************************/
static unsigned long create_block_desc(unsigned long desc,
unsigned long addr,
unsigned int level)
{
switch (level) {
case LEVEL1:
desc |= (addr << FIRST_LEVEL_DESC_N) | BLOCK_DESC;
break;
case LEVEL2:
desc |= (addr << SECOND_LEVEL_DESC_N) | BLOCK_DESC;
break;
case LEVEL3:
desc |= (addr << THIRD_LEVEL_DESC_N) | TABLE_DESC;
break;
default:
assert(0);
}
return desc;
}
/*******************************************************************************
* Helper to create a level 1/2/3 block descriptor which maps the va to output_
* addr with Device nGnRE attributes.
******************************************************************************/
static unsigned long create_device_block(unsigned long output_addr,
unsigned int level,
unsigned int ns)
{
unsigned long upper_attrs, lower_attrs, desc;
lower_attrs = LOWER_ATTRS(ACCESS_FLAG | OSH | AP_RW);
lower_attrs |= LOWER_ATTRS(ns | ATTR_DEVICE_INDEX);
upper_attrs = UPPER_ATTRS(XN);
desc = upper_attrs | lower_attrs;
return create_block_desc(desc, output_addr, level);
}
/*******************************************************************************
* Helper to create a level 1/2/3 block descriptor which maps the va to output_
* addr with inner-shareable normal wbwa read-only memory attributes.
******************************************************************************/
static unsigned long create_romem_block(unsigned long output_addr,
unsigned int level,
unsigned int ns)
{
unsigned long upper_attrs, lower_attrs, desc;
lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RO);
lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
upper_attrs = UPPER_ATTRS(0ull);
desc = upper_attrs | lower_attrs;
return create_block_desc(desc, output_addr, level);
}
/*******************************************************************************
* Helper to create a level 1/2/3 block descriptor which maps the va to output_
* addr with inner-shareable normal wbwa read-write memory attributes.
******************************************************************************/
static unsigned long create_rwmem_block(unsigned long output_addr,
unsigned int level,
unsigned int ns)
{
unsigned long upper_attrs, lower_attrs, desc;
lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RW);
lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
upper_attrs = UPPER_ATTRS(XN);
desc = upper_attrs | lower_attrs;
return create_block_desc(desc, output_addr, level);
}
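/*
 * Worked example (editor's addition, assuming SECOND_LEVEL_DESC_N is the
 * 2MB shift): create_device_block(10, LEVEL2, 0) produces a level 2 block
 * descriptor whose output address is 10 << 21 (20MB), marked as Device
 * memory, execute-never, outer-shareable, read-write and secure (ns == 0).
 */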
/*******************************************************************************
* Create page tables as per the platform memory map. Certain aspects of page
* table creation have been abstracted in the above routines. This can be impr-
* oved further.
* TODO: Move the page table setup helpers into the arch or lib directory
*******************************************************************************/
static unsigned long fill_xlation_tables(meminfo *tzram_layout,
unsigned long ro_start,
unsigned long ro_limit,
unsigned long coh_start,
unsigned long coh_limit)
{
unsigned long l2_desc, l3_desc;
unsigned long *xt_addr = 0, *pt_addr, off = 0;
unsigned long trom_start_index, trom_end_index;
unsigned long tzram_start_index, tzram_end_index;
unsigned long flash0_start_index, flash0_end_index;
unsigned long flash1_start_index, flash1_end_index;
unsigned long vram_start_index, vram_end_index;
unsigned long nsram_start_index, nsram_end_index;
unsigned long tdram_start_index, tdram_end_index;
unsigned long dram_start_index, dram_end_index;
unsigned long dev0_start_index, dev0_end_index;
unsigned long dev1_start_index, dev1_end_index;
unsigned int idx;
/*****************************************************************
* LEVEL1 PAGETABLE SETUP
*
* Find the start and end indices of the memory peripherals in the
* first level pagetables. These are the main areas we care about.
* Also bump the end index by one if it's equal to the start to
* allow for regions which lie completely in a GB.
*****************************************************************/
trom_start_index = ONE_GB_INDEX(TZROM_BASE);
dev0_start_index = ONE_GB_INDEX(TZRNG_BASE);
dram_start_index = ONE_GB_INDEX(DRAM_BASE);
dram_end_index = ONE_GB_INDEX(DRAM_BASE + DRAM_SIZE);
if (dram_end_index == dram_start_index)
dram_end_index++;
/*
* Fill up the level1 translation table first
*/
for (idx = 0; idx < (ADDR_SPACE_SIZE >> 30); idx++) {
/*
* Fill up the entry for the TZROM. This will cover
* everything in the first GB.
*/
if (idx == trom_start_index) {
xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][0];
l1_xlation_table[idx] = create_table_desc(xt_addr);
continue;
}
/*
* Mark the second gb as device
*/
if (idx == dev0_start_index) {
xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][0];
l1_xlation_table[idx] = create_table_desc(xt_addr);
continue;
}
/*
* Fill up the block entry for the DRAM with Normal
* inner-WBWA outer-WBWA non-transient attributes.
* This will cover 2-4GB. Note that the acesses are
* marked as non-secure.
*/
if ((idx >= dram_start_index) && (idx < dram_end_index)) {
l1_xlation_table[idx] = create_rwmem_block(idx, LEVEL1,
NS);
continue;
}
assert(0);
}
/*****************************************************************
* LEVEL2 PAGETABLE SETUP
*
* Find the start and end indices of the memory & peripherals in the
* second level pagetables.
******************************************************************/
/* Initializations for the 1st GB */
trom_start_index = TWO_MB_INDEX(TZROM_BASE);
trom_end_index = TWO_MB_INDEX(TZROM_BASE + TZROM_SIZE);
if (trom_end_index == trom_start_index)
trom_end_index++;
tdram_start_index = TWO_MB_INDEX(TZDRAM_BASE);
tdram_end_index = TWO_MB_INDEX(TZDRAM_BASE + TZDRAM_SIZE);
if (tdram_end_index == tdram_start_index)
tdram_end_index++;
flash0_start_index = TWO_MB_INDEX(FLASH0_BASE);
flash0_end_index = TWO_MB_INDEX(FLASH0_BASE + TZROM_SIZE);
if (flash0_end_index == flash0_start_index)
flash0_end_index++;
flash1_start_index = TWO_MB_INDEX(FLASH1_BASE);
flash1_end_index = TWO_MB_INDEX(FLASH1_BASE + FLASH1_SIZE);
if (flash1_end_index == flash1_start_index)
flash1_end_index++;
vram_start_index = TWO_MB_INDEX(VRAM_BASE);
vram_end_index = TWO_MB_INDEX(VRAM_BASE + VRAM_SIZE);
if (vram_end_index == vram_start_index)
vram_end_index++;
dev0_start_index = TWO_MB_INDEX(DEVICE0_BASE);
dev0_end_index = TWO_MB_INDEX(DEVICE0_BASE + DEVICE0_SIZE);
if (dev0_end_index == dev0_start_index)
dev0_end_index++;
dev1_start_index = TWO_MB_INDEX(DEVICE1_BASE);
dev1_end_index = TWO_MB_INDEX(DEVICE1_BASE + DEVICE1_SIZE);
if (dev1_end_index == dev1_start_index)
dev1_end_index++;
/* Since the size is < 2M this is a single index */
tzram_start_index = TWO_MB_INDEX(tzram_layout->total_base);
nsram_start_index = TWO_MB_INDEX(NSRAM_BASE);
/*
* Fill up the level2 translation table for the first GB next
*/
for (idx = 0; idx < NUM_2MB_IN_GB; idx++) {
l2_desc = INVALID_DESC;
xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][idx];
/* Block entries for 64M of trusted Boot ROM */
if ((idx >= trom_start_index) && (idx < trom_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Single L3 page table entry for 256K of TZRAM */
if (idx == tzram_start_index) {
pt_addr = &l3_xlation_table[TZRAM_PAGETABLE][0];
l2_desc = create_table_desc(pt_addr);
}
/* Block entries for 32M of trusted DRAM */
if ((idx >= tdram_start_index) && (idx <= tdram_end_index))
l2_desc = create_rwmem_block(idx, LEVEL2, 0);
/* Block entries for 64M of aliased trusted Boot ROM */
if ((idx >= flash0_start_index) && (idx < flash0_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Block entries for 64M of flash1 */
if ((idx >= flash1_start_index) && (idx < flash1_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Block entries for 32M of VRAM */
if ((idx >= vram_start_index) && (idx < vram_end_index))
l2_desc = create_rwmem_block(idx, LEVEL2, 0);
/* Block entries for all the devices in the first gb */
if ((idx >= dev0_start_index) && (idx < dev0_end_index))
l2_desc = create_device_block(idx, LEVEL2, 0);
/* Block entries for all the devices in the first gb */
if ((idx >= dev1_start_index) && (idx < dev1_end_index))
l2_desc = create_device_block(idx, LEVEL2, 0);
/* Single L3 page table entry for 64K of NSRAM */
if (idx == nsram_start_index) {
pt_addr = &l3_xlation_table[NSRAM_PAGETABLE][0];
l2_desc = create_table_desc(pt_addr);
}
*xt_addr = l2_desc;
}
/*
* Initializations for the 2nd GB. Mark everything as device
* for the time being as the memory map is not final. Each
* index will need to be offset'ed to allow absolute values
*/
off = NUM_2MB_IN_GB;
for (idx = off; idx < (NUM_2MB_IN_GB + off); idx++) {
l2_desc = create_device_block(idx, LEVEL2, 0);
xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][idx - off];
*xt_addr = l2_desc;
}
/*****************************************************************
* LEVEL3 PAGETABLE SETUP
* The following setup assumes knowledge of the scatter file. This
* should be reasonable as this is platform specific code.
*****************************************************************/
/* Fill up the level3 pagetable for the trusted SRAM. */
tzram_start_index = FOUR_KB_INDEX(tzram_layout->total_base);
tzram_end_index = FOUR_KB_INDEX(tzram_layout->total_base +
tzram_layout->total_size);
if (tzram_end_index == tzram_start_index)
tzram_end_index++;
/*
* Reusing trom* to mark RO memory. BLX_STACKS follows BLX_RO in the
* scatter file. Using BLX_RO$$Limit does not work as it might not
* cross the page boundary thus leading to truncation of valid RO
* memory
*/
trom_start_index = FOUR_KB_INDEX(ro_start);
trom_end_index = FOUR_KB_INDEX(ro_limit);
if (trom_end_index == trom_start_index)
trom_end_index++;
/*
* Reusing dev* to mark coherent device memory. $$Limit works here
* 'cause the coherent memory section is known to be 4k in size
*/
dev0_start_index = FOUR_KB_INDEX(coh_start);
dev0_end_index = FOUR_KB_INDEX(coh_limit);
if (dev0_end_index == dev0_start_index)
dev0_end_index++;
/* Each index will need to be offset'ed to allow absolute values */
off = FOUR_KB_INDEX(TZRAM_BASE);
for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
l3_desc = INVALID_DESC;
xt_addr = &l3_xlation_table[TZRAM_PAGETABLE][idx - off];
if (idx >= tzram_start_index && idx < tzram_end_index)
l3_desc = create_rwmem_block(idx, LEVEL3, 0);
if (idx >= trom_start_index && idx < trom_end_index)
l3_desc = create_romem_block(idx, LEVEL3, 0);
if (idx >= dev0_start_index && idx < dev0_end_index)
l3_desc = create_device_block(idx, LEVEL3, 0);
*xt_addr = l3_desc;
}
/* Fill up the level3 pagetable for the non-trusted SRAM. */
nsram_start_index = FOUR_KB_INDEX(NSRAM_BASE);
nsram_end_index = FOUR_KB_INDEX(NSRAM_BASE + NSRAM_SIZE);
if (nsram_end_index == nsram_start_index)
nsram_end_index++;
/* Each index will need to be offset'ed to allow absolute values */
off = FOUR_KB_INDEX(NSRAM_BASE);
for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
l3_desc = INVALID_DESC;
xt_addr = &l3_xlation_table[NSRAM_PAGETABLE][idx - off];
if (idx >= nsram_start_index && idx < nsram_end_index)
l3_desc = create_rwmem_block(idx, LEVEL3, NS);
*xt_addr = l3_desc;
}
return (unsigned long) l1_xlation_table;
}
/*******************************************************************************
* Enable the MMU assuming that the pagetables have already been created
*******************************************************************************/
void enable_mmu()
{
unsigned long mair, tcr, ttbr, sctlr;
unsigned long current_el = read_current_el();
/* Set the attributes in the right indices of the MAIR */
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
ATTR_IWBWA_OWBWA_NTR_INDEX);
write_mair(mair);
/*
* Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32
*/
tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
if (GET_EL(current_el) == MODE_EL3) {
tcr |= TCR_EL3_RES1;
/* Invalidate all TLBs */
tlbialle3();
} else {
/* Invalidate EL1 TLBs */
tlbivmalle1();
}
write_tcr(tcr);
/* Set TTBR bits as well */
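/* The level 1 table must be aligned to its own size to be usable as a translation table base */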
assert(((unsigned long)l1_xlation_table & (sizeof(l1_xlation_table) - 1)) == 0);
ttbr = (unsigned long) l1_xlation_table;
write_ttbr0(ttbr);
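/* Enable the MMU, I & D caches, alignment checking and write-implies-XN */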
sctlr = read_sctlr();
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
write_sctlr(sctlr);
return;
}
void disable_mmu(void)
{
/* Zero out the MMU related registers */
write_mair(0);
write_tcr(0);
write_ttbr0(0);
write_sctlr(0);
/* Invalidate TLBs of the CurrentEL */
tlbiall();
/* Flush the caches */
dcsw_op_all(DCCISW);
return;
}
/*******************************************************************************
* Setup the pagetables as per the platform memory map & initialize the mmu
*******************************************************************************/
void configure_mmu(meminfo *mem_layout,
unsigned long ro_start,
unsigned long ro_limit,
unsigned long coh_start,
unsigned long coh_limit)
{
fill_xlation_tables(mem_layout,
ro_start,
ro_limit,
coh_start,
coh_limit);
enable_mmu();
return;
}
/* Simple routine which returns a configuration variable value */
unsigned long platform_get_cfgvar(unsigned int var_id)
{
assert(var_id < CONFIG_LIMIT);
return platform_config[var_id];
}
/*******************************************************************************
* A single boot loader stack is expected to work on both the Foundation FVP
* models and the two flavours of the Base FVP models (AEMv8 & Cortex). The
* SYS_ID register provides a mechanism for detecting the differences between
* these platforms. This information is stored in a per-BL array to allow the
* code to take the correct path.
******************************************************************************/
int platform_config_setup(void)
{
unsigned int rev, hbi, bld, arch, sys_id, midr_pn;
sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID);
rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK;
hbi = (sys_id >> SYS_ID_HBI_SHIFT) & SYS_ID_HBI_MASK;
bld = (sys_id >> SYS_ID_BLD_SHIFT) & SYS_ID_BLD_MASK;
arch = (sys_id >> SYS_ID_ARCH_SHIFT) & SYS_ID_ARCH_MASK;
assert(rev == REV_FVP);
assert(arch == ARCH_MODEL);
/*
* The build field in the SYS_ID tells which variant of the GIC
* memory is implemented by the model.
*/
switch (bld) {
case BLD_GIC_VE_MMAP:
platform_config[CONFIG_GICD_ADDR] = VE_GICD_BASE;
platform_config[CONFIG_GICC_ADDR] = VE_GICC_BASE;
platform_config[CONFIG_GICH_ADDR] = VE_GICH_BASE;
platform_config[CONFIG_GICV_ADDR] = VE_GICV_BASE;
break;
case BLD_GIC_A53A57_MMAP:
platform_config[CONFIG_GICD_ADDR] = BASE_GICD_BASE;
platform_config[CONFIG_GICC_ADDR] = BASE_GICC_BASE;
platform_config[CONFIG_GICH_ADDR] = BASE_GICH_BASE;
platform_config[CONFIG_GICV_ADDR] = BASE_GICV_BASE;
break;
default:
assert(0);
}
/*
* The hbi field in the SYS_ID is 0x020 for the Base FVP & 0x010
* for the Foundation FVP.
*/
switch (hbi) {
case HBI_FOUNDATION:
platform_config[CONFIG_MAX_AFF0] = 4;
platform_config[CONFIG_MAX_AFF1] = 1;
platform_config[CONFIG_CPU_SETUP] = 0;
platform_config[CONFIG_BASE_MMAP] = 0;
break;
case HBI_FVP_BASE:
midr_pn = (read_midr() >> MIDR_PN_SHIFT) & MIDR_PN_MASK;
if ((midr_pn == MIDR_PN_A57) || (midr_pn == MIDR_PN_A53))
platform_config[CONFIG_CPU_SETUP] = 1;
else
platform_config[CONFIG_CPU_SETUP] = 0;
platform_config[CONFIG_MAX_AFF0] = 4;
platform_config[CONFIG_MAX_AFF1] = 2;
platform_config[CONFIG_BASE_MMAP] = 1;
break;
default:
assert(0);
}
return 0;
}
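/* Return the entrypoint of the first normal world image i.e. the base of NOR FLASH0 (NS_IMAGE_OFFSET) */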
unsigned long plat_get_ns_image_entrypoint(void)
{
return NS_IMAGE_OFFSET;
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <platform.h>
.globl plat_report_exception
.section platform_code, "ax"
/* ---------------------------------------------
* void plat_report_exception(unsigned int type)
* Function to report an unhandled exception
* with platform-specific means.
* On FVP platform, it updates the LEDs
* to indicate where we are
* ---------------------------------------------
*/
plat_report_exception:
mrs x1, CurrentEl
lsr x1, x1, #MODE_EL_SHIFT
lsl x1, x1, #SYS_LED_EL_SHIFT
lsl x0, x0, #SYS_LED_EC_SHIFT
mov x2, #(SECURE << SYS_LED_SS_SHIFT)
orr x0, x0, x2
orr x0, x0, x1
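/* Write the composed value to the V2M SYS_LED register */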
mov x1, #VE_SYSREGS_BASE
add x1, x1, #V2M_SYS_LED
str x0, [x1]
ret
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl1.h>
#include <console.h>
/*******************************************************************************
* Declarations of linker defined symbols which will help us find the layout
* of trusted SRAM
******************************************************************************/
#if defined (__GNUC__)
extern unsigned long __FIRMWARE_ROM_START__;
extern unsigned long __FIRMWARE_ROM_SIZE__;
extern unsigned long __FIRMWARE_DATA_START__;
extern unsigned long __FIRMWARE_DATA_SIZE__;
extern unsigned long __FIRMWARE_BSS_START__;
extern unsigned long __FIRMWARE_BSS_SIZE__;
extern unsigned long __DATA_RAM_START__;
extern unsigned long __DATA_RAM_SIZE__;
extern unsigned long __BSS_RAM_START__;
extern unsigned long __BSS_RAM_SIZE__;
extern unsigned long __FIRMWARE_RAM_STACKS_START__;
extern unsigned long __FIRMWARE_RAM_STACKS_SIZE__;
extern unsigned long __FIRMWARE_RAM_PAGETABLES_START__;
extern unsigned long __FIRMWARE_RAM_PAGETABLES_SIZE__;
extern unsigned long __FIRMWARE_RAM_COHERENT_START__;
extern unsigned long __FIRMWARE_RAM_COHERENT_SIZE__;
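/* The values of interest are the addresses of these linker symbols, hence the '&' in the macros below */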
#define BL1_COHERENT_MEM_BASE (&__FIRMWARE_RAM_COHERENT_START__)
#define BL1_COHERENT_MEM_LIMIT \
((unsigned long long)&__FIRMWARE_RAM_COHERENT_START__ + \
(unsigned long long)&__FIRMWARE_RAM_COHERENT_SIZE__)
#define BL1_FIRMWARE_RAM_GLOBALS_ZI_BASE \
(unsigned long)(&__BSS_RAM_START__)
#define BL1_FIRMWARE_RAM_GLOBALS_ZI_LENGTH \
(unsigned long)(&__FIRMWARE_BSS_SIZE__)
#define BL1_FIRMWARE_RAM_COHERENT_ZI_BASE \
(unsigned long)(&__FIRMWARE_RAM_COHERENT_START__)
#define BL1_FIRMWARE_RAM_COHERENT_ZI_LENGTH\
(unsigned long)(&__FIRMWARE_RAM_COHERENT_SIZE__)
#define BL1_NORMAL_RAM_BASE (unsigned long)(&__BSS_RAM_START__)
#define BL1_NORMAL_RAM_LIMIT \
((unsigned long)&__FIRMWARE_RAM_COHERENT_START__ + \
(unsigned long)&__FIRMWARE_RAM_COHERENT_SIZE__)
#else
#error "Unknown compiler."
#endif
/* Data structure which holds the extents of the trusted SRAM for BL1 */
static meminfo bl1_tzram_layout = {0};
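/* Return a copy of the trusted SRAM layout seen by BL1 */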
meminfo bl1_get_sec_mem_layout(void)
{
return bl1_tzram_layout;
}
/*******************************************************************************
* Perform any BL1 specific platform actions.
******************************************************************************/
void bl1_early_platform_setup(void)
{
unsigned long bl1_normal_ram_base;
unsigned long bl1_coherent_ram_limit;
unsigned long tzram_limit = TZRAM_BASE + TZRAM_SIZE;
/*
* Initialize extents of the bl1 sections as per the platform
* defined values.
*/
bl1_normal_ram_base = BL1_NORMAL_RAM_BASE;
bl1_coherent_ram_limit = BL1_NORMAL_RAM_LIMIT;
/*
* Calculate how much ram is BL1 using & how much remains free.
* This also includes a rudimentary mechanism to detect whether
* the BL1 data is loaded at the top or bottom of memory.
* TODO: add support for discontigous chunks of free ram if
* needed. Might need dynamic memory allocation support
* et al.
* Also assuming that the section for coherent memory is
* the last and for globals the first in the scatter file.
*/
bl1_tzram_layout.total_base = TZRAM_BASE;
bl1_tzram_layout.total_size = TZRAM_SIZE;
if (bl1_coherent_ram_limit == tzram_limit) {
bl1_tzram_layout.free_base = TZRAM_BASE;
bl1_tzram_layout.free_size = bl1_normal_ram_base - TZRAM_BASE;
} else {
bl1_tzram_layout.free_base = bl1_coherent_ram_limit;
bl1_tzram_layout.free_size =
tzram_limit - bl1_coherent_ram_limit;
}
}
/*******************************************************************************
* Function which will evaluate how much of the trusted ram has been gobbled
* up by BL1 and return the base and size of what's available for loading BL2.
* It is called after coherency and the MMU have been turned on.
******************************************************************************/
void bl1_platform_setup(void)
{
/*
* This should zero out our coherent stacks as well but we don't care
* as they are not being used right now.
*/
memset((void *) BL1_FIRMWARE_RAM_COHERENT_ZI_BASE, 0,
(size_t) BL1_FIRMWARE_RAM_COHERENT_ZI_LENGTH);
/* Enable and initialize the System level generic timer */
mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_EN);
/* Initialize the console */
console_init();
return;
}
/*******************************************************************************
* Perform the very early platform specific architecture setup here. At the
* moment this only initializes the MMU in a quick and dirty way. Later
* architectural setup (bl1_arch_setup()) does not do anything platform specific.
******************************************************************************/
void bl1_plat_arch_setup(void)
{
configure_mmu(&bl1_tzram_layout,
TZROM_BASE, /* Read-only region start */
TZROM_BASE + TZROM_SIZE, /* Read-only region limit */
/* Coherent region start */
BL1_FIRMWARE_RAM_COHERENT_ZI_BASE,
/* Coherent region size */
BL1_FIRMWARE_RAM_COHERENT_ZI_BASE +
BL1_FIRMWARE_RAM_COHERENT_ZI_LENGTH);
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl2.h>
#include <bl_common.h>
/*******************************************************************************
* Declarations of linker defined symbols which will help us find the layout
* of trusted SRAM
******************************************************************************/
#if defined (__GNUC__)
extern unsigned long __BL2_RO_BASE__;
extern unsigned long __BL2_STACKS_BASE__;
extern unsigned long __BL2_COHERENT_RAM_BASE__;
extern unsigned long __BL2_RW_BASE__;
#define BL2_RO_BASE __BL2_RO_BASE__
#define BL2_STACKS_BASE __BL2_STACKS_BASE__
#define BL2_COHERENT_RAM_BASE __BL2_COHERENT_RAM_BASE__
#define BL2_RW_BASE __BL2_RW_BASE__
#else
#error "Unknown compiler."
#endif
/* Pointer to memory visible to both BL2 and BL31 for passing data */
extern unsigned char **bl2_el_change_mem_ptr;
/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo bl2_tzram_layout
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
section("tzfw_coherent_mem"))) = {0};
/* Data structure which holds the extents of the non-trusted DRAM for BL2 */
static meminfo dram_layout = {0};
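/* Accessors which return copies of the trusted & non-trusted memory layouts seen by BL2 */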
meminfo bl2_get_sec_mem_layout(void)
{
return bl2_tzram_layout;
}
meminfo bl2_get_ns_mem_layout(void)
{
return dram_layout;
}
/*******************************************************************************
* BL1 has passed the extents of the trusted SRAM that should be visible to BL2
* in x0. This memory layout is sitting at the base of the free trusted SRAM.
* Copy it to a safe location before it is reclaimed by later BL2 functionality.
******************************************************************************/
void bl2_early_platform_setup(meminfo *mem_layout,
void *data)
{
/* Setup the BL2 memory layout */
bl2_tzram_layout.total_base = mem_layout->total_base;
bl2_tzram_layout.total_size = mem_layout->total_size;
bl2_tzram_layout.free_base = mem_layout->free_base;
bl2_tzram_layout.free_size = mem_layout->free_size;
bl2_tzram_layout.attr = mem_layout->attr;
bl2_tzram_layout.next = 0;
/* Initialize the platform config for future decision making */
platform_config_setup();
return;
}
/*******************************************************************************
* Not much to do here apart from finding out the extents of non-trusted DRAM
* which will be used for loading the non-trusted software images. We are
* relying on pre-initialized ZI memory so there is nothing to zero out like
* in BL1. This is because BL2 is a raw PIC binary whose load address is
* determined at runtime. The ZI section might be lost if it is not already there.
******************************************************************************/
void bl2_platform_setup()
{
dram_layout.total_base = DRAM_BASE;
dram_layout.total_size = DRAM_SIZE;
dram_layout.free_base = DRAM_BASE;
dram_layout.free_size = DRAM_SIZE;
dram_layout.attr = 0;
/* Use the Trusted DRAM for passing args to BL31 */
bl2_el_change_mem_ptr = (unsigned char **) TZDRAM_BASE;
return;
}
/*******************************************************************************
* Perform the very early platform specific architectural setup here. At the
* moment this only initializes the MMU in a quick and dirty way.
******************************************************************************/
void bl2_plat_arch_setup()
{
unsigned long sctlr;
/* Enable instruction cache. */
sctlr = read_sctlr();
sctlr |= SCTLR_I_BIT;
write_sctlr(sctlr);
/*
* Very simple exception vectors which assert if any exception other
* than a single SMC call from BL2 to pass control to BL31 in EL3 is
* received.
*/
write_vbar((unsigned long) early_exceptions);
configure_mmu(&bl2_tzram_layout,
(unsigned long) &BL2_RO_BASE,
(unsigned long) &BL2_STACKS_BASE,
(unsigned long) &BL2_COHERENT_RAM_BASE,
(unsigned long) &BL2_RW_BASE);
return;
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl31.h>
#include <bl_common.h>
#include <pl011.h>
#include <bakery_lock.h>
#include <cci400.h>
#include <gic.h>
#include <fvp_pwrc.h>
/*******************************************************************************
* Declarations of linker defined symbols which will help us find the layout
* of trusted SRAM
******************************************************************************/
#if defined (__GNUC__)
extern unsigned long __BL31_RO_BASE__;
extern unsigned long __BL31_STACKS_BASE__;
extern unsigned long __BL31_COHERENT_RAM_BASE__;
extern unsigned long __BL31_RW_BASE__;
#define BL31_RO_BASE __BL31_RO_BASE__
#define BL31_STACKS_BASE __BL31_STACKS_BASE__
#define BL31_COHERENT_RAM_BASE __BL31_COHERENT_RAM_BASE__
#define BL31_RW_BASE __BL31_RW_BASE__
#else
#error "Unknown compiler."
#endif
/*******************************************************************************
* This data structure holds information copied by BL31 from BL2 to pass
* control to the non-trusted software images. A per-cpu entry was created to
* use the same structure in the warm boot path but that is not the case right
* now. Persisting with this approach for the time being. TODO: Can this be
* moved out of device memory?
******************************************************************************/
el_change_info ns_entry_info[PLATFORM_CORE_COUNT]
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
section("tzfw_coherent_mem"))) = {0};
/* Data structure which holds the extents of the trusted SRAM for BL31 */
static meminfo bl31_tzram_layout
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
section("tzfw_coherent_mem"))) = {0};
meminfo bl31_get_sec_mem_layout(void)
{
return bl31_tzram_layout;
}
/*******************************************************************************
* Return information about passing control to the non-trusted software images
* to common code. TODO: In the initial architecture, the image after BL31 will
* always run in the non-secure state. In the final architecture there will be
* a series of images. This function will need to be enhanced then.
******************************************************************************/
el_change_info *bl31_get_next_image_info(unsigned long mpidr)
{
return &ns_entry_info[platform_get_core_pos(mpidr)];
}
/*******************************************************************************
* Perform any BL31 specific platform actions. Here we copy parameters passed
* by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they are lost
* (potentially). This is done before the MMU is initialized so that the memory
* layout can be used while creating page tables.
******************************************************************************/
void bl31_early_platform_setup(meminfo *mem_layout,
void *data,
unsigned long mpidr)
{
el_change_info *image_info = (el_change_info *) data;
unsigned int lin_index = platform_get_core_pos(mpidr);
/* Setup the BL31 memory layout */
bl31_tzram_layout.total_base = mem_layout->total_base;
bl31_tzram_layout.total_size = mem_layout->total_size;
bl31_tzram_layout.free_base = mem_layout->free_base;
bl31_tzram_layout.free_size = mem_layout->free_size;
bl31_tzram_layout.attr = mem_layout->attr;
bl31_tzram_layout.next = 0;
/* Save information about jumping into the NS world */
ns_entry_info[lin_index].entrypoint = image_info->entrypoint;
ns_entry_info[lin_index].spsr = image_info->spsr;
ns_entry_info[lin_index].args = image_info->args;
ns_entry_info[lin_index].security_state = image_info->security_state;
ns_entry_info[lin_index].next = image_info->next;
/* Initialize the platform config for future decision making */
platform_config_setup();
}
/*******************************************************************************
* Initialize the gic, configure the CLCD and zero out variables needed by the
* secondaries to boot up correctly.
******************************************************************************/
void bl31_platform_setup()
{
unsigned int reg_val;
/* Initialize the gic cpu and distributor interfaces */
gic_setup();
/*
* TODO: Configure the CLCD before handing control to
* linux. Need to see if a separate driver is needed
* instead.
*/
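/*
 * Assumed V2M config encoding for the write below: bit[31] = start,
 * bit[30] = write, bits[25:20] = function (7, presumably MUXFPGA to
 * route the CLCD output), bits[17:16] = site (0 i.e. the motherboard).
 */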
mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGDATA, 0);
mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGCTRL,
(1ull << 31) | (1 << 30) | (7 << 20) | (0 << 16));
/* Allow access to the System counter timer module */
reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1));
mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
/* Initialize the power controller */
fvp_pwrc_setup();
/* Topologies are best known to the platform. */
plat_setup_topology();
}
/*******************************************************************************
* Perform the very early platform specific architectural setup here. At the
* moment this only initializes the MMU in a quick and dirty way.
******************************************************************************/
void bl31_plat_arch_setup()
{
unsigned long sctlr;
/* Enable instruction cache. */
sctlr = read_sctlr();
sctlr |= SCTLR_I_BIT;
write_sctlr(sctlr);
write_vbar((unsigned long) runtime_exceptions);
configure_mmu(&bl31_tzram_layout,
(unsigned long) &BL31_RO_BASE,
(unsigned long) &BL31_STACKS_BASE,
(unsigned long) &BL31_COHERENT_RAM_BASE,
(unsigned long) &BL31_RW_BASE);
}
/*******************************************************************************
* TODO: Move GIC setup to a separate file in case it is needed by other BL
* stages or ELs
* TODO: Revisit if priorities are being set such that no non-secure interrupt
* can have a higher priority than a secure one as recommended in the GICv2 spec
*******************************************************************************/
/*******************************************************************************
* This function does some minimal GICv3 configuration. The Firmware itself does
* not fully support GICv3 at this time and relies on GICv2 emulation as
* provided by GICv3. This function allows software (like Linux) in later stages
* to use full GICv3 features.
*******************************************************************************/
void gicv3_cpuif_setup(void)
{
unsigned int scr_val, val, base;
/*
* When CPUs come out of reset they have their GICR_WAKER.ProcessorSleep
* bit set. In order to allow interrupts to get routed to the CPU we
* need to clear this bit if set and wait for GICR_WAKER.ChildrenAsleep
* to clear (GICv3 Architecture specification 5.4.23).
* GICR_WAKER is NOT banked per CPU, compute the correct base address
* per CPU.
*
* TODO:
* For GICv4 we also need to adjust the Base address based on
* GICR_TYPER.VLPIS
*/
base = BASE_GICR_BASE +
(platform_get_core_pos(read_mpidr()) << GICR_PCPUBASE_SHIFT);
val = gicr_read_waker(base);
val &= ~WAKER_PS;
gicr_write_waker(base, val);
dsb();
/* We need to wait for ChildrenAsleep to clear. */
val = gicr_read_waker(base);
while (val & WAKER_CA) {
val = gicr_read_waker(base);
}
/*
* We need to set SCR_EL3.NS in order to see GICv3 non-secure state.
* Restore SCR_EL3.NS again before exit.
*/
scr_val = read_scr();
write_scr(scr_val | SCR_NS_BIT);
/*
* By default EL2 and NS-EL1 software should be able to enable GICv3
* System register access without any configuration at EL3. But it turns
* out that GICC PMR as set in GICv2 mode does not affect GICv3 mode. So
* we need to set it here again. In order to do that we need to enable
* register access. We leave it enabled as it should be fine and might
* prevent problems with later software trying to access GIC System
* Registers.
*/
val = read_icc_sre_el3();
write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE);
val = read_icc_sre_el2();
write_icc_sre_el2(val | ICC_SRE_EN | ICC_SRE_SRE);
write_icc_pmr_el1(MAX_PRI_VAL);
/* Restore SCR_EL3 */
write_scr(scr_val);
}
/*******************************************************************************
* This function does some minimal GICv3 configuration when cores go
* down.
*******************************************************************************/
void gicv3_cpuif_deactivate(void)
{
unsigned int val, base;
/*
* When taking CPUs down we need to set GICR_WAKER.ProcessorSleep and
* wait for GICR_WAKER.ChildrenAsleep to get set.
* (GICv3 Architecture specification 5.4.23).
* GICR_WAKER is NOT banked per CPU, compute the correct base address
* per CPU.
*
* TODO:
* For GICv4 we also need to adjust the Base address based on
* GICR_TYPER.VLPIS
*/
base = BASE_GICR_BASE +
(platform_get_core_pos(read_mpidr()) << GICR_PCPUBASE_SHIFT);
val = gicr_read_waker(base);
val |= WAKER_PS;
gicr_write_waker(base, val);
dsb();
/* We need to wait for ChildrenAsleep to set. */
val = gicr_read_waker(base);
while ((val & WAKER_CA) == 0) {
val = gicr_read_waker(base);
}
}
/*******************************************************************************
* Enable secure interrupts and use FIQs to route them. Disable legacy bypass
* and set the priority mask register to allow all interrupts to trickle in.
******************************************************************************/
void gic_cpuif_setup(unsigned int gicc_base)
{
unsigned int val;
val = gicc_read_iidr(gicc_base);
/*
* If GICv3 we need to do a bit of additional setup. We want to
* allow default GICv2 behaviour but allow the next stage to
* enable full gicv3 features.
*/
if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
gicv3_cpuif_setup();
}
val = ENABLE_GRP0 | FIQ_EN | FIQ_BYP_DIS_GRP0;
val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
gicc_write_pmr(gicc_base, MAX_PRI_VAL);
gicc_write_ctlr(gicc_base, val);
}
/*******************************************************************************
* Place the cpu interface in a state where it can never make a cpu exit wfi
* as a result of an asserted interrupt. This is critical for powering down a cpu
******************************************************************************/
void gic_cpuif_deactivate(unsigned int gicc_base)
{
unsigned int val;
/* Disable secure, non-secure interrupts and disable their bypass */
val = gicc_read_ctlr(gicc_base);
val &= ~(ENABLE_GRP0 | ENABLE_GRP1);
val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
gicc_write_ctlr(gicc_base, val);
val = gicc_read_iidr(gicc_base);
/*
* If GICv3 we need to do a bit of additional setup. Make sure the
* RDIST is put to sleep.
*/
if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
gicv3_cpuif_deactivate();
}
}
/*******************************************************************************
* Per cpu gic distributor setup which will be done by all cpus after a cold
* boot/hotplug. This marks out the secure interrupts & enables them.
******************************************************************************/
void gic_pcpu_distif_setup(unsigned int gicd_base)
{
gicd_write_igroupr(gicd_base, 0, ~0);
gicd_clr_igroupr(gicd_base, IRQ_SEC_PHY_TIMER);
gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_0);
gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_1);
gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_2);
gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_3);
gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_4);
gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_5);
gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_6);
gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_7);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_PHY_TIMER, MAX_PRI_VAL);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_0, MAX_PRI_VAL);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_1, MAX_PRI_VAL);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_2, MAX_PRI_VAL);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_3, MAX_PRI_VAL);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_4, MAX_PRI_VAL);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_5, MAX_PRI_VAL);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_6, MAX_PRI_VAL);
gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_7, MAX_PRI_VAL);
gicd_set_isenabler(gicd_base, IRQ_SEC_PHY_TIMER);
gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_0);
gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_1);
gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_2);
gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_3);
gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_4);
gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_5);
gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_6);
gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_7);
}
/*******************************************************************************
* Global gic distributor setup which will be done by the primary cpu after a
* cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It
* then enables the secure GIC distributor interface.
******************************************************************************/
void gic_distif_setup(unsigned int gicd_base)
{
unsigned int ctr, num_ints, ctlr;
/* Disable the distributor before going further */
ctlr = gicd_read_ctlr(gicd_base);
ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1);
gicd_write_ctlr(gicd_base, ctlr);
/*
* Mark out non-secure interrupts. Calculate number of
* IGROUPR registers to consider. Will be equal to the
* number of IT_LINES
*/
num_ints = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK;
num_ints++;
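/* Each IGROUPR register covers 32 interrupt IDs; mark them all as non-secure (Group 1) */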
for (ctr = 0; ctr < num_ints; ctr++)
gicd_write_igroupr(gicd_base, ctr << IGROUPR_SHIFT, ~0);
/* Configure secure interrupts now */
gicd_clr_igroupr(gicd_base, IRQ_TZ_WDOG);
gicd_set_ipriorityr(gicd_base, IRQ_TZ_WDOG, MAX_PRI_VAL);
gicd_set_itargetsr(gicd_base, IRQ_TZ_WDOG,
platform_get_core_pos(read_mpidr()));
gicd_set_isenabler(gicd_base, IRQ_TZ_WDOG);
gic_pcpu_distif_setup(gicd_base);
gicd_write_ctlr(gicd_base, ctlr | ENABLE_GRP0);
}
void gic_setup(void)
{
unsigned int gicd_base, gicc_base;
gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR);
gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
gic_cpuif_setup(gicc_base);
gic_distif_setup(gicd_base);
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <bl_common.h>
#include <bl31.h>
#include <bakery_lock.h>
#include <cci400.h>
#include <gic.h>
#include <fvp_pwrc.h>
/* Only included for error codes */
#include <psci.h>
/*******************************************************************************
* FVP handler called when an affinity instance is about to be turned on. The
* level and mpidr determine the affinity instance.
******************************************************************************/
int fvp_affinst_on(unsigned long mpidr,
unsigned long sec_entrypoint,
unsigned long ns_entrypoint,
unsigned int afflvl,
unsigned int state)
{
int rc = PSCI_E_SUCCESS;
unsigned long linear_id;
mailbox *fvp_mboxes;
unsigned int psysr;
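/* Cannot allow the NS world to execute trusted firmware code */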
if (ns_entrypoint < DRAM_BASE) {
rc = PSCI_E_INVALID_PARAMS;
goto exit;
}
/*
* It's possible to turn on only affinity level 0 i.e. a cpu
* on the FVP. Ignore any other affinity level.
*/
if (afflvl != MPIDR_AFFLVL0)
goto exit;
/*
* Ensure that we do not cancel an inflight power off request
* for the target cpu. That would leave it in a zombie wfi.
* Wait for it to power off, program the jump address for the
* target cpu and then program the power controller to turn
* that cpu on
*/
do {
psysr = fvp_pwrc_read_psysr(mpidr);
} while (psysr & PSYSR_AFF_L0);
linear_id = platform_get_core_pos(mpidr);
fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
fvp_mboxes[linear_id].value = sec_entrypoint;
flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
sizeof(unsigned long));
fvp_pwrc_write_pponr(mpidr);
exit:
return rc;
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to be turned off. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the
* platform to decide whether the cluster is being turned off and take the
* appropriate actions.
*
* CAUTION: This function is called with coherent stacks so that caches can be
* turned off, flushed and coherency disabled. There is no guarantee that caches
* will remain turned on across calls to this function as each affinity level is
* dealt with. So do not write & read global variables across calls. It will be
* wise to do flush a write to the global to prevent unpredictable results.
******************************************************************************/
int fvp_affinst_off(unsigned long mpidr,
unsigned int afflvl,
unsigned int state)
{
int rc = PSCI_E_SUCCESS;
unsigned int gicc_base, ectlr;
unsigned long cpu_setup;
switch (afflvl) {
case MPIDR_AFFLVL1:
if (state == PSCI_STATE_OFF) {
/*
* Disable coherency if this cluster is to be
* turned off
*/
cci_disable_coherency(mpidr);
/*
* Program the power controller to turn the
* cluster off
*/
fvp_pwrc_write_pcoffr(mpidr);
}
break;
case MPIDR_AFFLVL0:
if (state == PSCI_STATE_OFF) {
/*
* Take this cpu out of intra-cluster coherency if
* the FVP flavour supports the SMP bit.
*/
cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
if (cpu_setup) {
ectlr = read_cpuectlr();
ectlr &= ~CPUECTLR_SMP_BIT;
write_cpuectlr(ectlr);
}
/*
* Prevent interrupts from spuriously waking up
* this cpu
*/
gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
gic_cpuif_deactivate(gicc_base);
/*
* Program the power controller to power this
* cpu off
*/
fvp_pwrc_write_ppoffr(mpidr);
}
break;
default:
assert(0);
}
return rc;
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to be suspended. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the
* platform to decide whether the cluster is being turned off and take the
* appropriate actions.
*
* CAUTION: This function is called with coherent stacks so that caches can be
* turned off, flushed and coherency disabled. There is no guarantee that caches
* will remain turned on across calls to this function as each affinity level is
* dealt with. So do not write & read global variables across calls. It will be
* wise to do flush a write to the global to prevent unpredictable results.
******************************************************************************/
int fvp_affinst_suspend(unsigned long mpidr,
unsigned long sec_entrypoint,
unsigned long ns_entrypoint,
unsigned int afflvl,
unsigned int state)
{
int rc = PSCI_E_SUCCESS;
unsigned int gicc_base, ectlr;
unsigned long cpu_setup, linear_id;
mailbox *fvp_mboxes;
/* Cannot allow NS world to execute trusted firmware code */
if (ns_entrypoint < DRAM_BASE) {
rc = PSCI_E_INVALID_PARAMS;
goto exit;
}
switch (afflvl) {
case MPIDR_AFFLVL1:
if (state == PSCI_STATE_OFF) {
/*
* Disable coherency if this cluster is to be
* turned off
*/
cci_disable_coherency(mpidr);
/*
* Program the power controller to turn the
* cluster off
*/
fvp_pwrc_write_pcoffr(mpidr);
}
break;
case MPIDR_AFFLVL0:
if (state == PSCI_STATE_OFF) {
/*
* Take this cpu out of intra-cluster coherency if
* the FVP flavour supports the SMP bit.
*/
cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
if (cpu_setup) {
ectlr = read_cpuectlr();
ectlr &= ~CPUECTLR_SMP_BIT;
write_cpuectlr(ectlr);
}
/* Program the jump address for the target cpu */
linear_id = platform_get_core_pos(mpidr);
fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
fvp_mboxes[linear_id].value = sec_entrypoint;
flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
sizeof(unsigned long));
/*
* Prevent interrupts from spuriously waking up
* this cpu
*/
gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
gic_cpuif_deactivate(gicc_base);
/*
* Program the power controller to power this
* cpu off and enable wakeup interrupts.
*/
fvp_pwrc_write_pwkupr(mpidr);
fvp_pwrc_write_ppoffr(mpidr);
}
break;
default:
assert(0);
}
exit:
return rc;
}
/*******************************************************************************
* FVP handler called when an affinity instance has just been powered on after
* being turned off earlier. The level and mpidr determine the affinity
* instance. The 'state' arg. allows the platform to decide whether the cluster
* was turned off prior to wakeup and do what's necessary to set it up
* correctly.
******************************************************************************/
int fvp_affinst_on_finish(unsigned long mpidr,
unsigned int afflvl,
unsigned int state)
{
int rc = PSCI_E_SUCCESS;
unsigned long linear_id, cpu_setup;
mailbox *fvp_mboxes;
unsigned int gicd_base, gicc_base, reg_val, ectlr;
switch (afflvl) {
case MPIDR_AFFLVL1:
/* Enable coherency if this cluster was off */
if (state == PSCI_STATE_OFF)
cci_enable_coherency(mpidr);
break;
case MPIDR_AFFLVL0:
/*
* Ignore the state passed for a cpu. It could only have
* been off if we are here.
*/
/*
* Turn on intra-cluster coherency if the FVP flavour supports
* it.
*/
cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
if (cpu_setup) {
ectlr = read_cpuectlr();
ectlr |= CPUECTLR_SMP_BIT;
write_cpuectlr(ectlr);
}
/* Zero the jump address in the mailbox for this cpu */
fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
linear_id = platform_get_core_pos(mpidr);
fvp_mboxes[linear_id].value = 0;
flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
sizeof(unsigned long));
gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR);
gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
/* Enable the gic cpu interface */
gic_cpuif_setup(gicc_base);
/* TODO: This setup is needed only after a cold boot */
gic_pcpu_distif_setup(gicd_base);
/* Allow access to the System counter timer module */
reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
reg_val = (1 << CNTNSAR_NS_SHIFT(0)) |
(1 << CNTNSAR_NS_SHIFT(1));
mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
break;
default:
assert(0);
}
return rc;
}
/*******************************************************************************
* FVP handler called when an affinity instance has just been powered on after
* having been suspended earlier. The level and mpidr determine the affinity
* instance.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
int fvp_affinst_suspend_finish(unsigned long mpidr,
unsigned int afflvl,
unsigned int state)
{
return fvp_affinst_on_finish(mpidr, afflvl, state);
}
/*******************************************************************************
* Export the platform handlers to enable psci to invoke them
******************************************************************************/
static plat_pm_ops fvp_plat_pm_ops = {
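/* The first entry (assumed to be a standby handler) is not implemented yet, hence 0 */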
0,
fvp_affinst_on,
fvp_affinst_off,
fvp_affinst_suspend,
fvp_affinst_on_finish,
fvp_affinst_suspend_finish,
};
/*******************************************************************************
* Export the platform specific power ops & initialize the fvp power controller
******************************************************************************/
int platform_setup_pm(plat_pm_ops **plat_ops)
{
*plat_ops = &fvp_plat_pm_ops;
return 0;
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <assert.h>
#include <platform.h>
#include <fvp_pwrc.h>
/* TODO: Reusing psci error codes & state information. Get our own! */
#include <psci.h>
/* We treat '255' as an invalid affinity instance */
#define AFFINST_INVAL 0xff
/*******************************************************************************
* We support 3 flavours of the FVP: Foundation, Base AEM & Base Cortex. Each
* flavour has a different topology. The common bit is that there can be a max.
* of 2 clusters (affinity 1) and 4 cpus (affinity 0) per cluster. So we define
* a tree like data structure which caters to these maximum bounds. It simply
* marks the absent affinity level instances as PSCI_AFF_ABSENT e.g. there is no
* cluster 1 on the Foundation FVP. The 'data' field is currently unused.
******************************************************************************/
typedef struct {
unsigned char sibling;
unsigned char child;
unsigned char state;
unsigned int data;
} affinity_info;
/*******************************************************************************
* The following two data structures store the topology tree for the fvp. There
* is a separate array for each affinity level i.e. cpus and clusters. The child
* and sibling references allow traversal inside and in between the two arrays.
******************************************************************************/
static affinity_info fvp_aff1_topology_map[PLATFORM_CLUSTER_COUNT];
static affinity_info fvp_aff0_topology_map[PLATFORM_CORE_COUNT];
/* Simple global variable to safeguard us from stupidity */
static unsigned int topology_setup_done;
/*******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform to allow the former to detect the platform
* topology. psci queries the platform to determine how many affinity instances
* are present at a particular level for a given mpidr e.g. consider a dual
* cluster platform where each cluster has 4 cpus. A call to this function with
* (0, 0x100) will return the number of cpus implemented under cluster 1 i.e. 4.
* Similarly a call with (1, 0x100) will return 2 i.e. the number of clusters.
* This is 'cause we are effectively asking how many affinity level 1 instances
* are implemented under affinity level 2 instance 0.
******************************************************************************/
unsigned int plat_get_aff_count(unsigned int aff_lvl,
unsigned long mpidr)
{
unsigned int aff_count = 1, ctr;
unsigned char parent_aff_id;
assert(topology_setup_done == 1);
switch (aff_lvl) {
case 3:
case 2:
/*
* Assert if the parent affinity instance is not 0.
* This also takes care of level 3 in an obfuscated way
*/
parent_aff_id = (mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id == 0);
/*
* Report that we implement a single instance of
* affinity levels 2 & 3 which are AFF_ABSENT
*/
break;
case 1:
/* Assert if the parent affinity instance is not 0. */
parent_aff_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id == 0);
/* Fetch the starting index in the aff1 array */
for (ctr = 0;
fvp_aff1_topology_map[ctr].sibling != AFFINST_INVAL;
ctr = fvp_aff1_topology_map[ctr].sibling) {
aff_count++;
}
break;
case 0:
/* Assert if the cluster id is anything apart from 0 or 1 */
parent_aff_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id < PLATFORM_CLUSTER_COUNT);
/* Fetch the starting index in the aff0 array */
for (ctr = fvp_aff1_topology_map[parent_aff_id].child;
fvp_aff0_topology_map[ctr].sibling != AFFINST_INVAL;
ctr = fvp_aff0_topology_map[ctr].sibling) {
aff_count++;
}
break;
default:
assert(0);
}
return aff_count;
}
/*******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform to allow the former to detect the state of an
* affinity instance in the platform topology. psci queries the platform to
* determine whether an affinity instance is present or absent. This caters for
* topologies where an intermediate affinity level instance is missing e.g.
* consider a platform which implements a single cluster with 4 cpus and there
* is another cpu sitting directly on the interconnect along with the cluster.
* The mpidrs of the cluster would range from 0x0-0x3. The mpidr of the single
* cpu would be 0x100 to highlight that it does not belong to cluster 0. Cluster
* 1 is however missing but needs to be accounted for to reach this single cpu in
* the topology tree. Hence it will be marked as PSCI_AFF_ABSENT. This is not
* applicable to the FVP but depicted as an example.
******************************************************************************/
unsigned int plat_get_aff_state(unsigned int aff_lvl,
unsigned long mpidr)
{
unsigned int aff_state = PSCI_AFF_ABSENT, idx;
idx = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
assert(topology_setup_done == 1);
switch (aff_lvl) {
case 3:
case 2:
/* Report affinity levels 2 & 3 as absent */
break;
case 1:
aff_state = fvp_aff1_topology_map[idx].state;
break;
case 0:
/*
* First get start index of the aff0 in its array & then add
* to it the affinity id that we want the state of
*/
idx = fvp_aff1_topology_map[idx].child;
idx += (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
aff_state = fvp_aff0_topology_map[idx].state;
break;
default:
assert(0);
}
return aff_state;
}
/*******************************************************************************
* Handy optimization to prevent the psci implementation from traversing through
* affinity levels which are not present while detecting the platform topology.
******************************************************************************/
int plat_get_max_afflvl()
{
return MPIDR_AFFLVL1;
}
/*******************************************************************************
* This function populates the FVP specific topology information depending upon
* the FVP flavour it is running on. We construct all the mpidrs we can handle
* and rely on the PWRC.PSYSR to flag absent cpus when their status is queried.
******************************************************************************/
int plat_setup_topology()
{
unsigned char aff0, aff1, aff_state, aff0_offset = 0;
unsigned long mpidr;
topology_setup_done = 0;
for (aff1 = 0; aff1 < PLATFORM_CLUSTER_COUNT; aff1++) {
fvp_aff1_topology_map[aff1].child = aff0_offset;
fvp_aff1_topology_map[aff1].sibling = aff1 + 1;
for (aff0 = 0; aff0 < PLATFORM_MAX_CPUS_PER_CLUSTER; aff0++) {
mpidr = aff1 << MPIDR_AFF1_SHIFT;
mpidr |= aff0 << MPIDR_AFF0_SHIFT;
if (fvp_pwrc_read_psysr(mpidr) != PSYSR_INVALID) {
/*
* Presence of even a single aff0 indicates
* presence of parent aff1 on the FVP.
*/
aff_state = PSCI_AFF_PRESENT;
fvp_aff1_topology_map[aff1].state =
PSCI_AFF_PRESENT;
} else {
aff_state = PSCI_AFF_ABSENT;
}
fvp_aff0_topology_map[aff0_offset].child = AFFINST_INVAL;
fvp_aff0_topology_map[aff0_offset].state = aff_state;
fvp_aff0_topology_map[aff0_offset].sibling =
aff0_offset + 1;
/* Increment the absolute number of aff0s traversed */
aff0_offset++;
}
/* Tie-off the last aff0 sibling to AFFINST_INVAL to avoid overflow */
fvp_aff0_topology_map[aff0_offset - 1].sibling = AFFINST_INVAL;
}
/* Tie-off the last aff1 sibling to AFFINST_INVAL to avoid overflow */
fvp_aff1_topology_map[aff1 - 1].sibling = AFFINST_INVAL;
topology_setup_done = 1;
return 0;
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PLATFORM_H__
#define __PLATFORM_H__
#include <arch.h>
#include <mmio.h>
#include <psci.h>
#include <bl_common.h>
/*******************************************************************************
* Platform binary types for linking
******************************************************************************/
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
/*******************************************************************************
* Generic platform constants
******************************************************************************/
#define PLATFORM_STACK_SIZE 0x800
#define FIRMWARE_WELCOME_STR "Booting trusted firmware boot loader stage 1\n\r"
#define BL2_IMAGE_NAME "bl2.bin"
#define BL31_IMAGE_NAME "bl31.bin"
#define NS_IMAGE_OFFSET FLASH0_BASE
#define PLATFORM_CACHE_LINE_SIZE 64
#define PLATFORM_CLUSTER_COUNT 2ull
#define PLATFORM_CLUSTER0_CORE_COUNT 4
#define PLATFORM_CLUSTER1_CORE_COUNT 4
#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \
PLATFORM_CLUSTER0_CORE_COUNT)
#define PLATFORM_MAX_CPUS_PER_CLUSTER 4
#define PRIMARY_CPU 0x0
/* Constants for accessing platform configuration */
#define CONFIG_GICD_ADDR 0
#define CONFIG_GICC_ADDR 1
#define CONFIG_GICH_ADDR 2
#define CONFIG_GICV_ADDR 3
#define CONFIG_MAX_AFF0 4
#define CONFIG_MAX_AFF1 5
/* Indicate whether the CPUECTLR SMP bit should be enabled. */
#define CONFIG_CPU_SETUP 6
#define CONFIG_BASE_MMAP 7
#define CONFIG_LIMIT 8
/*******************************************************************************
* Platform memory map related constants
******************************************************************************/
#define TZROM_BASE 0x00000000
#define TZROM_SIZE 0x04000000
#define TZRAM_BASE 0x04000000
#define TZRAM_SIZE 0x40000
#define FLASH0_BASE 0x08000000
#define FLASH0_SIZE TZROM_SIZE
#define FLASH1_BASE 0x0c000000
#define FLASH1_SIZE 0x04000000
#define PSRAM_BASE 0x14000000
#define PSRAM_SIZE 0x04000000
#define VRAM_BASE 0x18000000
#define VRAM_SIZE 0x02000000
/* Aggregate of all devices in the first GB */
#define DEVICE0_BASE 0x1a000000
#define DEVICE0_SIZE 0x12200000
#define DEVICE1_BASE 0x2f000000
#define DEVICE1_SIZE 0x200000
#define NSRAM_BASE 0x2e000000
#define NSRAM_SIZE 0x10000
/* Location of trusted dram on the base fvp */
#define TZDRAM_BASE 0x06000000
#define TZDRAM_SIZE 0x02000000
#define MBOX_OFF 0x1000
#define AFFMAP_OFF 0x1200
#define DRAM_BASE 0x80000000ull
#define DRAM_SIZE 0x80000000ull
#define PCIE_EXP_BASE 0x40000000
#define TZRNG_BASE 0x7fe60000
#define TZNVCTR_BASE 0x7fe70000
#define TZROOTKEY_BASE 0x7fe80000
/* Memory mapped Generic timer interfaces */
#define SYS_CNTCTL_BASE 0x2a430000
#define SYS_CNTREAD_BASE 0x2a800000
#define SYS_TIMCTL_BASE 0x2a810000
/* Counter timer module offsets */
#define CNTNSAR 0x4
#define CNTNSAR_NS_SHIFT(x) (x)
#define CNTACR_BASE(x) (0x40 + ((x) << 2))
#define CNTACR_RPCT_SHIFT 0x0
#define CNTACR_RVCT_SHIFT 0x1
#define CNTACR_RFRQ_SHIFT 0x2
#define CNTACR_RVOFF_SHIFT 0x3
#define CNTACR_RWVT_SHIFT 0x4
#define CNTACR_RWPT_SHIFT 0x5
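/*
 * Illustrative sketch (an assumption, not part of the original header): EL3
 * firmware would typically grant lower exception levels access to a memory
 * mapped timer frame by programming the counter timer module, e.g. for
 * frame 0 (chosen only for illustration):
 *
 *   unsigned int acr = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT) |
 *                      (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT) |
 *                      (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
 *   mmio_write_32(SYS_CNTCTL_BASE + CNTACR_BASE(0), acr);
 *   mmio_write_32(SYS_CNTCTL_BASE + CNTNSAR, 1 << CNTNSAR_NS_SHIFT(0));
 *
 * mmio_write_32() comes from the included mmio.h.
 */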
/* V2M motherboard system registers & offsets */
#define VE_SYSREGS_BASE 0x1c010000
#define V2M_SYS_ID 0x0
#define V2M_SYS_LED 0x8
#define V2M_SYS_CFGDATA 0xa0
#define V2M_SYS_CFGCTRL 0xa4
/*
* V2M sysled bit definitions. The values written to this
* register are defined in arch.h & runtime_svc.h. Only
* used by the primary cpu to diagnose any cold boot issues.
*
* SYS_LED[0] - Security state (S=0/NS=1)
* SYS_LED[2:1] - Exception Level (EL3-EL0)
* SYS_LED[7:3] - Exception Class (Sync/Async & origin)
*
*/
#define SYS_LED_SS_SHIFT 0x0
#define SYS_LED_EL_SHIFT 0x1
#define SYS_LED_EC_SHIFT 0x3
#define SYS_LED_SS_MASK 0x1
#define SYS_LED_EL_MASK 0x3
#define SYS_LED_EC_MASK 0x1f
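/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * diagnostic value could be composed from the fields described above and
 * written to the V2M system LED register, e.g.:
 *
 *   unsigned int led = ((ss & SYS_LED_SS_MASK) << SYS_LED_SS_SHIFT) |
 *                      ((el & SYS_LED_EL_MASK) << SYS_LED_EL_SHIFT) |
 *                      ((ec & SYS_LED_EC_MASK) << SYS_LED_EC_SHIFT);
 *   mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_LED, led);
 *
 * Here ss, el and ec are hypothetical locals holding the security state,
 * exception level and exception class respectively.
 */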
/* V2M sysid register bits */
#define SYS_ID_REV_SHIFT 27
#define SYS_ID_HBI_SHIFT 16
#define SYS_ID_BLD_SHIFT 12
#define SYS_ID_ARCH_SHIFT 8
#define SYS_ID_FPGA_SHIFT 0
#define SYS_ID_REV_MASK 0xf
#define SYS_ID_HBI_MASK 0xfff
#define SYS_ID_BLD_MASK 0xf
#define SYS_ID_ARCH_MASK 0xf
#define SYS_ID_FPGA_MASK 0xff
#define SYS_ID_BLD_LENGTH 4
#define REV_FVP 0x0
#define HBI_FVP_BASE 0x020
#define HBI_FOUNDATION 0x010
#define BLD_GIC_VE_MMAP 0x0
#define BLD_GIC_A53A57_MMAP 0x1
#define ARCH_MODEL 0x1
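/*
 * Illustrative sketch (an assumption, not part of the original header):
 * platform_config_setup() is expected to identify the model variant by
 * decoding the V2M system id register, e.g.:
 *
 *   unsigned int sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID);
 *   unsigned int hbi = (sys_id >> SYS_ID_HBI_SHIFT) & SYS_ID_HBI_MASK;
 *   unsigned int bld = (sys_id >> SYS_ID_BLD_SHIFT) & SYS_ID_BLD_MASK;
 *
 * hbi distinguishes HBI_FVP_BASE from HBI_FOUNDATION, while bld selects
 * between the BLD_GIC_VE_MMAP and BLD_GIC_A53A57_MMAP GIC memory maps.
 */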
/* FVP Power controller base address*/
#define PWRC_BASE 0x1c100000
/*******************************************************************************
 * Platform specific per affinity level states. The distinction between off and
 * suspend is made so that a suspended cpu can still be reported as on, e.g. in
 * the affinity_info psci call.
******************************************************************************/
#define PLATFORM_MAX_AFF0 4
#define PLATFORM_MAX_AFF1 2
#define PLAT_AFF_UNK 0xff
#define PLAT_AFF0_OFF 0x0
#define PLAT_AFF0_ONPENDING 0x1
#define PLAT_AFF0_SUSPEND 0x2
#define PLAT_AFF0_ON 0x3
#define PLAT_AFF1_OFF 0x0
#define PLAT_AFF1_ONPENDING 0x1
#define PLAT_AFF1_SUSPEND 0x2
#define PLAT_AFF1_ON 0x3
/*******************************************************************************
* BL2 specific defines.
******************************************************************************/
#define BL2_BASE 0x0402D000
/*******************************************************************************
* BL31 specific defines.
******************************************************************************/
#define BL31_BASE 0x0400E000
/*******************************************************************************
* Platform specific page table and MMU setup constants
******************************************************************************/
#define EL3_ADDR_SPACE_SIZE (1ull << 32)
#define EL3_NUM_PAGETABLES 2
#define EL3_TROM_PAGETABLE 0
#define EL3_TRAM_PAGETABLE 1
#define ADDR_SPACE_SIZE (1ull << 32)
#define NUM_L2_PAGETABLES 2
#define GB1_L2_PAGETABLE 0
#define GB2_L2_PAGETABLE 1
#define NUM_L3_PAGETABLES 2
#define TZRAM_PAGETABLE 0
#define NSRAM_PAGETABLE 1
/*******************************************************************************
* CCI-400 related constants
******************************************************************************/
#define CCI400_BASE 0x2c090000
#define CCI400_SL_IFACE_CLUSTER0 3
#define CCI400_SL_IFACE_CLUSTER1 4
#define CCI400_SL_IFACE_INDEX(mpidr) (((mpidr) & MPIDR_CLUSTER_MASK) ? \
					 CCI400_SL_IFACE_CLUSTER1 : \
					 CCI400_SL_IFACE_CLUSTER0)
/*******************************************************************************
* GIC-400 & interrupt handling related constants
******************************************************************************/
/* VE compatible GIC memory map */
#define VE_GICD_BASE 0x2c001000
#define VE_GICC_BASE 0x2c002000
#define VE_GICH_BASE 0x2c004000
#define VE_GICV_BASE 0x2c006000
/* Base FVP compatible GIC memory map */
#define BASE_GICD_BASE 0x2f000000
#define BASE_GICR_BASE 0x2f100000
#define BASE_GICC_BASE 0x2c000000
#define BASE_GICH_BASE 0x2c010000
#define BASE_GICV_BASE 0x2c02f000
#define IRQ_TZ_WDOG 56
#define IRQ_SEC_PHY_TIMER 29
#define IRQ_SEC_SGI_0 8
#define IRQ_SEC_SGI_1 9
#define IRQ_SEC_SGI_2 10
#define IRQ_SEC_SGI_3 11
#define IRQ_SEC_SGI_4 12
#define IRQ_SEC_SGI_5 13
#define IRQ_SEC_SGI_6 14
#define IRQ_SEC_SGI_7 15
#define IRQ_SEC_SGI_8 16
/*******************************************************************************
* PL011 related constants
******************************************************************************/
#define PL011_BASE 0x1c090000
/*******************************************************************************
* Declarations and constants to access the mailboxes safely. Each mailbox is
* aligned on the biggest cache line size in the platform. This is known only
* to the platform as it might have a combination of integrated and external
 * caches. Such alignment ensures that two mailboxes do not sit on the same
 * cache line at any cache level. Two mailboxes could belong to different
 * cpus/clusters and be written while protected by different locks, corrupting
 * a valid mailbox address.
******************************************************************************/
#define CACHE_WRITEBACK_SHIFT 6
#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
#ifndef __ASSEMBLY__
typedef volatile struct {
unsigned long value
__attribute__((__aligned__(CACHE_WRITEBACK_GRANULE)));
} mailbox;
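/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * mailboxes are assumed to live in trusted DRAM at TZDRAM_BASE + MBOX_OFF,
 * one per cpu, so a power management path might publish the warm boot
 * entrypoint for a given (hypothetical) mpidr along these lines:
 *
 *   mailbox *mboxes = (mailbox *)(TZDRAM_BASE + MBOX_OFF);
 *   unsigned int pos = platform_get_core_pos(mpidr);
 *   mboxes[pos].value = warm_boot_entrypoint;
 *   flush_dcache_range((unsigned long)&mboxes[pos], sizeof(mailbox));
 *
 * flush_dcache_range() is assumed to be provided by the generic cache helpers.
 */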
/*******************************************************************************
* Function and variable prototypes
******************************************************************************/
extern unsigned long *bl1_normal_ram_base;
extern unsigned long *bl1_normal_ram_len;
extern unsigned long *bl1_normal_ram_limit;
extern unsigned long *bl1_normal_ram_zi_base;
extern unsigned long *bl1_normal_ram_zi_len;
extern unsigned long *bl1_coherent_ram_base;
extern unsigned long *bl1_coherent_ram_len;
extern unsigned long *bl1_coherent_ram_limit;
extern unsigned long *bl1_coherent_ram_zi_base;
extern unsigned long *bl1_coherent_ram_zi_len;
extern unsigned long warm_boot_entrypoint;
extern void bl1_plat_arch_setup(void);
extern void bl2_plat_arch_setup(void);
extern void bl31_plat_arch_setup(void);
extern int platform_setup_pm(plat_pm_ops **);
extern unsigned int platform_get_core_pos(unsigned long mpidr);
extern void disable_mmu(void);
extern void enable_mmu(void);
extern void configure_mmu(meminfo *,
unsigned long,
unsigned long,
unsigned long,
unsigned long);
extern unsigned long platform_get_cfgvar(unsigned int);
extern int platform_config_setup(void);
extern void plat_report_exception(unsigned long);
extern unsigned long plat_get_ns_image_entrypoint(void);
/* Declarations for fvp_topology.c */
extern int plat_setup_topology(void);
extern int plat_get_max_afflvl(void);
extern unsigned int plat_get_aff_count(unsigned int, unsigned long);
extern unsigned int plat_get_aff_state(unsigned int, unsigned long);
#endif /*__ASSEMBLY__*/
#endif /* __PLATFORM_H__ */
ARM Trusted Firmware - version 0.2
==================================
ARM Trusted Firmware provides a reference implementation of secure world
software for [ARMv8], including Exception Level 3 (EL3) software. This first
release focuses on support for ARM's [Fixed Virtual Platforms (FVPs)] [FVP].
The intent is to provide a reference implementation of various ARM interface
standards, such as the Power State Coordination Interface ([PSCI]), Trusted
Board Boot Requirements (TBBR) and [Secure Monitor] [TEE-SMC] code. As far as
possible, the code is designed for reuse or porting to other ARMv8 models and
hardware platforms.
This is the first source code release: an initial prototype was previously
made available in binary form in the [Linaro AArch64 OpenEmbedded
Engineering Build] [AArch64 LEB] to support the new FVP Base platform
models from ARM.
ARM will continue development in collaboration with interested parties to
provide a full reference implementation of PSCI, TBBR and Secure Monitor code
to the benefit of all developers working with ARMv8 TrustZone software.
License
-------
The software is provided under a BSD 3-Clause [license]. Certain source files
are derived from FreeBSD code: the original license is included in these
source files.
This Release
------------
This software is an early implementation of the Trusted Firmware. Only
limited functionality is provided at present and it has not been optimized or
subjected to extended robustness or stress testing.
### Functionality
* Initial implementation of a subset of the Trusted Board Boot Requirements
Platform Design Document (PDD).
* Initializes the secure world (for example, exception vectors, control
registers, GIC and interrupts for the platform), before transitioning into
the normal world.
* Supports both GICv2 and GICv3 initialization for use by normal world
software.
* Starts the normal world at the highest available Exception Level: EL2
if available, otherwise EL1.
* Handles SMCs (Secure Monitor Calls) conforming to the [SMC Calling
Convention PDD] [SMCCC].
* Handles SMCs relating to the [Power State Coordination Interface PDD] [PSCI]
for the Secondary CPU Boot and CPU hotplug use-cases.
For a full list of updated functionality and implementation details, please
see the [User Guide]. The [Change Log] provides details of changes made
since the last release.
### Platforms
This release of the Trusted Firmware has been tested on the following ARM
[FVP]s (64-bit versions only):
* `FVP_Base_AEMv8A-AEMv8A` (Version 5.1 build 8).
* `FVP_Base_Cortex-A57x4-A53x4` (Version 5.1 build 8).
* `FVP_Base_Cortex-A57x1-A53x1` (Version 5.1 build 8).
These models can be licensed from ARM: see [www.arm.com/fvp] [FVP].
### Still to Come
* Complete implementation of the [PSCI] specification.
* Secure memory, Secure monitor, Test Secure OS & Secure interrupts.
* Booting the firmware from a block device.
* Completing the currently experimental GICv3 support.
For a full list of detailed issues in the current code, please see the [Change
Log].
Getting Started
---------------
Get the Trusted Firmware source code from
[GitHub](https://www.github.com/ARM-software/arm-trusted-firmware).
See the [User Guide] for instructions on how to install, build and use
the Trusted Firmware with the ARM [FVP]s.
See also the [Porting Guide] for information about how to port this
software to another ARMv8 platform.
### Feedback and support
ARM welcomes any feedback on the Trusted Firmware. Please send feedback using
the [GitHub issue tracker](
https://github.com/ARM-software/arm-trusted-firmware/issues).
ARM licensees may contact ARM directly via their partner managers.
- - - - - - - - - - - - - - - - - - - - - - - - - -
_Copyright (c) 2013 ARM Ltd. All rights reserved._
[License]: license.md "BSD license for ARM Trusted Firmware"
[Change Log]: ./docs/change-log.md
[User Guide]: ./docs/user-guide.md
[Porting Guide]: ./docs/porting-guide.md
[ARMv8]: http://www.arm.com/products/processors/armv8-architecture.php "ARMv8 Architecture"
[FVP]: http://www.arm.com/fvp "ARM's Fixed Virtual Platforms"
[PSCI]: http://infocenter.arm.com/help/topic/com.arm.doc.den0022b/index.html "Power State Coordination Interface PDD (ARM DEN 0022B.b)"
[SMCCC]: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
[TEE-SMC]: http://www.arm.com/products/processors/technologies/trustzone/tee-smc.php "Secure Monitor and TEEs"
[AArch64 LEB]: http://releases.linaro.org/13.09/openembedded/aarch64 "Linaro AArch64 OpenEmbedded ARM Fast Model 13.09 Release"