Arm Trusted Firmware, commit d0759c2c (unverified)
Authored 6 years ago by Soby Mathew, committed 6 years ago by GitHub

Merge pull request #1843 from DavidPu/xlat_tables_v2_non_recursion

Remove recursion from xlat_tables_v2 library

Parents: 77913d44, c54c7fc3

Showing 2 changed files with 341 additions and 153 deletions:

  lib/xlat_tables_v2/xlat_tables_core.c    +230  -98
  lib/xlat_tables_v2/xlat_tables_utils.c   +111  -55
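This merge replaces the recursive table walks in xlat_tables_v2 (map, unmap and print) with iterative walks that keep an explicit per-level save area, the desc_tables[] arrays in the diffs below: before descending into a subtable, the current table pointer, VA and entry index are stored at the current level, and when a subtable is exhausted they are restored so iteration continues in the parent. The sketch below only illustrates that general pattern on a toy fixed-depth tree; it is not TF-A code, and the node_t/walk_count names are invented for the example.

/*
 * Illustrative sketch only: a fixed-depth tree walk that would normally
 * recurse into each subtable instead keeps an explicit per-level save area,
 * analogous to the desc_tables[] arrays introduced by this change.
 */
#include <stddef.h>
#include <stdio.h>

#define MAX_LEVEL	3
#define ENTRIES		4

typedef struct node {
	struct node *child[ENTRIES];	/* NULL for leaf slots */
	int value[ENTRIES];		/* used when child[i] == NULL */
} node_t;

static int walk_count(node_t *root)
{
	/* Saved position per level: which node and which index to resume at. */
	struct { node_t *node; unsigned int idx; } saved[MAX_LEVEL + 1];
	unsigned int level = 0U;
	unsigned int idx = 0U;
	node_t *cur = root;
	int count = 0;

	while (cur != NULL) {
		if (idx >= ENTRIES) {
			/* Finished this table: pop back to the parent, or stop at the top. */
			if (level == 0U) {
				cur = NULL;
			} else {
				level--;
				cur = saved[level].node;
				/* Resume after the entry we descended from. */
				idx = saved[level].idx + 1U;
			}
			continue;
		}

		if ((cur->child[idx] != NULL) && (level < MAX_LEVEL)) {
			/* "Recurse": save the current position, then descend. */
			saved[level].node = cur;
			saved[level].idx = idx;
			cur = cur->child[idx];
			level++;
			idx = 0U;
		} else {
			if (cur->value[idx] != 0)
				count++;
			idx++;
		}
	}

	return count;
}

int main(void)
{
	node_t leaf = { .value = { 1, 0, 2, 0 } };
	node_t root = { .child = { &leaf, NULL, NULL, NULL },
			.value = { 0, 3, 0, 0 } };

	printf("non-zero entries: %d\n", walk_count(&root)); /* prints 3 */
	return 0;
}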
lib/xlat_tables_v2/xlat_tables_core.c
@@ -325,8 +325,9 @@ static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
 	return action;
 }
 
 /*
- * Recursive function that writes to the translation tables and unmaps the
+ * Function that writes to the translation tables and unmaps the
  * specified region.
  */
 static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
@@ -337,70 +338,137 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 {
 	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
 
-	uint64_t *subtable;
-	uint64_t desc;
+	/*
+	 * data structure to track DESC_TABLE entry before iterate into subtable
+	 * of next translation level. it will be used to restore previous level
+	 * after finish subtable iteration.
+	 */
+	struct desc_table_unmap {
+		uint64_t *table_base;
+		uintptr_t table_idx_va;
+		unsigned int idx;
+	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
+		{NULL, 0U, XLAT_TABLE_ENTRIES},
+	};
 
+	unsigned int this_level = level;
+	uint64_t *this_base = table_base;
+	unsigned int max_entries = table_entries;
+	size_t level_size = XLAT_BLOCK_SIZE(this_level);
+	unsigned int table_idx;
 	uintptr_t table_idx_va;
-	uintptr_t table_idx_end_va; /* End VA of this entry */
-
 	uintptr_t region_end_va = mm->base_va + mm->size - 1U;
 
-	unsigned int table_idx;
-
 	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
 	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
 
-	while (table_idx < table_entries) {
+	while (this_base != NULL) {
+		uint64_t desc;
+		uint64_t desc_type;
+		uintptr_t table_idx_end_va; /* End VA of this entry */
+		action_t action;
 
-		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
+		/* finish current xlat level iteration. */
+		if (table_idx >= max_entries) {
+			if (this_level > ctx->base_level) {
+				xlat_table_dec_regions_count(ctx, this_base);
+			}
 
-		desc = table_base[table_idx];
-		uint64_t desc_type = desc & DESC_MASK;
+			if (this_level > level) {
+				uint64_t *subtable;
 
-		action_t action = xlat_tables_unmap_region_action(mm,
-				table_idx_va, table_idx_end_va, level,
-				desc_type);
+				/* back from subtable iteration, restore
+				 * previous DESC_TABLE entry.
+				 */
+				this_level--;
+				this_base = desc_tables[this_level].table_base;
+				table_idx = desc_tables[this_level].idx;
+				table_idx_va =
+					desc_tables[this_level].table_idx_va;
+				level_size = XLAT_BLOCK_SIZE(this_level);
+
+				if (this_level == level) {
+					max_entries = table_entries;
+				} else {
+					max_entries = XLAT_TABLE_ENTRIES;
+				}
+
+				desc = this_base[table_idx];
+				subtable = (uint64_t *)(uintptr_t)
+						(desc & TABLE_ADDR_MASK);
+
+				/*
+				 * If the subtable is now empty, remove its reference.
+				 */
+				if (xlat_table_is_empty(ctx, subtable)) {
+					this_base[table_idx] = INVALID_DESC;
+					xlat_arch_tlbi_va(table_idx_va,
+							  ctx->xlat_regime);
+				}
+
+				table_idx++;
+				table_idx_va += level_size;
+			} else {
+				/* reached end of top level, exit.*/
+				this_base = NULL;
+				break;
+			}
+		}
 
-		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+		/* If reached the end of the region, stop iterating entries in
+		 * current xlat level.
+		 */
+		if (region_end_va <= table_idx_va) {
+			table_idx = max_entries;
+			continue;
+		}
 
-			table_base[table_idx] = INVALID_DESC;
-			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
+		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(this_level) - 1U;
 
+		desc = this_base[table_idx];
+		desc_type = desc & DESC_MASK;
+
+		action = xlat_tables_unmap_region_action(mm, table_idx_va,
+							 table_idx_end_va,
+							 this_level, desc_type);
+
+		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+			this_base[table_idx] = INVALID_DESC;
+			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
+
+			table_idx++;
+			table_idx_va += level_size;
 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
+			uint64_t *subtable;
+			uintptr_t base_va;
 
 			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
-
-			/* Recurse to write into subtable */
-			xlat_tables_unmap_region(ctx, mm, table_idx_va,
-						 subtable, XLAT_TABLE_ENTRIES,
-						 level + 1U);
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
-			xlat_clean_dcache_range((uintptr_t)subtable,
-				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
-#endif
-			/*
-			 * If the subtable is now empty, remove its reference.
-			 */
-			if (xlat_table_is_empty(ctx, subtable)) {
-				table_base[table_idx] = INVALID_DESC;
-				xlat_arch_tlbi_va(table_idx_va,
-						ctx->xlat_regime);
-			}
 
+			desc_tables[this_level].table_base = this_base;
+			desc_tables[this_level].table_idx_va = table_idx_va;
+			desc_tables[this_level].idx = table_idx;
+			base_va = table_idx_va;
+
+			this_base = subtable;
+			this_level++;
+
+			max_entries = XLAT_TABLE_ENTRIES;
+			level_size = XLAT_BLOCK_SIZE(this_level);
+
+			table_idx_va = xlat_tables_find_start_va(mm, base_va,
+								 this_level);
+			table_idx = xlat_tables_va_to_index(base_va,
+							    table_idx_va,
+							    this_level);
 		} else {
 			assert(action == ACTION_NONE);
-		}
 
-		table_idx++;
-		table_idx_va += XLAT_BLOCK_SIZE(level);
-
-		/* If reached the end of the region, exit */
-		if (region_end_va <= table_idx_va)
-			break;
+			table_idx++;
+			table_idx_va += level_size;
+		}
 	}
-
-	if (level > ctx->base_level)
-		xlat_table_dec_regions_count(ctx, table_base);
 }
 
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
@@ -537,105 +605,169 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
 }
 
 /*
- * Recursive function that writes to the translation tables and maps the
+ * Function that writes to the translation tables and maps the
  * specified region. On success, it returns the VA of the last byte that was
  * successfully mapped. On error, it returns the VA of the next entry that
  * should have been mapped.
  */
 static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
-				   uintptr_t table_base_va,
+				   const uintptr_t table_base_va,
 				   uint64_t *const table_base,
 				   unsigned int table_entries,
 				   unsigned int level)
 {
 	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
 
+	/*
+	 * data structure to track DESC_TABLE entry before iterate into subtable
+	 * of next translation level. it will be used to restore previous level
+	 * after finish subtable iteration.
+	 */
+	struct desc_table_map {
+		uint64_t *table_base;
+		uintptr_t table_idx_va;
+		unsigned int idx;
+	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
+		{NULL, 0U, XLAT_TABLE_ENTRIES},
+	};
+
+	unsigned int this_level = level;
+	uint64_t *this_base = table_base;
+	unsigned int max_entries = table_entries;
+	size_t level_size = XLAT_BLOCK_SIZE(this_level);
+
 	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
 
 	uintptr_t table_idx_va;
-	unsigned long long table_idx_pa;
-
-	uint64_t *subtable;
-	uint64_t desc;
-
 	unsigned int table_idx;
 
 	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
 	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
 
 #if PLAT_XLAT_TABLES_DYNAMIC
 	if (level > ctx->base_level)
 		xlat_table_inc_regions_count(ctx, table_base);
 #endif
 
-	while (table_idx < table_entries) {
+	while (this_base != NULL) {
+		uint64_t desc;
+		uint64_t desc_type;
+		unsigned long long table_idx_pa;
+		action_t action;
 
-		desc = table_base[table_idx];
+		/* finish current xlat level iteration. */
+		if (table_idx >= max_entries) {
+			if (this_level <= level) {
+				this_base = NULL;
+				break;
+			} else {
+				/* back from subtable iteration, restore
+				 * previous DESC_TABLE entry.
+				 */
+				this_level--;
+				level_size = XLAT_BLOCK_SIZE(this_level);
+				this_base = desc_tables[this_level].table_base;
+				table_idx = desc_tables[this_level].idx;
+				if (this_level == level) {
+					max_entries = table_entries;
+				} else {
+					max_entries = XLAT_TABLE_ENTRIES;
+				}
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+				uintptr_t subtable;
+
+				desc = this_base[table_idx];
+				subtable = (uintptr_t)(desc & TABLE_ADDR_MASK);
+				xlat_clean_dcache_range(subtable,
+					XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+				table_idx++;
+				table_idx_va =
+					desc_tables[this_level].table_idx_va +
+					level_size;
+			}
+		}
 
+		desc = this_base[table_idx];
+		desc_type = desc & DESC_MASK;
 		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
 
-		action_t action = xlat_tables_map_region_action(mm,
-			(uint32_t)(desc & DESC_MASK), table_idx_pa,
-			table_idx_va, level);
-
-		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+		/* If reached the end of the region, simply exit since we
+		 * already write all BLOCK entries and create all required
+		 * subtables.
+		 */
+		if (mm_end_va <= table_idx_va) {
+			this_base = NULL;
+			break;
+		}
 
-			table_base[table_idx] =
-				xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
-					  level);
+		action = xlat_tables_map_region_action(mm, desc_type,
+				table_idx_pa, table_idx_va, this_level);
 
+		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+			this_base[table_idx] =
+				xlat_desc(ctx, mm->attr, table_idx_pa,
+					  this_level);
+			table_idx++;
+			table_idx_va += level_size;
 		} else if (action == ACTION_CREATE_NEW_TABLE) {
-			uintptr_t end_va;
+			uintptr_t base_va;
 
-			subtable = xlat_table_get_empty(ctx);
+			uint64_t *subtable = xlat_table_get_empty(ctx);
 			if (subtable == NULL) {
-				/* Not enough free tables to map this region */
+				/* Not enough free tables to map this region. */
 				return table_idx_va;
 			}
 
 			/* Point to new subtable from this one. */
-			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
-
-			/* Recurse to write into subtable */
-			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
-					       subtable, XLAT_TABLE_ENTRIES,
-					       level + 1U);
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
-			xlat_clean_dcache_range((uintptr_t)subtable,
-				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
-#endif
-			if (end_va !=
-				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
-				return end_va;
+			this_base[table_idx] =
+				TABLE_DESC | (unsigned long)subtable;
 
+			desc_tables[this_level].table_base = this_base;
+			desc_tables[this_level].table_idx_va = table_idx_va;
+			desc_tables[this_level].idx = table_idx;
+			base_va = table_idx_va;
+
+			this_level++;
+			this_base = subtable;
+			level_size = XLAT_BLOCK_SIZE(this_level);
+
+			table_idx_va =
+				xlat_tables_find_start_va(mm, base_va,
+						this_level);
+			table_idx =
+				xlat_tables_va_to_index(base_va,
+						table_idx_va, this_level);
+			max_entries = XLAT_TABLE_ENTRIES;
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+			if (this_level > ctx->base_level) {
+				xlat_table_inc_regions_count(ctx, subtable);
+			}
+#endif
 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
-			uintptr_t end_va;
+			uintptr_t base_va;
 
-			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
-			/* Recurse to write into subtable */
-			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
-					       subtable, XLAT_TABLE_ENTRIES,
-					       level + 1U);
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
-			xlat_clean_dcache_range((uintptr_t)subtable,
-				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
-#endif
-			if (end_va !=
-				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
-				return end_va;
+			uint64_t *subtable = (uint64_t *)(uintptr_t)
+						(desc & TABLE_ADDR_MASK);
 
+			desc_tables[this_level].table_base = this_base;
+			desc_tables[this_level].table_idx_va = table_idx_va;
+			desc_tables[this_level].idx = table_idx;
+			base_va = table_idx_va;
+
+			this_level++;
+			level_size = XLAT_BLOCK_SIZE(this_level);
+
+			table_idx_va =
+				xlat_tables_find_start_va(mm, base_va,
						this_level);
+			table_idx =
+				xlat_tables_va_to_index(base_va,
						table_idx_va, this_level);
+			this_base = subtable;
+			max_entries = XLAT_TABLE_ENTRIES;
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+			if (this_level > ctx->base_level) {
+				xlat_table_inc_regions_count(ctx, subtable);
+			}
+#endif
 		} else {
-
 			assert(action == ACTION_NONE);
 
-		}
-
-		table_idx++;
-		table_idx_va += XLAT_BLOCK_SIZE(level);
-
-		/* If reached the end of the region, exit */
-		if (mm_end_va <= table_idx_va)
-			break;
+			table_idx++;
+			table_idx_va += level_size;
+		}
 	}
 
 	return table_idx_va - 1U;
...
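Both walks above recompute the entry index each time they descend a level, via xlat_tables_find_start_va() and xlat_tables_va_to_index(). The snippet below is only a rough sketch of the underlying arithmetic for the common AArch64 4KB-granule layout (9 VA bits per table level above a 12-bit page offset); the constants and helper names are illustrative, not the library's implementation.

/* Sketch only: deriving a per-level table index from a VA (4KB granule). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GRANULE_SHIFT	12U	/* 4KB pages */
#define BITS_PER_LEVEL	9U	/* 512 entries per table */
#define LEVEL_MAX	3U

/* Number of VA bits resolved below the given level. */
static unsigned int level_shift(unsigned int level)
{
	assert(level <= LEVEL_MAX);
	return GRANULE_SHIFT + (LEVEL_MAX - level) * BITS_PER_LEVEL;
}

/* Index of the entry that translates 'va' within a table at 'level'. */
static unsigned int va_to_index(uintptr_t va, unsigned int level)
{
	return (unsigned int)((va >> level_shift(level)) &
			      ((1U << BITS_PER_LEVEL) - 1U));
}

int main(void)
{
	uintptr_t va = 0x40201000U;

	/* At level 3 each entry covers 4KB, at level 2 it covers 2MB, etc. */
	for (unsigned int level = 1U; level <= LEVEL_MAX; level++)
		printf("level %u index: %u\n", level, va_to_index(va, level));

	return 0;
}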
lib/xlat_tables_v2/xlat_tables_utils.c
@@ -109,7 +109,7 @@ static const char *invalid_descriptors_ommited =
 	"%s(%d invalid descriptors omitted)\n";
 
 /*
- * Recursive function that reads the translation tables passed as an argument
+ * Function that reads the translation tables passed as an argument
  * and prints their status.
  */
 static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
@@ -118,10 +118,23 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
 {
 	assert(level <= XLAT_TABLE_LEVEL_MAX);
 
-	uint64_t desc;
-	uintptr_t table_idx_va = table_base_va;
+	/*
+	 * data structure to track DESC_TABLE entry before iterate into subtable
+	 * of next translation level. it will be restored after return from
+	 * subtable iteration.
+	 */
+	struct desc_table {
+		const uint64_t *table_base;
+		uintptr_t table_idx_va;
+		unsigned int idx;
+	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
+		{NULL, 0U, XLAT_TABLE_ENTRIES},
+	};
+
+	unsigned int this_level = level;
+	const uint64_t *this_base = table_base;
+	unsigned int max_entries = table_entries;
+	size_t level_size = XLAT_BLOCK_SIZE(this_level);
 	unsigned int table_idx = 0U;
-	size_t level_size = XLAT_BLOCK_SIZE(level);
+	uintptr_t table_idx_va = table_base_va;
 
 	/*
 	 * Keep track of how many invalid descriptors are counted in a row.
@@ -131,67 +144,110 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
 	 */
 	int invalid_row_count = 0;
 
-	while (table_idx < table_entries) {
-
-		desc = table_base[table_idx];
-
-		if ((desc & DESC_MASK) == INVALID_DESC) {
-
-			if (invalid_row_count == 0) {
-				printf("%sVA:0x%lx size:0x%zx\n",
-					  level_spacers[level],
-					  table_idx_va, level_size);
-			}
-			invalid_row_count++;
-
-		} else {
-
-			if (invalid_row_count > 1) {
-				printf(invalid_descriptors_ommited,
-					  level_spacers[level],
-					  invalid_row_count - 1);
-			}
-			invalid_row_count = 0;
-
-			/*
-			 * Check if this is a table or a block. Tables are only
-			 * allowed in levels other than 3, but DESC_PAGE has the
-			 * same value as DESC_TABLE, so we need to check.
-			 */
-			if (((desc & DESC_MASK) == TABLE_DESC) &&
-					(level < XLAT_TABLE_LEVEL_MAX)) {
-				/*
-				 * Do not print any PA for a table descriptor,
-				 * as it doesn't directly map physical memory
-				 * but instead points to the next translation
-				 * table in the translation table walk.
-				 */
-				printf("%sVA:0x%lx size:0x%zx\n",
-					  level_spacers[level],
-					  table_idx_va, level_size);
-
-				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
-
-				xlat_tables_print_internal(ctx, table_idx_va,
-					(uint64_t *)addr_inner,
-					XLAT_TABLE_ENTRIES, level + 1U);
-			} else {
-				printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
-					  level_spacers[level], table_idx_va,
-					  (uint64_t)(desc & TABLE_ADDR_MASK),
-					  level_size);
-				xlat_desc_print(ctx, desc);
-				printf("\n");
-			}
-		}
-
-		table_idx++;
-		table_idx_va += level_size;
-	}
+	while (this_base != NULL) {
+		/* finish current xlat level */
+		if (table_idx >= max_entries) {
+			if (invalid_row_count > 1) {
+				printf(invalid_descriptors_ommited,
+				       level_spacers[this_level],
+				       invalid_row_count - 1);
+			}
+			invalid_row_count = 0;
+
+			/* no parent level to iterate. */
+			if (this_level <= level) {
+				this_base = NULL;
+				table_idx = max_entries + 1;
+			} else {
+				/* retore previous DESC_TABLE entry and start
+				 * to iterate.
+				 */
+				this_level--;
+				level_size = XLAT_BLOCK_SIZE(this_level);
+				this_base = desc_tables[this_level].table_base;
+				table_idx = desc_tables[this_level].idx;
+				table_idx_va =
+					desc_tables[this_level].table_idx_va;
+				if (this_level == level) {
+					max_entries = table_entries;
+				} else {
+					max_entries = XLAT_TABLE_ENTRIES;
+				}
+
+				assert(this_base != NULL);
+			}
+		} else {
+			uint64_t desc = this_base[table_idx];
+
+			if ((desc & DESC_MASK) == INVALID_DESC) {
+				if (invalid_row_count == 0) {
+					printf("%sVA:0x%lx size:0x%zx\n",
+					       level_spacers[this_level],
+					       table_idx_va, level_size);
+				}
+				invalid_row_count++;
+				table_idx++;
+				table_idx_va += level_size;
+			} else {
+				if (invalid_row_count > 1) {
+					printf(invalid_descriptors_ommited,
+					       level_spacers[this_level],
+					       invalid_row_count - 1);
+				}
+				invalid_row_count = 0;
+
+				/*
+				 * Check if this is a table or a block. Tables
+				 * are only allowed in levels other than 3, but
+				 * DESC_PAGE has the same value as DESC_TABLE,
+				 * so we need to check.
+				 */
+				if (((desc & DESC_MASK) == TABLE_DESC) &&
+				    (this_level < XLAT_TABLE_LEVEL_MAX)) {
+					uintptr_t addr_inner;
+
+					/*
+					 * Do not print any PA for a table
+					 * descriptor, as it doesn't directly
+					 * map physical memory but instead
+					 * points to the next translation
+					 * table in the translation table walk.
+					 */
+					printf("%sVA:0x%lx size:0x%zx\n",
+					       level_spacers[this_level],
+					       table_idx_va, level_size);
+
+					addr_inner = desc & TABLE_ADDR_MASK;
+
+					/* save current xlat level */
+					desc_tables[this_level].table_base =
+						this_base;
+					desc_tables[this_level].idx =
+						table_idx + 1;
+					desc_tables[this_level].table_idx_va =
+						table_idx_va + level_size;
+
+					/* start iterating next level entries */
+					this_base = (uint64_t *)addr_inner;
+					max_entries = XLAT_TABLE_ENTRIES;
+					this_level++;
+					level_size =
+						XLAT_BLOCK_SIZE(this_level);
+					table_idx = 0U;
+				} else {
+					printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+					       level_spacers[this_level],
+					       table_idx_va,
+					       (uint64_t)(desc & TABLE_ADDR_MASK),
+					       level_size);
+					xlat_desc_print(ctx, desc);
+					printf("\n");
+					table_idx++;
+					table_idx_va += level_size;
+				}
+			}
+		}
+	}
 
 	if (invalid_row_count > 1) {
 		printf(invalid_descriptors_ommited,
 			  level_spacers[level], invalid_row_count - 1);
 	}
 }