Preliminary Rust support (#1781)

* Add support for R_ARM_THM_MOVW_ABS_NC/THM_MOVT_ABS

These relocations are sometimes emitted by the Rust compiler's LLVM backend.

Ref: https://github.com/ARM-software/abi-aa/blob/main/aaelf32/aaelf32.rst#56relocation
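
For orientation, a minimal C sketch (not part of this change) of what the two relocations encode: a Thumb-2 MOVW/MOVT pair materializes a 32-bit address, with R_ARM_THM_MOVW_ABS_NC supplying the low half-word and R_ARM_THM_MOVT_ABS the high half-word. The helper names below are invented for illustration; the actual patching is done in elf_relocate_mov() further down.

#include <stdint.h>

/* Illustration only: how a symbol address is split across the MOVW/MOVT pair. */
static inline uint16_t movw_imm16(uint32_t sym_addr) {
    return (uint16_t)(sym_addr & 0xFFFFu); /* R_ARM_THM_MOVW_ABS_NC: low 16 bits */
}

static inline uint16_t movt_imm16(uint32_t sym_addr) {
    return (uint16_t)(sym_addr >> 16); /* R_ARM_THM_MOVT_ABS: high 16 bits */
}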

* Discard LLVM bitcode from extension applications

LLVM-based compilers may embed uncompressed bitcode (the .llvmbc and .llvmcmd
sections) in object files to support link-time optimization. However, this can
bloat binary sizes from kilobytes to megabytes, so the extension linker script
now discards those sections.

* Expose aligned_malloc/free functions to applications

This is required to implement a global allocator in Rust.
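
For context, a minimal sketch of the usual aligned_malloc/aligned_free contract behind these symbols, assuming the (size, alignment) argument order implied by the API table below and a power-of-two alignment; the firmware's actual implementation may differ.

#include <stdlib.h>
#include <stdint.h>

/* Sketch only: over-allocate, align the returned pointer, and stash the
 * original block pointer just in front of it so the free side can recover it. */
static void* aligned_malloc_sketch(size_t size, size_t alignment) {
    void* raw = malloc(size + alignment + sizeof(void*));
    if(!raw) return NULL;
    uintptr_t aligned =
        ((uintptr_t)raw + sizeof(void*) + alignment - 1) & ~(uintptr_t)(alignment - 1);
    ((void**)aligned)[-1] = raw; /* remember the original allocation */
    return (void*)aligned;
}

static void aligned_free_sketch(void* p) {
    if(p) free(((void**)p)[-1]); /* release the original allocation */
}

A Rust global allocator can then forward GlobalAlloc::alloc/dealloc calls to the exported aligned_malloc/aligned_free.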
David Coles 2022-09-25 14:06:46 -07:00 committed by GitHub
parent 2a2078d9b5
commit a6b98ccbbe
4 changed files with 47 additions and 2 deletions


@@ -433,8 +433,8 @@ Function,-,acoshl,long double,long double
Function,-,acosl,long double,long double
Function,+,acquire_mutex,void*,"ValueMutex*, uint32_t"
Function,-,aligned_alloc,void*,"size_t, size_t"
Function,-,aligned_free,void,void*
Function,-,aligned_malloc,void*,"size_t, size_t"
Function,+,aligned_free,void,void*
Function,+,aligned_malloc,void*,"size_t, size_t"
Function,-,arc4random,__uint32_t,
Function,-,arc4random_buf,void,"void*, size_t"
Function,-,arc4random_uniform,__uint32_t,__uint32_t

@@ -48,5 +48,7 @@ SECTIONS
    {
        *(.comment)
        *(.comment.*)
        *(.llvmbc)
        *(.llvmcmd)
    }
}


@@ -1116,6 +1116,8 @@ typedef struct {
#define R_ARM_LDR_SBREL_11_0 35
#define R_ARM_ALU_SBREL_19_12 36
#define R_ARM_ALU_SBREL_27_20 37
#define R_ARM_THM_MOVW_ABS_NC 47 /* Direct 16 bit (Thumb32 MOVW) */
#define R_ARM_THM_MOVT_ABS 48 /* Direct high 16 bit */
#define R_ARM_GNU_VTENTRY 100
#define R_ARM_GNU_VTINHERIT 101
#define R_ARM_THM_PC11 102 /* thumb unconditional branch */


@@ -255,6 +255,42 @@ static void elf_relocate_jmp_call(ELFFile* elf, Elf32_Addr relAddr, int type, El
        (uint16_t)((lo & 0xc000) | (j1 << 13) | blx_bit | (j2 << 11) | imm11);
}
static void elf_relocate_mov(Elf32_Addr relAddr, int type, Elf32_Addr symAddr) {
    uint16_t upper_insn = ((uint16_t*)relAddr)[0];
    uint16_t lower_insn = ((uint16_t*)relAddr)[1];

    /* MOV*<C> <Rd>,#<imm16>
     *
     * i    = upper[10]
     * imm4 = upper[3:0]
     * imm3 = lower[14:12]
     * imm8 = lower[7:0]
     *
     * imm16 = imm4:i:imm3:imm8
     */
    uint32_t i = (upper_insn >> 10) & 1; /* upper[10] */
    uint32_t imm4 = upper_insn & 0x000F; /* upper[3:0] */
    uint32_t imm3 = (lower_insn >> 12) & 0x7; /* lower[14:12] */
    uint32_t imm8 = lower_insn & 0x00FF; /* lower[7:0] */

    int32_t addend = (imm4 << 12) | (i << 11) | (imm3 << 8) | imm8; /* imm16 */

    uint32_t addr = (symAddr + addend);
    if(type == R_ARM_THM_MOVT_ABS) {
        addr >>= 16; /* upper 16 bits */
    } else {
        addr &= 0x0000FFFF; /* lower 16 bits */
    }

    /* Re-encode */
    ((uint16_t*)relAddr)[0] = (upper_insn & 0xFBF0)
        | (((addr >> 11) & 1) << 10) /* i */
        | ((addr >> 12) & 0x000F); /* imm4 */
    ((uint16_t*)relAddr)[1] = (lower_insn & 0x8F00)
        | (((addr >> 8) & 0x7) << 12) /* imm3 */
        | (addr & 0x00FF); /* imm8 */
}
static bool elf_relocate_symbol(ELFFile* elf, Elf32_Addr relAddr, int type, Elf32_Addr symAddr) {
    switch(type) {
    case R_ARM_TARGET1:
@@ -268,6 +304,11 @@ static bool elf_relocate_symbol(ELFFile* elf, Elf32_Addr relAddr, int type, Elf3
        FURI_LOG_D(
            TAG, " R_ARM_THM_CALL/JMP relocated is 0x%08X", (unsigned int)*((uint32_t*)relAddr));
        break;
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
        elf_relocate_mov(relAddr, type, symAddr);
        FURI_LOG_D(
            TAG,
            " R_ARM_THM_MOVW_ABS_NC/MOVT_ABS relocated is 0x%08X",
            (unsigned int)*((uint32_t*)relAddr));
        break;
    default:
        FURI_LOG_E(TAG, " Undefined relocation %d", type);
        return false;