
sparc64: Add eBPF JIT.

This is an eBPF JIT for sparc64.  All major features are supported.

All tests under tools/testing/selftests/bpf/ pass.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-04-17 18:44:36 -07:00
parent 6b3d4eec7f
commit 7a12b5031c
7 changed files with 1424 additions and 60 deletions

arch/sparc/Kconfig

@@ -31,7 +31,8 @@ config SPARC
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
select HAVE_NMI_WATCHDOG if SPARC64
select HAVE_CBPF_JIT
select HAVE_CBPF_JIT if SPARC32
select HAVE_EBPF_JIT if SPARC64
select HAVE_DEBUG_BUGVERBOSE
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CLOCKEVENTS
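
Selecting HAVE_EBPF_JIT tells the generic BPF core that this architecture JITs eBPF programs, while HAVE_CBPF_JIT keeps the classic-BPF bpf_jit_compile() path for 32-bit sparc. As a rough, illustrative sketch only (this is not code from the commit; the sparc64 implementation itself is in the new compiler file whose diff is suppressed at the end of this page), an eBPF JIT plugs in by overriding the weak bpf_int_jit_compile() hook:

/*
 * Illustrative sketch, not taken from this commit: the hook an
 * architecture provides once it selects HAVE_EBPF_JIT.
 * bpf_int_jit_compile() is declared in include/linux/filter.h with a
 * weak fallback in kernel/bpf/core.c; the arch version translates
 * prog->insnsi[] into native code and publishes it through
 * prog->bpf_func and prog->jited.
 */
#include <linux/filter.h>

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	if (!bpf_jit_enable)	/* net.core.bpf_jit_enable sysctl */
		return prog;	/* fall back to the interpreter */

	/*
	 * ... allocate executable memory, emit sparc64 instructions
	 * for each eBPF instruction in prog->insnsi[], flush the
	 * I-cache, then set:
	 *
	 *	prog->bpf_func = (void *)image;
	 *	prog->jited = 1;
	 */
	return prog;
}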

arch/sparc/net/bpf_jit_32.h

@@ -39,7 +39,7 @@
#define r_TMP2 G2
#define r_OFF G3
/* assembly code in arch/sparc/net/bpf_jit_asm.S */
/* assembly code in arch/sparc/net/bpf_jit_asm_32.S */
extern u32 bpf_jit_load_word[];
extern u32 bpf_jit_load_half[];
extern u32 bpf_jit_load_byte[];

arch/sparc/net/bpf_jit_64.h

@@ -0,0 +1,66 @@
#ifndef _BPF_JIT_H
#define _BPF_JIT_H
#ifndef __ASSEMBLER__
#define G0 0x00
#define G1 0x01
#define G2 0x02
#define G3 0x03
#define G6 0x06
#define G7 0x07
#define O0 0x08
#define O1 0x09
#define O2 0x0a
#define O3 0x0b
#define O4 0x0c
#define O5 0x0d
#define SP 0x0e
#define O7 0x0f
#define L0 0x10
#define L1 0x11
#define L2 0x12
#define L3 0x13
#define L4 0x14
#define L5 0x15
#define L6 0x16
#define L7 0x17
#define I0 0x18
#define I1 0x19
#define I2 0x1a
#define I3 0x1b
#define I4 0x1c
#define I5 0x1d
#define FP 0x1e
#define I7 0x1f
#define r_SKB L0
#define r_HEADLEN L4
#define r_SKB_DATA L5
#define r_TMP G1
#define r_TMP2 G3
/* assembly code in arch/sparc/net/bpf_jit_asm_64.S */
extern u32 bpf_jit_load_word[];
extern u32 bpf_jit_load_half[];
extern u32 bpf_jit_load_byte[];
extern u32 bpf_jit_load_byte_msh[];
extern u32 bpf_jit_load_word_positive_offset[];
extern u32 bpf_jit_load_half_positive_offset[];
extern u32 bpf_jit_load_byte_positive_offset[];
extern u32 bpf_jit_load_byte_msh_positive_offset[];
extern u32 bpf_jit_load_word_negative_offset[];
extern u32 bpf_jit_load_half_negative_offset[];
extern u32 bpf_jit_load_byte_negative_offset[];
extern u32 bpf_jit_load_byte_msh_negative_offset[];
#else
#define r_RESULT %o0
#define r_SKB %o0
#define r_OFF %o1
#define r_HEADLEN %l4
#define r_SKB_DATA %l5
#define r_TMP %g1
#define r_TMP2 %g3
#endif
#endif /* _BPF_JIT_H */
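
The values above are the raw 5-bit SPARC register numbers (globals 0x00-0x07, outs 0x08-0x0f, locals 0x10-0x17, ins 0x18-0x1f) that get shifted into instruction fields. As a stand-alone illustration (not part of the commit; the macro names simply mirror the RD()/RS1()/S13()/F3() helpers visible in the bpf_jit_comp diff further down, and the bit positions come from the SPARC instruction formats), this is how one load instruction is assembled from them:

#include <stdint.h>
#include <stdio.h>

#define OP(x)	((uint32_t)(x) << 30)	/* bits 31:30, primary opcode   */
#define RD(x)	((uint32_t)(x) << 25)	/* bits 29:25, destination reg  */
#define OP3(x)	((uint32_t)(x) << 19)	/* bits 24:19, secondary opcode */
#define RS1(x)	((uint32_t)(x) << 14)	/* bits 18:14, source reg 1     */
#define IMMED	((uint32_t)1 << 13)	/* i bit: use simm13, not rs2   */
#define S13(x)	((uint32_t)(x) & 0x1fff)	/* 13-bit signed immediate */

#define F3(a, b) (OP(a) | OP3(b))
#define LD64	F3(3, 0x0b)		/* ldx, as in the JIT's opcode table */

#define O0	0x08
#define O1	0x09

int main(void)
{
	/* ldx [%o0 + 16], %o1 */
	uint32_t insn = LD64 | IMMED | RS1(O0) | S13(16) | RD(O1);

	printf("0x%08x\n", insn);	/* 0xd25a2010 */
	return 0;
}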

arch/sparc/net/bpf_jit_asm_32.S

@@ -2,17 +2,10 @@
#include "bpf_jit_32.h"
#ifdef CONFIG_SPARC64
#define SAVE_SZ 176
#define SCRATCH_OFF STACK_BIAS + 128
#define BE_PTR(label) be,pn %xcc, label
#define SIGN_EXTEND(reg) sra reg, 0, reg
#else
#define SAVE_SZ 96
#define SCRATCH_OFF 72
#define BE_PTR(label) be label
#define SIGN_EXTEND(reg)
#endif
#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */

arch/sparc/net/bpf_jit_asm_64.S

@@ -1 +1,161 @@
#include "bpf_jit_asm_32.S"
#include <asm/ptrace.h>
#include "bpf_jit_64.h"
#define SAVE_SZ 176
#define SCRATCH_OFF STACK_BIAS + 128
#define BE_PTR(label) be,pn %xcc, label
#define SIGN_EXTEND(reg) sra reg, 0, reg
#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */
.text
.globl bpf_jit_load_word
bpf_jit_load_word:
cmp r_OFF, 0
bl bpf_slow_path_word_neg
nop
.globl bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
sub r_HEADLEN, r_OFF, r_TMP
cmp r_TMP, 3
ble bpf_slow_path_word
add r_SKB_DATA, r_OFF, r_TMP
andcc r_TMP, 3, %g0
bne load_word_unaligned
nop
retl
ld [r_TMP], r_RESULT
load_word_unaligned:
ldub [r_TMP + 0x0], r_OFF
ldub [r_TMP + 0x1], r_TMP2
sll r_OFF, 8, r_OFF
or r_OFF, r_TMP2, r_OFF
ldub [r_TMP + 0x2], r_TMP2
sll r_OFF, 8, r_OFF
or r_OFF, r_TMP2, r_OFF
ldub [r_TMP + 0x3], r_TMP2
sll r_OFF, 8, r_OFF
retl
or r_OFF, r_TMP2, r_RESULT
.globl bpf_jit_load_half
bpf_jit_load_half:
cmp r_OFF, 0
bl bpf_slow_path_half_neg
nop
.globl bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
sub r_HEADLEN, r_OFF, r_TMP
cmp r_TMP, 1
ble bpf_slow_path_half
add r_SKB_DATA, r_OFF, r_TMP
andcc r_TMP, 1, %g0
bne load_half_unaligned
nop
retl
lduh [r_TMP], r_RESULT
load_half_unaligned:
ldub [r_TMP + 0x0], r_OFF
ldub [r_TMP + 0x1], r_TMP2
sll r_OFF, 8, r_OFF
retl
or r_OFF, r_TMP2, r_RESULT
.globl bpf_jit_load_byte
bpf_jit_load_byte:
cmp r_OFF, 0
bl bpf_slow_path_byte_neg
nop
.globl bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
cmp r_OFF, r_HEADLEN
bge bpf_slow_path_byte
nop
retl
ldub [r_SKB_DATA + r_OFF], r_RESULT
#define bpf_slow_path_common(LEN) \
save %sp, -SAVE_SZ, %sp; \
mov %i0, %o0; \
mov %i1, %o1; \
add %fp, SCRATCH_OFF, %o2; \
call skb_copy_bits; \
mov (LEN), %o3; \
cmp %o0, 0; \
restore;
bpf_slow_path_word:
bpf_slow_path_common(4)
bl bpf_error
ld [%sp + SCRATCH_OFF], r_RESULT
retl
nop
bpf_slow_path_half:
bpf_slow_path_common(2)
bl bpf_error
lduh [%sp + SCRATCH_OFF], r_RESULT
retl
nop
bpf_slow_path_byte:
bpf_slow_path_common(1)
bl bpf_error
ldub [%sp + SCRATCH_OFF], r_RESULT
retl
nop
#define bpf_negative_common(LEN) \
save %sp, -SAVE_SZ, %sp; \
mov %i0, %o0; \
mov %i1, %o1; \
SIGN_EXTEND(%o1); \
call bpf_internal_load_pointer_neg_helper; \
mov (LEN), %o2; \
mov %o0, r_TMP; \
cmp %o0, 0; \
BE_PTR(bpf_error); \
restore;
bpf_slow_path_word_neg:
sethi %hi(SKF_MAX_NEG_OFF), r_TMP
cmp r_OFF, r_TMP
bl bpf_error
nop
.globl bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
bpf_negative_common(4)
andcc r_TMP, 3, %g0
bne load_word_unaligned
nop
retl
ld [r_TMP], r_RESULT
bpf_slow_path_half_neg:
sethi %hi(SKF_MAX_NEG_OFF), r_TMP
cmp r_OFF, r_TMP
bl bpf_error
nop
.globl bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
bpf_negative_common(2)
andcc r_TMP, 1, %g0
bne load_half_unaligned
nop
retl
lduh [r_TMP], r_RESULT
bpf_slow_path_byte_neg:
sethi %hi(SKF_MAX_NEG_OFF), r_TMP
cmp r_OFF, r_TMP
bl bpf_error
nop
.globl bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
bpf_negative_common(1)
retl
ldub [r_TMP], r_RESULT
bpf_error:
/* Make the JIT program itself return zero. */
ret
restore %g0, %g0, %o0
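
These helpers implement the classic-BPF packet-load protocol for the JITed code: the fast paths read straight from r_SKB_DATA when the offset fits inside r_HEADLEN, bpf_slow_path_common() falls back to skb_copy_bits() into a scratch slot on the stack, and bpf_negative_common() sends negative (ancillary/link-layer) offsets through bpf_internal_load_pointer_neg_helper(). A hedged C rendering of the word-sized case follows; the function name jit_load_word_slow() is made up for illustration, and where the assembler branches to bpf_error (making the filter return 0), the sketch returns an errno instead:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/filter.h>

/* Provided by the BPF core; the asm above calls it for negative offsets. */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k,
					   unsigned int size);

static int jit_load_word_slow(const struct sk_buff *skb, int off, u32 *res)
{
	u8 b[4];

	if (off < 0) {
		const void *p;

		if (off < SKF_LL_OFF)	/* SKF_MAX_NEG_OFF in the asm */
			return -EFAULT;
		p = bpf_internal_load_pointer_neg_helper(skb, off, 4);
		if (!p)
			return -EFAULT;
		memcpy(b, p, 4);	/* may be unaligned, cf. load_word_unaligned */
	} else {
		/* Outside the linear headlen: let the core copy it for us. */
		if (skb_copy_bits(skb, off, b, 4) < 0)
			return -EFAULT;
	}
	/* Assemble big-endian, matching the big-endian sparc loads above. */
	*res = ((u32)b[0] << 24) | ((u32)b[1] << 16) | ((u32)b[2] << 8) | b[3];
	return 0;
}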

arch/sparc/net/bpf_jit_comp_32.c

@@ -17,24 +17,6 @@ static inline bool is_simm13(unsigned int value)
return value + 0x1000 < 0x2000;
}
static void bpf_flush_icache(void *start_, void *end_)
{
#ifdef CONFIG_SPARC64
/* Cheetah's I-cache is fully coherent. */
if (tlb_type == spitfire) {
unsigned long start = (unsigned long) start_;
unsigned long end = (unsigned long) end_;
start &= ~7UL;
end = (end + 7UL) & ~7UL;
while (start < end) {
flushi(start);
start += 32;
}
}
#endif
}
#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG 2 /* ebx is used */
#define SEEN_MEM 4 /* use mem[] for temporary storage */
@@ -82,11 +64,7 @@ static void bpf_flush_icache(void *start_, void *end_)
#define BE (F2(0, 2) | CONDE)
#define BNE (F2(0, 2) | CONDNE)
#ifdef CONFIG_SPARC64
#define BE_PTR (F2(0, 1) | CONDE | (2 << 20))
#else
#define BE_PTR BE
#endif
#define SETHI(K, REG) \
(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
@@ -116,13 +94,8 @@ static void bpf_flush_icache(void *start_, void *end_)
#define LD64 F3(3, 0x0b)
#define ST32 F3(3, 0x04)
#ifdef CONFIG_SPARC64
#define LDPTR LD64
#define BASE_STACKFRAME 176
#else
#define LDPTR LD32
#define BASE_STACKFRAME 96
#endif
#define LD32I (LD32 | IMMED)
#define LD8I (LD8 | IMMED)
@@ -234,11 +207,7 @@ do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \
__emit_load8(BASE, STRUCT, FIELD, DEST); \
} while (0)
#ifdef CONFIG_SPARC64
#define BIAS (STACK_BIAS - 4)
#else
#define BIAS (-4)
#endif
#define emit_ldmem(OFF, DEST) \
do { *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \
@@ -249,13 +218,8 @@ do { *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \
} while (0)
#ifdef CONFIG_SMP
#ifdef CONFIG_SPARC64
#define emit_load_cpu(REG) \
emit_load16(G6, struct thread_info, cpu, REG)
#else
#define emit_load_cpu(REG) \
emit_load32(G6, struct thread_info, cpu, REG)
#endif
#else
#define emit_load_cpu(REG) emit_clear(REG)
#endif
@@ -486,7 +450,6 @@ void bpf_jit_compile(struct bpf_prog *fp)
if (K == 1)
break;
emit_write_y(G0);
#ifdef CONFIG_SPARC32
/* The Sparc v8 architecture requires
* three instructions between a %y
* register write and the first use.
@@ -494,31 +457,21 @@ void bpf_jit_compile(struct bpf_prog *fp)
emit_nop();
emit_nop();
emit_nop();
#endif
emit_alu_K(DIV, K);
break;
case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
emit_cmpi(r_X, 0);
if (pc_ret0 > 0) {
t_offset = addrs[pc_ret0 - 1];
#ifdef CONFIG_SPARC32
emit_branch(BE, t_offset + 20);
#else
emit_branch(BE, t_offset + 8);
#endif
emit_nop(); /* delay slot */
} else {
emit_branch_off(BNE, 16);
emit_nop();
#ifdef CONFIG_SPARC32
emit_jump(cleanup_addr + 20);
#else
emit_jump(cleanup_addr + 8);
#endif
emit_clear(r_A);
}
emit_write_y(G0);
#ifdef CONFIG_SPARC32
/* The Sparc v8 architecture requires
* three instructions between a %y
* register write and the first use.
@@ -526,7 +479,6 @@ void bpf_jit_compile(struct bpf_prog *fp)
emit_nop();
emit_nop();
emit_nop();
#endif
emit_alu_X(DIV);
break;
case BPF_ALU | BPF_NEG:
@@ -797,7 +749,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
bpf_jit_dump(flen, proglen, pass + 1, image);
if (image) {
bpf_flush_icache(image, image + proglen);
fp->bpf_func = (void *)image;
fp->jited = 1;
}

arch/sparc/net/bpf_jit_comp_64.c (file diff suppressed because it is too large)