py: Implement and, or, xor native ops for viper.

Branch: native-del-fast
Damien George 2014-10-12 14:21:06 +01:00
parent 1606607bd4
commit 1ef2348df0
9 changed files with 110 additions and 0 deletions

py/asmarm.c

@@ -175,6 +175,21 @@ STATIC uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
    return 0x0400000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
    // and rd, rn, rm
    return 0x0000000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
    // eor rd, rn, rm
    return 0x0200000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
    // orr rd, rn, rm
    return 0x1800000 | (rn << 16) | (rd << 12) | rm;
}

void asm_arm_bkpt(asm_arm_t *as) {
    // bkpt #0
    emit_al(as, 0x1200070);
@@ -312,6 +327,21 @@ void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
}

void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // and rd, rn, rm
    emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
}

void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // eor rd, rn, rm
    emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
}

void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // orr rd, rn, rm
    emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
}

void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
    // add rd, sp, #local_num*4
    emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
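
Note: the three constants above are just the ARM data-processing opcode field (AND=0b0000, EOR=0b0001, ORR=0b1100) placed at bits [24:21], with Rn, Rd, Rm at bits [19:16], [15:12], [3:0]; emit_al() is assumed to OR in the always-execute condition 0xE0000000. A minimal Python sanity check of these encodings (not part of the commit):

AL = 0xE0000000  # condition field for "always"

def arm_dp_reg(opcode, rd, rn, rm):
    # cond | opcode << 21 | Rn << 16 | Rd << 12 | Rm (register operand2)
    return AL | (opcode << 21) | (rn << 16) | (rd << 12) | rm

assert arm_dp_reg(0b0000, 0, 1, 2) == 0xE0010002  # and r0, r1, r2
assert arm_dp_reg(0b0001, 0, 1, 2) == 0xE0210002  # eor r0, r1, r2
assert arm_dp_reg(0b1100, 0, 1, 2) == 0xE1810002  # orr r0, r1, r2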

py/asmarm.h

@@ -96,6 +96,9 @@ void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn);
// arithmetic
void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num);
void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs);
void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs);

py/asmx64.c

@@ -53,6 +53,8 @@
#define OPCODE_MOV_R64_TO_RM64 (0x89) /* /r */
#define OPCODE_MOV_RM64_TO_R64 (0x8b)
#define OPCODE_LEA_MEM_TO_R64 (0x8d) /* /r */
#define OPCODE_AND_R64_TO_RM64 (0x21) /* /r */
#define OPCODE_OR_R64_TO_RM64 (0x09) /* /r */
#define OPCODE_XOR_R64_TO_RM64 (0x31) /* /r */
#define OPCODE_ADD_R64_TO_RM64 (0x01) /* /r */
#define OPCODE_ADD_I32_TO_RM32 (0x81) /* /0 */
@@ -385,6 +387,14 @@ void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64
    asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
}

void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_AND_R64_TO_RM64);
}

void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_OR_R64_TO_RM64);
}

void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_XOR_R64_TO_RM64);
}
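
All three wrappers reuse the pre-existing asm_x64_generic_r64_r64 helper. Assuming that helper emits the usual REX.W prefix, the opcode byte, then a mod=11 ModRM with reg=source and rm=destination (these are the "R64 to RM64" forms, and for registers below r8), the resulting bytes can be re-derived with a hypothetical sketch like:

REX_W = 0x48  # 64-bit operand size, low registers only

def x64_rr(opcode, dest, src):
    modrm = 0xC0 | (src << 3) | dest  # mod=11: register-direct
    return bytes([REX_W, opcode, modrm])

# rax=0, rbx=3 in the standard register numbering
assert x64_rr(0x21, 0, 3) == b"\x48\x21\xd8"  # and rax, rbx
assert x64_rr(0x09, 0, 3) == b"\x48\x09\xd8"  # or rax, rbx
assert x64_rr(0x31, 0, 3) == b"\x48\x31\xd8"  # xor rax, rbx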

py/asmx64.h

@@ -86,6 +86,8 @@ void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64
void asm_x64_mov_r8_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
void asm_x64_mov_r16_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
void asm_x64_mov_r64_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_shl_r64_cl(asm_x64_t* as, int dest_r64);
void asm_x64_sar_r64_cl(asm_x64_t* as, int dest_r64);

py/asmx86.c

@@ -53,6 +53,8 @@
#define OPCODE_MOV_R32_TO_RM32 (0x89)
#define OPCODE_MOV_RM32_TO_R32 (0x8b)
#define OPCODE_LEA_MEM_TO_R32 (0x8d) /* /r */
#define OPCODE_AND_R32_TO_RM32 (0x21) /* /r */
#define OPCODE_OR_R32_TO_RM32 (0x09) /* /r */
#define OPCODE_XOR_R32_TO_RM32 (0x31) /* /r */
#define OPCODE_ADD_R32_TO_RM32 (0x01)
#define OPCODE_ADD_I32_TO_RM32 (0x81) /* /0 */
@@ -287,6 +289,14 @@ void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32
    asm_x86_mov_i32_to_r32(as, src_i32, dest_r32);
}

void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_AND_R32_TO_RM32);
}

void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_OR_R32_TO_RM32);
}

void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_XOR_R32_TO_RM32);
}
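
The x86 side uses the same opcode bytes, just without the REX prefix (same sketch assumptions as above):

def x86_rr(opcode, dest, src):
    return bytes([opcode, 0xC0 | (src << 3) | dest])

assert x86_rr(0x21, 0, 3) == b"\x21\xd8"  # and eax, ebx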

py/asmx86.h

@@ -83,6 +83,8 @@ void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32
void asm_x86_mov_r8_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
void asm_x86_mov_r16_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
void asm_x86_mov_r32_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_shl_r32_cl(asm_x86_t* as, int dest_r32);
void asm_x86_sar_r32_cl(asm_x86_t* as, int dest_r32);

py/emitnative.c

@@ -145,6 +145,9 @@
#define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg))
#define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src))
#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src))
#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src))
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
@@ -270,6 +273,9 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
#define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg))
#define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src))
#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x86_and_r32_r32((as), (reg_dest), (reg_src))
#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src))
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
@@ -346,6 +352,9 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift))
#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src))
#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_AND, (reg_dest), (reg_src))
#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
@@ -422,6 +431,9 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift))
#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_arm_and_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
@@ -1769,6 +1781,15 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
        ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
        emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
#endif
    } else if (op == MP_BINARY_OP_OR || op == MP_BINARY_OP_INPLACE_OR) {
        ASM_OR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
        emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
    } else if (op == MP_BINARY_OP_XOR || op == MP_BINARY_OP_INPLACE_XOR) {
        ASM_XOR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
        emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
    } else if (op == MP_BINARY_OP_AND || op == MP_BINARY_OP_INPLACE_AND) {
        ASM_AND_REG_REG(emit->as, REG_ARG_2, reg_rhs);
        emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
    } else if (op == MP_BINARY_OP_ADD || op == MP_BINARY_OP_INPLACE_ADD) {
        ASM_ADD_REG_REG(emit->as, REG_ARG_2, reg_rhs);
        emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
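
Each new branch follows the existing pattern: with the left operand in REG_ARG_2 and the right in reg_rhs, one per-architecture ASM_*_REG_REG macro emits a single register-register instruction, and the result is re-pushed as VTYPE_INT. The in-place variants (&=, |=, ^=) share the same path since the result lands back on the viper stack either way. At the Python level this enables code like the following (a sketch that needs a MicroPython build with the viper emitter; the function name is illustrative):

import micropython

@micropython.viper
def mask(x: int, y: int) -> int:
    x &= y           # MP_BINARY_OP_INPLACE_AND: same path as x & y
    return x | 0x10  # MP_BINARY_OP_OR: a single or instruction

print(mask(0xF3, 0x0F))  # prints 19 (0x03 | 0x10)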

tests/micropython/viper_binop_arith.py

@@ -34,3 +34,25 @@ shr(1, 0)
shr(1, 3)
shr(42, 2)
shr(-42, 2)

@micropython.viper
def and_(x:int, y:int):
    print(x & y, y & x)
and_(1, 0)
and_(1, 3)
and_(0xf0, 0x3f)
and_(-42, 6)

@micropython.viper
def or_(x:int, y:int):
    print(x | y, y | x)
or_(1, 0)
or_(1, 2)
or_(-42, 5)

@micropython.viper
def xor(x:int, y:int):
    print(x ^ y, y ^ x)
xor(1, 0)
xor(1, 2)
xor(-42, 5)
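
The negative-operand cases pin down two's-complement behaviour of the native ops, matching what plain Python ints give. A quick check of the values expected in the .exp file that follows:

assert -42 & 6 == 6    # ...11010110 & 00000110 = 00000110
assert -42 | 5 == -41  # ...11010110 | 00000101 = ...11010111
assert -42 ^ 5 == -45  # ...11010110 ^ 00000101 = ...11010011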

tests/micropython/viper_binop_arith.py.exp

@@ -23,3 +23,13 @@
0
10
-11
0 0
1 1
48 48
6 6
1 1
3 3
-41 -41
1 1
3 3
-45 -45