author     Jerin Jacob <jerinj@marvell.com>          2019-09-03 16:29:34 +0530
committer  Thomas Monjalon <thomas@monjalon.net>     2019-10-12 14:20:29 +0200
commit     9f4469d9e83ab091f664278f581f65a231c1e0da (patch)
tree       3b8061c54bf049229b4970d53c604e8003b38c1a
parent     111e2a747a4fde20bfcb093936efbb09dae8d4ec (diff)
bpf/arm: add logical operations
Add OR, AND, NEG, XOR, shift operations for immediate and source
register variants.

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
-rw-r--r--  lib/librte_bpf/bpf_jit_arm64.c  189
1 file changed, 189 insertions, 0 deletions
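
For context on the immediate-shift handling in the patch below: AArch64 has no
dedicated LSL-by-immediate opcode, so BPF_LSH | BPF_K is lowered through UBFM
(unsigned bitfield move), computing immr = (width - imm) & (width - 1) and
imms = width - 1 - imm, while LSR and ASR use UBFM/SBFM with immr = imm and
imms = width - 1. A minimal standalone sketch (plain C, not DPDK code) that
reproduces the same field arithmetic for a couple of shift amounts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the immr/imms computation used by emit_lsl() in the patch:
 * LSL #imm is the alias UBFM rd, rn, #((-imm) MOD width), #(width - 1 - imm).
 */
static void
lsl_to_ubfm_fields(bool is64, uint8_t imm, uint8_t *immr, uint8_t *imms)
{
	const unsigned int width = is64 ? 64 : 32;

	*immr = (width - imm) & (width - 1);
	*imms = width - 1 - imm;
}

int
main(void)
{
	uint8_t immr, imms;

	/* lsl x0, x0, #3 should become ubfm x0, x0, #61, #60 */
	lsl_to_ubfm_fields(true, 3, &immr, &imms);
	printf("64-bit lsl #3 -> ubfm immr=%u imms=%u\n", immr, imms);

	/* lsl w0, w0, #5 should become ubfm w0, w0, #27, #26 */
	lsl_to_ubfm_fields(false, 5, &immr, &imms);
	printf("32-bit lsl #5 -> ubfm immr=%u imms=%u\n", immr, imms);

	return 0;
}
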
diff --git a/lib/librte_bpf/bpf_jit_arm64.c b/lib/librte_bpf/bpf_jit_arm64.c
index 5d2ce37..fcde358 100644
--- a/lib/librte_bpf/bpf_jit_arm64.c
+++ b/lib/librte_bpf/bpf_jit_arm64.c
@@ -44,6 +44,17 @@ struct a64_jit_ctx {
};
static int
+check_immr_imms(bool is64, uint8_t immr, uint8_t imms)
+{
+ const unsigned int width = is64 ? 64 : 32;
+
+ if (immr >= width || imms >= width)
+ return 1;
+
+ return 0;
+}
+
+static int
check_mov_hw(bool is64, const uint8_t val)
{
if (val == 16 || val == 0)
@@ -289,6 +300,12 @@ emit_sub(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
}
static void
+emit_neg(struct a64_jit_ctx *ctx, bool is64, uint8_t rd)
+{
+ emit_add_sub(ctx, is64, rd, A64_ZR, rd, A64_SUB);
+}
+
+static void
emit_mul(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
{
uint32_t insn;
@@ -304,6 +321,9 @@ emit_mul(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
}
#define A64_UDIV 0x2
+#define A64_LSLV 0x8
+#define A64_LSRV 0x9
+#define A64_ASRV 0xA
static void
emit_data_process_two_src(struct a64_jit_ctx *ctx, bool is64, uint8_t rd,
uint8_t rn, uint8_t rm, uint16_t op)
@@ -328,6 +348,107 @@ emit_div(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
}
static void
+emit_lslv(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
+{
+ emit_data_process_two_src(ctx, is64, rd, rd, rm, A64_LSLV);
+}
+
+static void
+emit_lsrv(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
+{
+ emit_data_process_two_src(ctx, is64, rd, rd, rm, A64_LSRV);
+}
+
+static void
+emit_asrv(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
+{
+ emit_data_process_two_src(ctx, is64, rd, rd, rm, A64_ASRV);
+}
+
+#define A64_UBFM 0x2
+#define A64_SBFM 0x0
+static void
+emit_bitfield(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rn,
+ uint8_t immr, uint8_t imms, uint16_t op)
+
+{
+ uint32_t insn;
+
+ insn = (!!is64) << 31;
+ if (insn)
+ insn |= 1 << 22; /* Set N bit when is64 is set */
+ insn |= op << 29;
+ insn |= 0x26 << 23;
+ insn |= immr << 16;
+ insn |= imms << 10;
+ insn |= rn << 5;
+ insn |= rd;
+
+ emit_insn(ctx, insn, check_reg(rd) || check_reg(rn) ||
+ check_immr_imms(is64, immr, imms));
+}
+
+static void
+emit_lsl(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t imm)
+{
+ const unsigned int width = is64 ? 64 : 32;
+ uint8_t imms, immr;
+
+ immr = (width - imm) & (width - 1);
+ imms = width - 1 - imm;
+
+ emit_bitfield(ctx, is64, rd, rd, immr, imms, A64_UBFM);
+}
+
+static void
+emit_lsr(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t imm)
+{
+ emit_bitfield(ctx, is64, rd, rd, imm, is64 ? 63 : 31, A64_UBFM);
+}
+
+static void
+emit_asr(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t imm)
+{
+ emit_bitfield(ctx, is64, rd, rd, imm, is64 ? 63 : 31, A64_SBFM);
+}
+
+#define A64_AND 0
+#define A64_OR 1
+#define A64_XOR 2
+static void
+emit_logical(struct a64_jit_ctx *ctx, bool is64, uint8_t rd,
+ uint8_t rm, uint16_t op)
+{
+ uint32_t insn;
+
+ insn = (!!is64) << 31;
+ insn |= op << 29;
+ insn |= 0x50 << 21;
+ insn |= rm << 16;
+ insn |= rd << 5;
+ insn |= rd;
+
+ emit_insn(ctx, insn, check_reg(rd) || check_reg(rm));
+}
+
+static void
+emit_or(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
+{
+ emit_logical(ctx, is64, rd, rm, A64_OR);
+}
+
+static void
+emit_and(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
+{
+ emit_logical(ctx, is64, rd, rm, A64_AND);
+}
+
+static void
+emit_xor(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
+{
+ emit_logical(ctx, is64, rd, rm, A64_XOR);
+}
+
+static void
emit_msub(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rn,
uint8_t rm, uint8_t ra)
{
@@ -707,6 +828,74 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
emit_mov_imm(ctx, is64, tmp1, imm);
emit_mod(ctx, is64, tmp2, dst, tmp1);
break;
+ /* dst |= src */
+ case (BPF_ALU | BPF_OR | BPF_X):
+ case (EBPF_ALU64 | BPF_OR | BPF_X):
+ emit_or(ctx, is64, dst, src);
+ break;
+ /* dst |= imm */
+ case (BPF_ALU | BPF_OR | BPF_K):
+ case (EBPF_ALU64 | BPF_OR | BPF_K):
+ emit_mov_imm(ctx, is64, tmp1, imm);
+ emit_or(ctx, is64, dst, tmp1);
+ break;
+ /* dst &= src */
+ case (BPF_ALU | BPF_AND | BPF_X):
+ case (EBPF_ALU64 | BPF_AND | BPF_X):
+ emit_and(ctx, is64, dst, src);
+ break;
+ /* dst &= imm */
+ case (BPF_ALU | BPF_AND | BPF_K):
+ case (EBPF_ALU64 | BPF_AND | BPF_K):
+ emit_mov_imm(ctx, is64, tmp1, imm);
+ emit_and(ctx, is64, dst, tmp1);
+ break;
+ /* dst ^= src */
+ case (BPF_ALU | BPF_XOR | BPF_X):
+ case (EBPF_ALU64 | BPF_XOR | BPF_X):
+ emit_xor(ctx, is64, dst, src);
+ break;
+ /* dst ^= imm */
+ case (BPF_ALU | BPF_XOR | BPF_K):
+ case (EBPF_ALU64 | BPF_XOR | BPF_K):
+ emit_mov_imm(ctx, is64, tmp1, imm);
+ emit_xor(ctx, is64, dst, tmp1);
+ break;
+ /* dst = -dst */
+ case (BPF_ALU | BPF_NEG):
+ case (EBPF_ALU64 | BPF_NEG):
+ emit_neg(ctx, is64, dst);
+ break;
+ /* dst <<= src */
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case EBPF_ALU64 | BPF_LSH | BPF_X:
+ emit_lslv(ctx, is64, dst, src);
+ break;
+ /* dst <<= imm */
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case EBPF_ALU64 | BPF_LSH | BPF_K:
+ emit_lsl(ctx, is64, dst, imm);
+ break;
+ /* dst >>= src */
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case EBPF_ALU64 | BPF_RSH | BPF_X:
+ emit_lsrv(ctx, is64, dst, src);
+ break;
+ /* dst >>= imm */
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case EBPF_ALU64 | BPF_RSH | BPF_K:
+ emit_lsr(ctx, is64, dst, imm);
+ break;
+ /* dst >>= src (arithmetic) */
+ case BPF_ALU | EBPF_ARSH | BPF_X:
+ case EBPF_ALU64 | EBPF_ARSH | BPF_X:
+ emit_asrv(ctx, is64, dst, src);
+ break;
+ /* dst >>= imm (arithmetic) */
+ case BPF_ALU | EBPF_ARSH | BPF_K:
+ case EBPF_ALU64 | EBPF_ARSH | BPF_K:
+ emit_asr(ctx, is64, dst, imm);
+ break;
/* Return r0 */
case (BPF_JMP | EBPF_EXIT):
emit_epilogue(ctx);
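
The register-variant logical ops above use the standard AArch64 "logical
(shifted register)" encoding, with the destination register doubling as the
first source so the result is rd = rd <op> rm. As a sanity check (not part of
the patch), the standalone sketch below rebuilds the instruction word the same
way emit_logical() does and compares it against 0xaa010000, which should be
the assembler encoding of "orr x0, x0, x1" (the familiar "mov x0, x1" alias
0xaa0103e0 with Rn changed from xzr to x0):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define A64_AND 0
#define A64_OR  1
#define A64_XOR 2

/*
 * Rebuild the word the same way emit_logical() in the patch does:
 * logical (shifted register), reusing rd as both Rn and Rd.
 */
static uint32_t
logical_insn(bool is64, uint8_t rd, uint8_t rm, uint32_t op)
{
	uint32_t insn;

	insn = (uint32_t)(!!is64) << 31;
	insn |= op << 29;
	insn |= 0x50 << 21;
	insn |= (uint32_t)rm << 16;
	insn |= (uint32_t)rd << 5;
	insn |= rd;

	return insn;
}

int
main(void)
{
	/* orr x0, x0, x1: is64, rd = x0, rm = x1, op = A64_OR */
	uint32_t insn = logical_insn(true, 0, 1, A64_OR);

	printf("emitted 0x%08x (expected 0xaa010000 for orr x0, x0, x1)\n",
	       insn);
	return insn == 0xaa010000 ? 0 : 1;
}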