/* aarch64-dis.c -- AArch64 disassembler.
Copyright (C) 2009-2024 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of the GNU opcodes library.
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
It is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING3. If not,
see . */
#include "sysdep.h"
#include
#include "disassemble.h"
#include "libiberty.h"
#include "opintl.h"
#include "aarch64-dis.h"
#include "elf-bfd.h"
#include "safe-ctype.h"
#include "obstack.h"
#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
#define INSNLEN 4
/* This character is used to encode style information within the output
buffers. See get_style_text and print_operands for more details. */
#define STYLE_MARKER_CHAR '\002'
/* Cached mapping symbol state: whether the bytes around the last mapping
   symbol seen are instructions or data.  */
enum map_type
{
  MAP_INSN,
  MAP_DATA
};

/* Feature set used to gate feature-dependent decoding.
   See select_aarch64_variant.  */
static aarch64_feature_set arch_variant;

/* Cache of the most recently consulted mapping symbol, so that repeated
   disassembly calls do not rescan the symbol table from scratch.  */
static enum map_type last_type;
static int last_mapping_sym = -1;	/* Symbol index; -1 = none seen yet.  */
static bfd_vma last_stop_offset = 0;
static bfd_vma last_mapping_addr = 0;

/* Other options.  */
static int no_aliases = 0;	/* If set disassemble as most general inst.  */

static int no_notes = 1;	/* If set do not print disassemble notes in the
				   output as comments.  (On by default.)  */

/* Currently active instruction sequence (for sequence-sensitive
   constraint checking, e.g. MOVPRFX).  NOTE(review): exact use is
   established elsewhere in this file.  */
static aarch64_instr_sequence insn_sequence;
/* Reset disassembler options to their defaults.  Currently there is no
   per-call state to initialise, so this is intentionally a no-op; INFO is
   accepted only to match the expected signature.  */
static void
set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
{
}
/* Act on a single disassembler option OPTION of length LEN.  OPTION points
   into a comma-separated list and is not necessarily NUL-terminated right
   after the option name, hence the prefix matching below.  Unknown options
   are reported through opcodes_error_handler.  */
static void
parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
{
  /* Simple flag options.  */
  if (startswith (option, "no-aliases"))
    no_aliases = 1;
  else if (startswith (option, "aliases"))
    no_aliases = 0;
  else if (startswith (option, "no-notes"))
    no_notes = 1;
  else if (startswith (option, "notes"))
    no_notes = 0;
#ifdef DEBUG_AARCH64
  else if (startswith (option, "debug_dump"))
    debug_dump = 1;
#endif /* DEBUG_AARCH64 */
  else
    /* Invalid option.  */
    opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
}
/* Split OPTIONS, a comma-separated list of disassembler options, and hand
   each individual option to parse_aarch64_dis_option.  NULL is accepted
   and means "no options".  */
static void
parse_aarch64_dis_options (const char *options)
{
  if (options == NULL)
    return;

  while (*options != '\0')
    {
      /* Swallow separating commas; this also skips empty options.  */
      if (*options == ',')
	{
	  options++;
	  continue;
	}

      /* *OPTIONS is neither NUL nor a comma here, so the option is at
	 least one character long; scan to its end.  */
      const char *end = options + 1;
      while (*end != ',' && *end != '\0')
	end++;

      parse_aarch64_dis_option (options, end - options);

      /* Continue after this option; any comma at END is consumed by the
	 check at the top of the loop.  */
      options = end;
    }
}
/* Functions doing the instruction disassembling. */
/* Extract up to five instruction fields from CODE and concatenate their
   values.  The variable arguments are: the number of fields (a uint32_t),
   followed by that many enum aarch64_field_kind values, most significant
   field first.  MASK can be zero or the base mask of the opcode.

   N.B. for e.g. the  in
     SQDMLAL , , .[]
   which is encoded in H:L:M, the fields must be passed in the order
   H, L, M.  */
aarch64_insn
extract_fields (aarch64_insn code, aarch64_insn mask, ...)
{
  va_list ap;
  uint32_t n;
  aarch64_insn result = 0;

  va_start (ap, mask);
  n = va_arg (ap, uint32_t);
  assert (n <= 5);
  for (uint32_t i = 0; i < n; i++)
    {
      enum aarch64_field_kind kind = va_arg (ap, enum aarch64_field_kind);
      const aarch64_field *fld = &fields[kind];
      /* Earlier fields occupy the more significant bits.  */
      result = (result << fld->width) | extract_field (kind, code, mask);
    }
  va_end (ap);

  return result;
}
/* Concatenate the values of all fields in SELF->fields, starting at index
   START, extracted from instruction CODE.  Earlier fields supply the more
   significant bits; the least significant bits come from the final
   (non-FLD_NIL) field.  */
static aarch64_insn
extract_all_fields_after (const aarch64_operand *self, unsigned int start,
			  aarch64_insn code)
{
  aarch64_insn result = 0;

  for (unsigned int idx = start; idx < ARRAY_SIZE (self->fields); idx++)
    {
      enum aarch64_field_kind kind = self->fields[idx];
      if (kind == FLD_NIL)
	break;
      result = (result << fields[kind].width) | extract_field (kind, code, 0);
    }

  return result;
}
/* Concatenate the values of all fields in SELF->fields extracted from
   instruction CODE.  The least significant bits come from the final field;
   see extract_all_fields_after.  */
static aarch64_insn
extract_all_fields (const aarch64_operand *self, aarch64_insn code)
{
  return extract_all_fields_after (self, 0, code);
}
/* Sign-extend VALUE from bit I, i.e. treat bit I as the sign bit and
   return the corresponding 64-bit signed value (as a uint64_t bit
   pattern).  Bits above I are ignored.  */
static inline uint64_t
sign_extend (aarch64_insn value, unsigned i)
{
  assert (i < 32);

  uint64_t bits = value;
  uint64_t sign_bit = (uint64_t) 1 << i;
  uint64_t low_mask = (sign_bit << 1) - 1;

  /* Keep bits 0..I, then use the xor/subtract trick to propagate the
     sign bit upwards.  */
  bits &= low_mask;
  return (bits ^ sign_bit) - sign_bit;
}
/* N.B. the following inline helpfer functions create a dependency on the
order of operand qualifier enumerators. */
/* Given VALUE, return the qualifier for a general purpose register:
   0 -> W, 1 -> X.  Return AARCH64_OPND_QLF_ERR for any other value.  */
static inline enum aarch64_opnd_qualifier
get_greg_qualifier_from_value (aarch64_insn value)
{
  enum aarch64_opnd_qualifier q = AARCH64_OPND_QLF_W + value;

  if (value > 0x1
      || aarch64_get_qualifier_standard_value (q) != value)
    return AARCH64_OPND_QLF_ERR;

  return q;
}
/* Given VALUE, return the qualifier for a vector register.  This does not
   support decoding instructions that accept the 2H vector type.  Return
   AARCH64_OPND_QLF_ERR for an invalid VALUE.  */
static inline enum aarch64_opnd_qualifier
get_vreg_qualifier_from_value (aarch64_insn value)
{
  enum aarch64_opnd_qualifier q = AARCH64_OPND_QLF_V_8B + value;

  /* Instructions using vector type 2H should not call this function;
     step over the 2H enumerator so the mapping stays contiguous.  */
  if (q >= AARCH64_OPND_QLF_V_2H)
    q += 1;

  if (value > 0x8
      || aarch64_get_qualifier_standard_value (q) != value)
    return AARCH64_OPND_QLF_ERR;

  return q;
}
/* Given VALUE, return the qualifier for an FP or AdvSIMD scalar register:
   0 -> B, 1 -> H, 2 -> S, 3 -> D, 4 -> Q (relying on the enumerator
   order noted above).  Return AARCH64_OPND_QLF_ERR otherwise.  */
static inline enum aarch64_opnd_qualifier
get_sreg_qualifier_from_value (aarch64_insn value)
{
  enum aarch64_opnd_qualifier q = AARCH64_OPND_QLF_S_B + value;

  if (value > 0x4
      || aarch64_get_qualifier_standard_value (q) != value)
    return AARCH64_OPND_QLF_ERR;

  return q;
}
/* Given the instruction in *INST, which is probably half way through
   decoding, work out the expected qualifier for operand I.  Return the
   qualifier if one can be established, AARCH64_OPND_QLF_NIL if no match
   is found, and AARCH64_OPND_QLF_ERR if the qualifier was already known
   (this function should not have been called in that case).  */
static aarch64_opnd_qualifier_t
get_expected_qualifier (const aarch64_inst *inst, int i)
{
  aarch64_opnd_qualifier_seq_t qualifiers;
  int invalid_count;

  /* Should not be called if the qualifier is known.  */
  if (inst->operands[i].qualifier != AARCH64_OPND_QLF_NIL)
    return AARCH64_OPND_QLF_ERR;

  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
				i, qualifiers, &invalid_count))
    return AARCH64_OPND_QLF_NIL;

  return qualifiers[i];
}
/* Operand extractors.  */

/* Extractor for operands that encode nothing in the instruction word
   (fully determined by the opcode table).  Always succeeds.  */
bool
aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info ATTRIBUTE_UNUSED,
		  const aarch64_insn code ATTRIBUTE_UNUSED,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}
/* Decode a plain register-number operand: the number comes from
   SELF->fields[0] in CODE, plus any operand-specific bias.  */
bool
aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
		   const aarch64_insn code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn regno = extract_field (self->fields[0], code, 0);

  info->reg.regno = regno + get_operand_specific_data (self);
  return true;
}
/* Decode the second register of a register pair.  Nothing is read from
   CODE: the register number is implicitly one more than the register
   decoded for the previous operand, except that register 31 maps to 31
   (presumably to keep the number in range -- NOTE(review): confirm the
   intended wrap semantics against the architecture pseudocode).  */
bool
aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
			const aarch64_insn code ATTRIBUTE_UNUSED,
			const aarch64_inst *inst ATTRIBUTE_UNUSED,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Only ever used for the trailing half of a pair (operand index 1, 2,
     3 or 5).  */
  assert (info->idx == 1
	  || info->idx == 2
	  || info->idx == 3
	  || info->idx == 5);
  unsigned prev_regno = inst->operands[info->idx - 1].reg.regno;
  info->reg.regno = (prev_regno == 0x1f) ? 0x1f
				  : prev_regno + 1;
  return true;
}
/* Decode the  of a system instruction, e.g. IC  {, }.  */
bool
aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
			  const aarch64_insn code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* This must be the operand following the system operation itself.  */
  assert (info->idx == 1
	  && (aarch64_get_operand_class (inst->operands[0].type)
	      == AARCH64_OPND_CLASS_SYSTEM));

  info->reg.regno = extract_field (self->fields[0], code, 0);

  /* Record whether this system operation actually takes an ; this
     keeps the constraint checking happy and, more importantly, lets the
     disassembler decide whether the operand is optional.  */
  info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
  return true;
}
/* Decode a vector register-lane operand, e.g. the last operand in
   SQDMLAL , , .[].  The register number and, depending on
   the instruction class, the element type and index are recovered from
   CODE.  Returns false on a reserved or inconsistent encoding.

   Fix: this bool function mixed `return 0' with `return false'; all
   failure paths now uniformly return false.  */
bool
aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
		     const aarch64_insn code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  info->reglane.regno = extract_field (self->fields[0], code,
				       inst->opcode->mask);

  /* Index and/or type.  */
  if (inst->opcode->iclass == asisdone
      || inst->opcode->iclass == asimdins)
    {
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  unsigned shift;
	  /* index2 for e.g. INS .[], .[].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = extract_field (FLD_imm4_11, code, 0);
	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
	  info->qualifier = get_expected_qualifier (inst, info->idx);
	  if (info->qualifier == AARCH64_OPND_QLF_ERR)
	    return false;
	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
	  info->reglane.index = value >> shift;
	}
      else
	{
	  /* index and type for e.g. DUP , .[].
	     imm5<3:0>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  int pos = -1;
	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
	  /* The position of the lowest set bit gives the element size.  */
	  while (++pos <= 3 && (value & 0x1) == 0)
	    value >>= 1;
	  if (pos > 3)
	    return false;
	  info->qualifier = get_sreg_qualifier_from_value (pos);
	  if (info->qualifier == AARCH64_OPND_QLF_ERR)
	    return false;
	  /* The bits above the size marker give the index.  */
	  info->reglane.index = (unsigned) (value >> 1);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      /* Need information in other operand(s) to help decoding.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return false;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	  info->reglane.regno &= 0x1f;
	  break;
	default:
	  return false;
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A .4S, .4S, S[].  */
      info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
    }
  else
    {
      /* Index only for e.g. SQDMLAL , , .[]
	 or SQDMLAL , , .[].  */

      /* Need information in other operand(s) to help decoding.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return false;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  if (info->type == AARCH64_OPND_Em16)
	    {
	      /* h:l:m */
	      info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
						    FLD_M);
	      info->reglane.regno &= 0xf;
	    }
	  else
	    {
	      /* h:l */
	      info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	    }
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* h:l */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  info->reglane.index = extract_field (FLD_H, code, 0);
	  break;
	default:
	  return false;
	}

      if (inst->opcode->op == OP_FCMLA_ELEM
	  && info->qualifier != AARCH64_OPND_QLF_S_H)
	{
	  /* Complex operand takes two elements.  */
	  if (info->reglane.index & 1)
	    return false;
	  info->reglane.index /= 2;
	}
    }

  return true;
}
/* Decode a vector register list: the first register comes from
   SELF->fields[0] and the "len" field holds the register count minus
   one.  */
bool
aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
		     const aarch64_insn code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn len = extract_field (FLD_len, code, 0);

  /* R */
  info->reglist.first_regno = extract_field (self->fields[0], code, 0);
  /* len */
  info->reglist.num_regs = len + 1;
  info->reglist.stride = 1;
  return true;
}
/* Decode Rt and the opcode field of Vt in AdvSIMD load/store multiple
   structures instructions.  Returns false for reserved encodings or when
   the opcode disagrees with the number of elements the mnemonic expects.

   Fix: the lookup table is now static const so it is not re-initialised
   on every call.  */
bool
aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  aarch64_opnd_info *info, const aarch64_insn code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned expected_num = get_opcode_dependent_value (inst->opcode);

  /* Indexed by the "opcode" field: whether the encoding is reserved, the
     number of registers in the list and the number of elements per
     structure.  */
  static const struct
  {
    unsigned is_reserved;
    unsigned num_regs;
    unsigned num_elements;
  } data [] =
  {   {0, 4, 4},
      {1, 4, 4},
      {0, 4, 1},
      {0, 4, 2},
      {0, 3, 3},
      {1, 3, 3},
      {0, 3, 1},
      {0, 1, 1},
      {0, 2, 2},
      {1, 2, 2},
      {0, 2, 1},
  };

  /* Rt */
  info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
  /* opcode */
  value = extract_field (FLD_opcode, code, 0);
  /* PR 21595: Check for a bogus value.  */
  if (value >= ARRAY_SIZE (data))
    return false;

  if (expected_num != data[value].num_elements || data[value].is_reserved)
    return false;
  info->reglist.num_regs = data[value].num_regs;
  info->reglist.stride = 1;

  return true;
}
/* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
   lanes instructions.  */
bool
aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    aarch64_opnd_info *info, const aarch64_insn code,
			    const aarch64_inst *inst,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;

  /* Rt */
  info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
  /* S */
  value = extract_field (FLD_S, code, 0);

  /* Number of registers is equal to the number of elements in
     each structure to be loaded/stored.  */
  info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
  assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);

  /* Except when it is LD1R: with the S bit set, the single-element form
     transfers two registers (NOTE(review): presumably selecting the
     two-register replicating form -- confirm against the architecture
     tables).  */
  if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
    info->reglist.num_regs = 2;
  info->reglist.stride = 1;
  return true;
}
/* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
   load/store single element instructions.  Returns false for the
   undefined (UND) encodings.

   Fix: removed a dead store that zeroed reglist.num_regs immediately
   before it was assigned its real value.  */
bool
aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   aarch64_opnd_info *info, const aarch64_insn code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize;		/* fields Q:S:size.  */
  aarch64_insn opcodeh2;	/* opcode<2:1> */

  /* Rt */
  info->reglist.first_regno = extract_field (FLD_Rt, code, 0);

  /* Decode the index, opcode<2:1> and size.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  opcodeh2 = extract_field_2 (&field, code, 0);
  QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
  switch (opcodeh2)
    {
    case 0x0:
      info->qualifier = AARCH64_OPND_QLF_S_B;
      /* Index encoded in "Q:S:size".  */
      info->reglist.index = QSsize;
      break;
    case 0x1:
      if (QSsize & 0x1)
	/* UND.  */
	return false;
      info->qualifier = AARCH64_OPND_QLF_S_H;
      /* Index encoded in "Q:S:size<1>".  */
      info->reglist.index = QSsize >> 1;
      break;
    case 0x2:
      if ((QSsize >> 1) & 0x1)
	/* UND.  */
	return false;
      if ((QSsize & 0x1) == 0)
	{
	  info->qualifier = AARCH64_OPND_QLF_S_S;
	  /* Index encoded in "Q:S".  */
	  info->reglist.index = QSsize >> 2;
	}
      else
	{
	  if (extract_field (FLD_S, code, 0))
	    /* UND */
	    return false;
	  info->qualifier = AARCH64_OPND_QLF_S_D;
	  /* Index encoded in "Q".  */
	  info->reglist.index = QSsize >> 3;
	}
      break;
    default:
      return false;
    }

  info->reglist.has_index = 1;
  info->reglist.stride = 1;
  /* Number of registers is equal to the number of elements in
     each structure to be loaded/stored.  */
  info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
  assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);

  return true;
}
/* Decode fields immh:immb and/or Q for e.g.
   SSHR ., ., #
   or SSHR , , #.

   Fix: the scalar-class failure path returned `0' where the vector-class
   path returned `false'; both now return false.  */
bool
aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       aarch64_opnd_info *info, const aarch64_insn code,
			       const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int pos;
  aarch64_insn Q, imm, immh;
  enum aarch64_insn_class iclass = inst->opcode->iclass;

  immh = extract_field (FLD_immh, code, 0);
  /* immh == 0 selects the AdvSIMD modified immediate space instead.  */
  if (immh == 0)
    return false;
  imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
  pos = 4;
  /* Get highest set bit in immh.  */
  while (--pos >= 0 && (immh & 0x8) == 0)
    immh <<= 1;

  assert ((iclass == asimdshf || iclass == asisdshf)
	  && (info->type == AARCH64_OPND_IMM_VLSR
	      || info->type == AARCH64_OPND_IMM_VLSL));

  if (iclass == asimdshf)
    {
      Q = extract_field (FLD_Q, code, 0);
      /* immh	Q
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      info->qualifier =
	get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return false;
    }
  else
    {
      info->qualifier = get_sreg_qualifier_from_value (pos);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return false;
    }

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    info->imm.value = (16 << pos) - imm;
  else
    /* immh:immb
       immh
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    info->imm.value = imm - (8 << pos);

  return true;
}
/* Decode the implicit shift amount encoded in the "size" field for
   SHLL-style instructions: 8, 16 or 32 to match the source element size
   (size == 3 is invalid).  */
bool
aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
		      aarch64_opnd_info *info, const aarch64_insn code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn size = extract_field (FLD_size, code, 0);

  /* 0 -> 8, 1 -> 16, 2 -> 32; anything else is invalid.  */
  if (size > 2)
    return false;
  info->imm.value = 8 << size;
  return true;
}
/* Decode imm for e.g. BFM , , #, #.
   The value in the field(s) is extracted as an unsigned immediate and then
   adjusted (sign-extended and/or scaled) as the operand description
   requires.  */
bool
aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
		 const aarch64_insn code,
		 const aarch64_inst *inst,
		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t imm;

  /* Concatenate all of the operand's encoding fields.  */
  imm = extract_all_fields (self, code);

  if (operand_need_sign_extension (self))
    imm = sign_extend (imm, get_operand_fields_width (self) - 1);

  /* Some immediates are stored scaled down and must be shifted back up.  */
  if (operand_need_shift_by_two (self))
    imm <<= 2;
  else if (operand_need_shift_by_three (self))
    imm <<= 3;
  else if (operand_need_shift_by_four (self))
    imm <<= 4;

  /* ADRP immediates are in units of 1 << 12 bytes.  */
  if (info->type == AARCH64_OPND_ADDR_ADRP)
    imm <<= 12;

  /* When operand 0 is a PSTATE field whose immediate lives in CRM, mask
     the immediate down to the bits the field actually uses.  */
  if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
      && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
    imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);

  info->imm.value = imm;
  return true;
}
/* Decode imm and its shifter for e.g. MOVZ , #{, LSL #}. */
bool
aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
const aarch64_insn code,
const aarch64_inst *inst ATTRIBUTE_UNUSED,
aarch64_operand_error *errors)
{
aarch64_ext_imm (self, info, code, inst, errors);
info->shifter.kind = AARCH64_MOD_LSL;
info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
return true;
}
/* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
   MOVI ., # {, LSL #}.

   Fix: this bool function used `return 0'/`return 1'; all such returns
   now use false/true.  */
bool
aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  aarch64_opnd_info *info,
				  const aarch64_insn code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
				  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t imm;
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  aarch64_field field = {0, 0};

  assert (info->idx == 1);

  if (info->type == AARCH64_OPND_SIMD_FPIMM)
    info->imm.is_fp = 1;

  /* a:b:c:d:e:f:g:h */
  imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI , #
	 or MOVI .2D, #: each bit of "a:b:c:d:e:f:g:h" is
	 replicated to fill the corresponding byte of the 64-bit
	 immediate
	 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh'.  */
      int i;
      unsigned abcdefgh = imm;
      for (imm = 0ull, i = 0; i < 8; i++)
	if (((abcdefgh >> i) & 0x1) != 0)
	  imm |= 0xffull << (8 * i);
    }
  info->imm.value = imm;

  /* cmode */
  info->qualifier = get_expected_qualifier (inst, info->idx);
  if (info->qualifier == AARCH64_OPND_QLF_ERR)
    return false;
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_NIL:
      /* no shift */
      info->shifter.kind = AARCH64_MOD_NONE;
      return true;
    case AARCH64_OPND_QLF_LSL:
      /* shift zeros */
      info->shifter.kind = AARCH64_MOD_LSL;
      switch (aarch64_get_qualifier_esize (opnd0_qualifier))
	{
	case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break;	/* per word */
	case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break;	/* per half */
	case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break;	/* per byte */
	default: return false;
	}
      /* 00: 0; 01: 8; 10:16; 11:24.  */
      info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
      break;
    case AARCH64_OPND_QLF_MSL:
      /* shift ones */
      info->shifter.kind = AARCH64_MOD_MSL;
      gen_sub_field (FLD_cmode, 0, 1, &field);	/* per word */
      info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
      break;
    default:
      return false;
    }

  return true;
}
/* Decode an 8-bit floating-point immediate.  The raw imm8 payload is
   stored; interpreting it as a float value is left to later stages.  */
bool
aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
		   const aarch64_insn code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn imm8 = extract_all_fields (self, code);

  info->imm.is_fp = 1;
  info->imm.value = imm8;
  return true;
}
/* Decode a 1-bit rotate immediate: 0 -> #90, 1 -> #270.  */
bool
aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
			 const aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t bit = extract_field (self->fields[0], code, 0);

  assert (bit < 2U);
  info->imm.value = bit ? 270 : 90;
  return true;
}
/* Decode a 2-bit rotate immediate: 0 -> #0, 1 -> #90, 2 -> #180,
   3 -> #270.  */
bool
aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
			 const aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t sel = extract_field (self->fields[0], code, 0);

  assert (sel < 4U);
  info->imm.value = 90 * sel;
  return true;
}
/* Decode scale for e.g. SCVTF , , #:
    is encoded as 64 minus the "scale" field.  */
bool
aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
		   aarch64_opnd_info *info, const aarch64_insn code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn scale = extract_field (FLD_scale, code, 0);

  info->imm.value = 64 - scale;
  return true;
}
/* Decode the arithmetic immediate for e.g.
   SUBS , , # {, }.
   Only LSL #0 and LSL #12 are valid shifts.  */
bool
aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info, const aarch64_insn code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->shifter.kind = AARCH64_MOD_LSL;

  /* shift */
  aarch64_insn shift = extract_field (FLD_shift, code, 0);
  if (shift >= 2)
    return false;
  info->shifter.amount = shift ? 12 : 0;

  /* imm12 (unsigned) */
  info->imm.value = extract_field (FLD_imm12, code, 0);
  return true;
}
/* Return true if VALUE is a valid logical immediate encoding, storing the
   decoded value in *RESULT if so.  ESIZE is the number of bytes in the
   decoded immediate.  Implements the architecture's DecodeBitMasks
   pseudocode: a run of S+1 ones, rotated right by R, replicated to fill
   the element.

   Fix: the (unreachable) replication-switch default returned `0' in a
   bool function; it now returns false.  */
static bool
decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
{
  uint64_t imm, mask;
  uint32_t N, R, S;
  unsigned simd_size;

  /* value is N:immr:imms.  */
  S = value & 0x3f;
  R = (value >> 6) & 0x3f;
  N = (value >> 12) & 0x1;

  /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
     (in other words, right rotated by R), then replicated.  */
  if (N != 0)
    {
      simd_size = 64;
      mask = 0xffffffffffffffffull;
    }
  else
    {
      switch (S)
	{
	case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
	case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
	case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
	case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
	case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
	default: return false;
	}
      mask = (1ull << simd_size) - 1;
      /* Top bits are IGNORED.  */
      R &= simd_size - 1;
    }

  /* The element cannot be wider than the accessed data size.  */
  if (simd_size > esize * 8)
    return false;

  /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
  if (S == simd_size - 1)
    return false;
  /* S+1 consecutive bits to 1.  */
  /* NOTE: S can't be 63 due to detection above.  */
  imm = (1ull << (S + 1)) - 1;
  /* Rotate to the left by simd_size - R.  */
  if (R != 0)
    imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
  /* Replicate the value according to SIMD size.  */
  switch (simd_size)
    {
    case  2: imm = (imm <<  2) | imm;
      /* Fall through.  */
    case  4: imm = (imm <<  4) | imm;
      /* Fall through.  */
    case  8: imm = (imm <<  8) | imm;
      /* Fall through.  */
    case 16: imm = (imm << 16) | imm;
      /* Fall through.  */
    case 32: imm = (imm << 32) | imm;
      /* Fall through.  */
    case 64: break;
    default: return false;
    }

  /* Truncate to ESIZE bytes.  The double shift avoids the undefined
     behaviour a single 64-bit shift would have when ESIZE is 8.  */
  *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));

  return true;
}
/* Decode a logical immediate for e.g. ORR , , #.  */
bool
aarch64_ext_limm (const aarch64_operand *self,
		  aarch64_opnd_info *info, const aarch64_insn code,
		  const aarch64_inst *inst,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Concatenate N:immr:imms and decode it relative to the element size
     of the destination operand.  */
  aarch64_insn encoding = extract_fields (code, 0, 3, self->fields[0],
					  self->fields[1], self->fields[2]);
  uint32_t esize
    = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
  return decode_limm (esize, encoding, &info->imm.value);
}
/* Decode a logical immediate for the BIC alias of AND (etc.), whose
   operand is the bitwise inverse of the encoded immediate.  */
bool
aarch64_ext_inv_limm (const aarch64_operand *self,
		      aarch64_opnd_info *info, const aarch64_insn code,
		      const aarch64_inst *inst,
		      aarch64_operand_error *errors)
{
  if (aarch64_ext_limm (self, info, code, inst, errors))
    {
      info->imm.value = ~info->imm.value;
      return true;
    }
  return false;
}
/* Decode Ft for e.g. STR , [, {, {}}]
   or LDP , , [], #.  */
bool
aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
		aarch64_opnd_info *info,
		const aarch64_insn code, const aarch64_inst *inst,
		aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;

  /* Rt */
  info->reg.regno = extract_field (FLD_Rt, code, 0);
  /* size */
  value = extract_field (FLD_ldst_size, code, 0);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* Pair and load-literal forms: only S, D and Q registers.  */
      enum aarch64_opnd_qualifier qualifier;
      switch (value)
	{
	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
	default: return false;
	}
      info->qualifier = qualifier;
    }
  else
    {
      /* opc1:size selects B/H/S/D/Q; larger values are invalid.  */
      value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
      if (value > 0x4)
	return false;
      info->qualifier = get_sreg_qualifier_from_value (value);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return false;
    }

  return true;
}
/* Decode the address operand for e.g. STXRB , , [{, #0}]:
   nothing but the base register Rn is encoded.  */
bool
aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info,
			 aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  return true;
}
/* Decode the address operand for rcpc3 instructions with an optional
   load/store datasize offset, e.g. STILP , , [{, #-16}]! and
   LDIAPP , , []{, #-16}.  A zero opc2 field selects the
   writeback form; otherwise the operand is a plain base register.  */
bool
aarch64_ext_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
				   aarch64_opnd_info *info,
				   aarch64_insn code,
				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
				   aarch64_operand_error *err ATTRIBUTE_UNUSED)
{
  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  if (!extract_field (FLD_opc2, code, 0))
    {
      info->addr.writeback = 1;
      /* INFO points into the instruction's operand array; scan forward
	 from this operand for the address-class operand, whose type
	 determines the addressing form.  */
      enum aarch64_opnd type;
      for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
	{
	  aarch64_opnd_info opnd = info[i];
	  type = opnd.type;
	  if (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS)
	    break;
	}
      assert (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS);
      /* The implicit offset is presumably the total size of the data
	 transferred -- NOTE(review): confirm calc_ldst_datasize's
	 contract.  */
      int offset = calc_ldst_datasize (inst->operands);
      switch (type)
	{
	case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
	case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
	  /* Pre-indexed: the base is decremented before the access.  */
	  info->addr.offset.imm = -offset;
	  info->addr.preind = 1;
	  break;
	case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
	case AARCH64_OPND_RCPC3_ADDR_POSTIND:
	  /* Post-indexed: the base is incremented after the access.  */
	  info->addr.offset.imm = offset;
	  info->addr.postind = 1;
	  break;
	default:
	  return false;
	}
    }
  return true;
}
/* Decode the base register and signed 9-bit immediate offset of an rcpc3
   address operand, e.g. [{, #}].

   Fixes: `return 0' in a bool function is now `return false', and the
   misleading ATTRIBUTE_UNUSED has been dropped from INST, which is
   used.  */
bool
aarch64_ext_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       aarch64_opnd_info *info,
			       aarch64_insn code,
			       const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->qualifier = get_expected_qualifier (inst, info->idx);
  if (info->qualifier == AARCH64_OPND_QLF_ERR)
    return false;

  /* Rn */
  info->addr.base_regno = extract_field (self->fields[0], code, 0);

  /* simm9 */
  aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
  info->addr.offset.imm = sign_extend (imm, 8);
  return true;
}
/* Decode the address operand for e.g.
   stlur , [{, }].

   Fixes: `return 0' in a bool function is now `return false'; the
   writeback block uses GNU brace style like the rest of the file.  */
bool
aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info,
			 aarch64_insn code, const aarch64_inst *inst,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->qualifier = get_expected_qualifier (inst, info->idx);
  if (info->qualifier == AARCH64_OPND_QLF_ERR)
    return false;

  /* Rn */
  info->addr.base_regno = extract_field (self->fields[0], code, 0);

  /* simm9 */
  aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
  info->addr.offset.imm = sign_extend (imm, 8);

  /* Writeback (pre-indexed) form.  */
  if (extract_field (self->fields[2], code, 0) == 1)
    {
      info->addr.writeback = 1;
      info->addr.preind = 1;
    }
  return true;
}
/* Decode the address operand for e.g.
   STR , [, {, {}}].

   Fix: `return 0' in a bool function is now `return false'.  */
bool
aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info,
			 aarch64_insn code, const aarch64_inst *inst,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S, value;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* Rm */
  info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  /* option */
  value = extract_field (FLD_option, code, 0);
  info->shifter.kind =
    aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
  /* Fix-up the shifter kind; although the table-driven approach is
     efficient, it is slightly inflexible, thus needing this fix-up.  */
  if (info->shifter.kind == AARCH64_MOD_UXTX)
    info->shifter.kind = AARCH64_MOD_LSL;

  /* S */
  S = extract_field (FLD_S, code, 0);
  if (S == 0)
    {
      info->shifter.amount = 0;
      info->shifter.amount_present = 0;
    }
  else
    {
      int size;
      /* Need information in other operand(s) to help achieve the decoding
	 from 'S' field.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return false;
      /* Get the size of the data element that is accessed, which may be
	 different from that of the source register size,
	 e.g. in strb/ldrb.  */
      size = aarch64_get_qualifier_esize (info->qualifier);
      info->shifter.amount = get_logsz (size);
      info->shifter.amount_present = 1;
    }

  return true;
}
/* Decode the address operand for e.g. LDRSW , [], #.

   Fix: `return 0' in a bool function is now `return false'.  */
bool
aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
		       aarch64_insn code, const aarch64_inst *inst,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn imm;

  info->qualifier = get_expected_qualifier (inst, info->idx);
  if (info->qualifier == AARCH64_OPND_QLF_ERR)
    return false;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* simm (imm9 or imm7) */
  imm = extract_field (self->fields[0], code, 0);
  info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions.  */
    info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
  /* qualifier */
  if (inst->opcode->iclass == ldst_unscaled
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == ldst_unpriv)
    info->addr.writeback = 0;
  else
    {
      /* pre/post- index */
      info->addr.writeback = 1;
      if (extract_field (self->fields[1], code, 0) == 1)
	info->addr.preind = 1;
      else
	info->addr.postind = 1;
    }

  return true;
}
/* Decode the address operand for e.g. LDRSW , [{, #}]. */
bool
aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
aarch64_insn code,
const aarch64_inst *inst ATTRIBUTE_UNUSED,
aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
int shift;
info->qualifier = get_expected_qualifier (inst, info->idx);
if (info->qualifier == AARCH64_OPND_QLF_ERR)
return 0;
shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
/* Rn */
info->addr.base_regno = extract_field (self->fields[0], code, 0);
/* uimm12 */
info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
return true;
}
/* Decode the address operand for e.g. LDRAA , [{, #}]. */
bool
aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
aarch64_insn code,
const aarch64_inst *inst ATTRIBUTE_UNUSED,
aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
aarch64_insn imm;
info->qualifier = get_expected_qualifier (inst, info->idx);
if (info->qualifier == AARCH64_OPND_QLF_ERR)
return 0;
/* Rn */
info->addr.base_regno = extract_field (self->fields[0], code, 0);
/* simm10 */
imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
info->addr.offset.imm = sign_extend (imm, 9) << 3;
if (extract_field (self->fields[3], code, 0) == 1) {
info->addr.writeback = 1;
info->addr.preind = 1;
}
return true;
}
/* Decode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bool
aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    aarch64_opnd_info *info,
			    aarch64_insn code, const aarch64_inst *inst,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *opnd0 = &inst->operands[0];
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* Rm | #<amount> */
  info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  if (info->addr.offset.regno != 31)
    info->addr.offset.is_reg = 1;
  else
    {
      int esize = aarch64_get_qualifier_esize (opnd0->qualifier);

      if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
	/* Special handling of loading single structure to all lane.  */
	info->addr.offset.imm
	  = (is_ld1r ? 1 : opnd0->reglist.num_regs) * esize;
      else
	info->addr.offset.imm
	  = (opnd0->reglist.num_regs * esize
	     * aarch64_get_qualifier_nelem (opnd0->qualifier));
    }
  info->addr.writeback = 1;
  return true;
}
/* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bool
aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info,
		  aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* cond: map the encoded field straight onto a condition descriptor.  */
  info->cond = get_cond_from_value (extract_field (FLD_cond, code, 0));
  return true;
}
/* Decode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
bool
aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
		    aarch64_opnd_info *info,
		    aarch64_insn code,
		    const aarch64_inst *inst,
		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* INST is in fact used (iclass/flags checks below), so the stale
     ATTRIBUTE_UNUSED annotation on it has been dropped.  */
  /* op0:op1:CRn:CRm:op2 */
  info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
				       FLD_CRm, FLD_op2);
  info->sysreg.flags = 0;

  /* If a system instruction, check which restrictions should be on the
     register value during decoding; these will be enforced then.  */
  if (inst->opcode->iclass == ic_system)
    {
      /* Check to see if it's read-only, else check if it's write only.
	 if it's both or unspecified don't care.  */
      if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
	info->sysreg.flags = F_REG_READ;
      else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
	       == F_SYS_WRITE)
	info->sysreg.flags = F_REG_WRITE;
    }
  return true;
}
/* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bool
aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info, aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int idx;
  aarch64_insn crm = extract_field (FLD_CRm, code, 0);

  /* op1:op2 */
  info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);

  /* Look the encoding up in the pstatefield table; an entry may additionally
     encode part of its name in CRm[3:1].  */
  for (idx = 0; aarch64_pstatefields[idx].name != NULL; ++idx)
    {
      uint32_t flags = aarch64_pstatefields[idx].flags;

      if (aarch64_pstatefields[idx].value != (aarch64_insn) info->pstatefield)
	continue;
      /* PSTATEFIELD name can be encoded partially in CRm[3:1].  */
      if ((flags & F_REG_IN_CRM)
	  && (crm & 0xe) != PSTATE_DECODE_CRM (flags))
	continue;
      info->sysreg.flags = flags;
      return true;
    }

  /* Reserved value in <pstatefield>.  */
  return false;
}
/* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bool
aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       aarch64_opnd_info *info,
		       aarch64_insn code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int idx;
  aarch64_insn value;
  const aarch64_sys_ins_reg *candidates;

  /* op0:op1:CRn:CRm:op2 */
  value = extract_fields (code, 0, 5,
			  FLD_op0, FLD_op1, FLD_CRn,
			  FLD_CRm, FLD_op2);

  /* Pick the table matching this operand's flavour of system op.  */
  switch (info->type)
    {
    case AARCH64_OPND_SYSREG_AT:
      candidates = aarch64_sys_regs_at;
      break;
    case AARCH64_OPND_SYSREG_DC:
      candidates = aarch64_sys_regs_dc;
      break;
    case AARCH64_OPND_SYSREG_IC:
      candidates = aarch64_sys_regs_ic;
      break;
    case AARCH64_OPND_SYSREG_TLBI:
    case AARCH64_OPND_SYSREG_TLBIP:
      candidates = aarch64_sys_regs_tlbi;
      break;
    case AARCH64_OPND_SYSREG_SR:
      candidates = aarch64_sys_regs_sr;
      /* Let's remove op2 for rctx.  Refer to comments in the definition of
	 aarch64_sys_regs_sr[].  */
      value = value & ~(0x7);
      break;
    default:
      return false;
    }

  /* Linear search of the selected table for a matching encoding.  */
  for (idx = 0; candidates[idx].name != NULL; ++idx)
    if (candidates[idx].value == value)
      {
	info->sysins_op = candidates + idx;
	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
		     info->sysins_op->name,
		     (unsigned)info->sysins_op->value,
		     aarch64_sys_ins_reg_has_xt (info->sysins_op), idx);
	return true;
      }

  return false;
}
/* Decode the memory barrier option operand for e.g. DMB