Diffstat (limited to 'arch/x86/lib/insn-eval.c')
-rw-r--r--  arch/x86/lib/insn-eval.c  645
1 file changed, 551 insertions(+), 94 deletions(-)
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 9119d8e41f1f..e03eeec55cfe 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -13,6 +13,7 @@
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/ldt.h>
+#include <asm/msr.h>
#include <asm/vm86.h>
#undef pr_fmt
@@ -20,6 +21,7 @@
enum reg_type {
REG_TYPE_RM = 0,
+ REG_TYPE_REG,
REG_TYPE_INDEX,
REG_TYPE_BASE,
};
@@ -36,8 +38,6 @@ enum reg_type {
*/
static bool is_string_insn(struct insn *insn)
{
- insn_get_opcode(insn);
-
/* All string instructions have a 1-byte opcode. */
if (insn->opcode.nbytes != 1)
return false;
@@ -53,6 +53,28 @@ static bool is_string_insn(struct insn *insn)
}
/**
+ * insn_has_rep_prefix() - Determine if instruction has a REP prefix
+ * @insn: Instruction containing the prefix to inspect
+ *
+ * Returns:
+ *
+ * true if the instruction has a REP prefix, false if not.
+ */
+bool insn_has_rep_prefix(struct insn *insn)
+{
+ insn_byte_t p;
+
+ insn_get_prefixes(insn);
+
+ for_each_insn_prefix(insn, p) {
+ if (p == 0xf2 || p == 0xf3)
+ return true;
+ }
+
+ return false;
+}
+
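A minimal usage sketch (not part of the patch, the helper name check_rep() is made up): a caller that has already decoded an instruction can use the new helper to refuse REP-prefixed operations it does not want to emulate.
	/* Sketch only: refuse to emulate REP-prefixed string operations. */
	static int check_rep(struct insn *insn)
	{
		if (insn_has_rep_prefix(insn))
			return -EOPNOTSUPP;

		return 0;
	}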
+/**
* get_seg_reg_override_idx() - obtain segment register override index
* @insn: Valid instruction with segment override prefixes
*
@@ -69,15 +91,16 @@ static bool is_string_insn(struct insn *insn)
static int get_seg_reg_override_idx(struct insn *insn)
{
int idx = INAT_SEG_REG_DEFAULT;
- int num_overrides = 0, i;
+ int num_overrides = 0;
+ insn_byte_t p;
insn_get_prefixes(insn);
/* Look for any segment override prefixes. */
- for (i = 0; i < insn->prefixes.nbytes; i++) {
+ for_each_insn_prefix(insn, p) {
insn_attr_t attr;
- attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ attr = inat_get_opcode_attribute(p);
switch (attr) {
case INAT_MAKE_PREFIX(INAT_PFX_CS):
idx = INAT_SEG_REG_CS;
@@ -155,7 +178,7 @@ static bool check_seg_overrides(struct insn *insn, int regoff)
*/
static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
{
- if (user_64bit_mode(regs))
+ if (any_64bit_mode(regs))
return INAT_SEG_REG_IGNORE;
/*
* Resolve the default segment register as described in Section 3.7.4
@@ -179,6 +202,8 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
if (insn->addr_bytes == 2)
return -EINVAL;
+ fallthrough;
+
case -EDOM:
case offsetof(struct pt_regs, bx):
case offsetof(struct pt_regs, si):
@@ -205,7 +230,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
* resolve_seg_reg() - obtain segment register index
* @insn: Instruction with operands
* @regs: Register values as seen when entering kernel mode
- * @regoff: Operand offset, in pt_regs, used to deterimine segment register
+ * @regoff: Operand offset, in pt_regs, used to determine segment register
*
* Determine the segment register associated with the operands and, if
* applicable, prefixes and the instruction pointed by @insn.
@@ -264,7 +289,7 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff)
* which may be invalid at this point.
*/
if (regoff == offsetof(struct pt_regs, ip)) {
- if (user_64bit_mode(regs))
+ if (any_64bit_mode(regs))
return INAT_SEG_REG_IGNORE;
else
return INAT_SEG_REG_CS;
@@ -287,7 +312,7 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff)
* In long mode, segment override prefixes are ignored, except for
* overrides for FS and GS.
*/
- if (user_64bit_mode(regs)) {
+ if (any_64bit_mode(regs)) {
if (idx != INAT_SEG_REG_FS &&
idx != INAT_SEG_REG_GS)
idx = INAT_SEG_REG_IGNORE;
@@ -317,9 +342,9 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff)
*/
static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
{
-#ifdef CONFIG_X86_64
unsigned short sel;
+#ifdef CONFIG_X86_64
switch (seg_reg_idx) {
case INAT_SEG_REG_IGNORE:
return 0;
@@ -360,7 +385,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
case INAT_SEG_REG_GS:
return vm86regs->gs;
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
@@ -378,45 +402,53 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
case INAT_SEG_REG_FS:
return (unsigned short)(regs->fs & 0xffff);
case INAT_SEG_REG_GS:
- /*
- * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS.
- * The macro below takes care of both cases.
- */
- return get_user_gs(regs);
+ savesegment(gs, sel);
+ return sel;
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
#endif /* CONFIG_X86_64 */
}
-static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
- enum reg_type type)
+static const int pt_regoff[] = {
+ offsetof(struct pt_regs, ax),
+ offsetof(struct pt_regs, cx),
+ offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, sp),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+#ifdef CONFIG_X86_64
+ offsetof(struct pt_regs, r8),
+ offsetof(struct pt_regs, r9),
+ offsetof(struct pt_regs, r10),
+ offsetof(struct pt_regs, r11),
+ offsetof(struct pt_regs, r12),
+ offsetof(struct pt_regs, r13),
+ offsetof(struct pt_regs, r14),
+ offsetof(struct pt_regs, r15),
+#else
+ offsetof(struct pt_regs, ds),
+ offsetof(struct pt_regs, es),
+ offsetof(struct pt_regs, fs),
+ offsetof(struct pt_regs, gs),
+#endif
+};
+
+int pt_regs_offset(struct pt_regs *regs, int regno)
+{
+ if ((unsigned)regno < ARRAY_SIZE(pt_regoff))
+ return pt_regoff[regno];
+ return -EDOM;
+}
+
+static int get_regno(struct insn *insn, enum reg_type type)
{
+ int nr_registers = ARRAY_SIZE(pt_regoff);
int regno = 0;
- static const int regoff[] = {
- offsetof(struct pt_regs, ax),
- offsetof(struct pt_regs, cx),
- offsetof(struct pt_regs, dx),
- offsetof(struct pt_regs, bx),
- offsetof(struct pt_regs, sp),
- offsetof(struct pt_regs, bp),
- offsetof(struct pt_regs, si),
- offsetof(struct pt_regs, di),
-#ifdef CONFIG_X86_64
- offsetof(struct pt_regs, r8),
- offsetof(struct pt_regs, r9),
- offsetof(struct pt_regs, r10),
- offsetof(struct pt_regs, r11),
- offsetof(struct pt_regs, r12),
- offsetof(struct pt_regs, r13),
- offsetof(struct pt_regs, r14),
- offsetof(struct pt_regs, r15),
-#endif
- };
- int nr_registers = ARRAY_SIZE(regoff);
/*
* Don't possibly decode a 32-bit instruction as
* reading a 64-bit-only register.
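The new pt_regs_offset() helper exposes the register table to callers outside get_reg_offset(). A hedged sketch of how a caller might turn a register number into a saved value (regno_to_value() is a hypothetical name, not part of the patch):
	/* Sketch only: fetch the value of GPR 'regno' saved in pt_regs. */
	static int regno_to_value(struct pt_regs *regs, int regno, unsigned long *val)
	{
		int off = pt_regs_offset(regs, regno);

		if (off < 0)
			return off;

		*val = *(unsigned long *)((void *)regs + off);
		return 0;
	}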
@@ -439,6 +471,13 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
regno += 8;
break;
+ case REG_TYPE_REG:
+ regno = X86_MODRM_REG(insn->modrm.value);
+
+ if (X86_REX_R(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
case REG_TYPE_INDEX:
regno = X86_SIB_INDEX(insn->sib.value);
if (X86_REX_X(insn->rex_prefix.value))
@@ -477,7 +516,18 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
WARN_ONCE(1, "decoded an instruction with an invalid register");
return -EINVAL;
}
- return regoff[regno];
+ return regno;
+}
+
+static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ enum reg_type type)
+{
+ int regno = get_regno(insn, type);
+
+ if (regno < 0)
+ return regno;
+
+ return pt_regs_offset(regs, regno);
}
/**
@@ -485,7 +535,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
* @insn: Instruction containing ModRM byte
* @regs: Register values as seen when entering kernel mode
* @offs1: Offset of the first operand register
- * @offs2: Offset of the second opeand register, if applicable
+ * @offs2: Offset of the second operand register, if applicable
*
* Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte
* in @insn. This function is to be used with 16-bit address encodings. The
@@ -544,7 +594,7 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
* If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement-
* only addressing. This means that no registers are involved in
* computing the effective address. Thus, ensure that the first
- * register offset is invalild. The second register offset is already
+ * register offset is invalid. The second register offset is already
* invalid under the aforementioned conditions.
*/
if ((X86_MODRM_MOD(insn->modrm.value) == 0) &&
@@ -555,7 +605,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
}
/**
- * get_desc() - Obtain pointer to a segment descriptor
+ * get_desc() - Obtain contents of a segment descriptor
+ * @out: Segment descriptor contents on success
* @sel: Segment selector
*
* Given a segment selector, obtain a pointer to the segment descriptor.
@@ -563,31 +614,40 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
*
* Returns:
*
- * Pointer to segment descriptor on success.
+ * True on success, false on failure.
*
* NULL on error.
*/
-static struct desc_struct *get_desc(unsigned short sel)
+static bool get_desc(struct desc_struct *out, unsigned short sel)
{
struct desc_ptr gdt_desc = {0, 0};
unsigned long desc_base;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
- struct desc_struct *desc = NULL;
+ bool success = false;
struct ldt_struct *ldt;
/* Bits [15:3] contain the index of the desired entry. */
sel >>= 3;
- mutex_lock(&current->active_mm->context.lock);
- ldt = current->active_mm->context.ldt;
- if (ldt && sel < ldt->nr_entries)
- desc = &ldt->entries[sel];
+ /*
+ * If we're not in a valid context with a real (not just lazy)
+ * user mm, then don't even try.
+ */
+ if (!nmi_uaccess_okay())
+ return false;
+
+ mutex_lock(&current->mm->context.lock);
+ ldt = current->mm->context.ldt;
+ if (ldt && sel < ldt->nr_entries) {
+ *out = ldt->entries[sel];
+ success = true;
+ }
- mutex_unlock(&current->active_mm->context.lock);
+ mutex_unlock(&current->mm->context.lock);
- return desc;
+ return success;
}
#endif
native_store_gdt(&gdt_desc);
@@ -602,9 +662,10 @@ static struct desc_struct *get_desc(unsigned short sel)
desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
if (desc_base > gdt_desc.size)
- return NULL;
+ return false;
- return (struct desc_struct *)(gdt_desc.address + desc_base);
+ *out = *(struct desc_struct *)(gdt_desc.address + desc_base);
+ return true;
}
/**
@@ -626,7 +687,7 @@ static struct desc_struct *get_desc(unsigned short sel)
*/
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
short sel;
sel = get_segment_selector(regs, seg_reg_idx);
@@ -640,23 +701,27 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
*/
return (unsigned long)(sel << 4);
- if (user_64bit_mode(regs)) {
+ if (any_64bit_mode(regs)) {
/*
* Only FS or GS will have a base address, the rest of
* the segments' bases are forced to 0.
*/
unsigned long base;
- if (seg_reg_idx == INAT_SEG_REG_FS)
- rdmsrl(MSR_FS_BASE, base);
- else if (seg_reg_idx == INAT_SEG_REG_GS)
+ if (seg_reg_idx == INAT_SEG_REG_FS) {
+ rdmsrq(MSR_FS_BASE, base);
+ } else if (seg_reg_idx == INAT_SEG_REG_GS) {
/*
* swapgs was called at the kernel entry point. Thus,
* MSR_KERNEL_GS_BASE will have the user-space GS base.
*/
- rdmsrl(MSR_KERNEL_GS_BASE, base);
- else
+ if (user_mode(regs))
+ rdmsrq(MSR_KERNEL_GS_BASE, base);
+ else
+ rdmsrq(MSR_GS_BASE, base);
+ } else {
base = 0;
+ }
return base;
}
@@ -664,11 +729,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
if (!sel)
return -1L;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return -1L;
- return get_desc_base(desc);
+ return get_desc_base(&desc);
}
/**
@@ -690,7 +754,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
*/
static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
unsigned long limit;
short sel;
@@ -698,14 +762,13 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
if (sel < 0)
return 0;
- if (user_64bit_mode(regs) || v8086_mode(regs))
+ if (any_64bit_mode(regs) || v8086_mode(regs))
return -1L;
if (!sel)
return 0;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return 0;
/*
@@ -714,8 +777,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
* not tested when checking the segment limits. In practice,
* this means that the segment ends in (limit << 12) + 0xfff.
*/
- limit = get_desc_limit(desc);
- if (desc->g)
+ limit = get_desc_limit(&desc);
+ if (desc.g)
limit = (limit << 12) + 0xfff;
return limit;
@@ -739,7 +802,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
*/
int insn_get_code_seg_params(struct pt_regs *regs)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
short sel;
if (v8086_mode(regs))
@@ -750,8 +813,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
if (sel < 0)
return sel;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return -EINVAL;
/*
@@ -759,10 +821,10 @@ int insn_get_code_seg_params(struct pt_regs *regs)
* determines whether a segment contains data or code. If this is a data
* segment, return error.
*/
- if (!(desc->type & BIT(3)))
+ if (!(desc.type & BIT(3)))
return -EINVAL;
- switch ((desc->l << 1) | desc->d) {
+ switch ((desc.l << 1) | desc.d) {
case 0: /*
* Legacy mode. CS.L=0, CS.D=0. Address and operand size are
* both 16-bit.
@@ -779,7 +841,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
*/
return INSN_CODE_SEG_PARAMS(4, 8);
case 3: /* Invalid setting. CS.L=1, CS.D=1 */
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
@@ -803,6 +865,41 @@ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs)
}
/**
+ * insn_get_modrm_reg_off() - Obtain register in reg part of the ModRM byte
+ * @insn: Instruction containing the ModRM byte
+ * @regs: Register values as seen when entering kernel mode
+ *
+ * Returns:
+ *
+ * The register indicated by the reg part of the ModRM byte. The
+ * register is obtained as an offset from the base of pt_regs.
+ */
+int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs)
+{
+ return get_reg_offset(insn, regs, REG_TYPE_REG);
+}
+
+/**
+ * insn_get_modrm_reg_ptr() - Obtain register pointer based on ModRM byte
+ * @insn: Instruction containing the ModRM byte
+ * @regs: Register values as seen when entering kernel mode
+ *
+ * Returns:
+ *
+ * The register indicated by the reg part of the ModRM byte.
+ * The register is obtained as a pointer within pt_regs.
+ */
+unsigned long *insn_get_modrm_reg_ptr(struct insn *insn, struct pt_regs *regs)
+{
+ int offset;
+
+ offset = insn_get_modrm_reg_off(insn, regs);
+ if (offset < 0)
+ return NULL;
+ return (void *)regs + offset;
+}
+
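As an illustrative sketch only (the surrounding emulation logic and the function name are assumed, not shown in this patch), an MMIO read handler could store a loaded value straight into the register named by ModRM.reg:
	/* Sketch only: complete an emulated read of 'val' into ModRM.reg. */
	static int store_mmio_read(struct insn *insn, struct pt_regs *regs,
				   unsigned long val)
	{
		unsigned long *reg = insn_get_modrm_reg_ptr(insn, regs);

		if (!reg)
			return -EINVAL;

		*reg = val;
		return 0;
	}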
+/**
* get_seg_base_limit() - obtain base address and limit of a segment
* @insn: Instruction. Must be valid.
* @regs: Register values as seen when entering kernel mode
@@ -876,10 +973,11 @@ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs,
static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs,
int *regoff, long *eff_addr)
{
- insn_get_modrm(insn);
+ int ret;
- if (!insn->modrm.nbytes)
- return -EINVAL;
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
if (X86_MODRM_MOD(insn->modrm.value) != 3)
return -EINVAL;
@@ -925,14 +1023,14 @@ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs,
int *regoff, long *eff_addr)
{
long tmp;
+ int ret;
if (insn->addr_bytes != 8 && insn->addr_bytes != 4)
return -EINVAL;
- insn_get_modrm(insn);
-
- if (!insn->modrm.nbytes)
- return -EINVAL;
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
if (X86_MODRM_MOD(insn->modrm.value) > 2)
return -EINVAL;
@@ -945,7 +1043,7 @@ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs,
* following instruction.
*/
if (*regoff == -EDOM) {
- if (user_64bit_mode(regs))
+ if (any_64bit_mode(regs))
tmp = regs->ip + insn->length;
else
tmp = 0;
@@ -1038,15 +1136,15 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
* get_eff_addr_sib() - Obtain referenced effective address via SIB
* @insn: Instruction. Must be valid.
* @regs: Register values as seen when entering kernel mode
- * @regoff: Obtained operand offset, in pt_regs, associated with segment
+ * @base_offset: Obtained operand offset, in pt_regs, associated with segment
* @eff_addr: Obtained effective address
*
* Obtain the effective address referenced by the SIB byte of @insn. After
* identifying the registers involved in the indexed, register-indirect memory
* reference, its value is obtained from the operands in @regs. The computed
* address is stored @eff_addr. Also, the register operand that indicates the
- * associated segment is stored in @regoff, this parameter can later be used to
- * determine such segment.
+ * associated segment is stored in @base_offset; this parameter can later be
+ * used to determine such segment.
*
* Returns:
*
@@ -1054,18 +1152,21 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
* @base_offset will have a register, as an offset from the base of pt_regs,
* that can be used to resolve the associated segment.
*
- * -EINVAL on error.
+ * Negative value on error.
*/
static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs,
int *base_offset, long *eff_addr)
{
long base, indx;
int indx_offset;
+ int ret;
if (insn->addr_bytes != 8 && insn->addr_bytes != 4)
return -EINVAL;
- insn_get_modrm(insn);
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
if (!insn->modrm.nbytes)
return -EINVAL;
@@ -1073,7 +1174,9 @@ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs,
if (X86_MODRM_MOD(insn->modrm.value) > 2)
return -EINVAL;
- insn_get_sib(insn);
+ ret = insn_get_sib(insn);
+ if (ret)
+ return ret;
if (!insn->sib.nbytes)
return -EINVAL;
@@ -1142,8 +1245,8 @@ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs)
short eff_addr;
long tmp;
- insn_get_modrm(insn);
- insn_get_displacement(insn);
+ if (insn_get_displacement(insn))
+ goto out;
if (insn->addr_bytes != 2)
goto out;
@@ -1247,7 +1350,7 @@ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs)
* After computed, the effective address is treated as an unsigned
* quantity.
*/
- if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit))
+ if (!any_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit))
goto out;
/*
@@ -1351,6 +1454,9 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
if (!insn || !regs)
return (void __user *)-1L;
+ if (insn_get_opcode(insn))
+ return (void __user *)-1L;
+
switch (insn->addr_bytes) {
case 2:
return get_addr_ref_16(insn, regs);
@@ -1362,3 +1468,354 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
return (void __user *)-1L;
}
}
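For context, a hedged sketch of the usual consumer of insn_get_addr_ref(): resolve the memory operand of an already-decoded instruction and reject the -1L error marker. The wrapper name is hypothetical.
	/* Sketch only: resolve the linear address of the memory operand. */
	static int resolve_operand(struct insn *insn, struct pt_regs *regs,
				   void __user **addr)
	{
		*addr = insn_get_addr_ref(insn, regs);

		return (*addr == (void __user *)-1L) ? -EINVAL : 0;
	}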
+
+int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip)
+{
+ unsigned long seg_base = 0;
+
+ /*
+ * If not in user-space long mode, a custom code segment could be in
+ * use. This is true in protected mode (if the process defined a local
+ * descriptor table), or virtual-8086 mode. In most of the cases
+ * seg_base will be zero as in USER_CS.
+ */
+ if (!user_64bit_mode(regs)) {
+ seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
+ if (seg_base == -1L)
+ return -EINVAL;
+ }
+
+ *ip = seg_base + regs->ip;
+
+ return 0;
+}
+
+/**
+ * insn_fetch_from_user() - Copy instruction bytes from user-space memory
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @buf: Array to store the fetched instruction
+ *
+ * Gets the linear address of the instruction and copies the instruction bytes
+ * to @buf.
+ *
+ * Returns:
+ *
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated
+ */
+int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
+{
+ unsigned long ip;
+ int not_copied;
+
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
+
+ not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);
+
+ return MAX_INSN_SIZE - not_copied;
+}
+
+/**
+ * insn_fetch_from_user_inatomic() - Copy instruction bytes from user-space memory
+ * while in atomic code
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @buf: Array to store the fetched instruction
+ *
+ * Gets the linear address of the instruction and copies the instruction bytes
+ * to @buf. This function must be used in atomic context.
+ *
+ * Returns:
+ *
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated.
+ */
+int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
+{
+ unsigned long ip;
+ int not_copied;
+
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
+
+ not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
+
+ return MAX_INSN_SIZE - not_copied;
+}
+
+/**
+ * insn_decode_from_regs() - Decode an instruction
+ * @insn: Structure to store decoded instruction
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @buf: Buffer containing the instruction bytes
+ * @buf_size: Number of instruction bytes available in buf
+ *
+ * Decodes the instruction provided in buf and stores the decoding results in
+ * insn. Also determines the correct address and operand sizes.
+ *
+ * Returns:
+ *
+ * True if instruction was decoded, False otherwise.
+ */
+bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
+ unsigned char buf[MAX_INSN_SIZE], int buf_size)
+{
+ int seg_defs;
+
+ insn_init(insn, buf, buf_size, user_64bit_mode(regs));
+
+ /*
+ * Override the default operand and address sizes with what is specified
+ * in the code segment descriptor. The instruction decoder only sets
+ * the address size to either 4 or 8 bytes and does nothing
+ * for the operand bytes. This is OK for most cases, but we could
+ * have special cases where, for instance, a 16-bit code segment
+ * descriptor is used.
+ * If there is an address override prefix, the instruction decoder
+ * correctly updates these values, even for 16-bit defaults.
+ */
+ seg_defs = insn_get_code_seg_params(regs);
+ if (seg_defs == -EINVAL)
+ return false;
+
+ insn->addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs);
+ insn->opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs);
+
+ if (insn_get_length(insn))
+ return false;
+
+ if (buf_size < insn->length)
+ return false;
+
+ return true;
+}
+
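A hedged sketch of how the two new helpers are meant to be combined by a fault handler: fetch the bytes at the faulting user IP, then decode them using the segment-derived defaults. The wrapper name decode_user_insn() is hypothetical.
	/* Sketch only: fetch and decode the instruction at the user IP. */
	static bool decode_user_insn(struct pt_regs *regs, struct insn *insn)
	{
		unsigned char buf[MAX_INSN_SIZE];
		int nr_copied;

		nr_copied = insn_fetch_from_user(regs, buf);
		if (nr_copied <= 0)
			return false;

		return insn_decode_from_regs(insn, regs, buf, nr_copied);
	}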
+/**
+ * insn_decode_mmio() - Decode a MMIO instruction
+ * @insn: Structure to store decoded instruction
+ * @bytes: Returns size of memory operand
+ *
+ * Decodes an instruction used for memory-mapped I/O.
+ *
+ * Returns:
+ *
+ * Type of the instruction. Size of the memory operand is stored in
+ * @bytes. If decoding failed, INSN_MMIO_DECODE_FAILED is returned.
+ */
+enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
+{
+ enum insn_mmio_type type = INSN_MMIO_DECODE_FAILED;
+
+ *bytes = 0;
+
+ if (insn_get_opcode(insn))
+ return INSN_MMIO_DECODE_FAILED;
+
+ switch (insn->opcode.bytes[0]) {
+ case 0x88: /* MOV m8,r8 */
+ *bytes = 1;
+ fallthrough;
+ case 0x89: /* MOV m16/m32/m64, r16/r32/r64 */
+ if (!*bytes)
+ *bytes = insn->opnd_bytes;
+ type = INSN_MMIO_WRITE;
+ break;
+
+ case 0xc6: /* MOV m8, imm8 */
+ *bytes = 1;
+ fallthrough;
+ case 0xc7: /* MOV m16/m32/m64, imm16/imm32/imm64 */
+ if (!*bytes)
+ *bytes = insn->opnd_bytes;
+ type = INSN_MMIO_WRITE_IMM;
+ break;
+
+ case 0x8a: /* MOV r8, m8 */
+ *bytes = 1;
+ fallthrough;
+ case 0x8b: /* MOV r16/r32/r64, m16/m32/m64 */
+ if (!*bytes)
+ *bytes = insn->opnd_bytes;
+ type = INSN_MMIO_READ;
+ break;
+
+ case 0xa4: /* MOVS m8, m8 */
+ *bytes = 1;
+ fallthrough;
+ case 0xa5: /* MOVS m16/m32/m64, m16/m32/m64 */
+ if (!*bytes)
+ *bytes = insn->opnd_bytes;
+ type = INSN_MMIO_MOVS;
+ break;
+
+ case 0x0f: /* Two-byte instruction */
+ switch (insn->opcode.bytes[1]) {
+ case 0xb6: /* MOVZX r16/r32/r64, m8 */
+ *bytes = 1;
+ fallthrough;
+ case 0xb7: /* MOVZX r32/r64, m16 */
+ if (!*bytes)
+ *bytes = 2;
+ type = INSN_MMIO_READ_ZERO_EXTEND;
+ break;
+
+ case 0xbe: /* MOVSX r16/r32/r64, m8 */
+ *bytes = 1;
+ fallthrough;
+ case 0xbf: /* MOVSX r32/r64, m16 */
+ if (!*bytes)
+ *bytes = 2;
+ type = INSN_MMIO_READ_SIGN_EXTEND;
+ break;
+ }
+ break;
+ }
+
+ return type;
+}
+
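A hedged sketch of the intended dispatch pattern: classify the access, then branch on the returned type. Only the write case is shown; the actual write-out to the MMIO address is a placeholder and the other types are elided.
	/* Sketch only: dispatch one decoded MMIO access. */
	static int handle_one_mmio(struct insn *insn, struct pt_regs *regs)
	{
		unsigned long *reg;
		int size;

		switch (insn_decode_mmio(insn, &size)) {
		case INSN_MMIO_WRITE:
			reg = insn_get_modrm_reg_ptr(insn, regs);
			if (!reg)
				return -EINVAL;
			/* ...write 'size' bytes from *reg to the MMIO address... */
			return 0;
		case INSN_MMIO_DECODE_FAILED:
		default:
			/* Other types elided in this sketch. */
			return -EINVAL;
		}
	}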
+/*
+ * Recognise typical NOP patterns for both 32-bit and 64-bit.
+ *
+ * Notably:
+ * - NOP, but not: REP NOP aka PAUSE
+ * - NOPL
+ * - MOV %reg, %reg
+ * - LEA 0(%reg),%reg
+ * - JMP +0
+ *
+ * Must not have false-positives; instructions identified as a NOP might be
+ * emulated as a NOP (uprobe) or Run Length Encoded in a larger NOP
+ * (alternatives).
+ *
+ * False-negatives are fine; need not be exhaustive.
+ */
+bool insn_is_nop(struct insn *insn)
+{
+ u8 b3 = 0, x3 = 0, r3 = 0;
+ u8 b4 = 0, x4 = 0, r4 = 0, m = 0;
+ u8 modrm, modrm_mod, modrm_reg, modrm_rm;
+ u8 sib = 0, sib_scale, sib_index, sib_base;
+ u8 nrex, rex;
+ u8 p, rep = 0;
+
+ if ((nrex = insn->rex_prefix.nbytes)) {
+ rex = insn->rex_prefix.bytes[nrex-1];
+
+ r3 = !!X86_REX_R(rex);
+ x3 = !!X86_REX_X(rex);
+ b3 = !!X86_REX_B(rex);
+ if (nrex > 1) {
+ r4 = !!X86_REX2_R(rex);
+ x4 = !!X86_REX2_X(rex);
+ b4 = !!X86_REX2_B(rex);
+ m = !!X86_REX2_M(rex);
+ }
+
+ } else if (insn->vex_prefix.nbytes) {
+ /*
+ * Ignore VEX encoded NOPs
+ */
+ return false;
+ }
+
+ if (insn->modrm.nbytes) {
+ modrm = insn->modrm.bytes[0];
+ modrm_mod = X86_MODRM_MOD(modrm);
+ modrm_reg = X86_MODRM_REG(modrm) + 8*r3 + 16*r4;
+ modrm_rm = X86_MODRM_RM(modrm) + 8*b3 + 16*b4;
+ modrm = 1;
+ }
+
+ if (insn->sib.nbytes) {
+ sib = insn->sib.bytes[0];
+ sib_scale = X86_SIB_SCALE(sib);
+ sib_index = X86_SIB_INDEX(sib) + 8*x3 + 16*x4;
+ sib_base = X86_SIB_BASE(sib) + 8*b3 + 16*b4;
+ sib = 1;
+
+ modrm_rm = sib_base;
+ }
+
+ for_each_insn_prefix(insn, p) {
+ if (p == 0xf3) /* REPE */
+ rep = 1;
+ }
+
+ /*
+ * Opcode map munging:
+ *
+ * REX2: 0 - single byte opcode
+ * 1 - 0f second byte opcode
+ */
+ switch (m) {
+ case 0: break;
+ case 1: insn->opcode.value <<= 8;
+ insn->opcode.value |= 0x0f;
+ break;
+ default:
+ return false;
+ }
+
+ switch (insn->opcode.bytes[0]) {
+ case 0x0f: /* 2nd byte */
+ break;
+
+ case 0x89: /* MOV */
+ if (modrm_mod != 3) /* register-direct */
+ return false;
+
+ /* native size */
+ if (insn->opnd_bytes != 4 * (1 + insn->x86_64))
+ return false;
+
+ return modrm_reg == modrm_rm; /* MOV %reg, %reg */
+
+ case 0x8d: /* LEA */
+ if (modrm_mod == 0 || modrm_mod == 3) /* register-indirect with disp */
+ return false;
+
+ /* native size */
+ if (insn->opnd_bytes != 4 * (1 + insn->x86_64))
+ return false;
+
+ if (insn->displacement.value != 0)
+ return false;
+
+ if (sib && (sib_scale != 0 || sib_index != 4)) /* (%reg, %eiz, 1) */
+ return false;
+
+ for_each_insn_prefix(insn, p) {
+ if (p != 0x3e) /* DS */
+ return false;
+ }
+
+ return modrm_reg == modrm_rm; /* LEA 0(%reg), %reg */
+
+ case 0x90: /* NOP */
+ if (b3 || b4) /* XCHG %r{8,16,24},%rax */
+ return false;
+
+ if (rep) /* REP NOP := PAUSE */
+ return false;
+
+ return true;
+
+ case 0xe9: /* JMP.d32 */
+ case 0xeb: /* JMP.d8 */
+ return insn->immediate.value == 0; /* JMP +0 */
+
+ default:
+ return false;
+ }
+
+ switch (insn->opcode.bytes[1]) {
+ case 0x1f:
+ return modrm_reg == 0; /* 0f 1f /0 -- NOPL */
+
+ default:
+ return false;
+ }
+}
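Finally, a hedged sketch of a caller: decode an instruction from kernel text and ask whether it may be treated as a NOP. insn_decode_kernel() is the existing kernel-mode decoder wrapper; the function name around it is made up.
	/* Sketch only: true if the instruction at 'addr' is a recognised NOP. */
	static bool text_is_nop(void *addr)
	{
		struct insn insn;

		if (insn_decode_kernel(&insn, addr))
			return false;

		return insn_is_nop(&insn);
	}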