Diffstat (limited to 'arch/arm64/kvm/guest.c')
-rw-r--r--  arch/arm64/kvm/guest.c | 346
 1 file changed, 261 insertions(+), 85 deletions(-)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2fff06114a8f..aaf1d4939739 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -18,39 +18,49 @@
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <kvm/arm_psci.h>
+#include <kvm/arm_hypercalls.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_host.h>
+#include <asm/kvm_nested.h>
#include <asm/sigcontext.h>
#include "trace.h"
-#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
-#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
-
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT(halt_successful_poll),
- VCPU_STAT(halt_attempted_poll),
- VCPU_STAT(halt_poll_invalid),
- VCPU_STAT(halt_wakeup),
- VCPU_STAT(hvc_exit_stat),
- VCPU_STAT(wfe_exit_stat),
- VCPU_STAT(wfi_exit_stat),
- VCPU_STAT(mmio_exit_user),
- VCPU_STAT(mmio_exit_kernel),
- VCPU_STAT(exits),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
};
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_user),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, exits)
+};
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
+};
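
These headers describe the self-describing binary stats layout exposed through the KVM_GET_STATS_FD ioctl: the id string lives at id_offset, the descriptor array at desc_offset, and the u64 data block at data_offset. A minimal, hypothetical reader (error handling omitted, vcpu_fd assumed to be an open vCPU file descriptor) might walk it like this:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_vcpu_stats(int vcpu_fd)
{
	struct kvm_stats_header hdr;
	char *descs;
	size_t rec;
	__u32 i;
	int fd;

	fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
	pread(fd, &hdr, sizeof(hdr), 0);

	/* One record = the fixed kvm_stats_desc plus name_size name bytes */
	rec = sizeof(struct kvm_stats_desc) + hdr.name_size;
	descs = malloc(rec * hdr.num_desc);
	pread(fd, descs, rec * hdr.num_desc, hdr.desc_offset);

	for (i = 0; i < hdr.num_desc; i++) {
		struct kvm_stats_desc *d = (void *)(descs + i * rec);
		__u64 val;

		/*
		 * desc->offset is relative to the header's data_offset;
		 * counters are a single u64, so read just the first value.
		 */
		pread(fd, &val, sizeof(val), hdr.data_offset + d->offset);
		printf("%s: %llu\n", d->name, (unsigned long long)val);
	}

	free(descs);
	close(fd);
}
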
static bool core_reg_offset_is_vreg(u64 off)
{
@@ -108,19 +118,69 @@ static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
return size;
}
-static int validate_core_offset(const struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
+static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u64 off = core_reg_offset_from_id(reg->id);
int size = core_reg_size_from_offset(vcpu, off);
if (size < 0)
- return -EINVAL;
+ return NULL;
if (KVM_REG_SIZE(reg->id) != size)
- return -EINVAL;
+ return NULL;
- return 0;
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
+ off /= 2;
+ return &vcpu->arch.ctxt.regs.regs[off];
+
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ return &vcpu->arch.ctxt.regs.sp;
+
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ return &vcpu->arch.ctxt.regs.pc;
+
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ return &vcpu->arch.ctxt.regs.pstate;
+
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
+
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
+ return &vcpu->arch.ctxt.spsr_abt;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
+ return &vcpu->arch.ctxt.spsr_und;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
+ return &vcpu->arch.ctxt.spsr_irq;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
+ return &vcpu->arch.ctxt.spsr_fiq;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
+ off /= 4;
+ return &vcpu->arch.ctxt.fp_regs.vregs[off];
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ return &vcpu->arch.ctxt.fp_regs.fpsr;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ return &vcpu->arch.ctxt.fp_regs.fpcr;
+
+ default:
+ return NULL;
+ }
}
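
core_reg_addr() resolves a user-visible KVM_REG_ARM_CORE index directly to the backing field, so nothing changes for userspace, which keeps using the documented KVM_GET_ONE_REG encoding. A hypothetical caller reading the guest PC (error handling omitted):

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t read_guest_pc(int vcpu_fd)
{
	uint64_t pc = 0;
	struct kvm_one_reg reg = {
		/* 64-bit core reg: index of regs.pc within struct kvm_regs */
		.id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
		      KVM_REG_ARM_CORE_REG(regs.pc),
		.addr = (uint64_t)&pc,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	return pc;
}
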
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -132,8 +192,8 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
* off the index in the "array".
*/
__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
- struct kvm_regs *regs = vcpu_gp_regs(vcpu);
- int nr_regs = sizeof(*regs) / sizeof(__u32);
+ int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
+ void *addr;
u32 off;
/* Our ID is an index into the kvm_regs struct. */
@@ -142,10 +202,11 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(vcpu, reg))
+ addr = core_reg_addr(vcpu, reg);
+ if (!addr)
return -EINVAL;
- if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+ if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
@@ -154,10 +215,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
- struct kvm_regs *regs = vcpu_gp_regs(vcpu);
- int nr_regs = sizeof(*regs) / sizeof(__u32);
+ int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
__uint128_t tmp;
- void *valp = &tmp;
+ void *valp = &tmp, *addr;
u64 off;
int err = 0;
@@ -167,7 +227,8 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(vcpu, reg))
+ addr = core_reg_addr(vcpu, reg);
+ if (!addr)
return -EINVAL;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -182,7 +243,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
- if (!system_supports_32bit_el0())
+ if (!kvm_supports_32bit_el0())
return -EINVAL;
break;
case PSR_AA32_MODE_FIQ:
@@ -193,6 +254,11 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (!vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
+ case PSR_MODE_EL2h:
+ case PSR_MODE_EL2t:
+ if (!vcpu_has_nv(vcpu))
+ return -EINVAL;
+ fallthrough;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
@@ -205,7 +271,36 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
}
- memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+ memcpy(addr, valp, KVM_REG_SIZE(reg->id));
+
+ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+ int i, nr_reg;
+
+ switch (*vcpu_cpsr(vcpu)) {
+ /*
+ * Either we are dealing with user mode, and only the
+ * first 15 registers (+ PC) must be narrowed to 32bit.
+ * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
+ */
+ case PSR_AA32_MODE_USR:
+ case PSR_AA32_MODE_SYS:
+ nr_reg = 15;
+ break;
+
+ /*
+ * Otherwise, this is a privileged mode, and *all* the
+ * registers must be narrowed to 32bit.
+ */
+ default:
+ nr_reg = 31;
+ break;
+ }
+
+ for (i = 0; i < nr_reg; i++)
+ vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));
+
+ *vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
+ }
out:
return err;
}
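
A visible consequence of the new narrowing loop: writing a PSTATE value with PSR_MODE32_BIT set truncates the relevant general purpose registers to their lower 32 bits. As a hypothetical sketch, switching a vCPU to AArch32 user mode could look like this; the 0x10 encoding mirrors the kernel-internal PSR_AA32_MODE_USR, which the arm64 uapi does not export:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Architectural AArch32 User mode; kernel-internal name PSR_AA32_MODE_USR */
#define AA32_MODE_USR	0x10UL

static int set_aarch32_usr(int vcpu_fd)
{
	uint64_t pstate = AA32_MODE_USR;	/* bit 4 doubles as PSR_MODE32_BIT */
	struct kvm_one_reg reg = {
		.id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
		      KVM_REG_ARM_CORE_REG(regs.pstate),
		.addr = (uint64_t)&pstate,
	};

	/* Rejected with -EINVAL when the host lacks 32-bit EL0 support */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
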
@@ -227,7 +322,7 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
memset(vqs, 0, sizeof(vqs));
- max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ max_vq = vcpu_sve_max_vq(vcpu);
for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
if (sve_vq_available(vq))
vqs[vq_word(vq)] |= vq_mask(vq);
@@ -266,7 +361,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
/*
* Vector lengths supported by the host can't currently be
* hidden from the guest individually: instead we can only set a
- * maxmium via ZCR_EL2.LEN. So, make sure the available vector
+ * maximum via ZCR_EL2.LEN. So, make sure the available vector
* lengths match the set requested exactly up to the requested
* maximum:
*/
@@ -336,7 +431,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
unsigned int reg_num;
unsigned int reqoffset, reqlen; /* User-requested offset and length */
- unsigned int maxlen; /* Maxmimum permitted length */
+ unsigned int maxlen; /* Maximum permitted length */
size_t sve_state_size;
@@ -355,7 +450,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
- vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ vq = vcpu_sve_max_vq(vcpu);
reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET;
@@ -365,7 +460,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
- vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ vq = vcpu_sve_max_vq(vcpu);
reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET;
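
Both the Z and P register paths now take the vector quotient from vcpu_sve_max_vq(). Userspace still reaches this state through the KVM_REG_ARM64_SVE encodings; a hypothetical read of Z0, assuming SVE was enabled in KVM_ARM_VCPU_INIT and the vCPU finalized:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_sve_z0(int vcpu_fd, uint8_t buf[256])
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_ARM64_SVE_ZREG(0, 0),	/* Z0, slice 0 */
		.addr = (uint64_t)buf,
	};

	/* The payload is 2048 bits; only the first vq * 16 bytes are live */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
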
@@ -495,11 +590,16 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
return copy_core_reg_indices(vcpu, NULL);
}
-/**
- * ARM64 versions of the TIMER registers, always available on arm64
- */
+static const u64 timer_reg_list[] = {
+ KVM_REG_ARM_TIMER_CTL,
+ KVM_REG_ARM_TIMER_CNT,
+ KVM_REG_ARM_TIMER_CVAL,
+ KVM_REG_ARM_PTIMER_CTL,
+ KVM_REG_ARM_PTIMER_CNT,
+ KVM_REG_ARM_PTIMER_CVAL,
+};
-#define NUM_TIMER_REGS 3
+#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
static bool is_timer_reg(u64 index)
{
@@ -507,6 +607,9 @@ static bool is_timer_reg(u64 index)
case KVM_REG_ARM_TIMER_CTL:
case KVM_REG_ARM_TIMER_CNT:
case KVM_REG_ARM_TIMER_CVAL:
+ case KVM_REG_ARM_PTIMER_CTL:
+ case KVM_REG_ARM_PTIMER_CNT:
+ case KVM_REG_ARM_PTIMER_CVAL:
return true;
}
return false;
@@ -514,14 +617,11 @@ static bool is_timer_reg(u64 index)
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
- return -EFAULT;
- uindices++;
- if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
- return -EFAULT;
- uindices++;
- if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
- return -EFAULT;
+ for (int i = 0; i < NUM_TIMER_REGS; i++) {
+ if (put_user(timer_reg_list[i], uindices))
+ return -EFAULT;
+ uindices++;
+ }
return 0;
}
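
With the physical timer added to timer_reg_list, KVM_GET_REG_LIST advertises six timer registers, each readable through KVM_GET_ONE_REG since the KVM_REG_ARM_*TIMER_* constants are already complete register IDs. A hypothetical read of the guest's physical counter:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_ptimer_cnt(int vcpu_fd, uint64_t *cnt)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_ARM_PTIMER_CNT,
		.addr = (uint64_t)cnt,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
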
@@ -667,7 +767,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
- case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg);
+ case KVM_REG_ARM_FW:
+ case KVM_REG_ARM_FW_FEAT_BMAP:
+ return kvm_arm_get_fw_reg(vcpu, reg);
case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
}
@@ -685,7 +787,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
- case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg);
+ case KVM_REG_ARM_FW:
+ case KVM_REG_ARM_FW_FEAT_BMAP:
+ return kvm_arm_set_fw_reg(vcpu, reg);
case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
}
@@ -711,7 +815,7 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
- events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+ events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
if (events->exception.serror_pending && events->exception.serror_has_esr)
events->exception.serror_esr = vcpu_get_vsesr(vcpu);
@@ -733,7 +837,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool ext_dabt_pending = events->exception.ext_dabt_pending;
if (serror_pending && has_esr) {
- if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+ if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
return -EINVAL;
if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
@@ -750,7 +854,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
return 0;
}
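
Only the capability query changes here; the RAS ESR contract with userspace stays the same. For reference, a hypothetical injection of a virtual SError with a caller-supplied ISS via KVM_SET_VCPU_EVENTS, which is rejected on hosts without the RAS extension:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_serror(int vcpu_fd, uint64_t esr_iss)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	events.exception.serror_pending = 1;
	events.exception.serror_has_esr = 1;
	events.exception.serror_esr = esr_iss;	/* only ISS bits are accepted */

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
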
-int __attribute_const__ kvm_target_cpu(void)
+u32 __attribute_const__ kvm_target_cpu(void)
{
unsigned long implementor = read_cpuid_implementor();
unsigned long part_number = read_cpuid_part_number();
@@ -770,7 +874,7 @@ int __attribute_const__ kvm_target_cpu(void)
break;
case ARM_CPU_IMP_APM:
switch (part_number) {
- case APM_CPU_PART_POTENZA:
+ case APM_CPU_PART_XGENE:
return KVM_ARM_TARGET_XGENE_POTENZA;
}
break;
@@ -780,26 +884,6 @@ int __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_GENERIC_V8;
}
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
-{
- int target = kvm_target_cpu();
-
- if (target < 0)
- return -ENODEV;
-
- memset(init, 0, sizeof(*init));
-
- /*
- * For now, we don't return any features.
- * In future, we might use features to return target
- * specific features available for the preferred
- * target type.
- */
- init->target = (__u32)target;
-
- return 0;
-}
-
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
@@ -816,11 +900,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return -EINVAL;
}
-#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
- KVM_GUESTDBG_USE_SW_BP | \
- KVM_GUESTDBG_USE_HW | \
- KVM_GUESTDBG_SINGLESTEP)
-
/**
* kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
* @vcpu: the vCPU pointer
@@ -854,6 +933,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
} else {
/* If not enabled clear all flags */
vcpu->guest_debug = 0;
+ vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
}
out:
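
Clearing DBG_SS_ACTIVE_PENDING when debugging is switched off keeps stale single-step state from leaking into a later debug session. A hypothetical toggle from the VMM side:

#include <stdbool.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_singlestep(int vcpu_fd, bool enable)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	if (enable)
		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	/* control == 0 disables debugging and clears pending state */

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
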
@@ -867,7 +947,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
switch (attr->group) {
case KVM_ARM_VCPU_PMU_V3_CTRL:
+ mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);
@@ -928,3 +1010,97 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
return ret;
}
+
+int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ struct kvm_arm_copy_mte_tags *copy_tags)
+{
+ gpa_t guest_ipa = copy_tags->guest_ipa;
+ size_t length = copy_tags->length;
+ void __user *tags = copy_tags->addr;
+ gpa_t gfn;
+ bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
+ int ret = 0;
+
+ if (!kvm_has_mte(kvm))
+ return -EINVAL;
+
+ if (copy_tags->reserved[0] || copy_tags->reserved[1])
+ return -EINVAL;
+
+ if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
+ return -EINVAL;
+
+ if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
+ return -EINVAL;
+
+ /* Lengths above INT_MAX cannot be represented in the return value */
+ if (length > INT_MAX)
+ return -EINVAL;
+
+ gfn = gpa_to_gfn(guest_ipa);
+
+ mutex_lock(&kvm->slots_lock);
+
+ while (length > 0) {
+ kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
+ void *maddr;
+ unsigned long num_tags;
+ struct page *page;
+
+ if (is_error_noslot_pfn(pfn)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ page = pfn_to_online_page(pfn);
+ if (!page) {
+ /* Reject ZONE_DEVICE memory */
+ ret = -EFAULT;
+ goto out;
+ }
+ maddr = page_address(page);
+
+ if (!write) {
+ if (page_mte_tagged(page))
+ num_tags = mte_copy_tags_to_user(tags, maddr,
+ MTE_GRANULES_PER_PAGE);
+ else
+ /* No tags in memory, so write zeros */
+ num_tags = MTE_GRANULES_PER_PAGE -
+ clear_user(tags, MTE_GRANULES_PER_PAGE);
+ kvm_release_pfn_clean(pfn);
+ } else {
+ /*
+ * Only locking to serialise with a concurrent
+ * set_pte_at() in the VMM but still overriding the
+ * tags, hence ignoring the return value.
+ */
+ try_page_mte_tagging(page);
+ num_tags = mte_copy_tags_from_user(maddr, tags,
+ MTE_GRANULES_PER_PAGE);
+
+ /* uaccess failed, don't leave stale tags */
+ if (num_tags != MTE_GRANULES_PER_PAGE)
+ mte_clear_page_tags(maddr);
+ set_page_mte_tagged(page);
+
+ kvm_release_pfn_dirty(pfn);
+ }
+
+ if (num_tags != MTE_GRANULES_PER_PAGE) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ gfn++;
+ tags += num_tags;
+ length -= PAGE_SIZE;
+ }
+
+out:
+ mutex_unlock(&kvm->slots_lock);
+ /* If some data has been copied report the number of bytes copied */
+ if (length != copy_tags->length)
+ return copy_tags->length - length;
+ return ret;
+}