author     Janosch Frank <frankja@linux.vnet.ibm.com>   2017-02-08 08:59:56 +0100
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2017-03-24 16:11:30 +0100
commit     2fa5ed7d87074268b5d29cdff6c4f81f51666885 (patch)
tree       036ff1a54360054615b448b1c97b66975c0c2f02 /arch/s390/mm
parent     0b7bb6af1d734b15dbebec942767708e8ca40ca3 (diff)
s390/mm: Remove double gaddr calculation when notifying
ptep_notify and gmap_shadow_notify both need a guest address and therefore retrieve it from the available host virtual address. As they operate on the same guest address, we can calculate it once and then pass it on. As a gmap normally has more than one shadow gmap, we also no longer recalculate it for each of them.

Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
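The change boils down to hoisting a value that every callee derives from the same input up into the single caller. As a rough illustration of the pattern only, here is a minimal, self-contained C sketch; struct shadow, translate() and shadow_notify() are hypothetical stand-ins, not the kernel's gmap structures or its locked radix-tree lookup:

#include <stdio.h>

/* Illustrative stand-in for a shadow structure; not the kernel's gmap. */
struct shadow {
        int id;
};

/*
 * Hypothetical host-to-guest address translation.  In the real code this
 * is a radix-tree lookup taken under a lock, which is why doing it once
 * per notification instead of once per shadow is worthwhile.
 */
static unsigned long translate(unsigned long vmaddr)
{
        return vmaddr ^ 0xff0000UL;     /* placeholder arithmetic */
}

/* After the change: the callee consumes the precomputed guest address. */
static void shadow_notify(struct shadow *sg, unsigned long gaddr)
{
        printf("shadow %d notified for guest address 0x%lx\n", sg->id, gaddr);
}

int main(void)
{
        struct shadow shadows[] = { { 1 }, { 2 }, { 3 } };
        unsigned long vmaddr = 0x7f000000UL;
        unsigned long gaddr;
        size_t i;

        /* Calculate the guest address once in the caller ... */
        gaddr = translate(vmaddr);

        /* ... and pass it to every shadow instead of recalculating it. */
        for (i = 0; i < sizeof(shadows) / sizeof(shadows[0]); i++)
                shadow_notify(&shadows[i], gaddr);

        return 0;
}

Before this commit, the equivalent of translate() effectively ran inside gmap_shadow_notify() for every shadow gmap; the first hunk below removes exactly that per-callee lookup.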
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/gmap.c | 35
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index a07b1ec1391d..ffb55c935eda 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2004,20 +2004,12 @@ EXPORT_SYMBOL_GPL(gmap_shadow_page);
  * Called with sg->parent->shadow_lock.
  */
 static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
-                               unsigned long offset, pte_t *pte)
+                               unsigned long gaddr, pte_t *pte)
 {
         struct gmap_rmap *rmap, *rnext, *head;
-        unsigned long gaddr, start, end, bits, raddr;
-        unsigned long *table;
+        unsigned long start, end, bits, raddr;
 
         BUG_ON(!gmap_is_shadow(sg));
-        spin_lock(&sg->parent->guest_table_lock);
-        table = radix_tree_lookup(&sg->parent->host_to_guest,
-                                  vmaddr >> PMD_SHIFT);
-        gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
-        spin_unlock(&sg->parent->guest_table_lock);
-        if (!table)
-                return;
 
         spin_lock(&sg->guest_table_lock);
         if (sg->removed) {
@@ -2076,7 +2068,7 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
 void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
                  pte_t *pte, unsigned long bits)
 {
-        unsigned long offset, gaddr;
+        unsigned long offset, gaddr = 0;
         unsigned long *table;
         struct gmap *gmap, *sg, *next;
 
@@ -2084,22 +2076,23 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
         offset = offset * (4096 / sizeof(pte_t));
         rcu_read_lock();
         list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
-                if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
-                        spin_lock(&gmap->shadow_lock);
-                        list_for_each_entry_safe(sg, next,
-                                                 &gmap->children, list)
-                                gmap_shadow_notify(sg, vmaddr, offset, pte);
-                        spin_unlock(&gmap->shadow_lock);
-                }
-                if (!(bits & PGSTE_IN_BIT))
-                        continue;
                 spin_lock(&gmap->guest_table_lock);
                 table = radix_tree_lookup(&gmap->host_to_guest,
                                           vmaddr >> PMD_SHIFT);
                 if (table)
                         gaddr = __gmap_segment_gaddr(table) + offset;
                 spin_unlock(&gmap->guest_table_lock);
-                if (table)
+                if (!table)
+                        continue;
+
+                if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
+                        spin_lock(&gmap->shadow_lock);
+                        list_for_each_entry_safe(sg, next,
+                                                 &gmap->children, list)
+                                gmap_shadow_notify(sg, vmaddr, gaddr, pte);
+                        spin_unlock(&gmap->shadow_lock);
+                }
+                if (bits & PGSTE_IN_BIT)
                         gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
         }
         rcu_read_unlock();