Diffstat (limited to 'mm/swapfile.c')
 mm/swapfile.c | 83 +++++++++++++++++++++++++-----------------------------
 1 file changed, 38 insertions(+), 45 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d954b71c4f9c..644f746e167a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -103,26 +103,39 @@ static inline unsigned char swap_count(unsigned char ent)
return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
}
+/* Reclaim the swap entry anyway if possible */
+#define TTRS_ANYWAY 0x1
+/*
+ * Reclaim the swap entry if there are no more mappings of the
+ * corresponding page
+ */
+#define TTRS_UNMAPPED 0x2
+/* Reclaim the swap entry if swap is getting full */
+#define TTRS_FULL 0x4
+
/* returns 1 if swap entry is freed */
-static int
-__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
+static int __try_to_reclaim_swap(struct swap_info_struct *si,
+ unsigned long offset, unsigned long flags)
{
swp_entry_t entry = swp_entry(si->type, offset);
struct page *page;
int ret = 0;
- page = find_get_page(swap_address_space(entry), swp_offset(entry));
+ page = find_get_page(swap_address_space(entry), offset);
if (!page)
return 0;
/*
- * This function is called from scan_swap_map() and it's called
- * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
- * We have to use trylock for avoiding deadlock. This is a special
+ * This function is called from scan_swap_map_slots() and is also
+ * called by vmscan.c when reclaiming pages, so we hold a lock on a
+ * page here. We have to use trylock to avoid deadlock. This is a special
* case and you should use try_to_free_swap() with explicit lock_page()
* in usual operations.
*/
if (trylock_page(page)) {
- ret = try_to_free_swap(page);
+ if ((flags & TTRS_ANYWAY) ||
+ ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
+ ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
+ ret = try_to_free_swap(page);
unlock_page(page);
}
put_page(page);
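
The new TTRS_* modes compose as a bitmask, so each caller states the policy under which reclaim is acceptable: unconditionally (TTRS_ANYWAY), only when the page has no remaining mappings (TTRS_UNMAPPED), or only under swap pressure (TTRS_FULL). A minimal userspace sketch of the gating expression, with the kernel predicates page_mapped() and mem_cgroup_swap_full() replaced by hypothetical stubs whose return values are arbitrary:

    #include <stdbool.h>
    #include <stdio.h>

    #define TTRS_ANYWAY   0x1
    #define TTRS_UNMAPPED 0x2
    #define TTRS_FULL     0x4

    /* Hypothetical stand-ins for page_mapped() and mem_cgroup_swap_full(). */
    static bool page_mapped_stub(void) { return false; }
    static bool swap_full_stub(void)   { return true; }

    /* The gating expression from __try_to_reclaim_swap(): reclaim is
     * permitted if any condition the caller requested holds. */
    static bool may_reclaim(unsigned long flags)
    {
            return (flags & TTRS_ANYWAY) ||
                   ((flags & TTRS_UNMAPPED) && !page_mapped_stub()) ||
                   ((flags & TTRS_FULL) && swap_full_stub());
    }

    int main(void)
    {
            printf("ANYWAY:        %d\n", may_reclaim(TTRS_ANYWAY));
            printf("UNMAPPED|FULL: %d\n", may_reclaim(TTRS_UNMAPPED | TTRS_FULL));
            return 0;
    }

Since the tests are OR-ed, TTRS_ANYWAY subsumes the other two modes, which is the behaviour the scan_swap_map_slots() caller in the next hunk relies on.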
@@ -780,7 +793,7 @@ checks:
int swap_was_freed;
unlock_cluster(ci);
spin_unlock(&si->lock);
- swap_was_freed = __try_to_reclaim_swap(si, offset);
+ swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
spin_lock(&si->lock);
/* entry was freed successfully, try to use this again */
if (swap_was_freed)
@@ -919,6 +932,7 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
struct swap_cluster_info *ci;
ci = lock_cluster(si, offset);
+ memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
cluster_set_count_flag(ci, 0, 0);
free_cluster(si, idx);
unlock_cluster(ci);
@@ -989,7 +1003,7 @@ start_over:
goto nextsi;
}
if (size == SWAPFILE_CLUSTER) {
- if (!(si->flags & SWP_FILE))
+ if (!(si->flags & SWP_FS))
n_ret = swap_alloc_cluster(si, swp_entries);
} else
n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
@@ -1169,6 +1183,8 @@ static unsigned char __swap_entry_free(struct swap_info_struct *p,
ci = lock_cluster_or_swap_info(p, offset);
usage = __swap_entry_free_locked(p, offset, usage);
unlock_cluster_or_swap_info(p, ci);
+ if (!usage)
+ free_swap_slot(entry);
return usage;
}
@@ -1199,10 +1215,8 @@ void swap_free(swp_entry_t entry)
struct swap_info_struct *p;
p = _swap_info_get(entry);
- if (p) {
- if (!__swap_entry_free(p, entry, 1))
- free_swap_slot(entry);
- }
+ if (p)
+ __swap_entry_free(p, entry, 1);
}
/*
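
Together, the two hunks above move the final free into __swap_entry_free() itself: when the usage count drops to zero the slot is handed to free_swap_slot() there, so swap_free() no longer needs its own zero check. A hedged toy model of the new contract, with the count and the slot cache reduced to stand-ins:

    #include <stdio.h>

    /* Toy model: one entry's usage count; free_swap_slot() is a stub. */
    static unsigned char count = 1;

    static void free_swap_slot_stub(void) { puts("slot returned to cache"); }

    /* Post-patch __swap_entry_free(): dropping the last reference hands
     * the slot back right here, so callers need no zero check of their own. */
    static unsigned char swap_entry_free_model(void)
    {
            unsigned char usage = --count;

            if (!usage)
                    free_swap_slot_stub();
            return usage;
    }

    /* main() plays the role of the simplified swap_free(): a bare call. */
    int main(void)
    {
            swap_entry_free_model();
            return 0;
    }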
@@ -1237,9 +1251,6 @@ void put_swap_page(struct page *page, swp_entry_t entry)
if (free_entries == SWAPFILE_CLUSTER) {
unlock_cluster_or_swap_info(si, ci);
spin_lock(&si->lock);
- ci = lock_cluster(si, offset);
- memset(map, 0, SWAPFILE_CLUSTER);
- unlock_cluster(ci);
mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
swap_free_cluster(si, idx);
spin_unlock(&si->lock);
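
This pairs with the swap_free_cluster() hunk above: the swap_map range for the cluster is now zeroed inside swap_free_cluster(), under the cluster lock, rather than by put_swap_page() before the call. A toy illustration of the pattern, clearing the map under the lock that guards it, with a pthread mutex standing in for the kernel's cluster lock and an illustrative cluster size:

    #include <pthread.h>
    #include <string.h>

    #define SWAPFILE_CLUSTER 8      /* illustrative size */

    static unsigned char swap_map[4 * SWAPFILE_CLUSTER];
    static pthread_mutex_t cluster_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Model of post-patch swap_free_cluster(): the map range is zeroed
     * here, under the lock, instead of by each caller beforehand. */
    static void free_cluster_model(unsigned long idx)
    {
            unsigned long offset = idx * SWAPFILE_CLUSTER;

            pthread_mutex_lock(&cluster_lock);
            memset(swap_map + offset, 0, SWAPFILE_CLUSTER);
            /* ...then the cluster would be linked onto the free list... */
            pthread_mutex_unlock(&cluster_lock);
    }

    int main(void)
    {
            free_cluster_model(2);
            return 0;
    }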
@@ -1612,7 +1623,6 @@ int try_to_free_swap(struct page *page)
int free_swap_and_cache(swp_entry_t entry)
{
struct swap_info_struct *p;
- struct page *page = NULL;
unsigned char count;
if (non_swap_entry(entry))
@@ -1622,30 +1632,9 @@ int free_swap_and_cache(swp_entry_t entry)
if (p) {
count = __swap_entry_free(p, entry, 1);
if (count == SWAP_HAS_CACHE &&
- !swap_page_trans_huge_swapped(p, entry)) {
- page = find_get_page(swap_address_space(entry),
- swp_offset(entry));
- if (page && !trylock_page(page)) {
- put_page(page);
- page = NULL;
- }
- } else if (!count)
- free_swap_slot(entry);
- }
- if (page) {
- /*
- * Not mapped elsewhere, or swap space full? Free it!
- * Also recheck PageSwapCache now page is locked (above).
- */
- if (PageSwapCache(page) && !PageWriteback(page) &&
- (!page_mapped(page) || mem_cgroup_swap_full(page)) &&
- !swap_page_trans_huge_swapped(p, entry)) {
- page = compound_head(page);
- delete_from_swap_cache(page);
- SetPageDirty(page);
- }
- unlock_page(page);
- put_page(page);
+ !swap_page_trans_huge_swapped(p, entry))
+ __try_to_reclaim_swap(p, swp_offset(entry),
+ TTRS_UNMAPPED | TTRS_FULL);
}
return p != NULL;
}
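
What the deleted block did by hand (page lookup, trylock, mapped/full tests, delete_from_swap_cache()) is exactly what the extended __try_to_reclaim_swap() now provides, so free_swap_and_cache() reduces to one reference drop plus one conditional reclaim. A compact, hedged model of the post-patch flow; the *_stub helpers are hypothetical stand-ins for the kernel calls, and the flag values are copied for illustration only:

    #include <stdio.h>

    #define SWAP_HAS_CACHE 0x40     /* illustrative; see include/linux/swap.h */
    #define TTRS_UNMAPPED  0x2
    #define TTRS_FULL      0x4

    /* Hypothetical stubs for __swap_entry_free(),
     * swap_page_trans_huge_swapped() and __try_to_reclaim_swap(). */
    static unsigned char entry_free_stub(void) { return SWAP_HAS_CACHE; }
    static int thp_swapped_stub(void)          { return 0; }
    static void reclaim_stub(unsigned long flags)
    {
            printf("conditional reclaim, flags %#lx\n", flags);
    }

    /* Post-patch shape of free_swap_and_cache(): drop one reference; if
     * only the swap-cache reference remains and no THP subpage is still
     * swapped, attempt reclaim under the relaxed UNMAPPED|FULL policy. */
    static int free_swap_and_cache_model(void)
    {
            unsigned char count = entry_free_stub();

            if (count == SWAP_HAS_CACHE && !thp_swapped_stub())
                    reclaim_stub(TTRS_UNMAPPED | TTRS_FULL);
            return 1;
    }

    int main(void)
    {
            return !free_swap_and_cache_model();
    }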
@@ -2310,12 +2299,13 @@ static void destroy_swap_extents(struct swap_info_struct *sis)
kfree(se);
}
- if (sis->flags & SWP_FILE) {
+ if (sis->flags & SWP_ACTIVATED) {
struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
- sis->flags &= ~SWP_FILE;
- mapping->a_ops->swap_deactivate(swap_file);
+ sis->flags &= ~SWP_ACTIVATED;
+ if (mapping->a_ops->swap_deactivate)
+ mapping->a_ops->swap_deactivate(swap_file);
}
}
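
SWP_ACTIVATED only records that ->swap_activate() was invoked; it does not promise that the address_space also provides a ->swap_deactivate() hook, hence the added NULL test. The guarded optional-callback pattern, sketched standalone with a toy ops structure:

    #include <stdio.h>

    /* Toy address_space_operations: the deactivate hook is optional. */
    struct aops_model {
            void (*swap_deactivate)(void);
    };

    static void teardown(const struct aops_model *a_ops)
    {
            /* Mirror of the patched destroy_swap_extents(): only call
             * the hook if the backend actually registered one. */
            if (a_ops->swap_deactivate)
                    a_ops->swap_deactivate();
    }

    int main(void)
    {
            struct aops_model no_hook = { .swap_deactivate = NULL };

            teardown(&no_hook);     /* safe: no hook, no call */
            return 0;
    }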
@@ -2364,6 +2354,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
list_add_tail(&new_se->list, &sis->first_swap_extent.list);
return 1;
}
+EXPORT_SYMBOL_GPL(add_swap_extent);
/*
* A `swap extent' is a simple thing which maps a contiguous range of pages
@@ -2411,8 +2402,10 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
if (mapping->a_ops->swap_activate) {
ret = mapping->a_ops->swap_activate(sis, swap_file, span);
+ if (ret >= 0)
+ sis->flags |= SWP_ACTIVATED;
if (!ret) {
- sis->flags |= SWP_FILE;
+ sis->flags |= SWP_FS;
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
}