Diffstat (limited to 'drivers/staging/lustre/lustre/osc/osc_cache.c')
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_cache.c | 279
1 file changed, 101 insertions(+), 178 deletions(-)
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index d011135802d5..4bbe219add98 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -44,7 +44,7 @@ static int extent_debug; /* set it to be true for more debug */
static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
- int state);
+ enum osc_extent_state state);
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
struct osc_async_page *oap, int sent, int rc);
static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
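The int -> enum osc_extent_state change in this hunk is a pure type-safety cleanup: extent states gain compiler checking at every call site. A minimal sketch of the benefit, using illustrative enum values rather than the real osc_extent_state set:

	enum demo_extent_state { DES_INACTIVE, DES_CACHE, DES_RPC_SENT };

	/* With an enum parameter, passing a stray int (or a value from an
	 * unrelated enum) draws a compiler diagnostic, and a switch over
	 * the parameter can be checked for missing cases (-Wswitch). */
	static const char *state_name(enum demo_extent_state state)
	{
		switch (state) {
		case DES_INACTIVE:	return "inactive";
		case DES_CACHE:		return "cache";
		case DES_RPC_SENT:	return "rpc_sent";
		}
		return "unknown";
	}
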
@@ -177,7 +177,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
{
struct osc_object *obj = ext->oe_obj;
struct osc_async_page *oap;
- int page_count;
+ size_t page_count;
int rc = 0;
if (!osc_object_is_locked(obj)) {
@@ -632,7 +632,7 @@ static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
*/
static struct osc_extent *osc_extent_find(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
- int *grants)
+ unsigned int *grants)
{
struct client_obd *cli = osc_cli(obj);
struct osc_lock *olck;
@@ -643,10 +643,10 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
struct osc_extent *found = NULL;
pgoff_t chunk;
pgoff_t max_end;
- int max_pages; /* max_pages_per_rpc */
- int chunksize;
+ unsigned int max_pages; /* max_pages_per_rpc */
+ unsigned int chunksize;
int ppc_bits; /* pages per chunk bits */
- int chunk_mask;
+ pgoff_t chunk_mask;
int rc;
cur = osc_extent_alloc(obj);
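The widened types in osc_extent_find matter because chunk numbers are derived from pgoff_t page indexes, which are 64-bit on 64-bit kernels; a signed int chunk_mask would sign-extend or truncate once an index outgrows 31 bits. A standalone sketch of the chunk arithmetic, with a stand-in type since this is not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t my_pgoff_t;	/* stand-in for the kernel's pgoff_t */

	int main(void)
	{
		unsigned int chunkbits = 20, pageshift = 12;	/* 1 MiB chunks, 4 KiB pages */
		int ppc_bits = chunkbits - pageshift;		/* pages per chunk, as a shift */
		my_pgoff_t chunk_mask = ~(((my_pgoff_t)1 << ppc_bits) - 1);
		my_pgoff_t index = ((my_pgoff_t)1 << 33) + 123;	/* page index past 2^31 */

		printf("chunk:       %llu\n", (unsigned long long)(index >> ppc_bits));
		printf("chunk start: %llu\n", (unsigned long long)(index & chunk_mask));
		return 0;
	}
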
@@ -700,8 +700,8 @@ restart:
if (!ext)
ext = first_extent(obj);
while (ext) {
- loff_t ext_chk_start = ext->oe_start >> ppc_bits;
- loff_t ext_chk_end = ext->oe_end >> ppc_bits;
+ pgoff_t ext_chk_start = ext->oe_start >> ppc_bits;
+ pgoff_t ext_chk_end = ext->oe_end >> ppc_bits;
LASSERT(sanity_check_nolock(ext) == 0);
if (chunk > ext_chk_end + 1)
@@ -913,7 +913,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
return 0;
}
-static int extent_wait_cb(struct osc_extent *ext, int state)
+static int extent_wait_cb(struct osc_extent *ext, enum osc_extent_state state)
{
int ret;
@@ -928,7 +928,7 @@ static int extent_wait_cb(struct osc_extent *ext, int state)
* Wait for the extent's state to become @state.
*/
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
- int state)
+ enum osc_extent_state state)
{
struct osc_object *obj = ext->oe_obj;
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
@@ -958,8 +958,8 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
if (rc == -ETIMEDOUT) {
OSC_EXTENT_DUMP(D_ERROR, ext,
- "%s: wait ext to %d timedout, recovery in progress?\n",
- osc_export(obj)->exp_obd->obd_name, state);
+ "%s: wait ext to %u timedout, recovery in progress?\n",
+ osc_export(obj)->exp_obd->obd_name, state);
lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
@@ -1099,7 +1099,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
struct osc_async_page *oap;
struct osc_async_page *last = NULL;
struct osc_object *obj = ext->oe_obj;
- int page_count = 0;
+ unsigned int page_count = 0;
int rc;
/* we're going to grab page lock, so object lock must not be taken. */
@@ -1140,9 +1140,11 @@ static int osc_extent_make_ready(const struct lu_env *env,
* the size of file.
*/
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
- last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
- LASSERT(last->oap_count > 0);
- LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
+ int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
+
+ LASSERT(last_oap_count > 0);
+ LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE);
+ last->oap_count = last_oap_count;
spin_lock(&last->oap_lock);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
spin_unlock(&last->oap_lock);
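Staging the refreshed count in a signed local before storing it into oap_count is deliberate: once the value lands in an unsigned field, a negative error return could no longer trip the assertions. The pattern in isolation, with hypothetical names:

	#include <assert.h>

	struct page_slot {
		unsigned int count;	/* hypothetical unsigned destination */
	};

	static int refresh_count(void)
	{
		return 42;		/* may in general return a negative errno */
	}

	static void update(struct page_slot *s)
	{
		int n = refresh_count();

		assert(n > 0);		/* meaningful: n is still signed here */
		s->count = n;		/* store only after validation */
	}

	int main(void)
	{
		struct page_slot s = { 0 };

		update(&s);
		return s.count == 42 ? 0 : 1;
	}
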
@@ -1174,7 +1176,8 @@ static int osc_extent_make_ready(const struct lu_env *env,
* called to expand the extent for the same IO. To expand the extent, the
* page index must be in the same or next chunk of ext->oe_end.
*/
-static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
+static int osc_extent_expand(struct osc_extent *ext, pgoff_t index,
+ unsigned int *grants)
{
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
@@ -1183,7 +1186,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
pgoff_t chunk = index >> ppc_bits;
pgoff_t end_chunk;
pgoff_t end_index;
- int chunksize = 1 << cli->cl_chunkbits;
+ unsigned int chunksize = 1 << cli->cl_chunkbits;
int rc = 0;
LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
@@ -1361,7 +1364,7 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
if (rc == 0 && srvlock) {
struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
- int bytes = oap->oap_count;
+ size_t bytes = oap->oap_count;
if (crt == CRT_READ)
stats->os_lockless_reads += bytes;
@@ -1383,18 +1386,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
- "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
- "reserved: %ld, flight: %d } lru {in list: %d, " \
- "left: %d, waiters: %d }" fmt, \
+ CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
+ "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
+ "lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \
__tmp->cl_import->imp_obd->obd_name, \
- __tmp->cl_dirty, __tmp->cl_dirty_max, \
- atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
- atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
+ __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
+ atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
__tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
- atomic_read(&__tmp->cl_lru_in_list), \
- atomic_read(&__tmp->cl_lru_busy), \
+ atomic_long_read(&__tmp->cl_lru_in_list), \
+ atomic_long_read(&__tmp->cl_lru_busy), \
atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
} while (0)
@@ -1404,8 +1405,8 @@ static void osc_consume_write_grant(struct client_obd *cli,
{
assert_spin_locked(&cli->cl_loi_list_lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- atomic_inc(&obd_dirty_pages);
- cli->cl_dirty += PAGE_SIZE;
+ atomic_long_inc(&obd_dirty_pages);
+ cli->cl_dirty_pages++;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
PAGE_SIZE, pga, pga->pg);
@@ -1424,12 +1425,12 @@ static void osc_release_write_grant(struct client_obd *cli,
}
pga->flag &= ~OBD_BRW_FROM_GRANT;
- atomic_dec(&obd_dirty_pages);
- cli->cl_dirty -= PAGE_SIZE;
+ atomic_long_dec(&obd_dirty_pages);
+ cli->cl_dirty_pages--;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
- atomic_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit -= PAGE_SIZE;
+ atomic_long_dec(&obd_dirty_transit_pages);
+ cli->cl_dirty_transit--;
}
}
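The dirty and transit counters here migrate from atomic_t to atomic_long_t, and from bytes to pages: a 32-bit byte counter can wrap on large-memory machines, while atomic_long_t follows the native word size. The adopted pattern in isolation (kernel context assumed; the counter name is illustrative):

	#include <linux/atomic.h>

	static atomic_long_t demo_dirty_pages = ATOMIC_LONG_INIT(0);

	static void page_dirtied(void)
	{
		atomic_long_inc(&demo_dirty_pages);
	}

	static void page_cleaned(void)
	{
		atomic_long_dec(&demo_dirty_pages);
	}

	static long pages_dirty(void)
	{
		return atomic_long_read(&demo_dirty_pages);
	}
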
@@ -1494,11 +1495,11 @@ static void osc_unreserve_grant(struct client_obd *cli,
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
unsigned int lost_grant)
{
- int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+ unsigned long grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
spin_lock(&cli->cl_loi_list_lock);
- atomic_sub(nr_pages, &obd_dirty_pages);
- cli->cl_dirty -= nr_pages << PAGE_SHIFT;
+ atomic_long_sub(nr_pages, &obd_dirty_pages);
+ cli->cl_dirty_pages -= nr_pages;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
/* borrow some grant from truncate to avoid the case that
@@ -1511,7 +1512,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
lost_grant, cli->cl_lost_grant,
- cli->cl_avail_grant, cli->cl_dirty);
+ cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_SHIFT);
}
/**
@@ -1535,19 +1536,18 @@ static int osc_enter_cache_try(struct client_obd *cli,
{
int rc;
- OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
rc = osc_reserve_grant(cli, bytes);
if (rc < 0)
return 0;
- if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
- atomic_read(&obd_unstable_pages) + 1 +
- atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
+ if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages &&
+ atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
- cli->cl_dirty_transit += PAGE_SIZE;
- atomic_inc(&obd_dirty_transit_pages);
+ cli->cl_dirty_transit++;
+ atomic_long_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
rc = 1;
@@ -1581,11 +1581,13 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc = oap->oap_obj;
struct lov_oinfo *loi = osc->oo_oinfo;
struct osc_cache_waiter ocw;
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
+ struct l_wait_info lwi;
int rc = -EDQUOT;
- OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(AT_OFF ? obd_timeout : at_max),
+ NULL, LWI_ON_SIGNAL_NOOP, NULL);
+
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
spin_lock(&cli->cl_loi_list_lock);
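The wait above also drops the hard-coded 600-second timeout in favor of the configured one: AT_OFF (adaptive timeouts disabled, which in Lustre means at_max == 0) selects the static obd_timeout, otherwise the adaptive ceiling at_max applies. The selection in isolation, with illustrative default values:

	#include <stdio.h>

	static unsigned int obd_timeout = 100;	/* static timeout, seconds */
	static unsigned int at_max = 600;	/* adaptive-timeout ceiling, seconds */

	#define AT_OFF (at_max == 0)

	int main(void)
	{
		printf("wait up to %u seconds\n", AT_OFF ? obd_timeout : at_max);
		return 0;
	}
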
@@ -1593,14 +1595,16 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
* of queued writes and create a discontiguous rpc stream
*/
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- cli->cl_dirty_max < PAGE_SIZE ||
- cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
+ !cli->cl_dirty_max_pages || cli->cl_ar.ar_force_sync ||
+ loi->loi_ar.ar_force_sync) {
+ OSC_DUMP_GRANT(D_CACHE, cli, "forced sync i/o\n");
rc = -EDQUOT;
goto out;
}
/* Hopefully normal case - cache space and write credits available */
if (osc_enter_cache_try(cli, oap, bytes, 0)) {
+ OSC_DUMP_GRANT(D_CACHE, cli, "granted from cache\n");
rc = 0;
goto out;
}
@@ -1615,7 +1619,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
init_waitqueue_head(&ocw.ocw_waitq);
ocw.ocw_oap = oap;
ocw.ocw_grant = bytes;
- while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
+ while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) {
list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
ocw.ocw_rc = 0;
spin_unlock(&cli->cl_loi_list_lock);
@@ -1629,32 +1633,49 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
spin_lock(&cli->cl_loi_list_lock);
- /* l_wait_event is interrupted by signal, or timed out */
if (rc < 0) {
- if (rc == -ETIMEDOUT) {
- OSC_DUMP_GRANT(D_ERROR, cli,
- "try to reserve %d.\n", bytes);
- osc_extent_tree_dump(D_ERROR, osc);
- rc = -EDQUOT;
- }
-
+ /* l_wait_event is interrupted by signal, or timed out */
list_del_init(&ocw.ocw_entry);
- goto out;
+ break;
}
-
LASSERT(list_empty(&ocw.ocw_entry));
rc = ocw.ocw_rc;
if (rc != -EDQUOT)
- goto out;
+ break;
if (osc_enter_cache_try(cli, oap, bytes, 0)) {
rc = 0;
- goto out;
+ break;
}
}
+
+ switch (rc) {
+ case 0:
+ OSC_DUMP_GRANT(D_CACHE, cli, "finally got grant space\n");
+ break;
+ case -ETIMEDOUT:
+ OSC_DUMP_GRANT(D_CACHE, cli,
+ "timeout, fall back to sync i/o\n");
+ osc_extent_tree_dump(D_CACHE, osc);
+ /* fall back to synchronous I/O */
+ rc = -EDQUOT;
+ break;
+ case -EINTR:
+ /* Ensures restartability - LU-3581 */
+ OSC_DUMP_GRANT(D_CACHE, cli, "interrupted\n");
+ rc = -ERESTARTSYS;
+ break;
+ case -EDQUOT:
+ OSC_DUMP_GRANT(D_CACHE, cli,
+ "no grant space, fall back to sync i/o\n");
+ break;
+ default:
+ CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n",
+ cli->cl_import->imp_obd->obd_name, &ocw, rc);
+ break;
+ }
out:
spin_unlock(&cli->cl_loi_list_lock);
- OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc);
return rc;
}
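This hunk restructures osc_enter_cache so every exit from the wait loop funnels into one switch that classifies the outcome; notably, -EINTR is now translated to -ERESTARTSYS so an interrupted write can be restarted (LU-3581). The control-flow shape, stripped of locking and Lustre types, with hypothetical helper names:

	#include <errno.h>

	#define ERESTARTSYS 512	/* kernel-internal value, not in errno.h */

	/* Hypothetical stand-ins for the grant machinery. */
	static int wait_for_grant(void)	{ return -ETIMEDOUT; }
	static int waiter_rc(void)	{ return -EDQUOT; }
	static int try_grab_grant(void)	{ return 0; }

	static int enter_cache_demo(void)
	{
		int rc = -EDQUOT;

		while (1) {
			rc = wait_for_grant();	/* sleep until woken */
			if (rc < 0)
				break;		/* signal or timeout */
			rc = waiter_rc();	/* verdict left by the waker */
			if (rc != -EDQUOT)
				break;		/* success or hard failure */
			if (try_grab_grant()) {	/* one more direct attempt */
				rc = 0;
				break;
			}
		}

		switch (rc) {
		case 0:
			break;			/* got cache space */
		case -ETIMEDOUT:
			rc = -EDQUOT;		/* fall back to sync i/o */
			break;
		case -EINTR:
			rc = -ERESTARTSYS;	/* let the syscall restart */
			break;
		default:
			break;			/* -EDQUOT etc.: sync i/o */
		}
		return rc;
	}

	int main(void)
	{
		return enter_cache_demo() == -EDQUOT ? 0 : 1;
	}
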
@@ -1670,19 +1691,17 @@ void osc_wake_cache_waiters(struct client_obd *cli)
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
- if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
- (atomic_read(&obd_unstable_pages) + 1 +
- atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
- CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
- cli->cl_dirty,
- cli->cl_dirty_max, obd_max_dirty_pages);
+ if ((cli->cl_dirty_pages > cli->cl_dirty_max_pages) ||
+ (atomic_long_read(&obd_dirty_pages) + 1 >
+ obd_max_dirty_pages)) {
+ CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %ld\n",
+ cli->cl_dirty_pages, cli->cl_dirty_max_pages,
+ obd_max_dirty_pages);
goto wakeup;
}
- ocw->ocw_rc = 0;
- if (!osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
- ocw->ocw_rc = -EDQUOT;
-
+ if (osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
+ ocw->ocw_rc = 0;
wakeup:
CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
@@ -1843,97 +1862,6 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
ar->ar_force_sync = 0;
}
-/**
- * Performs "unstable" page accounting. This function balances the
- * increment operations performed in osc_inc_unstable_pages. It is
- * registered as the RPC request callback, and is executed when the
- * bulk RPC is committed on the server. Thus at this point, the pages
- * involved in the bulk transfer are no longer considered unstable.
- */
-void osc_dec_unstable_pages(struct ptlrpc_request *req)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- int page_count = desc->bd_iov_count;
- int i;
-
- /* No unstable page tracking */
- if (!cli->cl_cache)
- return;
-
- LASSERT(page_count >= 0);
-
- for (i = 0; i < page_count; i++)
- dec_node_page_state(desc->bd_iov[i].kiov_page,
- NR_UNSTABLE_NFS);
-
- atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
- LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
-
- atomic_sub(page_count, &cli->cl_unstable_count);
- LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
-
- atomic_sub(page_count, &obd_unstable_pages);
- LASSERT(atomic_read(&obd_unstable_pages) >= 0);
-
- spin_lock(&req->rq_lock);
- req->rq_committed = 1;
- req->rq_unstable = 0;
- spin_unlock(&req->rq_lock);
-
- wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
-}
-
-/* "unstable" page accounting. See: osc_dec_unstable_pages. */
-void osc_inc_unstable_pages(struct ptlrpc_request *req)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- long page_count = desc->bd_iov_count;
- int i;
-
- /* No unstable page tracking */
- if (!cli->cl_cache)
- return;
-
- LASSERT(page_count >= 0);
-
- for (i = 0; i < page_count; i++)
- inc_node_page_state(desc->bd_iov[i].kiov_page,
- NR_UNSTABLE_NFS);
-
- LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
- atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
-
- LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
- atomic_add(page_count, &cli->cl_unstable_count);
-
- LASSERT(atomic_read(&obd_unstable_pages) >= 0);
- atomic_add(page_count, &obd_unstable_pages);
-
- spin_lock(&req->rq_lock);
-
- /*
- * If the request has already been committed (i.e. brw_commit
- * called via rq_commit_cb), we need to undo the unstable page
- * increments we just performed because rq_commit_cb wont be
- * called again. Otherwise, just set the commit callback so the
- * unstable page accounting is properly updated when the request
- * is committed
- */
- if (req->rq_committed) {
- /* Drop lock before calling osc_dec_unstable_pages */
- spin_unlock(&req->rq_lock);
- osc_dec_unstable_pages(req);
- spin_lock(&req->rq_lock);
- } else {
- req->rq_unstable = 1;
- req->rq_commit_cb = osc_dec_unstable_pages;
- }
-
- spin_unlock(&req->rq_lock);
-}
-
/* this must be called holding the loi list lock to give coverage to exit_cache,
* async_flag maintenance, and oap_request
*/
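The large deletion in this hunk takes unstable-page accounting out of the cache/grant path entirely. The subtlest part of the removed code was the commit race in osc_inc_unstable_pages: if the RPC had already committed before the callback was armed, the accounting had to be undone on the spot, because rq_commit_cb would never fire. A user-space sketch of that pattern, with hypothetical names:

	#include <stdbool.h>
	#include <pthread.h>

	struct request {
		pthread_mutex_t lock;
		bool committed;
		void (*commit_cb)(struct request *);
	};

	static void dec_unstable(struct request *req)
	{
		(void)req;	/* would balance the earlier increment here */
	}

	static void arm_unstable_accounting(struct request *req)
	{
		/* ...pages were just accounted as unstable... */
		pthread_mutex_lock(&req->lock);
		if (req->committed) {
			/* Raced with commit: the callback will never fire,
			 * so undo the accounting directly, lock dropped. */
			pthread_mutex_unlock(&req->lock);
			dec_unstable(req);
			return;
		}
		req->commit_cb = dec_unstable;	/* runs at RPC commit time */
		pthread_mutex_unlock(&req->lock);
	}

	int main(void)
	{
		struct request req = { PTHREAD_MUTEX_INITIALIZER, true, NULL };

		arm_unstable_accounting(&req);	/* already committed: undone now */
		return 0;
	}
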
@@ -1945,9 +1873,6 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
__u64 xid = 0;
if (oap->oap_request) {
- if (!rc)
- osc_inc_unstable_pages(oap->oap_request);
-
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
oap->oap_request = NULL;
@@ -1979,7 +1904,7 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
*/
static int try_to_add_extent_for_io(struct client_obd *cli,
struct osc_extent *ext, struct list_head *rpclist,
- int *pc, unsigned int *max_pages)
+ unsigned int *pc, unsigned int *max_pages)
{
struct osc_extent *tmp;
struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
@@ -2032,12 +1957,13 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
* 5. Traverse the extent tree from the 1st extent;
* 6. Above steps exit if there is no space in this RPC.
*/
-static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
+static unsigned int get_write_extents(struct osc_object *obj,
+ struct list_head *rpclist)
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
struct osc_extent *temp;
- int page_count = 0;
+ unsigned int page_count = 0;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
LASSERT(osc_object_is_locked(obj));
@@ -2175,7 +2101,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_extent *ext;
struct osc_extent *next;
LIST_HEAD(rpclist);
- int page_count = 0;
+ unsigned int page_count = 0;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
int rc = 0;
@@ -2390,7 +2316,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
struct client_obd *cli = oap->oap_cli;
struct osc_object *osc = oap->oap_obj;
pgoff_t index;
- int grants = 0;
+ unsigned int grants = 0, tmp;
int brw_flags = OBD_BRW_ASYNC;
int cmd = OBD_BRW_WRITE;
int need_release = 0;
@@ -2434,9 +2360,6 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
return rc;
}
- if (osc_over_unstable_soft_limit(cli))
- brw_flags |= OBD_BRW_SOFT_SYNC;
-
oap->oap_cmd = cmd;
oap->oap_page_off = ops->ops_from;
oap->oap_count = ops->ops_to - ops->ops_from;
@@ -2476,7 +2399,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
grants = 0;
need_release = 1;
} else if (ext->oe_end < index) {
- int tmp = grants;
+ tmp = grants;
/* try to expand this extent */
rc = osc_extent_expand(ext, index, &tmp);
if (rc < 0) {
@@ -2501,7 +2424,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
}
if (!ext) {
- int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+ tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
/* try to find new extent to cover this page */
LASSERT(!oio->oi_active);
@@ -2645,7 +2568,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
goto out;
spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
+ oap->oap_async_flags |= ASYNC_READY | ASYNC_URGENT;
spin_unlock(&oap->oap_lock);
if (memory_pressure_get())