diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 09fe9412eab49d049c8ac017975ad191a686500e..240e079cb3fbb9f7d3809f93b19b1e42f6faf5c3 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -963,9 +963,7 @@ xfs_ag_shrink_space(
	 * Disable perag reservations so it doesn't cause the allocation request
	 * to fail. We'll reestablish reservation before we return.
	 */
-	error = xfs_ag_resv_free(pag);
-	if (error)
-		return error;
+	xfs_ag_resv_free(pag);
 
 	/* internal log shouldn't also show up in the free space btrees */
 	error = xfs_alloc_vextent_exact_bno(&args,
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index da1057bd0e6067f143d36af29f50ac850e7079a0..216423df939e5cb9bf0ad3198ea41d911834383e 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -126,14 +126,13 @@ xfs_ag_resv_needed(
 }
 
 /* Clean out a reservation */
-static int
+static void
 __xfs_ag_resv_free(
 	struct xfs_perag		*pag,
 	enum xfs_ag_resv_type		type)
 {
 	struct xfs_ag_resv		*resv;
 	xfs_extlen_t			oldresv;
-	int				error;
 
 	trace_xfs_ag_resv_free(pag, type, 0);
 
@@ -149,30 +148,19 @@ __xfs_ag_resv_free(
 		oldresv = resv->ar_orig_reserved;
 	else
 		oldresv = resv->ar_reserved;
-	error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
+	xfs_add_fdblocks(pag->pag_mount, oldresv);
 	resv->ar_reserved = 0;
 	resv->ar_asked = 0;
 	resv->ar_orig_reserved = 0;
-
-	if (error)
-		trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
-				error, _RET_IP_);
-	return error;
 }
 
 /* Free a per-AG reservation. */
-int
+void
 xfs_ag_resv_free(
 	struct xfs_perag		*pag)
 {
-	int				error;
-	int				err2;
-
-	error = __xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
-	err2 = __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
-	if (err2 && !error)
-		error = err2;
-	return error;
+	__xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
+	__xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
 }
 
 static int
@@ -216,7 +204,7 @@ __xfs_ag_resv_init(
 	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL))
 		error = -ENOSPC;
 	else
-		error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
+		error = xfs_dec_fdblocks(mp, hidden_space, true);
 	if (error) {
 		trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
 				error, _RET_IP_);
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index b74b210008ea7e973f296b3fca524887fe27bef3..ff20ed93de77243610125edd75b8d69e4f101e20 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -6,7 +6,7 @@
 #ifndef __XFS_AG_RESV_H__
 #define __XFS_AG_RESV_H__
 
-int xfs_ag_resv_free(struct xfs_perag *pag);
+void xfs_ag_resv_free(struct xfs_perag *pag);
 int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
 
 bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 9da52e92172aba1640011dfabc115fbf5424432a..6cb8b2ddc541b4b28d553ffee414b903725e97f8 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -79,7 +79,7 @@ xfs_prealloc_blocks(
 }
 
 /*
- * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
+ * The number of blocks per AG that we withhold from xfs_dec_fdblocks to
  * guarantee that we can refill the AGFL prior to allocating space in a nearly
  * full AG.  Although the space described by the free space btrees, the
  * blocks used by the freesp btrees themselves, and the blocks owned by the
@@ -89,7 +89,7 @@ xfs_prealloc_blocks(
  * until the fs goes down, we subtract this many AG blocks from the incore
  * fdblocks to ensure user allocation does not overcommit the space the
  * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
- * withhold space from xfs_mod_fdblocks, so we do not account for that here.
+ * withhold space from xfs_dec_fdblocks, so we do not account for that here.
  */
 #define XFS_ALLOCBT_AGFL_RESERVE	4
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 22d44627ec59938f45c726d94d02e1872998ae93..0311a98df1c59d044a2645a6b633e7ee6b440eef 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1985,10 +1985,11 @@ xfs_bmap_add_extent_delay_real(
 	}
 
 	/* adjust for changes in reserved delayed indirect blocks */
-	if (da_new != da_old) {
-		ASSERT(state == 0 || da_new < da_old);
-		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
-				false);
+	if (da_new < da_old) {
+		xfs_add_fdblocks(mp, da_old - da_new);
+	} else if (da_new > da_old) {
+		ASSERT(state == 0);
+		error = xfs_dec_fdblocks(mp, da_new - da_old, false);
 	}
 
 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
@@ -2690,8 +2691,8 @@ xfs_bmap_add_extent_hole_delay(
 	}
 	if (oldlen != newlen) {
 		ASSERT(oldlen > newlen);
-		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
-				 false);
+		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);
+
 		/*
 		 * Nothing to do for disk quota accounting here.
 		 */
@@ -4110,11 +4111,11 @@ xfs_bmapi_reserve_delalloc(
 	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
 	ASSERT(indlen > 0);
 
-	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
+	error = xfs_dec_fdblocks(mp, alen, false);
 	if (error)
 		goto out_unreserve_quota;
 
-	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
+	error = xfs_dec_fdblocks(mp, indlen, false);
 	if (error)
 		goto out_unreserve_blocks;
 
@@ -4142,7 +4143,7 @@ xfs_bmapi_reserve_delalloc(
 	return 0;
 
 out_unreserve_blocks:
-	xfs_mod_fdblocks(mp, alen, false);
+	xfs_add_fdblocks(mp, alen);
 out_unreserve_quota:
 	if (XFS_IS_QUOTA_ON(mp))
 		xfs_quota_unreserve_blkres(ip, alen);
@@ -4928,7 +4929,7 @@ xfs_bmap_del_extent_delay(
 	ASSERT(got_endoff >= del_endoff);
 
 	if (isrt)
-		xfs_mod_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
+		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
 
 	/*
 	 * Update the inode delalloc counter now and wait to update the
@@ -5015,7 +5016,7 @@ xfs_bmap_del_extent_delay(
 	if (!isrt)
 		da_diff += del->br_blockcount;
 	if (da_diff) {
-		xfs_mod_fdblocks(mp, da_diff, false);
+		xfs_add_fdblocks(mp, da_diff);
 		xfs_mod_delalloc(mp, -da_diff);
 	}
 	return error;
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index 67ac870052b16dbcb221d0115cea60b083da4d2d..b662e52fe69564891ca53a4a0fe521ea45f7d927 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -517,7 +517,7 @@ xchk_fscounters(
 
 	/*
 	 * If the filesystem is not frozen, the counter summation calls above
-	 * can race with xfs_mod_freecounter, which subtracts a requested space
+	 * can race with xfs_dec_freecounter, which subtracts a requested space
 	 * reservation from the counter and undoes the subtraction if that made
 	 * the counter go negative.  Therefore, it's possible to see negative
 	 * values here, and we should only flag that as a corruption if we
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 92f81e374bc0819fe9c7ca9630cbb916a82ac2d8..67478294f11ae8616195f259a841cdede1f5d2f9 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -968,9 +968,7 @@ xrep_reset_perag_resv(
 	ASSERT(sc->tp);
 
 	sc->flags &= ~XREP_RESET_PERAG_RESV;
-	error = xfs_ag_resv_free(sc->sa.pag);
-	if (error)
-		goto out;
+	xfs_ag_resv_free(sc->sa.pag);
 	error = xfs_ag_resv_init(sc->sa.pag, sc->tp);
 	if (error == -ENOSPC) {
 		xfs_err(sc->mp,
@@ -979,7 +977,6 @@ xrep_reset_perag_resv(
 		error = 0;
 	}
 
-out:
 	return error;
 }
 
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 83f708f62ed9f29ece9406f0eb3de13adb16301d..c211ea2b63c4dd4f7108797d73f78e20285f47cd 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -213,10 +213,8 @@ xfs_growfs_data_private(
 		struct xfs_perag	*pag;
 
 		pag = xfs_perag_get(mp, id.agno);
-		error = xfs_ag_resv_free(pag);
+		xfs_ag_resv_free(pag);
 		xfs_perag_put(pag);
-		if (error)
-			return error;
 	}
 	/*
 	 * Reserve AG metadata blocks. ENOSPC here does not mean there
@@ -385,14 +383,14 @@ xfs_reserve_blocks(
 	 */
 	if (mp->m_resblks > request) {
 		lcounter = mp->m_resblks_avail - request;
-		if (lcounter > 0) {		/* release unused blocks */
+		if (lcounter > 0) {	/* release unused blocks */
 			fdblks_delta = lcounter;
 			mp->m_resblks_avail -= lcounter;
 		}
 		mp->m_resblks = request;
 		if (fdblks_delta) {
 			spin_unlock(&mp->m_sb_lock);
-			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
+			xfs_add_fdblocks(mp, fdblks_delta);
 			spin_lock(&mp->m_sb_lock);
 		}
 
@@ -428,9 +426,9 @@
 		 */
 		fdblks_delta = min(free, delta);
 		spin_unlock(&mp->m_sb_lock);
-		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
+		error = xfs_dec_fdblocks(mp, fdblks_delta, 0);
 		if (!error)
-			xfs_mod_fdblocks(mp, fdblks_delta, 0);
+			xfs_add_fdblocks(mp, fdblks_delta);
 		spin_lock(&mp->m_sb_lock);
 	}
 out:
@@ -556,24 +554,13 @@ xfs_fs_reserve_ag_blocks(
 /*
  * Free space reserved for per-AG metadata.
  */
-int
+void
 xfs_fs_unreserve_ag_blocks(
 	struct xfs_mount	*mp)
 {
 	xfs_agnumber_t		agno;
 	struct xfs_perag	*pag;
-	int			error = 0;
-	int			err2;
 
-	for_each_perag(mp, agno, pag) {
-		err2 = xfs_ag_resv_free(pag);
-		if (err2 && !error)
-			error = err2;
-	}
-
-	if (error)
-		xfs_warn(mp,
-	"Error %d freeing per-AG metadata reserve pool.", error);
-
-	return error;
+	for_each_perag(mp, agno, pag)
+		xfs_ag_resv_free(pag);
 }
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index 44457b0a0593765875d04fd98a4f9078d795d9e4..3e2f73bcf8314b87520cbb7ae0bf6542f756b5d5 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -12,6 +12,6 @@ int xfs_reserve_blocks(struct xfs_mount *mp, uint64_t request);
 int xfs_fs_goingdown(struct xfs_mount *mp, uint32_t inflags);
 
 int xfs_fs_reserve_ag_blocks(struct xfs_mount *mp);
-int xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp);
+void xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp);
 
 #endif	/* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d37ba10f5fa335d099ffcae707ab92c75da47a3d..b9df7cc9c473dd3678fd161fe18d19272183a912 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1136,16 +1136,44 @@ xfs_fs_writable(
 	return true;
 }
 
-/* Adjust m_fdblocks or m_frextents. */
+void
+xfs_add_freecounter(
+	struct xfs_mount	*mp,
+	struct percpu_counter	*counter,
+	uint64_t		delta)
+{
+	bool			has_resv_pool = (counter == &mp->m_fdblocks);
+	uint64_t		res_used;
+
+	/*
+	 * If the reserve pool is depleted, put blocks back into it first.
+	 * Most of the time the pool is full.
+	 */
+	if (!has_resv_pool || mp->m_resblks == mp->m_resblks_avail) {
+		percpu_counter_add(counter, delta);
+		return;
+	}
+
+	spin_lock(&mp->m_sb_lock);
+	res_used = mp->m_resblks - mp->m_resblks_avail;
+	if (res_used > delta) {
+		mp->m_resblks_avail += delta;
+	} else {
+		delta -= res_used;
+		mp->m_resblks_avail = mp->m_resblks;
+		percpu_counter_add(counter, delta);
+	}
+	spin_unlock(&mp->m_sb_lock);
+}
+
 int
-xfs_mod_freecounter(
+xfs_dec_freecounter(
 	struct xfs_mount	*mp,
 	struct percpu_counter	*counter,
-	int64_t			delta,
+	uint64_t		delta,
 	bool			rsvd)
 {
 	int64_t			lcounter;
-	long long		res_used;
 	uint64_t		set_aside = 0;
 	s32			batch;
 	bool			has_resv_pool;
@@ -1155,31 +1183,6 @@ xfs_mod_freecounter(
 	if (rsvd)
 		ASSERT(has_resv_pool);
 
-	if (delta > 0) {
-		/*
-		 * If the reserve pool is depleted, put blocks back into it
-		 * first. Most of the time the pool is full.
-		 */
-		if (likely(!has_resv_pool ||
-			   mp->m_resblks == mp->m_resblks_avail)) {
-			percpu_counter_add(counter, delta);
-			return 0;
-		}
-
-		spin_lock(&mp->m_sb_lock);
-		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
-
-		if (res_used > delta) {
-			mp->m_resblks_avail += delta;
-		} else {
-			delta -= res_used;
-			mp->m_resblks_avail = mp->m_resblks;
-			percpu_counter_add(counter, delta);
-		}
-		spin_unlock(&mp->m_sb_lock);
-		return 0;
-	}
-
 	/*
 	 * Taking blocks away, need to be more accurate the closer we
 	 * are to zero.
@@ -1207,7 +1210,7 @@
 	 */
 	if (has_resv_pool)
 		set_aside = xfs_fdblocks_unavailable(mp);
-	percpu_counter_add_batch(counter, delta, batch);
+	percpu_counter_add_batch(counter, -((int64_t)delta), batch);
 	if (__percpu_counter_compare(counter, set_aside,
 				     XFS_FDBLOCKS_BATCH) >= 0) {
 		/* we had space! */
@@ -1219,11 +1222,11 @@
 	 * that took us to ENOSPC.
	 */
 	spin_lock(&mp->m_sb_lock);
-	percpu_counter_add(counter, -delta);
+	percpu_counter_add(counter, delta);
 	if (!has_resv_pool || !rsvd)
 		goto fdblocks_enospc;
 
-	lcounter = (long long)mp->m_resblks_avail + delta;
+	lcounter = (long long)mp->m_resblks_avail - delta;
 	if (lcounter >= 0) {
 		mp->m_resblks_avail = lcounter;
 		spin_unlock(&mp->m_sb_lock);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 283a5a26db3170f47059fbe323cb42a8935bc9fe..3a985c2ff06fe5c0ab700328e6d4bd05f5cb568b 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -562,19 +562,30 @@ xfs_fdblocks_unavailable(
 	return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
 }
 
-int xfs_mod_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
-		int64_t delta, bool rsvd);
+int xfs_dec_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
+		uint64_t delta, bool rsvd);
+void xfs_add_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
+		uint64_t delta);
 
-static inline int
-xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved)
+static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
+		bool reserved)
 {
-	return xfs_mod_freecounter(mp, &mp->m_fdblocks, delta, reserved);
+	return xfs_dec_freecounter(mp, &mp->m_fdblocks, delta, reserved);
 }
 
-static inline int
-xfs_mod_frextents(struct xfs_mount *mp, int64_t delta)
+static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
 {
-	return xfs_mod_freecounter(mp, &mp->m_frextents, delta, false);
+	xfs_add_freecounter(mp, &mp->m_fdblocks, delta);
+}
+
+static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
+{
+	return xfs_dec_freecounter(mp, &mp->m_frextents, delta, false);
+}
+
+static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
+{
+	xfs_add_freecounter(mp, &mp->m_frextents, delta);
 }
 
 extern int xfs_readsb(xfs_mount_t *, int);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 02c1a4aeaa4c3b90da86b6f582e2b42956510cdb..ed4a2f8c1936fb394d17fea26269f710d97e8995 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1882,11 +1882,7 @@ xfs_remount_ro(
 	xfs_inodegc_stop(mp);
 
 	/* Free the per-AG metadata reservation pool. */
-	error = xfs_fs_unreserve_ag_blocks(mp);
-	if (error) {
-		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		return error;
-	}
+	xfs_fs_unreserve_ag_blocks(mp);
 
 	/*
 	 * Before we sync the metadata, we need to free up the reserve block
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 0f844396754d71148f7fd21306f4da6097f9cc04..08df5bb70a6c7598b4864507afdff12082add324 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3069,7 +3069,6 @@ DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent);
 DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical);
 DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
 
-DEFINE_AG_ERROR_EVENT(xfs_ag_resv_free_error);
 DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
 
 /* refcount tracepoint classes */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 95921ad935477f1b53e0984dfa724fdee8ed4b71..828da4ac4316d6da8b484a480218d7adda3989bb 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -163,7 +163,7 @@ xfs_trans_reserve(
 	 * fail if the count would go below zero.
 	 */
 	if (blocks > 0) {
-		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
+		error = xfs_dec_fdblocks(mp, blocks, rsvd);
 		if (error != 0)
 			return -ENOSPC;
 		tp->t_blk_res += blocks;
@@ -210,7 +210,7 @@ xfs_trans_reserve(
 	 * fail if the count would go below zero.
	 */
 	if (rtextents > 0) {
-		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
+		error = xfs_dec_frextents(mp, rtextents);
 		if (error) {
 			error = -ENOSPC;
 			goto undo_log;
@@ -234,7 +234,7 @@
 
 undo_blocks:
 	if (blocks > 0) {
-		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
+		xfs_add_fdblocks(mp, blocks);
 		tp->t_blk_res = 0;
 	}
 	return error;
@@ -593,12 +593,10 @@ xfs_trans_unreserve_and_mod_sb(
 	struct xfs_trans	*tp)
 {
 	struct xfs_mount	*mp = tp->t_mountp;
-	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 	int64_t			blkdelta = tp->t_blk_res;
 	int64_t			rtxdelta = tp->t_rtx_res;
 	int64_t			idelta = 0;
 	int64_t			ifreedelta = 0;
-	int			error;
 
 	/*
 	 * Calculate the deltas.
@@ -631,10 +629,8 @@ xfs_trans_unreserve_and_mod_sb(
 	}
 
 	/* apply the per-cpu counters */
-	if (blkdelta) {
-		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
-		ASSERT(!error);
-	}
+	if (blkdelta)
+		xfs_add_fdblocks(mp, blkdelta);
 
 	if (idelta)
 		percpu_counter_add_batch(&mp->m_icount, idelta,
@@ -643,10 +639,8 @@ xfs_trans_unreserve_and_mod_sb(
 	if (ifreedelta)
 		percpu_counter_add(&mp->m_ifree, ifreedelta);
 
-	if (rtxdelta) {
-		error = xfs_mod_frextents(mp, rtxdelta);
-		ASSERT(!error);
-	}
+	if (rtxdelta)
+		xfs_add_frextents(mp, rtxdelta);
 
 	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
 		return;
@@ -682,7 +676,6 @@ xfs_trans_unreserve_and_mod_sb(
 	 */
 	ASSERT(mp->m_sb.sb_imax_pct >= 0);
 	ASSERT(mp->m_sb.sb_rextslog >= 0);
-	return;
 }
 
 /* Add the given log item to the transaction's list of log items. */
@@ -1301,9 +1294,9 @@ xfs_trans_reserve_more_inode(
 		return 0;
 
 	/* Quota failed, give back the new reservation. */
-	xfs_mod_fdblocks(mp, dblocks, tp->t_flags & XFS_TRANS_RESERVE);
+	xfs_add_fdblocks(mp, dblocks);
 	tp->t_blk_res -= dblocks;
-	xfs_mod_frextents(mp, rtx);
+	xfs_add_frextents(mp, rtx);
 	tp->t_rtx_res -= rtx;
 	return error;
 }
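
Not part of the patch itself: the split encodes an asymmetry that the old signed-delta xfs_mod_freecounter() hid. Returning space (xfs_add_fdblocks/xfs_add_frextents) can never fail and refills a depleted reserve pool before touching the free counter, while taking space (xfs_dec_fdblocks/xfs_dec_frextents) can return -ENOSPC unless the caller is allowed to dip into the reserve pool. The standalone userspace sketch below models only that behaviour for the fdblocks case; the type and function names (mount_model, model_add_fdblocks, model_dec_fdblocks) are invented for illustration, and the percpu counter batching, the set-aside blocks, and the m_sb_lock locking of the real xfs_mount.c code are deliberately left out.

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the m_fdblocks counter and reserve pool fields. */
struct mount_model {
	uint64_t fdblocks;	/* free blocks (m_fdblocks) */
	uint64_t resblks;	/* reserve pool size (m_resblks) */
	uint64_t resblks_avail;	/* reserve pool fill level (m_resblks_avail) */
};

/* Giving space back never fails; a depleted reserve pool is topped up first. */
static void model_add_fdblocks(struct mount_model *mp, uint64_t delta)
{
	uint64_t res_used = mp->resblks - mp->resblks_avail;

	if (res_used >= delta) {
		mp->resblks_avail += delta;
		return;
	}
	mp->resblks_avail = mp->resblks;
	mp->fdblocks += delta - res_used;
}

/* Taking space can fail; @rsvd lets the caller dip into the reserve pool. */
static int model_dec_fdblocks(struct mount_model *mp, uint64_t delta, bool rsvd)
{
	if (mp->fdblocks >= delta) {
		mp->fdblocks -= delta;
		return 0;
	}
	if (rsvd && mp->resblks_avail >= delta) {
		mp->resblks_avail -= delta;
		return 0;
	}
	return -ENOSPC;
}

int main(void)
{
	struct mount_model mp = {
		.fdblocks = 100, .resblks = 8, .resblks_avail = 8,
	};

	assert(model_dec_fdblocks(&mp, 100, false) == 0);	/* drains fdblocks */
	assert(model_dec_fdblocks(&mp, 1, false) == -ENOSPC);	/* no reserve access */
	assert(model_dec_fdblocks(&mp, 1, true) == 0);		/* reserve pool dip */
	model_add_fdblocks(&mp, 5);				/* refills pool first */
	printf("fdblocks=%llu resblks_avail=%llu\n",
	       (unsigned long long)mp.fdblocks,
	       (unsigned long long)mp.resblks_avail);
	return 0;
}

Built with any C compiler, the asserts pass and it prints fdblocks=4 resblks_avail=8, which mirrors why callers that only give space back (the growfs, remount-ro and transaction-unreserve paths above) no longer need to check a return value.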