diff --git a/CNAME b/CNAME
new file mode 100644
index 0000000..8ce4c39
--- /dev/null
+++ b/CNAME
@@ -0,0 +1 @@
+edurank.hust.college
diff --git a/detail/1.html b/detail/1.html
new file mode 100644
index 0000000..a47abca
--- /dev/null
+++ b/detail/1.html
@@ -0,0 +1,662 @@

Patches contributed by Massachusetts Institute of Technology

commit ee6a12d0d4d85f3833d177cd382cd417f0ef011b
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Thu Oct 5 21:42:47 2023 -0400
+
+    ext4: add missing initialization of call_notify_err in update_super_work()
+    
+    Fixes: ff0722de896e ("ext4: add periodic superblock update check")
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index dbebd8b3127e..6f48dec19f4a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -768,7 +768,8 @@ static void update_super_work(struct work_struct *work)
+ 	 */
+ 	if (!sb_rdonly(sbi->s_sb) && journal) {
+ 		struct buffer_head *sbh = sbi->s_sbh;
+-		bool call_notify_err;
++		bool call_notify_err = false;
++
+ 		handle = jbd2_journal_start(journal, 1);
+ 		if (IS_ERR(handle))
+ 			goto write_directly;
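
A minimal userspace sketch of the bug class fixed above (hypothetical do_work(), not from the kernel): when the early goto is taken before the flag is assigned, the read at the label is undefined unless the declaration initializes it.

    #include <stdbool.h>
    #include <stdio.h>

    static void do_work(int fail_early)
    {
            bool notify = false;    /* the fix: without "= false", the goto
                                       path below reaches the read at out:
                                       with an indeterminate value */

            if (fail_early)
                    goto out;       /* mirrors the goto write_directly path */
            notify = true;          /* only the normal path sets the flag */
    out:
            if (notify)             /* read on both paths */
                    printf("notify error\n");
    }

    int main(void)
    {
            do_work(1);             /* early-exit path must see a valid flag */
            return 0;
    }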

commit bb15cea20f211e110150e528fca806f38d5789e0
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Tue Aug 22 23:43:38 2023 -0400
+
+    ext4: rename s_error_work to s_sb_upd_work
+    
+    The most common reason for s_error_work to get scheduled is now the
+    periodic update of the superblock.  So rename it to s_sb_upd_work.
+    
+    Also rename the function flush_stashed_error_work() to
+    update_super_work().
+    
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 2c2c3191bf41..84618c46f239 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1698,10 +1698,13 @@ struct ext4_sb_info {
+ 	const char *s_last_error_func;
+ 	time64_t s_last_error_time;
+ 	/*
+-	 * If we are in a context where we cannot update error information in
+-	 * the on-disk superblock, we queue this work to do it.
++	 * If we are in a context where we cannot update the on-disk
++	 * superblock, we queue the work here.  This is used to update
++	 * the error information in the superblock, and for periodic
++	 * updates of the superblock called from the commit callback
++	 * function.
+ 	 */
+-	struct work_struct s_error_work;
++	struct work_struct s_sb_upd_work;
+ 
+ 	/* Ext4 fast commit sub transaction ID */
+ 	atomic_t s_fc_subtid;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index bf0cfdffa9d0..91f20afa1d71 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -481,7 +481,7 @@ static void ext4_maybe_update_superblock(struct super_block *sb)
+ 	diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written);
+ 
+ 	if (diff_size > EXT4_SB_REFRESH_INTERVAL_KB)
+-		schedule_work(&EXT4_SB(sb)->s_error_work);
++		schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
+ }
+ 
+ /*
+@@ -723,7 +723,7 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+ 		 * defer superblock flushing to a workqueue.
+ 		 */
+ 		if (continue_fs && journal)
+-			schedule_work(&EXT4_SB(sb)->s_error_work);
++			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
+ 		else
+ 			ext4_commit_super(sb);
+ 	}
+@@ -750,10 +750,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+ 	sb->s_flags |= SB_RDONLY;
+ }
+ 
+-static void flush_stashed_error_work(struct work_struct *work)
++static void update_super_work(struct work_struct *work)
+ {
+ 	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
+-						s_error_work);
++						s_sb_upd_work);
+ 	journal_t *journal = sbi->s_journal;
+ 	handle_t *handle;
+ 
+@@ -1078,7 +1078,7 @@ __acquires(bitlock)
+ 		if (!bdev_read_only(sb->s_bdev)) {
+ 			save_error_info(sb, EFSCORRUPTED, ino, block, function,
+ 					line);
+-			schedule_work(&EXT4_SB(sb)->s_error_work);
++			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
+ 		}
+ 		return;
+ 	}
+@@ -1318,10 +1318,10 @@ static void ext4_put_super(struct super_block *sb)
+ 	 * Unregister sysfs before destroying jbd2 journal.
+ 	 * Since we could still access attr_journal_task attribute via sysfs
+ 	 * path which could have sbi->s_journal->j_task as NULL
+-	 * Unregister sysfs before flush sbi->s_error_work.
++	 * Unregister sysfs before flush sbi->s_sb_upd_work.
+ 	 * Since user may read /proc/fs/ext4/xx/mb_groups during umount, If
+ 	 * read metadata verify failed then will queue error work.
+-	 * flush_stashed_error_work will call start_this_handle may trigger
++	 * update_super_work will call start_this_handle, which may trigger
+ 	 * BUG_ON.
+ 	 */
+ 	ext4_unregister_sysfs(sb);
+@@ -1333,7 +1333,7 @@ static void ext4_put_super(struct super_block *sb)
+ 	ext4_unregister_li_request(sb);
+ 	ext4_quotas_off(sb, EXT4_MAXQUOTAS);
+ 
+-	flush_work(&sbi->s_error_work);
++	flush_work(&sbi->s_sb_upd_work);
+ 	destroy_workqueue(sbi->rsv_conversion_wq);
+ 	ext4_release_orphan_info(sb);
+ 
+@@ -4998,8 +4998,8 @@ static int ext4_load_and_init_journal(struct super_block *sb,
+ 	return 0;
+ 
+ out:
+-	/* flush s_error_work before journal destroy. */
+-	flush_work(&sbi->s_error_work);
++	/* flush s_sb_upd_work before destroying the journal. */
++	flush_work(&sbi->s_sb_upd_work);
+ 	jbd2_journal_destroy(sbi->s_journal);
+ 	sbi->s_journal = NULL;
+ 	return -EINVAL;
+@@ -5322,7 +5322,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 
+ 	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
+ 	spin_lock_init(&sbi->s_error_lock);
+-	INIT_WORK(&sbi->s_error_work, flush_stashed_error_work);
++	INIT_WORK(&sbi->s_sb_upd_work, update_super_work);
+ 
+ 	err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed);
+ 	if (err)
+@@ -5666,16 +5666,16 @@ failed_mount9: __maybe_unused
+ 	sbi->s_ea_block_cache = NULL;
+ 
+ 	if (sbi->s_journal) {
+-		/* flush s_error_work before journal destroy. */
+-		flush_work(&sbi->s_error_work);
++		/* flush s_sb_upd_work before journal destroy. */
++		flush_work(&sbi->s_sb_upd_work);
+ 		jbd2_journal_destroy(sbi->s_journal);
+ 		sbi->s_journal = NULL;
+ 	}
+ failed_mount3a:
+ 	ext4_es_unregister_shrinker(sbi);
+ failed_mount3:
+-	/* flush s_error_work before sbi destroy */
+-	flush_work(&sbi->s_error_work);
++	/* flush s_sb_upd_work before sbi destroy */
++	flush_work(&sbi->s_sb_upd_work);
+ 	del_timer_sync(&sbi->s_err_report);
+ 	ext4_stop_mmpd(sbi);
+ 	ext4_group_desc_free(sbi);
+@@ -6551,7 +6551,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	}
+ 
+ 	/* Flush outstanding errors before changing fs state */
+-	flush_work(&sbi->s_error_work);
++	flush_work(&sbi->s_sb_upd_work);
+ 
+ 	if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
+ 		if (ext4_forced_shutdown(sb)) {

commit 2ef6c32a914b85217b44a0a2418e830e520b085e
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Fri Jun 23 10:18:51 2023 -0400
+
+    ext4: avoid updating the superblock on a r/o mount if not needed
+    
+    This was noticed by a user who observed that the mtime of a file
+    backing a loopback device was getting bumped when the loopback device
+    is mounted read-only.  Note: This doesn't show up when doing a
+    loopback mount of a file directly, via "mount -o ro /tmp/foo.img
+    /mnt", since the loop device is set read-only when mount automatically
+    creates loop device.  However, this is noticeable for a LUKS loop
+    device like this:
+    
+    % cryptsetup luksOpen /tmp/foo.img test
+    % mount -o ro /dev/loop0 /mnt ; umount /mnt
+    
+    or, if LUKS is not in use, if the user manually creates the loop
+    device like this:
+    
+    % losetup /dev/loop0 /tmp/foo.img
+    % mount -o ro /dev/loop0 /mnt ; umount /mnt
+    
+    The modified mtime causes rsync to do a rolling checksum scan of the
+    file on the local and remote side, incrementally increasing the time
+    to rsync the not-modified-but-touched image file.
+    
+    Fixes: eee00237fa5e ("ext4: commit super block if fs record error when journal record without error")
+    Cc: stable@kernel.org
+    Link: https://lore.kernel.org/r/ZIauBR7YiV3rVAHL@glitch
+    Reported-by: Sean Greenslade <sean@seangreenslade.com>
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index b3819e70093e..c638b0db3b2b 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -5997,19 +5997,27 @@ static int ext4_load_journal(struct super_block *sb,
+ 		err = jbd2_journal_wipe(journal, !really_read_only);
+ 	if (!err) {
+ 		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
++		__le16 orig_state;
++		bool changed = false;
+ 
+ 		if (save)
+ 			memcpy(save, ((char *) es) +
+ 			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
+ 		err = jbd2_journal_load(journal);
+-		if (save)
++		if (save && memcmp(((char *) es) + EXT4_S_ERR_START,
++				   save, EXT4_S_ERR_LEN)) {
+ 			memcpy(((char *) es) + EXT4_S_ERR_START,
+ 			       save, EXT4_S_ERR_LEN);
++			changed = true;
++		}
+ 		kfree(save);
++		orig_state = es->s_state;
+ 		es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state &
+ 					   EXT4_ERROR_FS);
++		if (orig_state != es->s_state)
++			changed = true;
+ 		/* Write out restored error information to the superblock */
+-		if (!bdev_read_only(sb->s_bdev)) {
++		if (changed && !really_read_only) {
+ 			int err2;
+ 			err2 = ext4_commit_super(sb);
+ 			err = err ? : err2;
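
A minimal userspace sketch of the write-avoidance pattern the patch applies (hypothetical restore_region(), not from the kernel): compare before copying, and report whether anything changed so the caller can skip the superblock write, and with it the mtime bump, entirely.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* copy saved over disk only if they differ; report whether a
       write-out is actually needed */
    static bool restore_region(char *disk, const char *saved, size_t len)
    {
            bool changed = false;

            if (memcmp(disk, saved, len) != 0) {
                    memcpy(disk, saved, len);
                    changed = true;
            }
            return changed;
    }

    int main(void)
    {
            char disk[4] = "one";

            if (restore_region(disk, "one", 4))
                    printf("commit superblock\n");
            else
                    printf("nothing changed; skip the write\n");
            return 0;
    }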

commit 4c0cfebdf3c34c9cd2c55844f549fa46b1da3164
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Thu Jun 8 10:39:35 2023 -0400
+
+    ext4: clean up mballoc criteria comments
+    
+    Line wrap and slightly clarify the comments describing mballoc's
+    criteria.
+    
+    Define EXT4_MB_NUM_CRS as part of the enum, so that it will
+    automatically get updated when criteria are added or removed.
+    
+    Also fix a potential uninitialized use of the 'cr' variable if
+    CONFIG_EXT4_DEBUG is enabled.
+    
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 6a1f013d23f7..45a531446ea2 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -128,47 +128,52 @@ enum SHIFT_DIRECTION {
+ };
+ 
+ /*
+- * Number of criterias defined. For each criteria, mballoc has slightly
+- * different way of finding the required blocks nad usually, higher the
+- * criteria the slower the allocation. We start at lower criterias and keep
+- * falling back to higher ones if we are not able to find any blocks.
+- */
+-#define EXT4_MB_NUM_CRS 5
+-/*
+- * All possible allocation criterias for mballoc. Lower are faster.
++ * For each criterion, mballoc has a slightly different way of finding
++ * the required blocks and usually, the higher the criterion the slower
++ * the allocation.  We start at lower criteria and keep falling back to
++ * higher ones if we are not able to find any blocks.  Lower (earlier)
++ * criteria are faster.
+  */
+ enum criteria {
+ 	/*
+-	 * Used when number of blocks needed is a power of 2. This doesn't
+-	 * trigger any disk IO except prefetch and is the fastest criteria.
++	 * Used when number of blocks needed is a power of 2. This
++	 * doesn't trigger any disk IO except prefetch and is the
++	 * fastest criteria.
+ 	 */
+ 	CR_POWER2_ALIGNED,
+ 
+ 	/*
+-	 * Tries to lookup in-memory data structures to find the most suitable
+-	 * group that satisfies goal request. No disk IO except block prefetch.
++	 * Tries to lookup in-memory data structures to find the most
++	 * suitable group that satisfies goal request. No disk IO
++	 * except block prefetch.
+ 	 */
+ 	CR_GOAL_LEN_FAST,
+ 
+         /*
+-	 * Same as CR_GOAL_LEN_FAST but is allowed to reduce the goal length to
+-         * the best available length for faster allocation.
++	 * Same as CR_GOAL_LEN_FAST but is allowed to reduce the goal
++         * length to the best available length for faster allocation.
+ 	 */
+ 	CR_BEST_AVAIL_LEN,
+ 
+ 	/*
+-	 * Reads each block group sequentially, performing disk IO if necessary, to
+-	 * find find_suitable block group. Tries to allocate goal length but might trim
+-	 * the request if nothing is found after enough tries.
++	 * Reads each block group sequentially, performing disk IO if
++	 * necessary, to find a suitable block group. Tries to
++	 * allocate goal length but might trim the request if nothing
++	 * is found after enough tries.
+ 	 */
+ 	CR_GOAL_LEN_SLOW,
+ 
+ 	/*
+-	 * Finds the first free set of blocks and allocates those. This is only
+-	 * used in rare cases when CR_GOAL_LEN_SLOW also fails to allocate
+-	 * anything.
++	 * Finds the first free set of blocks and allocates
++	 * those. This is only used in rare cases when
++	 * CR_GOAL_LEN_SLOW also fails to allocate anything.
+ 	 */
+ 	CR_ANY_FREE,
++
++	/*
++	 * Number of criterias defined.
++	 */
++	EXT4_MB_NUM_CRS
+ };
+ 
+ /* criteria below which we use fast block scanning and avoid unnecessary IO */
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 74ebe31f8d0f..a2475b8c9fb5 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1035,11 +1035,9 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
+ 
+ 		if (num_stripe_clusters > 0) {
+ 			/*
+-			 * Try to round up the adjusted goal to stripe size
+-			 * (in cluster units) multiple for efficiency.
+-			 *
+-			 * XXX: Is s->stripe always a power of 2? In that case
+-			 * we can use the faster round_up() variant.
++			 * Try to round up the adjusted goal length to
++			 * stripe size (in cluster units) multiple for
++			 * efficiency.
+ 			 */
+ 			ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
+ 						     num_stripe_clusters);
+@@ -2758,7 +2756,7 @@ static noinline_for_stack int
+ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ {
+ 	ext4_group_t prefetch_grp = 0, ngroups, group, i;
+-	enum criteria cr, new_cr;
++	enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
+ 	int err = 0, first_err = 0;
+ 	unsigned int nr = 0, prefetch_ios = 0;
+ 	struct ext4_sb_info *sbi;
+@@ -2815,12 +2813,13 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ 		spin_unlock(&sbi->s_md_lock);
+ 	}
+ 
+-	/* Let's just scan groups to find more-less suitable blocks */
+-	cr = ac->ac_2order ? CR_POWER2_ALIGNED : CR_GOAL_LEN_FAST;
+ 	/*
+-	 * cr == CR_POWER2_ALIGNED try to get exact allocation,
+-	 * cr == CR_ANY_FREE try to get anything
++	 * Let's just scan groups to find more or less suitable blocks.  We
++	 * start with CR_GOAL_LEN_FAST, unless it is power of 2
++	 * aligned, in which case let's do that faster approach first.
+ 	 */
++	if (ac->ac_2order)
++		cr = CR_POWER2_ALIGNED;
+ repeat:
+ 	for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
+ 		ac->ac_criteria = cr;
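
A standalone sketch of the sentinel-enum idiom the patch adopts (not kernel code): making the count the final enumerator keeps it correct whenever criteria are added or removed above it.

    #include <stdio.h>

    enum criteria {
            CR_POWER2_ALIGNED,
            CR_GOAL_LEN_FAST,
            CR_BEST_AVAIL_LEN,
            CR_GOAL_LEN_SLOW,
            CR_ANY_FREE,
            EXT4_MB_NUM_CRS         /* always last: number of criteria */
    };

    int main(void)
    {
            /* prints 5, and stays correct if a criterion is added above */
            printf("%d criteria defined\n", EXT4_MB_NUM_CRS);
            return 0;
    }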

commit dea9d8f7643fab07bf89a1155f1f94f37d096a5e
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Thu Jun 8 10:06:40 2023 -0400
+
+    ext4: only check dquot_initialize_needed() when debugging
+    
+    ext4_xattr_block_set() relies on its caller to call dquot_initialize()
+    on the inode.  To ensure that this has happened, there are WARN_ON
+    checks.  Unfortunately, these are subject to false positives if there is
+    an antagonist thread which is flipping the file system at high rates
+    between r/o and r/w.  So only do the check if EXT4_XATTR_DEBUG is
+    enabled.
+    
+    Link: https://lore.kernel.org/r/20230608044056.GA1418535@mit.edu
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 13d7f17a9c8c..321e3a888c20 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -2056,8 +2056,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 			else {
+ 				u32 ref;
+ 
++#ifdef EXT4_XATTR_DEBUG
+ 				WARN_ON_ONCE(dquot_initialize_needed(inode));
+-
++#endif
+ 				/* The old block is released after updating
+ 				   the inode. */
+ 				error = dquot_alloc_block(inode,
+@@ -2120,8 +2121,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 			/* We need to allocate a new block */
+ 			ext4_fsblk_t goal, block;
+ 
++#ifdef EXT4_XATTR_DEBUG
+ 			WARN_ON_ONCE(dquot_initialize_needed(inode));
+-
++#endif
+ 			goal = ext4_group_first_block_no(sb,
+ 						EXT4_I(inode)->i_block_group);
+ 			block = ext4_new_meta_blocks(handle, inode, goal, 0,
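
A minimal userspace sketch of the compile-time guard used above (hypothetical check_dquot(), not from the kernel): the check only exists in builds that define EXT4_XATTR_DEBUG, so production builds cannot emit the false-positive warning at all.

    #include <stdio.h>

    /* build with -DEXT4_XATTR_DEBUG to compile the check in */
    static void check_dquot(int initialize_needed)
    {
    #ifdef EXT4_XATTR_DEBUG
            if (initialize_needed)
                    fprintf(stderr, "WARN: dquot_initialize() missing\n");
    #else
            (void) initialize_needed;       /* check compiled out */
    #endif
    }

    int main(void)
    {
            check_dquot(1);                 /* warns only in debug builds */
            return 0;
    }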

commit 1b29243933098cdbc31b579b5616e183b4275e2f
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Thu Jun 8 09:57:04 2023 -0400
+
+    Revert "ext4: don't clear SB_RDONLY when remounting r/w until quota is re-enabled"
+    
+    This reverts commit a44be64bbecb15a452496f60db6eacfee2b59c79.
+    
+    Link: https://lore.kernel.org/r/653b3359-2005-21b1-039d-c55ca4cffdcc@gmail.com
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 56a5d1c469fc..05fcecc36244 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6388,7 +6388,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	struct ext4_mount_options old_opts;
+ 	ext4_group_t g;
+ 	int err = 0;
+-	int enable_rw = 0;
+ #ifdef CONFIG_QUOTA
+ 	int enable_quota = 0;
+ 	int i, j;
+@@ -6575,7 +6574,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 			if (err)
+ 				goto restore_opts;
+ 
+-			enable_rw = 1;
++			sb->s_flags &= ~SB_RDONLY;
+ 			if (ext4_has_feature_mmp(sb)) {
+ 				err = ext4_multi_mount_protect(sb,
+ 						le64_to_cpu(es->s_mmp_block));
+@@ -6622,9 +6621,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ 		ext4_release_system_zone(sb);
+ 
+-	if (enable_rw)
+-		sb->s_flags &= ~SB_RDONLY;
+-
+ 	/*
+ 	 * Reinitialize lazy itable initialization thread based on
+ 	 * current settings

commit eb1f822c76beeaa76ab8b6737ab9dc9f9798408c
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Fri May 26 23:57:29 2023 -0400
+
+    ext4: enable the lazy init thread when remounting read/write
+    
+    In commit a44be64bbecb ("ext4: don't clear SB_RDONLY when remounting
+    r/w until quota is re-enabled") we defer clearing the SB_RDONLY flag
+    in struct super.  However, we didn't defer when we checked sb_rdonly()
+    to determine whether the lazy itable init thread should be enabled, with
+    the net result that the lazy inode table initialization would not be
+    properly started.  This can cause generic/231 to fail in ext4's
+    nojournal mode.
+    
+    Fix this by deciding whether to start or stop the lazy itable init
+    thread only after we clear the SB_RDONLY flag when we are remounting
+    the file system read/write.
+    
+    Fixes: a44be64bbecb ("ext4: don't clear SB_RDONLY when remounting r/w until...")
+    
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+    Link: https://lore.kernel.org/r/20230527035729.1001605-1-tytso@mit.edu
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 9680fe753e59..56a5d1c469fc 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6588,18 +6588,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 		}
+ 	}
+ 
+-	/*
+-	 * Reinitialize lazy itable initialization thread based on
+-	 * current settings
+-	 */
+-	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
+-		ext4_unregister_li_request(sb);
+-	else {
+-		ext4_group_t first_not_zeroed;
+-		first_not_zeroed = ext4_has_uninit_itable(sb);
+-		ext4_register_li_request(sb, first_not_zeroed);
+-	}
+-
+ 	/*
+ 	 * Handle creation of system zone data early because it can fail.
+ 	 * Releasing of existing data is done when we are sure remount will
+@@ -6637,6 +6625,18 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	if (enable_rw)
+ 		sb->s_flags &= ~SB_RDONLY;
+ 
++	/*
++	 * Reinitialize lazy itable initialization thread based on
++	 * current settings
++	 */
++	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
++		ext4_unregister_li_request(sb);
++	else {
++		ext4_group_t first_not_zeroed;
++		first_not_zeroed = ext4_has_uninit_itable(sb);
++		ext4_register_li_request(sb, first_not_zeroed);
++	}
++
+ 	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
+ 		ext4_stop_mmpd(sbi);
+ 
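
A standalone sketch of the ordering bug fixed here (hypothetical names, not from the kernel): a decision that tests a flag must run after the flag is updated, or it acts on stale state.

    #include <stdbool.h>
    #include <stdio.h>

    static bool rdonly = true;

    static void maybe_start_lazy_init(void)
    {
            /* the decision reads the current value of rdonly */
            printf(rdonly ? "lazy init skipped\n" : "lazy init started\n");
    }

    int main(void)
    {
            /* buggy order: decide first, clear the flag afterwards;
             * the thread would never be started on a remount to r/w */

            /* fixed order, as in the patch: clear first, then decide */
            rdonly = false;
            maybe_start_lazy_init();
            return 0;
    }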

commit aff3bea95388299eec63440389b4545c8041b357
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Tue May 23 23:49:51 2023 -0400
+
+    ext4: add lockdep annotations for i_data_sem for ea_inode's
+    
+    Treat i_data_sem for ea_inodes as being in their own lockdep class to
+    avoid lockdep complaints about ext4_setattr's use of inode_lock() on
+    normal inodes potentially causing a lock ordering problem with
+    i_data_sem on ea_inodes in ext4_xattr_inode_write().  However, ea_inodes
+    will never be operated on by ext4_setattr(), so this isn't a problem.
+    
+    Cc: stable@kernel.org
+    Link: https://syzkaller.appspot.com/bug?extid=298c5d8fb4a128bc27b0
+    Reported-by: syzbot+298c5d8fb4a128bc27b0@syzkaller.appspotmail.com
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+    Link: https://lore.kernel.org/r/20230524034951.779531-5-tytso@mit.edu
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 9525c52b78dc..8104a21b001a 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -918,11 +918,13 @@ do {									       \
+  *			  where the second inode has larger inode number
+  *			  than the first
+  *  I_DATA_SEM_QUOTA  - Used for quota inodes only
++ *  I_DATA_SEM_EA     - Used for ea_inodes only
+  */
+ enum {
+ 	I_DATA_SEM_NORMAL = 0,
+ 	I_DATA_SEM_OTHER,
+ 	I_DATA_SEM_QUOTA,
++	I_DATA_SEM_EA
+ };
+ 
+ 
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index ff7ab63c5b4f..13d7f17a9c8c 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -121,7 +121,11 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
+ #ifdef CONFIG_LOCKDEP
+ void ext4_xattr_inode_set_class(struct inode *ea_inode)
+ {
++	struct ext4_inode_info *ei = EXT4_I(ea_inode);
++
+ 	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
++	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
++	lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
+ }
+ #endif
+ 
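
A kernel-style sketch of the subclass idiom (hypothetical ea_inode_set_lock_class(), assuming the I_DATA_SEM_EA enumerator added to ext4.h above): locks that share a class but carry distinct subclasses let lockdep validate normal-inode -> ea_inode nesting instead of reporting it as recursive locking.

    #include <linux/lockdep.h>
    #include "ext4.h"       /* struct ext4_inode_info, I_DATA_SEM_EA */

    static void ea_inode_set_lock_class(struct inode *ea_inode)
    {
            struct ext4_inode_info *ei = EXT4_I(ea_inode);

            /* same lock class as every other i_data_sem, but a distinct
             * subclass, so taking a normal inode's i_data_sem and then
             * an ea_inode's is a valid order in lockdep's eyes */
            lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
    }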

commit 2bc7e7c1a3bc9bd0cbf0f71006f6fe7ef24a00c2
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Tue May 23 23:49:50 2023 -0400
+
+    ext4: disallow ea_inodes with extended attributes
+    
+    An ea_inode stores the value of an extended attribute; it cannot have
+    extended attributes itself, or this will cause recursive nightmares.
+    Add a check in ext4_iget() to make sure this is the case.
+    
+    Cc: stable@kernel.org
+    Reported-by: syzbot+e44749b6ba4d0434cd47@syzkaller.appspotmail.com
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+    Link: https://lore.kernel.org/r/20230524034951.779531-4-tytso@mit.edu
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 258f3cbed347..02de439bf1f0 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4647,6 +4647,9 @@ static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
+ 	if (flags & EXT4_IGET_EA_INODE) {
+ 		if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+ 			return "missing EA_INODE flag";
++		if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
++		    EXT4_I(inode)->i_file_acl)
++			return "ea_inode with extended attributes";
+ 	} else {
+ 		if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+ 			return "unexpected EA_INODE flag";

commit b928dfdcb27d8fa59917b794cfba53052a2f050f
+Author: Theodore Ts'o <tytso@mit.edu>
+Date:   Tue May 23 23:49:49 2023 -0400
+
+    ext4: set lockdep subclass for the ea_inode in ext4_xattr_inode_cache_find()
+    
+    If the ea_inode has been pushed out of the inode cache while there is
+    still a reference in the mb_cache, the lockdep subclass will not be
+    set on the inode, which can lead to some lockdep false positives.
+    
+    Fixes: 33d201e0277b ("ext4: fix lockdep warning about recursive inode locking")
+    Cc: stable@kernel.org
+    Reported-by: syzbot+d4b971e744b1f5439336@syzkaller.appspotmail.com
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+    Link: https://lore.kernel.org/r/20230524034951.779531-3-tytso@mit.edu
+    Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index a27208129a80..ff7ab63c5b4f 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1539,6 +1539,7 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
+ 				     EXT4_IGET_EA_INODE);
+ 		if (IS_ERR(ea_inode))
+ 			goto next_entry;
++		ext4_xattr_inode_set_class(ea_inode);
+ 		if (i_size_read(ea_inode) == value_len &&
+ 		    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
+ 		    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
+