[Concept,06/15] ext4l: Adjust folio offset and mapping operations

Message ID 20251230234134.906477-7-sjg@u-boot.org
State New
Series ext4l: Infrastructure and fixes for write support (part K)

Commit Message

Simon Glass Dec. 30, 2025, 11:41 p.m. UTC
  From: Simon Glass <simon.glass@canonical.com>

The existing folio macros assume page-aligned memory, but U-Boot uses
malloc'd buffers for simplicity.

Update the macros accordingly:

- offset_in_folio(): Calculate the offset from the folio's data pointer
- bh_offset(): Calculate the actual offset within the folio
- folio_set_bh(): Actually set b_folio and b_data
- kmap_local_folio(): Return a pointer to folio data + offset

Implement __filemap_get_folio(), folio_put() and folio_get() for
folio-lifecycle management.
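
As a rough illustration (a standalone sketch, not code from this patch),
the data-pointer form of offset_in_folio() gives the same answer no matter
where malloc() happened to place the buffer. The struct folio below is cut
down to just the data member the macro uses:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Cut-down stand-in for the real struct folio in ext4_uboot.h */
struct folio {
	void *data;
};

#define offset_in_folio(f, p) \
	((f) ? (unsigned int)((uintptr_t)(p) - (uintptr_t)(f)->data) : 0U)

int main(void)
{
	struct folio folio;
	char *buf = malloc(4096);	/* not page-aligned in general */

	if (!buf)
		return 1;
	folio.data = buf;

	/*
	 * The old page-mask form would depend on where malloc() placed
	 * buf; the data-pointer form always prints 100 here.
	 */
	printf("offset = %u\n", offset_in_folio(&folio, buf + 100));
	free(buf);

	return 0;
}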

Co-developed-by: Claude <noreply@anthropic.com>
Signed-off-by: Simon Glass <simon.glass@canonical.com>
---

 fs/ext4l/ext4_uboot.h | 33 ++++++++++--------
 fs/ext4l/support.c    | 80 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+), 14 deletions(-)
  

Patch

diff --git a/fs/ext4l/ext4_uboot.h b/fs/ext4l/ext4_uboot.h
index 4ce98eeb7ed..d7053c11d31 100644
--- a/fs/ext4l/ext4_uboot.h
+++ b/fs/ext4l/ext4_uboot.h
@@ -1247,16 +1247,21 @@  struct folio_batch {
 
 /* folio operations - stubs */
 #define folio_mark_dirty(f)			do { (void)(f); } while (0)
-#define offset_in_folio(f, p)			({ (void)(f); (unsigned int)((unsigned long)(p) & (PAGE_SIZE - 1)); })
+/*
+ * offset_in_folio - calculate offset of pointer within folio's data
+ * In Linux this uses page alignment, but in U-Boot we use the folio's
+ * actual data pointer since our buffers are malloc'd.
+ */
+#define offset_in_folio(f, p)			((f) ? (unsigned int)((uintptr_t)(p) - (uintptr_t)(f)->data) : 0U)
 #define folio_buffers(f)			({ (void)(f); (struct buffer_head *)NULL; })
 #define virt_to_folio(p)			({ (void)(p); (struct folio *)NULL; })
-#define folio_set_bh(bh, f, off)		do { (void)(bh); (void)(f); (void)(off); } while (0)
+#define folio_set_bh(bh, f, off)		do { if ((bh) && (f)) { (bh)->b_folio = (f); (bh)->b_data = (char *)(f)->data + (off); } } while (0)
 #define memcpy_from_folio(dst, f, off, len)	do { (void)(dst); (void)(f); (void)(off); (void)(len); } while (0)
 #define folio_test_uptodate(f)			({ (void)(f); 1; })
 #define folio_pos(f)				({ (void)(f); 0LL; })
 #define folio_size(f)				({ (void)(f); PAGE_SIZE; })
 #define folio_unlock(f)				do { (void)(f); } while (0)
-#define folio_put(f)				do { (void)(f); } while (0)
+/* folio_put and folio_get are implemented in support.c */
 #define folio_lock(f)				do { (void)(f); } while (0)
 #define folio_batch_init(fb)			do { (fb)->nr = 0; } while (0)
 #define filemap_get_folios(m, i, e, fb)		({ (void)(m); (void)(i); (void)(e); (void)(fb); 0U; })
@@ -1357,7 +1362,7 @@  static inline int generic_error_remove_folio(struct address_space *mapping,
 #define FGP_WRITEBEGIN	(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
 
 /* kmap/kunmap stubs for inline.c */
-#define kmap_local_folio(folio, off)	({ (void)(folio); (void)(off); (void *)NULL; })
+#define kmap_local_folio(folio, off)	((folio) ? (char *)(folio)->data + (off) : NULL)
 #define kunmap_local(addr)		do { (void)(addr); } while (0)
 
 /* Folio zeroing stubs for inline.c */
@@ -1367,13 +1372,12 @@  static inline int generic_error_remove_folio(struct address_space *mapping,
 /* mapping_gfp_mask stub */
 #define mapping_gfp_mask(m)		({ (void)(m); GFP_KERNEL; })
 
-/* __filemap_get_folio stub */
-static inline struct folio *__filemap_get_folio(struct address_space *mapping,
-						pgoff_t index, unsigned int fgp_flags,
-						gfp_t gfp)
-{
-	return NULL;
-}
+/* Folio operations - implemented in support.c */
+struct folio *__filemap_get_folio(struct address_space *mapping,
+				  pgoff_t index, unsigned int fgp_flags,
+				  gfp_t gfp);
+void folio_put(struct folio *folio);
+void folio_get(struct folio *folio);
 
 /* projid_t - project ID type */
 typedef unsigned int projid_t;
@@ -1545,7 +1549,9 @@  static inline char *d_path(const struct path *path, char *buf, int buflen)
 /* Buffer operations - additional */
 #define getblk_unmovable(bdev, block, size)	sb_getblk(bdev->bd_super, block)
 #define create_empty_buffers(f, s, flags)	({ (void)(f); (void)(s); (void)(flags); (struct buffer_head *)NULL; })
-#define bh_offset(bh)				(0UL)
+/* bh_offset returns offset of b_data within the folio */
+#define bh_offset(bh)				((bh)->b_folio ? \
+	(unsigned long)((char *)(bh)->b_data - (char *)(bh)->b_folio->data) : 0UL)
 #define block_invalidate_folio(f, o, l)		do { } while (0)
 #define block_write_end(pos, len, copied, folio) ({ (void)(pos); (void)(len); (void)(folio); (copied); })
 #define block_dirty_folio(m, f)			({ (void)(m); (void)(f); false; })
@@ -2542,8 +2548,7 @@  static inline unsigned long ext4_find_next_bit_le(const void *addr,
 /* WARN_RATELIMIT - just evaluate condition, no warning in U-Boot */
 #define WARN_RATELIMIT(condition, ...) (condition)
 
-/* folio_get - increment folio refcount (no-op in U-Boot) */
-#define folio_get(f)			do { (void)(f); } while (0)
+/* folio_get - now implemented in support.c */
 
 /* array_index_nospec - bounds checking without speculation (no-op in U-Boot) */
 #define array_index_nospec(index, size) (index)
diff --git a/fs/ext4l/support.c b/fs/ext4l/support.c
index 127a3920c96..e5343aab198 100644
--- a/fs/ext4l/support.c
+++ b/fs/ext4l/support.c
@@ -626,3 +626,83 @@  int bh_read(struct buffer_head *bh, int flags)
 	submit_bh(REQ_OP_READ | flags, bh);
 	return buffer_uptodate(bh) ? 0 : -EIO;
 }
+
+/**
+ * __filemap_get_folio() - Get or create a folio for a mapping
+ * @mapping: The address_space to search
+ * @index: The page index
+ * @fgp_flags: Flags (FGP_CREAT to create if not found)
+ * @gfp: Memory allocation flags
+ * Return: Folio pointer or ERR_PTR on error
+ */
+struct folio *__filemap_get_folio(struct address_space *mapping,
+				  pgoff_t index, unsigned int fgp_flags,
+				  gfp_t gfp)
+{
+	struct folio *folio;
+	int i;
+
+	/* Search for existing folio in cache */
+	if (mapping) {
+		for (i = 0; i < mapping->folio_cache_count; i++) {
+			folio = mapping->folio_cache[i];
+			if (folio && folio->index == index) {
+				/* Found existing folio, bump refcount */
+				folio->_refcount++;
+				return folio;
+			}
+		}
+	}
+
+	/* If not creating, return error */
+	if (!(fgp_flags & FGP_CREAT))
+		return ERR_PTR(-ENOENT);
+
+	/* Create new folio */
+	folio = kzalloc(sizeof(struct folio), gfp);
+	if (!folio)
+		return ERR_PTR(-ENOMEM);
+
+	folio->data = kzalloc(PAGE_SIZE, gfp);
+	if (!folio->data) {
+		kfree(folio);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	folio->index = index;
+	folio->mapping = mapping;
+	folio->_refcount = 1;
+
+	/* Add to cache if there's room */
+	if (mapping && mapping->folio_cache_count < FOLIO_CACHE_MAX) {
+		mapping->folio_cache[mapping->folio_cache_count++] = folio;
+		/* Extra ref for cache */
+		folio->_refcount++;
+	}
+
+	return folio;
+}
+
+/**
+ * folio_put() - Release a reference to a folio
+ * @folio: The folio to release
+ */
+void folio_put(struct folio *folio)
+{
+	if (!folio)
+		return;
+	if (--folio->_refcount > 0)
+		return;
+	kfree(folio->data);
+	kfree(folio);
+}
+
+/**
+ * folio_get() - Acquire a reference to a folio
+ * @folio: The folio to reference
+ */
+void folio_get(struct folio *folio)
+{
+	if (folio)
+		folio->_refcount++;
+}