[RFC] shrink the size of struct page

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



This patch squeezes the ->zone field of struct page into ->flags (when
CONFIG_PIGMEM is not set), and then uses an address calculation off the
zone's zone_start_paddr to recover the kernel virtual address of the
page, allowing the ->virtual field to be dropped as well.


Cheers,
Bill


diff -urN linux-2.4.15-pre5-virgin/arch/i386/config.in linux-2.4.15-pre5-bootmem/arch/i386/config.in
--- linux-2.4.15-pre5-virgin/arch/i386/config.in	Mon Nov 19 13:07:07 2001
+++ linux-2.4.15-pre5-bootmem/arch/i386/config.in	Mon Nov 19 01:30:10 2001
@@ -227,6 +227,12 @@
    fi
 fi
 
+if [ "$CONFIG_HIGHMEM" != "y" ]; then
+	bool 'Bloat the size of struct page.' CONFIG_PIGMEM
+else
+	define_bool CONFIG_PIGMEM y
+fi
+
 source drivers/pci/Config.in
 
 bool 'EISA support' CONFIG_EISA
diff -urN linux-2.4.15-pre5-virgin/include/asm-i386/pgtable.h linux-2.4.15-pre5-bootmem/include/asm-i386/pgtable.h
--- linux-2.4.15-pre5-virgin/include/asm-i386/pgtable.h	Fri Nov 16 19:35:57 2001
+++ linux-2.4.15-pre5-bootmem/include/asm-i386/pgtable.h	Mon Nov 19 12:59:03 2001
@@ -263,11 +263,39 @@
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
+#ifdef CONFIG_PIGMEM
+
+#define PageZone(page) ((page)->zone)
+
+#define SetPageZone(page, zone_id)				\
+	do {							\
+		(page)->zone = zone_table[zone_id];		\
+	} while(0)
+
+#else /* !CONFIG_PIGMEM */
+
+#define PageZone(page) \
+	(zone_table[((page)->flags >> (BITS_PER_LONG - 5)) & 0x3UL])
+
+#define SetPageZone(page, zone_id)					\
+	do {								\
+		(page)->flags &= ~(0x3UL << (BITS_PER_LONG-5));		\
+		(page)->flags |= (zone_id & 0x3UL) << (BITS_PER_LONG-5);\
+	} while(0)
+
+#endif /* !CONFIG_PIGMEM */
+
 /*
  * Permanent address of a page. Obviously must never be
  * called on a highmem page.
  */
+#if defined(CONFIG_PIGMEM) || defined(CONFIG_HIGHMEM)
 #define page_address(page) ((page)->virtual)
+#else
+#define page_address(page) \
+	__va((((page) - PageZone(page)->zone_mem_map) << PAGE_SHIFT) \
+		+ PageZone(page)->zone_start_paddr)
+#endif
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
diff -urN linux-2.4.15-pre5-virgin/include/linux/mm.h linux-2.4.15-pre5-bootmem/include/linux/mm.h
--- linux-2.4.15-pre5-virgin/include/linux/mm.h	Mon Nov 19 13:07:08 2001
+++ linux-2.4.15-pre5-bootmem/include/linux/mm.h	Mon Nov 19 12:59:05 2001
@@ -161,9 +161,15 @@
 	wait_queue_head_t wait;		/* Page locked?  Stand in line... */
 	struct page **pprev_hash;	/* Complement to *next_hash. */
 	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */
+
+#if defined(CONFIG_PIGMEM) || defined(CONFIG_HIGHMEM)
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
+#endif
+
+#ifdef CONFIG_PIGMEM
 	struct zone_struct *zone;	/* Memory zone we are in. */
+#endif
 } mem_map_t;
 
 /*
@@ -337,6 +343,13 @@
 
 #define SetPageReserved(page)		set_bit(PG_reserved, &(page)->flags)
 #define ClearPageReserved(page)		clear_bit(PG_reserved, &(page)->flags)
+
+/*
+ * The PageZone and SetPageZone access methods must be defined
+ * elsewhere, in order to deal with arch-specific representation
+ * issues, for instance, the wordsize and needed number of zones
+ * to encode.
+ */
 
 /*
  * Error return values for the *_nopage functions
diff -urN linux-2.4.15-pre5-virgin/include/linux/mmzone.h linux-2.4.15-pre5-bootmem/include/linux/mmzone.h
--- linux-2.4.15-pre5-virgin/include/linux/mmzone.h	Mon Nov  5 12:42:13 2001
+++ linux-2.4.15-pre5-bootmem/include/linux/mmzone.h	Mon Nov 19 02:32:57 2001
@@ -62,6 +62,8 @@
 	unsigned long		size;
 } zone_t;
 
+extern zone_t *zone_table[];
+
 #define ZONE_DMA		0
 #define ZONE_NORMAL		1
 #define ZONE_HIGHMEM		2
diff -urN linux-2.4.15-pre5-virgin/mm/page_alloc.c linux-2.4.15-pre5-bootmem/mm/page_alloc.c
--- linux-2.4.15-pre5-virgin/mm/page_alloc.c	Fri Nov 16 19:36:11 2001
+++ linux-2.4.15-pre5-bootmem/mm/page_alloc.c	Mon Nov 19 12:55:58 2001
@@ -54,7 +54,14 @@
 /*
  * Temporary debugging check.
  */
-#define BAD_RANGE(zone,x) (((zone) != (x)->zone) || (((x)-mem_map) < (zone)->zone_start_mapnr) || (((x)-mem_map) >= (zone)->zone_start_mapnr+(zone)->size))
+
+#define BAD_RANGE(zone, page)						\
+(		((zone) != PageZone(page))				\
+	||	( (page) < (zone)->zone_mem_map)			\
+	||	(((page) - (zone)->zone_mem_map) >= (zone)->size)	\
+)
+
+zone_t *zone_table[MAX_NR_ZONES];
 
 /*
  * Buddy system. Hairy. You really aren't expected to understand this
@@ -90,7 +97,7 @@
 		goto local_freelist;
  back_local_freelist:
 
-	zone = page->zone;
+	zone = PageZone(page);
 
 	mask = (~0UL) << order;
 	base = zone->zone_mem_map;
@@ -255,7 +262,9 @@
 			entry = local_pages->next;
 			do {
 				tmp = list_entry(entry, struct page, list);
-				if (tmp->index == order && memclass(tmp->zone, classzone)) {
+				if (tmp->index == order
+					&& memclass(PageZone(tmp), classzone)) {
+
 					list_del(entry);
 					current->nr_local_pages--;
 					set_page_count(tmp, 1);
@@ -650,6 +659,8 @@
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		unsigned long size = zones_size[i];
 		totalpages += size;
+
+		zone_table[i] = pgdat->node_zones + i;
 	}
 	realtotalpages = totalpages;
 	if (zholes_size)
@@ -732,9 +743,12 @@
 
 		for (i = 0; i < size; i++) {
 			struct page *page = mem_map + offset + i;
-			page->zone = zone;
+			SetPageZone(page, j);
+
+#if defined(CONFIG_PIGMEM) || defined(CONFIG_HIGHMEM)
 			if (j != ZONE_HIGHMEM)
 				page->virtual = __va(zone_start_paddr);
+#endif
 			zone_start_paddr += PAGE_SIZE;
 		}
 
diff -urN linux-2.4.15-pre5-virgin/mm/vmscan.c linux-2.4.15-pre5-bootmem/mm/vmscan.c
--- linux-2.4.15-pre5-virgin/mm/vmscan.c	Mon Nov 19 13:07:08 2001
+++ linux-2.4.15-pre5-bootmem/mm/vmscan.c	Mon Nov 19 01:26:04 2001
@@ -58,7 +58,7 @@
 		return 0;
 
 	/* Don't bother replenishing zones not under pressure.. */
-	if (!memclass(page->zone, classzone))
+	if (!memclass(PageZone(page), classzone))
 		return 0;
 
 	if (TryLockPage(page))
@@ -225,7 +225,7 @@
 		count = swap_out_pmd(mm, vma, pmd, address, end, count, classzone);
 		if (!count)
 			break;
-		debug_lock_break(551);
+		debug_lock_break(1);
 #if 0
 		if (conditional_schedule_needed())
 			return count;
@@ -396,7 +396,7 @@
 		if (unlikely(!page_count(page)))
 			continue;
 
-		if (!memclass(page->zone, classzone))
+		if (!memclass(PageZone(page), classzone))
 			continue;
 
 		/* Racy check to avoid trylocking when not worthwhile */
--
Kernelnewbies: Help each other learn about the Linux kernel.
Archive:       http://mail.nl.linux.org/kernelnewbies/
IRC Channel:   irc.openprojects.net / #kernelnewbies
Web Page:      http://www.kernelnewbies.org/



[Index of Archives]     [Newbies FAQ]     [Linux Kernel Mentors]     [Linux Kernel Development]     [IETF Annouce]     [Git]     [Networking]     [Security]     [Bugtraq]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux RAID]     [Linux SCSI]     [Linux ACPI]
  Powered by Linux