From: "Jose R. Santos" <jrsantos@austin.ibm.com>

I was looking at the recent patch for >MAX_ORDER hash tables, but it seems
that the patch limits the number of entries to what it thinks are good values,
and the i/dhash_entries cmdline options cannot exceed this.

This limits the usability of the patch on systems where allocations larger
than the ones the kernel calculates are desired.

- Make the ihash_entries and dhash_entries cmdline options behave like they
  used to.

- Remove MAX_SYS_HASH_TABLE_ORDER.  Limit the maximum table size to 1/16 of
  total memory (see the example below).
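
As a rough illustration (the figures here are hypothetical: 4GB of memory,
4KB pages, 16-byte buckets), the new cap from the patch works out to

	max = ((nr_all_pages << PAGE_SHIFT) / 16) / bucketsize
	    = (4GB / 16) / 16
	    = 16M entries

so an i/dhash_entries= value above 16M is still clamped, while anything below
it is now used (after the usual round-up to a power of two) instead of being
limited to the kernel's own estimate.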

Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-sparc64-akpm/include/linux/mmzone.h |   12 ---------
 25-sparc64-akpm/mm/page_alloc.c        |   43 ++++++++++++++++-----------------
 2 files changed, 21 insertions(+), 34 deletions(-)

diff -puN include/linux/mmzone.h~making-i-dhash_entries-cmdline-work-as-it-use-to include/linux/mmzone.h
--- 25-sparc64/include/linux/mmzone.h~making-i-dhash_entries-cmdline-work-as-it-use-to	2004-07-31 20:57:37.994483920 -0700
+++ 25-sparc64-akpm/include/linux/mmzone.h	2004-07-31 20:57:38.001482856 -0700
@@ -20,18 +20,6 @@
 #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
 #endif
 
-/*
- * system hash table size limits
- * - on large memory machines, we may want to allocate a bigger hash than that
- *   permitted by MAX_ORDER, so we allocate with the bootmem allocator, and are
- *   limited to this size
- */
-#if MAX_ORDER > 14
-#define MAX_SYS_HASH_TABLE_ORDER MAX_ORDER
-#else
-#define MAX_SYS_HASH_TABLE_ORDER 14
-#endif
-
 struct free_area {
 	struct list_head	free_list;
 	unsigned long		*map;
diff -puN mm/page_alloc.c~making-i-dhash_entries-cmdline-work-as-it-use-to mm/page_alloc.c
--- 25-sparc64/mm/page_alloc.c~making-i-dhash_entries-cmdline-work-as-it-use-to	2004-07-31 20:57:37.995483768 -0700
+++ 25-sparc64-akpm/mm/page_alloc.c	2004-07-31 20:57:38.003482552 -0700
@@ -1993,31 +1993,30 @@ void *__init alloc_large_system_hash(con
 				     unsigned int *_hash_shift,
 				     unsigned int *_hash_mask)
 {
-	unsigned long mem, max, log2qty, size;
+	unsigned long max, log2qty, size;
 	void *table;
 
-	/* round applicable memory size up to nearest megabyte */
-	mem = consider_highmem ? nr_all_pages : nr_kernel_pages;
-	mem += (1UL << (20 - PAGE_SHIFT)) - 1;
-	mem >>= 20 - PAGE_SHIFT;
-	mem <<= 20 - PAGE_SHIFT;
-
-	/* limit to 1 bucket per 2^scale bytes of low memory (rounded up to
-	 * nearest power of 2 in size) */
-	if (scale > PAGE_SHIFT)
-		mem >>= (scale - PAGE_SHIFT);
-	else
-		mem <<= (PAGE_SHIFT - scale);
-
-	mem = 1UL << (long_log2(mem) + 1);
-
-	/* limit allocation size */
-	max = (1UL << (PAGE_SHIFT + MAX_SYS_HASH_TABLE_ORDER)) / bucketsize;
-	if (max > mem)
-		max = mem;
-
 	/* allow the kernel cmdline to have a say */
-	if (!numentries || numentries > max)
+	if (!numentries) {
+		/* round applicable memory size up to nearest megabyte */
+		numentries = consider_highmem ? nr_all_pages : nr_kernel_pages;
+		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
+		numentries >>= 20 - PAGE_SHIFT;
+		numentries <<= 20 - PAGE_SHIFT;
+
+		/* limit to 1 bucket per 2^scale bytes of low memory */
+		if (scale > PAGE_SHIFT)
+			numentries >>= (scale - PAGE_SHIFT);
+		else
+			numentries <<= (PAGE_SHIFT - scale);
+	}
+	/* rounded up to nearest power of 2 in size */
+	numentries = 1UL << (long_log2(numentries) + 1);
+
+	/* limit allocation size to 1/16 total memory */
+	max = ((nr_all_pages << PAGE_SHIFT)/16) / bucketsize;
+
+	if (numentries > max)
 		numentries = max;
 
 	log2qty = long_log2(numentries);
_