The patch titled
     Apply type enum zone_type
has been removed from the -mm tree.  Its filename is
     apply-type-enum-zone_type.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
Subject: Apply type enum zone_type
From: Christoph Lameter <clameter@xxxxxxx>

After we have done this we can now do some typing cleanup.

The memory policy layer keeps a policy_zone that specifies the zone that
gets memory policies applied.  This variable can now be of type enum
zone_type.

The check_highest_zone function and the build_zonelists function must
then also take an enum zone_type parameter.

Plus there are a number of loops over zones that also should use
zone_type.

We run into some troubles at some points with functions that need a
zone_type variable to become -1.  Fix that up (a small standalone sketch
of this loop rewrite is appended at the end of this mail).

[pj@xxxxxxx: fix set_mempolicy() crash]
Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Paul Jackson <pj@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 include/linux/mempolicy.h |    4 ++--
 mm/mempolicy.c            |   11 ++++++++---
 mm/page_alloc.c           |   27 +++++++++++++++++----------
 3 files changed, 27 insertions(+), 15 deletions(-)

diff -puN include/linux/mempolicy.h~apply-type-enum-zone_type include/linux/mempolicy.h
--- a/include/linux/mempolicy.h~apply-type-enum-zone_type
+++ a/include/linux/mempolicy.h
@@ -162,9 +162,9 @@ extern struct zonelist *huge_zonelist(st
 		unsigned long addr);
 extern unsigned slab_node(struct mempolicy *policy);
 
-extern int policy_zone;
+extern enum zone_type policy_zone;
 
-static inline void check_highest_zone(int k)
+static inline void check_highest_zone(enum zone_type k)
 {
 	if (k > policy_zone)
 		policy_zone = k;
diff -puN mm/mempolicy.c~apply-type-enum-zone_type mm/mempolicy.c
--- a/mm/mempolicy.c~apply-type-enum-zone_type
+++ a/mm/mempolicy.c
@@ -105,7 +105,7 @@ static struct kmem_cache *sn_cache;
 
 /* Highest zone. An specific allocation for a zone below that is not
    policied. */
-int policy_zone = ZONE_DMA;
+enum zone_type policy_zone = ZONE_DMA;
 
 struct mempolicy default_policy = {
 	.refcnt = ATOMIC_INIT(1), /* never free it */
@@ -137,7 +137,8 @@ static int mpol_check_policy(int mode, n
 static struct zonelist *bind_zonelist(nodemask_t *nodes)
 {
 	struct zonelist *zl;
-	int num, max, nd, k;
+	int num, max, nd;
+	enum zone_type k;
 
 	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
 	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
@@ -148,12 +149,16 @@ static struct zonelist *bind_zonelist(no
 	   lower zones etc. Avoid empty zones because the memory allocator
 	   doesn't like them. If you implement node hot removal you have to fix that.
 	   */
-	for (k = policy_zone; k >= 0; k--) {
+	k = policy_zone;
+	while (1) {
 		for_each_node_mask(nd, *nodes) {
 			struct zone *z = &NODE_DATA(nd)->node_zones[k];
 			if (z->present_pages > 0)
 				zl->zones[num++] = z;
 		}
+		if (k == 0)
+			break;
+		k--;
 	}
 	zl->zones[num] = NULL;
 	return zl;
diff -puN mm/page_alloc.c~apply-type-enum-zone_type mm/page_alloc.c
--- a/mm/page_alloc.c~apply-type-enum-zone_type
+++ a/mm/page_alloc.c
@@ -637,7 +637,8 @@ static int rmqueue_bulk(struct zone *zon
  */
 void drain_node_pages(int nodeid)
 {
-	int i, z;
+	int i;
+	enum zone_type z;
 	unsigned long flags;
 
 	for (z = 0; z < MAX_NR_ZONES; z++) {
@@ -1158,7 +1159,8 @@ EXPORT_SYMBOL(nr_free_pages);
 #ifdef CONFIG_NUMA
 unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
 {
-	unsigned int i, sum = 0;
+	unsigned int sum = 0;
+	enum zone_type i;
 
 	for (i = 0; i < MAX_NR_ZONES; i++)
 		sum += pgdat->node_zones[i].free_pages;
@@ -1358,21 +1360,22 @@ void show_free_areas(void)
  * Add all populated zones of a node to the zonelist.
  */
 static int __meminit build_zonelists_node(pg_data_t *pgdat,
-			struct zonelist *zonelist, int nr_zones, int zone_type)
+			struct zonelist *zonelist, int nr_zones, enum zone_type zone_type)
 {
 	struct zone *zone;
 
 	BUG_ON(zone_type >= MAX_NR_ZONES);
+	zone_type++;
 
 	do {
+		zone_type--;
 		zone = pgdat->node_zones + zone_type;
 		if (populated_zone(zone)) {
 			zonelist->zones[nr_zones++] = zone;
 			check_highest_zone(zone_type);
 		}
-		zone_type--;
 
-	} while (zone_type >= 0);
+	} while (zone_type);
 	return nr_zones;
 }
 
@@ -1441,10 +1444,11 @@ static int __meminit find_next_best_node
 
 static void __meminit build_zonelists(pg_data_t *pgdat)
 {
-	int i, j, k, node, local_node;
+	int i, j, node, local_node;
 	int prev_node, load;
 	struct zonelist *zonelist;
 	nodemask_t used_mask;
+	enum zone_type k;
 
 	/* initialize zonelists */
 	for (i = 0; i < GFP_ZONETYPES; i++) {
@@ -1628,7 +1632,7 @@ static void __init calculate_zone_totalp
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long realtotalpages, totalpages = 0;
-	int i;
+	enum zone_type i;
 
 	for (i = 0; i < MAX_NR_ZONES; i++)
 		totalpages += zones_size[i];
@@ -2116,7 +2120,7 @@ static void calculate_totalreserve_pages
 {
 	struct pglist_data *pgdat;
 	unsigned long reserve_pages = 0;
-	int i, j;
+	enum zone_type i, j;
 
 	for_each_online_pgdat(pgdat) {
 		for (i = 0; i < MAX_NR_ZONES; i++) {
@@ -2149,7 +2153,7 @@ static void calculate_totalreserve_pages
 static void setup_per_zone_lowmem_reserve(void)
 {
 	struct pglist_data *pgdat;
-	int j, idx;
+	enum zone_type j, idx;
 
 	for_each_online_pgdat(pgdat) {
 		for (j = 0; j < MAX_NR_ZONES; j++) {
@@ -2158,9 +2162,12 @@ static void setup_per_zone_lowmem_reserv
 
 			zone->lowmem_reserve[j] = 0;
 
-			for (idx = j-1; idx >= 0; idx--) {
+			idx = j;
+			while (idx) {
 				struct zone *lower_zone;
 
+				idx--;
+
 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
 					sysctl_lowmem_reserve_ratio[idx] = 1;
 
_

Patches currently in -mm which might be from clameter@xxxxxxx are

origin.patch
git-ia64.patch
slab-fix-kmalloc_node-applying-memory-policies-if-nodeid-==-numa_node_id.patch
add-numa_build-definition-in-kernelh-to-avoid-ifdef.patch
disable-gfp_thisnode-in-the-non-numa-case.patch
gfp_thisnode-for-the-slab-allocator-v2.patch
gfp_thisnode-for-the-slab-allocator-v2-fix-3.patch
add-node-to-zone-for-the-numa-case.patch
add-node-to-zone-for-the-numa-case-fix.patch
get-rid-of-zone_table.patch
get-rid-of-zone_table-fix.patch
do-not-allocate-pagesets-for-unpopulated-zones.patch
zone_statistics-use-hot-node-instead-of-cold-zone_pgdat.patch
deal-with-cases-of-zone_dma-meaning-the-first-zone.patch
introduce-config_zone_dma.patch
optional-zone_dma-in-the-vm.patch
optional-zone_dma-for-i386.patch
optional-zone_dma-for-x86_64.patch
optional-zone_dma-for-ia64.patch
remove-zone_dma-remains-from-parisc.patch
remove-zone_dma-remains-from-sh-sh64.patch
radix-tree-rcu-lockless-readside.patch
scheduler-numa-aware-placement-of-sched_group_allnodes.patch
zvc-support-nr_slab_reclaimable--nr_slab_unreclaimable-swap_prefetch.patch
reduce-max_nr_zones-swap_prefetch-remove-incorrect-use-of-zone_highmem.patch
numa-add-zone_to_nid-function-swap_prefetch.patch
readahead-state-based-method-aging-accounting-apply-type-enum-zone_type-readahead.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
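[Editor's illustration, not part of the patch: the loop rewrites above all stem
from the fact that gcc typically gives an enum with no negative enumerators an
unsigned underlying type, so a countdown such as
"for (k = policy_zone; k >= 0; k--)" cannot terminate once k is an
enum zone_type; k wraps around instead of reaching -1.  The minimal standalone
sketch below shows the two decrement-safe loop shapes the patch uses.  The
my_zone_type and visit() names are hypothetical and exist only for this
example.]

#include <stdio.h>

/* Hypothetical stand-in for the kernel's enum zone_type. */
enum my_zone_type { MY_ZONE_DMA, MY_ZONE_NORMAL, MY_ZONE_HIGHMEM };

static void visit(enum my_zone_type k)
{
	printf("visiting zone %d\n", (int)k);
}

int main(void)
{
	enum my_zone_type k;

	/*
	 * Shape 1 (as in bind_zonelist): test for zero before decrementing,
	 * so k never has to hold -1.
	 */
	k = MY_ZONE_HIGHMEM;
	while (1) {
		visit(k);
		if (k == 0)
			break;
		k--;
	}

	/*
	 * Shape 2 (as in build_zonelists_node and
	 * setup_per_zone_lowmem_reserve): bias the counter up by one,
	 * decrement at the top of the body, stop when it reaches zero.
	 */
	k = MY_ZONE_HIGHMEM + 1;
	do {
		k--;
		visit(k);
	} while (k);

	return 0;
}

[Both loops walk from MY_ZONE_HIGHMEM down to MY_ZONE_DMA without the counter
ever needing to go negative.]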