[merged] mm-sparsemem-cleanup-section-number-data-types.patch removed from -mm tree

The patch titled
     Subject: mm/sparsemem: cleanup 'section number' data types
has been removed from the -mm tree.  Its filename was
     mm-sparsemem-cleanup-section-number-data-types.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
From: Dan Williams <dan.j.williams@xxxxxxxxx>
Subject: mm/sparsemem: cleanup 'section number' data types

David points out that there is a mixture of 'int' and 'unsigned long'
usage for section number data types.  Update the memory hotplug path to
use 'unsigned long' consistently for section numbers.
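For readers unfamiliar with the sparsemem arithmetic behind these loops, below
is a minimal stand-alone sketch (not part of the patch) of how a section number
is derived from a pfn and how a pfn range is walked section by section.  The
PAGE_SHIFT/SECTION_SIZE_BITS values, the open-coded min(), and the example pfn
range are illustrative assumptions for a user-space demo, not the kernel's own
definitions; only the shape of the calculation mirrors the code changed here.

#include <stdio.h>

#define PAGE_SHIFT		12	/* 4 KiB pages (illustrative assumption) */
#define SECTION_SIZE_BITS	27	/* 128 MiB sections (illustrative assumption) */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))
#define min(a, b)		((a) < (b) ? (a) : (b))

/* Same shape as the kernel helper: pfn in, section number out, both unsigned long. */
static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x100000;			/* arbitrary starting pfn */
	unsigned long nr_pages = 3 * PAGES_PER_SECTION / 2;	/* spans two sections */
	unsigned long nr, start_sec, end_sec;

	start_sec = pfn_to_section_nr(pfn);
	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);

	/* Walk the range section by section, the same way the hotplug loops do. */
	for (nr = start_sec; nr <= end_sec; nr++) {
		unsigned long pfns = min(nr_pages,
				PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));

		printf("sec: %lu pfns: %lu\n", nr, pfns);
		pfn += pfns;
		nr_pages -= pfns;
	}
	return 0;
}

Since a pfn is already an unsigned long, pfn >> PFN_SECTION_SHIFT is naturally
unsigned long as well, which is why the loop counters below are widened rather
than left as int.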

[akpm@xxxxxxxxxxxxxxxxxxxx: fix printk format]
Link: http://lkml.kernel.org/r/156107543656.1329419.11505835211949439815.stgit@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Dan Williams <dan.j.williams@xxxxxxxxx>
Reported-by: David Hildenbrand <david@xxxxxxxxxx>
Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/memory_hotplug.c |   10 +++++-----
 mm/sparse.c         |    8 ++++----
 2 files changed, 9 insertions(+), 9 deletions(-)

--- a/mm/memory_hotplug.c~mm-sparsemem-cleanup-section-number-data-types
+++ a/mm/memory_hotplug.c
@@ -288,8 +288,8 @@ static int check_pfn_span(unsigned long
 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 		struct mhp_restrictions *restrictions)
 {
-	unsigned long i;
-	int start_sec, end_sec, err;
+	int err;
+	unsigned long nr, start_sec, end_sec;
 	struct vmem_altmap *altmap = restrictions->altmap;
 
 	if (altmap) {
@@ -310,7 +310,7 @@ int __ref __add_pages(int nid, unsigned
 
 	start_sec = pfn_to_section_nr(pfn);
 	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-	for (i = start_sec; i <= end_sec; i++) {
+	for (nr = start_sec; nr <= end_sec; nr++) {
 		unsigned long pfns;
 
 		pfns = min(nr_pages, PAGES_PER_SECTION
@@ -541,7 +541,7 @@ void __remove_pages(struct zone *zone, u
 		    unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	unsigned long map_offset = 0;
-	int i, start_sec, end_sec;
+	unsigned long nr, start_sec, end_sec;
 
 	map_offset = vmem_altmap_offset(altmap);
 
@@ -552,7 +552,7 @@ void __remove_pages(struct zone *zone, u
 
 	start_sec = pfn_to_section_nr(pfn);
 	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-	for (i = start_sec; i <= end_sec; i++) {
+	for (nr = start_sec; nr <= end_sec; nr++) {
 		unsigned long pfns;
 
 		cond_resched();
--- a/mm/sparse.c~mm-sparsemem-cleanup-section-number-data-types
+++ a/mm/sparse.c
@@ -229,21 +229,21 @@ void subsection_mask_set(unsigned long *
 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
 {
 	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-	int i, start_sec = pfn_to_section_nr(pfn);
+	unsigned long nr, start_sec = pfn_to_section_nr(pfn);
 
 	if (!nr_pages)
 		return;
 
-	for (i = start_sec; i <= end_sec; i++) {
+	for (nr = start_sec; nr <= end_sec; nr++) {
 		struct mem_section *ms;
 		unsigned long pfns;
 
 		pfns = min(nr_pages, PAGES_PER_SECTION
 				- (pfn & ~PAGE_SECTION_MASK));
-		ms = __nr_to_section(i);
+		ms = __nr_to_section(nr);
 		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
 
-		pr_debug("%s: sec: %d pfns: %ld set(%d, %d)\n", __func__, i,
+		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
 				pfns, subsection_map_index(pfn),
 				subsection_map_index(pfn + pfns - 1));
 
_

Patches currently in -mm which might be from dan.j.williams@xxxxxxxxx are




