Re: [PATCH v2 0/6] mm/memory_hotplug: Consider all zones when removing memory

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On 26.08.19 18:20, David Hildenbrand wrote:
> On 26.08.19 18:01, Aneesh Kumar K.V wrote:
>> On 8/26/19 9:13 PM, David Hildenbrand wrote:
>>> On 26.08.19 16:53, Aneesh Kumar K.V wrote:
>>>> David Hildenbrand <david@xxxxxxxxxx> writes:
>>>>
>>>>>
>>
>> ....
>>
>>>>
>>>> I did report a variant of the issue at
>>>>
>>>> https://lore.kernel.org/linux-mm/20190514025354.9108-1-aneesh.kumar@xxxxxxxxxxxxx/
>>>>
>>>> This patch series still doesn't handle the fact that struct page backing
>>>> the start_pfn might not be initialized. ie, it results in crash like
>>>> below
>>>
>>> Okay, that's a related but different issue I think.
>>>
>>> I can see that current shrink_zone_span() might read-access the
>>> uninitialized struct page of a PFN if
>>>
>>> 1. The zone has holes and we check for "zone all holes". If we get
>>> pfn_valid(pfn), we check if "page_zone(pfn_to_page(pfn)) != zone".
>>>
>>> 2. Via find_smallest_section_pfn() / find_biggest_section_pfn() find a
>>> spanned pfn_valid(). We check
>>> - pfn_to_nid(start_pfn) != nid
>>> - zone != page_zone(pfn_to_page(start_pfn)
>>>
>>> So we don't actually use the zone/nid, only use it to sanity check. That
>>> might result in false-positives (not that bad).
>>>
>>> It all boils down to shrink_zone_span() not working only on active
>>> memory, for which the PFN is not only valid but also initialized
>>> (something for which we need a new section flag I assume).
>>>
>>> Which access triggers the issue you describe? pfn_to_nid()?
>>>
>>>>
>>>>      pc: c0000000004bc1ec: shrink_zone_span+0x1bc/0x290
>>>>      lr: c0000000004bc1e8: shrink_zone_span+0x1b8/0x290
>>>>      sp: c0000000dac7f910
>>>>     msr: 800000000282b033
>>>>    current = 0xc0000000da2fa000
>>>>    paca    = 0xc00000000fffb300   irqmask: 0x03   irq_happened: 0x01
>>>>      pid   = 1224, comm = ndctl
>>>> kernel BUG at /home/kvaneesh/src/linux/include/linux/mm.h:1088!
>>>> Linux version 5.3.0-rc6-17495-gc7727d815970-dirty (kvaneesh@ltc-boston123) (gcc version 7.4.0 (Ubuntu 7.4.0-1ubuntu1~18.04.1)) #183 SMP Mon Aug 26 09:37:32 CDT 2019
>>>> enter ? for help
>>>
>>> Which exact kernel BUG are you hitting here? (my tree doesn't seem to
>>> have any BUG statement around include/linux/mm.h:1088). 
>>
>>
>>
>> This is against upstream linus with your patches applied.
> 
> I'm
> 
>>
>>
>> static inline int page_to_nid(const struct page *page)
>> {
>> 	struct page *p = (struct page *)page;
>>
>> 	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
>> }
>>
>>
>> #define PF_POISONED_CHECK(page) ({					\
>> 		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
>> 		page; })
>> #
>>
>>
>> It is the node id access.
> 
> Ah, right. A temporary hack would be to assume in these functions
> (shrink_zone_span() and friends) that we might have invalid NIDs /
> zone numbers and simply skip these. After all we're only using them for
> finding zone boundaries. Not what we ultimately want, but I think until
> we have a proper SECTION_ACTIVE, it might take a while.
> 

I am talking about something as hacky as this:

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8d1c7313ab3f..57ed3dd76a4f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1099,6 +1099,7 @@ static inline int page_zone_id(struct page *page)

 #ifdef NODE_NOT_IN_PAGE_FLAGS
 extern int page_to_nid(const struct page *page);
+#define __page_to_nid page_to_nid
 #else
 static inline int page_to_nid(const struct page *page)
 {
@@ -1106,6 +1107,10 @@ static inline int page_to_nid(const struct page
*page)

 	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
 }
+static inline int __page_to_nid(const struct page *page)
+{
+	return ((page)->flags >> NODES_PGSHIFT) & NODES_MASK;
+}
 #endif

 #ifdef CONFIG_NUMA_BALANCING
@@ -1249,6 +1254,12 @@ static inline struct zone *page_zone(const struct
page *page)
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 }

+static inline struct zone *__page_zone(const struct page *page)
+{
+	return &NODE_DATA(__page_to_nid(page))->node_zones[page_zonenum(page)];
+}
+
+
 static inline pg_data_t *page_pgdat(const struct page *page)
 {
 	return NODE_DATA(page_to_nid(page));
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 49ca3364eb70..378b593d1fe1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -334,10 +334,10 @@ static unsigned long find_smallest_section_pfn(int
nid, struct zone *zone,
 		if (unlikely(!pfn_valid(start_pfn)))
 			continue;

-		if (unlikely(pfn_to_nid(start_pfn) != nid))
+		/* We might have uninitialized memmaps */
+		if (unlikely(__page_to_nid(pfn_to_page(start_pfn)) != nid))
 			continue;
-
-		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
+		if (zone && zone != __page_zone(pfn_to_page(start_pfn)))
 			continue;

 		return start_pfn;
@@ -359,10 +359,10 @@ static unsigned long find_biggest_section_pfn(int
nid, struct zone *zone,
 		if (unlikely(!pfn_valid(pfn)))
 			continue;

-		if (unlikely(pfn_to_nid(pfn) != nid))
+		/* We might have uninitialized memmaps */
+		if (unlikely(__page_to_nid(pfn_to_page(pfn)) != nid))
 			continue;
-
-		if (zone && zone != page_zone(pfn_to_page(pfn)))
+		if (zone && zone != __page_zone(pfn_to_page(pfn)))
 			continue;

 		return pfn;
@@ -418,7 +418,10 @@ static void shrink_zone_span(struct zone *zone,
unsigned long start_pfn,
 		if (unlikely(!pfn_valid(pfn)))
 			continue;

-		if (page_zone(pfn_to_page(pfn)) != zone)
+		/* We might have uninitialized memmaps */
+		if (unlikely(__page_to_nid(pfn_to_page(pfn)) != nid))
+			continue;
+		if (__page_zone(pfn_to_page(pfn)) != zone)
 			continue;

 		/* Skip range to be removed */
@@ -483,7 +486,8 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
 		if (unlikely(!pfn_valid(pfn)))
 			continue;

-		if (pfn_to_nid(pfn) != nid)
+		/* We might have uninitialized memmaps */
+		if (unlikely(__page_to_nid(pfn_to_page(pfn)) != nid))
 			continue;

 		/* Skip range to be removed */

-- 

Thanks,

David / dhildenb




[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux