On Tue, 2022-11-22 at 11:10 +0100, Peter Zijlstra wrote: > On Mon, Nov 21, 2022 at 01:26:32PM +1300, Kai Huang wrote: > > > +static int build_tdx_memory(void) > > +{ > > + unsigned long start_pfn, end_pfn; > > + int i, nid, ret; > > + > > + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { > > + /* > > + * The first 1MB may not be reported as TDX convertible > > + * memory. Manually exclude them from TDX memory. > > + * > > + * This is fine as the first 1MB is already reserved in > > + * reserve_real_mode() and won't end up in ZONE_DMA as > > + * free pages anyway. > > + */ > > + start_pfn = max(start_pfn, (unsigned long)SZ_1M >> PAGE_SHIFT); > > + if (start_pfn >= end_pfn) > > + continue; > > + > > + /* Verify memory is truly TDX convertible memory */ > > + if (!pfn_range_covered_by_cmr(start_pfn, end_pfn)) { > > + pr_info("Memory region [0x%lx, 0x%lx) is not TDX convertible memory.\n", > > + start_pfn << PAGE_SHIFT, > > + end_pfn << PAGE_SHIFT); > > + return -EINVAL; > Given how tdx_cc_memory_compatible() below relies on tdx_memlist being > empty; this error path is wrong and should goto err. Oops. Thanks for catching. Also thanks for review! Today is too late for me and I'll catch up with others tomorrow. > > > + } > > + > > + /* > > + * Add the memory regions as TDX memory. The regions in > > + * memblock have already guaranteed they are in address > > + * ascending order and don't overlap. 
> > + */ > > + ret = add_tdx_memblock(start_pfn, end_pfn, nid); > > + if (ret) > > + goto err; > > + } > > + > > + return 0; > > +err: > > + free_tdx_memory(); > > + return ret; > > +} > > > +bool tdx_cc_memory_compatible(unsigned long start_pfn, unsigned long end_pfn) > > +{ > > + struct tdx_memblock *tmb; > > + > > + /* Empty list means TDX isn't enabled successfully */ > > + if (list_empty(&tdx_memlist)) > > + return true; > > + > > + list_for_each_entry(tmb, &tdx_memlist, list) { > > + /* > > + * The new range is TDX memory if it is fully covered > > + * by any TDX memory block. > > + */ > > + if (start_pfn >= tmb->start_pfn && end_pfn <= tmb->end_pfn) > > + return true; > > + } > > + return false; > > +} >