Hi Andrew,

Today's linux-next merge of the akpm tree got a conflict in
include/linux/mm.h between commit 1dbd3d35fe64 ("mm/pgprot: Move the
pgprot_modify() fallback definition to mm.h") from the tip tree and
commit "mm, x86, pat: rework linear pfn-mmap tracking" from the akpm
tree.

I fixed it up (see below) and can carry the fix as necessary (no action
is required).

--
Cheers,
Stephen Rothwell
sfr@xxxxxxxxxxxxxxxx

diff --cc include/linux/mm.h
index 4adea2c,a66f646..0000000
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@@ -160,37 -160,6 +160,19 @@@ extern pgprot_t protection_map[16]
  #define FAULT_FLAG_TRIED	0x40	/* second try */
  
  /*
-  * This interface is used by x86 PAT code to identify a pfn mapping that is
-  * linear over entire vma. This is to optimize PAT code that deals with
-  * marking the physical region with a particular prot. This is not for generic
-  * mm use. Note also that this check will not work if the pfn mapping is
-  * linear for a vma starting at physical address 0. In which case PAT code
-  * falls back to slow path of reserving physical range page by page.
-  */
- static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
- {
- 	return !!(vma->vm_flags & VM_PFN_AT_MMAP);
- }
- 
- static inline int is_pfn_mapping(struct vm_area_struct *vma)
- {
- 	return !!(vma->vm_flags & VM_PFNMAP);
- }
- 
- /*
 + * Some architectures (such as x86) may need to preserve certain pgprot
 + * bits, without complicating generic pgprot code.
 + *
 + * Most architectures don't care:
 + */
 +#ifndef pgprot_modify
 +static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 +{
 +	return newprot;
 +}
 +#endif
 +
 +/*
   * vm_fault is filled by the the pagefault handler and passed to the vma's
   * ->fault function. The vma's ->fault is responsible for returning a bitmask
   * of VM_FAULT_xxx flags that give details about how the fault was handled.
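For reference, here is a minimal, self-contained sketch (not kernel code) of
the #ifndef override pattern that the relocated pgprot_modify() fallback
relies on: an architecture that defines its own pgprot_modify (plus a
same-named macro) before the generic definition is reached causes the
fallback to compile out. The pgprot_t stand-in, DEMO_PRESERVE_MASK and the
main() driver below are invented for illustration only.

/*
 * Demo of the #ifndef pgprot_modify override pattern.
 * Everything here is a stand-in; it is not the kernel's code.
 */
#include <stdio.h>

typedef unsigned long pgprot_t;		/* stand-in for the kernel type */

#define DEMO_PRESERVE_MASK	0x0f0UL	/* hypothetical bits to carry over */

/* "Arch" override: keep selected bits from the old protection value. */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return (oldprot & DEMO_PRESERVE_MASK) | newprot;
}

/*
 * Generic fallback, the same shape as the one moved into mm.h: it is only
 * compiled when no arch-specific pgprot_modify macro has been defined.
 */
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

int main(void)
{
	pgprot_t old = 0x0f3UL, new = 0x005UL;

	/* With the override in place, bits 0x0f0 survive from 'old'. */
	printf("modified prot: %#lx\n", pgprot_modify(old, new));
	return 0;
}

The point of the pattern is only that defining the pgprot_modify macro first
makes the #ifndef guard in mm.h a no-op; the real x86 override lives in the
arch headers.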