mirror of
https://github.com/torvalds/linux.git
synced 2024-11-01 04:53:36 +01:00
fork: only invoke khugepaged, ksm hooks if no error
There is no reason to invoke these hooks early against an mm that is in an incomplete state. The change in commit d240629148
("fork: use __mt_dup() to duplicate maple tree in dup_mmap()") makes this more pertinent as we may be in a state where entries in the maple tree are not yet consistent. Their placement early in dup_mmap() only appears to have been meaningful for early error checking, and since functionally it'd require a very small allocation to fail (in practice 'too small to fail') that'd only occur in the most dire circumstances, meaning the fork would fail or be OOM'd in any case. Since both khugepaged and KSM tracking are there to provide optimisations to memory performance rather than critical functionality, it doesn't really matter all that much if, under such dire memory pressure, we fail to register an mm with these. As a result, we follow the example of commit d2081b2bf8
("mm: khugepaged: make khugepaged_enter() void function") and make ksm_fork() a void function also. We only expose the mm to these functions once we are done with them and only if no error occurred in the fork operation. Link: https://lkml.kernel.org/r/e0cb8b840c9d1d5a6e84d4f8eff5f3f2022aa10c.1729014377.git.lorenzo.stoakes@oracle.com Fixes: d240629148
("fork: use __mt_dup() to duplicate maple tree in dup_mmap()") Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Reported-by: Jann Horn <jannh@google.com> Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com> Reviewed-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Jann Horn <jannh@google.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Christian Brauner <brauner@kernel.org> Cc: Jan Kara <jack@suse.cz> Cc: Linus Torvalds <torvalds@linuxfoundation.org> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
f64e67e5d3
commit
985da552a9
2 changed files with 6 additions and 11 deletions
|
@ -54,12 +54,11 @@ static inline long mm_ksm_zero_pages(struct mm_struct *mm)
|
|||
return atomic_long_read(&mm->ksm_zero_pages);
|
||||
}
|
||||
|
||||
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
||||
static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
||||
{
|
||||
/* Adding mm to ksm is best effort on fork. */
|
||||
if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
|
||||
return __ksm_enter(mm);
|
||||
|
||||
return 0;
|
||||
__ksm_enter(mm);
|
||||
}
|
||||
|
||||
static inline int ksm_execve(struct mm_struct *mm)
|
||||
|
@ -107,9 +106,8 @@ static inline int ksm_disable(struct mm_struct *mm)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
||||
static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ksm_execve(struct mm_struct *mm)
|
||||
|
|
|
@ -653,11 +653,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
|
|||
mm->exec_vm = oldmm->exec_vm;
|
||||
mm->stack_vm = oldmm->stack_vm;
|
||||
|
||||
retval = ksm_fork(mm, oldmm);
|
||||
if (retval)
|
||||
goto out;
|
||||
khugepaged_fork(mm, oldmm);
|
||||
|
||||
/* Use __mt_dup() to efficiently build an identical maple tree. */
|
||||
retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
|
||||
if (unlikely(retval))
|
||||
|
@ -760,6 +755,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
|
|||
vma_iter_free(&vmi);
|
||||
if (!retval) {
|
||||
mt_set_in_rcu(vmi.mas.tree);
|
||||
ksm_fork(mm, oldmm);
|
||||
khugepaged_fork(mm, oldmm);
|
||||
} else if (mpnt) {
|
||||
/*
|
||||
* The entire maple tree has already been duplicated. If the
|
||||
|
|
Loading…
Reference in a new issue