x86/uaccess: Avoid barrier_nospec() in 64-bit copy_from_user()
The barrier_nospec() in 64-bit copy_from_user() is slow. Instead use
pointer masking to force the user pointer to all 1's for an invalid
address.

The kernel test robot reports a 2.6% improvement in the per_thread_ops
benchmark [1].

This is a variation on a patch originally by Josh Poimboeuf [2].

Link: https://lore.kernel.org/202410281344.d02c72a2-oliver.sang@intel.com [1]
Link: https://lore.kernel.org/5b887fe4c580214900e21f6c61095adf9a142735.1730166635.git.jpoimboe@kernel.org [2]
Tested-and-reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
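For intuition, here is a minimal userspace sketch of the masking trick. It assumes the usual x86-64 address-space split (user pointers have the sign bit clear, kernel addresses have it set); the helper name echoes the kernel's mask_user_address(), but the body is illustrative, not the kernel's exact implementation:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative only: an arithmetic right shift replicates the sign
 * bit, giving 0 for a user-half pointer and ~0 for a kernel-half
 * pointer. OR-ing that mask back in leaves user pointers unchanged
 * and forces anything in the kernel half to the all-ones address,
 * which faults even under speculation, so no barrier is needed.
 */
static uint64_t mask_user_address(uint64_t ptr)
{
	return ptr | (uint64_t)((int64_t)ptr >> 63);
}

int main(void)
{
	/* User-half pointer: passes through unchanged. */
	printf("%#llx\n", (unsigned long long)mask_user_address(0x00007fffdeadbeefULL));
	/* Kernel-half pointer: forced to all 1's. */
	printf("%#llx\n", (unsigned long long)mask_user_address(0xffff888000000000ULL));
	return 0;
}

The branchless form is the point: a conditional check is itself a speculation target, whereas the mask is computed as data from the pointer value.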
parent 14b7d43c5c
commit 0fc810ae3a

1 changed file with 15 additions and 6 deletions
include/linux/uaccess.h

@@ -38,6 +38,7 @@
 #else
 #define can_do_masked_user_access() 0
 #define masked_user_access_begin(src) NULL
+#define mask_user_address(src) (src)
 #endif
 
 /*
@@ -159,19 +160,27 @@ _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
 	might_fault();
-	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+	if (should_fail_usercopy())
+		goto fail;
+	if (can_do_masked_user_access())
+		from = mask_user_address(from);
+	else {
+		if (!access_ok(from, n))
+			goto fail;
 		/*
 		 * Ensure that bad access_ok() speculation will not
 		 * lead to nasty side effects *after* the copy is
 		 * finished:
 		 */
 		barrier_nospec();
-		instrument_copy_from_user_before(to, from, n);
-		res = raw_copy_from_user(to, from, n);
-		instrument_copy_from_user_after(to, from, n, res);
 	}
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
+	instrument_copy_from_user_before(to, from, n);
+	res = raw_copy_from_user(to, from, n);
+	instrument_copy_from_user_after(to, from, n, res);
+	if (likely(!res))
+		return 0;
+fail:
+	memset(to + (n - res), 0, res);
 	return res;
 }
 
 extern __must_check unsigned long
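For context, _inline_copy_from_user() sits behind copy_from_user(), so every caller of the usual copy-in pattern benefits from the cheaper fast path. A hypothetical caller, with my_dev_ioctl() and struct my_params made up for illustration:

#include <linux/fs.h>
#include <linux/uaccess.h>

struct my_params {		/* made-up parameter block */
	u32 flags;
	u64 addr;
};

/*
 * Hypothetical ioctl handler showing the pattern this fast path
 * serves. copy_from_user() returns the number of bytes it could NOT
 * copy; any nonzero result is reported to userspace as -EFAULT.
 */
static long my_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct my_params p;

	if (copy_from_user(&p, (const void __user *)arg, sizeof(p)))
		return -EFAULT;
	/* ... validate and act on p ... */
	return 0;
}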