Mirror of https://github.com/torvalds/linux.git, synced 2024-11-01 04:53:36 +01:00
tools/sched_ext: Receive misc updates from SCX repo
Receive misc tools/sched_ext updates from https://github.com/sched-ext/scx
to sync userspace bits.

- LSP macros to help language servers.
- bpf_cpumask_weight() declaration and cast_mask() helper.
- Cosmetic updates to scx_flatcg.bpf.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
parent 1e123fd73d
commit a748db0c8c
3 changed files with 21 additions and 3 deletions
tools/sched_ext/include/scx/common.bpf.h
@@ -7,7 +7,13 @@
 #ifndef __SCX_COMMON_BPF_H
 #define __SCX_COMMON_BPF_H
 
+#ifdef LSP
+#define __bpf__
+#include "../vmlinux/vmlinux.h"
+#else
 #include "vmlinux.h"
+#endif
+
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 #include <asm-generic/errno.h>
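Why the LSP branch also defines __bpf__: clang predefines that macro only when compiling for the BPF target, and shared SCX headers (see user_exit_info.h below) use it to separate their BPF-side declarations from the user-space side. A language server parsing the file as ordinary host C would otherwise hide the BPF-only code from the editor. A minimal illustrative sketch of that pattern, not taken from this commit:

	#ifdef __bpf__
	/* BPF-program side: kfunc declarations, UEI_DEFINE(), etc. */
	void bpf_rcu_read_lock(void) __ksym;
	#else
	/* user-space side: helpers the loader uses to read shared state */
	#endif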
@@ -309,6 +315,15 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym
 u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
 u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
 				   const struct cpumask *src2) __ksym;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
+
+/*
+ * Access a cpumask in read-only mode (typically to check bits).
+ */
+const struct cpumask *cast_mask(struct bpf_cpumask *mask)
+{
+	return (const struct cpumask *)mask;
+}
 
 /* rcu */
 void bpf_rcu_read_lock(void) __ksym;
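A hedged usage sketch of the two additions, loosely modeled on how the in-tree schedulers drive the cpumask kfuncs. The op name and the surrounding scheduler are hypothetical; only cast_mask() and bpf_cpumask_weight() come from this diff:

	#include <scx/common.bpf.h>

	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
			   s32 prev_cpu, u64 wake_flags)
	{
		struct bpf_cpumask *mask = bpf_cpumask_create();
		s32 cpu = prev_cpu;

		if (!mask)
			return prev_cpu;

		/* work on a private copy of the task's allowed CPUs */
		bpf_cpumask_copy(mask, p->cpus_ptr);

		/*
		 * cast_mask() turns the trusted bpf_cpumask into the plain
		 * 'const struct cpumask *' that read-only kfuncs such as
		 * bpf_cpumask_weight() expect.
		 */
		if (bpf_cpumask_weight(cast_mask(mask)) > 1)
			cpu = bpf_cpumask_any_distribute(cast_mask(mask));

		bpf_cpumask_release(mask);
		return cpu;
	}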
tools/sched_ext/include/scx/user_exit_info.h
@@ -25,7 +25,11 @@ struct user_exit_info {
 
 #ifdef __bpf__
 
+#ifdef LSP
+#include "../vmlinux/vmlinux.h"
+#else
 #include "vmlinux.h"
+#endif
 #include <bpf/bpf_core_read.h>
 
 #define UEI_DEFINE(__name) \
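For reference, the UEI macros this header provides are used by the BPF side of the schedulers roughly as follows (patterned after scx_simple.bpf.c; a sketch, not part of this diff):

	#include <scx/common.bpf.h>

	UEI_DEFINE(uei);		/* exit-info buffer the user-space loader reads */

	void BPF_STRUCT_OPS(example_exit, struct scx_exit_info *ei)
	{
		UEI_RECORD(uei, ei);	/* capture the exit kind, reason and message */
	}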
tools/sched_ext/scx_flatcg.bpf.c
@@ -225,7 +225,7 @@ static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
 			break;
 
 		/*
-		 * We can be oppotunistic here and not grab the
+		 * We can be opportunistic here and not grab the
 		 * cgv_tree_lock and deal with the occasional races.
 		 * However, hweight updates are already cached and
 		 * relatively low-frequency. Let's just do the
@@ -258,8 +258,7 @@ static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
 	 * and thus can't be updated and repositioned. Instead, we collect the
 	 * vtime deltas separately and apply it asynchronously here.
 	 */
-	delta = cgc->cvtime_delta;
-	__sync_fetch_and_sub(&cgc->cvtime_delta, delta);
+	delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
 	cvtime = cgv_node->cvtime + delta;
 
 	/*
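The folded form relies on __sync_fetch_and_sub() returning the value the variable held before the subtraction, so the read and the consume collapse into one expression. A small stand-alone illustration of that return-value behaviour (plain user-space C, not from the commit):

	#include <stdio.h>

	static unsigned long long cvtime_delta = 300;

	int main(void)
	{
		/* returns the old value (300) and subtracts it, leaving 0 */
		unsigned long long delta =
			__sync_fetch_and_sub(&cvtime_delta, cvtime_delta);

		printf("delta=%llu remaining=%llu\n", delta, cvtime_delta);
		return 0;
	}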