mirror of https://github.com/torvalds/linux.git
synced 2024-11-01 04:53:36 +01:00

Merge branch 'torvalds:master' into master

commit a78ca09540
175 changed files with 2271 additions and 1505 deletions

.mailmap
@@ -446,7 +446,8 @@ Mythri P K <mythripk@ti.com>
Nadav Amit <nadav.amit@gmail.com> <namit@vmware.com>
Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Naoya Horiguchi <naoya.horiguchi@nec.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Neeraj Upadhyay <quic_neeraju@quicinc.com> <neeraju@codeaurora.org>
Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
@@ -52,6 +52,9 @@ properties:
- const: main
- const: mm

power-domains:
maxItems: 1

required:
- compatible
- reg
@@ -24,10 +24,10 @@ fragmentation statistics can be obtained through gfp flag information of
each page. It is already implemented and activated if page owner is
enabled. Other usages are more than welcome.

It can also be used to show all the stacks and their outstanding
allocations, which gives us a quick overview of where the memory is going
without the need to screen through all the pages and match the allocation
and free operation.
It can also be used to show all the stacks and their current number of
allocated base pages, which gives us a quick overview of where the memory
is going without the need to screen through all the pages and match the
allocation and free operation.

page owner is disabled by default. So, if you'd like to use it, you need
to add "page_owner=on" to your boot cmdline. If the kernel is built
@@ -75,42 +75,45 @@ Usage

cat /sys/kernel/debug/page_owner_stacks/show_stacks > stacks.txt
cat stacks.txt
prep_new_page+0xa9/0x120
get_page_from_freelist+0x7e6/0x2140
__alloc_pages+0x18a/0x370
new_slab+0xc8/0x580
___slab_alloc+0x1f2/0xaf0
__slab_alloc.isra.86+0x22/0x40
kmem_cache_alloc+0x31b/0x350
__khugepaged_enter+0x39/0x100
dup_mmap+0x1c7/0x5ce
copy_process+0x1afe/0x1c90
kernel_clone+0x9a/0x3c0
__do_sys_clone+0x66/0x90
do_syscall_64+0x7f/0x160
entry_SYSCALL_64_after_hwframe+0x6c/0x74
stack_count: 234
post_alloc_hook+0x177/0x1a0
get_page_from_freelist+0xd01/0xd80
__alloc_pages+0x39e/0x7e0
allocate_slab+0xbc/0x3f0
___slab_alloc+0x528/0x8a0
kmem_cache_alloc+0x224/0x3b0
sk_prot_alloc+0x58/0x1a0
sk_alloc+0x32/0x4f0
inet_create+0x427/0xb50
__sock_create+0x2e4/0x650
inet_ctl_sock_create+0x30/0x180
igmp_net_init+0xc1/0x130
ops_init+0x167/0x410
setup_net+0x304/0xa60
copy_net_ns+0x29b/0x4a0
create_new_namespaces+0x4a1/0x820
nr_base_pages: 16
...
...
echo 7000 > /sys/kernel/debug/page_owner_stacks/count_threshold
cat /sys/kernel/debug/page_owner_stacks/show_stacks> stacks_7000.txt
cat stacks_7000.txt
prep_new_page+0xa9/0x120
get_page_from_freelist+0x7e6/0x2140
__alloc_pages+0x18a/0x370
alloc_pages_mpol+0xdf/0x1e0
folio_alloc+0x14/0x50
filemap_alloc_folio+0xb0/0x100
page_cache_ra_unbounded+0x97/0x180
filemap_fault+0x4b4/0x1200
__do_fault+0x2d/0x110
do_pte_missing+0x4b0/0xa30
__handle_mm_fault+0x7fa/0xb70
handle_mm_fault+0x125/0x300
do_user_addr_fault+0x3c9/0x840
exc_page_fault+0x68/0x150
asm_exc_page_fault+0x22/0x30
stack_count: 8248
post_alloc_hook+0x177/0x1a0
get_page_from_freelist+0xd01/0xd80
__alloc_pages+0x39e/0x7e0
alloc_pages_mpol+0x22e/0x490
folio_alloc+0xd5/0x110
filemap_alloc_folio+0x78/0x230
page_cache_ra_order+0x287/0x6f0
filemap_get_pages+0x517/0x1160
filemap_read+0x304/0x9f0
xfs_file_buffered_read+0xe6/0x1d0 [xfs]
xfs_file_read_iter+0x1f0/0x380 [xfs]
__kernel_read+0x3b9/0x730
kernel_read_file+0x309/0x4d0
__do_sys_finit_module+0x381/0x730
do_syscall_64+0x8d/0x150
entry_SYSCALL_64_after_hwframe+0x62/0x6a
nr_base_pages: 20824
...

cat /sys/kernel/debug/page_owner > page_owner_full.txt
@@ -10024,7 +10024,7 @@ F: drivers/media/platform/st/sti/hva

HWPOISON MEMORY FAILURE HANDLING
M: Miaohe Lin <linmiaohe@huawei.com>
R: Naoya Horiguchi <naoya.horiguchi@nec.com>
R: Naoya Horiguchi <nao.horiguchi@gmail.com>
L: linux-mm@kvack.org
S: Maintained
F: mm/hwpoison-inject.c
@@ -289,6 +289,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
adr_l x1, __hyp_text_end
adr_l x2, dcache_clean_poc
blr x2

mov_q x0, INIT_SCTLR_EL2_MMU_OFF
pre_disable_mmu_workaround
msr sctlr_el2, x0
isb
0:
mov_q x0, HCR_HOST_NVHE_FLAGS

@@ -323,13 +328,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
cbz x0, 2f

/* Set a sane SCTLR_EL1, the VHE way */
pre_disable_mmu_workaround
msr_s SYS_SCTLR_EL12, x1
mov x2, #BOOT_CPU_FLAG_E2H
b 3f

2:
pre_disable_mmu_workaround
msr sctlr_el1, x1
mov x2, xzr
3:
@@ -276,7 +276,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *ptep = NULL;

pgdp = pgd_offset(mm, addr);
p4dp = p4d_offset(pgdp, addr);
p4dp = p4d_alloc(mm, pgdp, addr);
if (!p4dp)
return NULL;

pudp = pud_alloc(mm, p4dp, addr);
if (!pudp)
return NULL;
@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);

if (!can_set_direct_map())
return true;

pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
@@ -340,7 +340,8 @@ SYM_CODE_START(pgm_check_handler)
mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
stctg %c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
lg %r12,__LC_GMAP
ltg %r12,__LC_GMAP
jz 5f
clc __GMAP_ASCE(8,%r12), __PT_CR1(%r11)
jne 5f
BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)

static void __cold _credit_init_bits(size_t bits)
{
static struct execute_work set_ready;
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;

@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)

if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
if (static_key_initialized)
execute_in_process_context(crng_set_ready, &set_ready);
if (static_key_initialized && system_unbound_wq)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);

@@ -890,8 +890,8 @@ void __init random_init(void)

/*
* If we were initialized by the cpu or bootloader before jump labels
* are initialized, then we should enable the static branch here, where
* it's guaranteed that jump labels have been initialized.
* or workqueues are initialized, then we should enable the static
* branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
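The random.c hunks above replace execute_in_process_context() with a statically declared work item queued on system_unbound_wq. As a rough, hypothetical sketch of that DECLARE_WORK/queue_work pattern (illustrative names, not the driver's code):

#include <linux/workqueue.h>
#include <linux/printk.h>

/* Hypothetical handler; crng_set_ready() plays this role in the hunk above. */
static void my_ready_fn(struct work_struct *work)
{
        pr_info("entropy pool marked ready\n");
}

/* Work item bound to its handler at compile time, like set_ready above. */
static DECLARE_WORK(my_ready_work, my_ready_fn);

static void my_credit_bits(void)
{
        /* Only queue once the unbound workqueue has been brought up. */
        if (system_unbound_wq)
                queue_work(system_unbound_wq, &my_ready_work);
}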
@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
case 0x5e:
return GPIOPANELCTL;
default:
return -EOPNOTSUPP;
return -ENOTSUPP;
}
}
@@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match);

static struct platform_driver lpc32xx_gpio_driver = {
.driver = {
@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;

if (gpio >= WCOVE_GPIO_NUM)
return -EOPNOTSUPP;
return -ENOTSUPP;

return reg + gpio;
}
@@ -819,7 +819,7 @@ static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)

p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;

if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {

@@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;

if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;

/* Abort if the BO doesn't currently have a backing store */
if (!bo->tbo.resource)
if (!res)
return;

obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);

domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
if (amdgpu_bo_in_cpu_visible_vram(bo))
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;

@@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;

if (amdgpu_bo_in_cpu_visible_vram(abo))
if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;

/* Can't move a pinned BO to visible VRAM */

@@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)

/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
!amdgpu_bo_in_cpu_visible_vram(abo))
!amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;

ttm_bo_move_to_lru_tail_unlocked(bo);

@@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;

@@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)

if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;

domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
if (amdgpu_bo_in_cpu_visible_vram(bo))
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
@@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
* amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
*/
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;

if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;

amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
while (cursor.remaining) {
if (cursor.start < adev->gmc.visible_vram_size)
return true;

amdgpu_res_next(&cursor, cursor.size);
}

return false;
}

/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,

} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
amdgpu_res_cpu_visible(adev, bo->resource)) {

/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this

@@ -403,40 +403,55 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
return r;
}

/**
* amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
* @adev: amdgpu device
* @res: the resource to check
*
* Returns: true if the full resource is CPU visible, false otherwise.
*/
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res)
{
struct amdgpu_res_cursor cursor;

if (!res)
return false;

if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
res->mem_type == AMDGPU_PL_PREEMPT)
return true;

if (res->mem_type != TTM_PL_VRAM)
return false;

amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
return false;
amdgpu_res_next(&cursor, cursor.size);
}

return true;
}

/*
* amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
* amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
*
* Called by amdgpu_bo_move()
*/
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
u64 end;

if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
return true;
if (mem->mem_type != TTM_PL_VRAM)
if (!amdgpu_res_cpu_visible(adev, mem))
return false;

amdgpu_res_first(mem, 0, mem_size, &cursor);
end = cursor.start + cursor.size;
while (cursor.remaining) {
amdgpu_res_next(&cursor, cursor.size);
/* ttm_resource_ioremap only supports contiguous memory */
if (mem->mem_type == TTM_PL_VRAM &&
!(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
return false;

if (!cursor.remaining)
break;

/* ttm_resource_ioremap only supports contiguous memory */
if (end != cursor.start)
return false;

end = cursor.start + cursor.size;
}

return end <= adev->gmc.visible_vram_size;
return true;
}

/*
|
@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
|
|||
|
||||
if (r) {
|
||||
/* Check that all memory is CPU accessible */
|
||||
if (!amdgpu_mem_visible(adev, old_mem) ||
|
||||
!amdgpu_mem_visible(adev, new_mem)) {
|
||||
if (!amdgpu_res_copyable(adev, old_mem) ||
|
||||
!amdgpu_res_copyable(adev, new_mem)) {
|
||||
pr_err("Move buffer fallback to memcpy unavailable\n");
|
||||
return r;
|
||||
}
|
||||
|
@ -557,7 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
|
|||
struct ttm_resource *mem)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
|
||||
size_t bus_size = (size_t)mem->size;
|
||||
|
||||
switch (mem->mem_type) {
|
||||
case TTM_PL_SYSTEM:
|
||||
|
@ -568,9 +582,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
|
|||
break;
|
||||
case TTM_PL_VRAM:
|
||||
mem->bus.offset = mem->start << PAGE_SHIFT;
|
||||
/* check if it's visible */
|
||||
if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
|
||||
return -EINVAL;
|
||||
|
||||
if (adev->mman.aper_base_kaddr &&
|
||||
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
|
||||
|
|
|
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);

bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
@@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
uint64_t saddr,
uint64_t offset,
uint64_t size)
{
uint64_t tmp, lpfn;

if (saddr & AMDGPU_GPU_PAGE_MASK
|| offset & AMDGPU_GPU_PAGE_MASK
|| size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;

if (check_add_overflow(saddr, size, &tmp)
|| check_add_overflow(offset, size, &tmp)
|| size == 0 /* which also leads to end < begin */)
return -EINVAL;

/* make sure object fit at this offset */
if (bo && offset + size > amdgpu_bo_size(bo))
return -EINVAL;

/* Ensure last pfn not exceed max_pfn */
lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
if (lpfn >= adev->vm_manager.max_pfn)
return -EINVAL;

return 0;
}

/**
* amdgpu_vm_bo_map - map bo inside a vm
*
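amdgpu_vm_verify_parameters() above folds the open-coded range checks into one helper built on check_add_overflow(). A loose illustration of that overflow-aware validation style, with made-up constants rather than the driver's:

#include <linux/overflow.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_PAGE_MASK 0xfffULL  /* hypothetical 4 KiB page mask */

/* Reject misaligned, zero-sized or wrapping (start, offset, size) triples. */
static int my_verify_range(u64 start, u64 offset, u64 size, u64 limit)
{
        u64 tmp;

        if ((start | offset | size) & MY_PAGE_MASK)
                return -EINVAL;         /* everything must be page aligned */

        if (!size ||
            check_add_overflow(start, size, &tmp) ||
            check_add_overflow(offset, size, &tmp))
                return -EINVAL;         /* zero size or arithmetic wrap-around */

        return (start + size - 1 < limit) ? 0 : -ERANGE;
}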
@@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;

/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;

/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;

saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {

@@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;

/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;

/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;

/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);

@@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}

saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

mapping->start = saddr;
mapping->last = eaddr;

@@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
int r;

r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
if (r)
return r;

eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);

if (kfd_is_locked()) {
mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
return ERR_PTR(-EINVAL);
process = ERR_PTR(-EINVAL);
goto out;
}

/* A prior open of /dev/kfd could have already created the process. */
@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;

if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;

enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

enc10->base.features = *enc_features;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;

if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;

enc10->base.transmitter = init_data->transmitter;
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;

if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;

enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
}

enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;

if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
@@ -23,6 +23,7 @@
*/

#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"

@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}

@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif

/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
bios->legacy.i2c_indices.crt, 1, 1);
bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);

if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
all_heads, 0);
all_heads, DCB_OUTPUT_A);

else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
all_heads, 1);
all_heads, DCB_OUTPUT_B);
}

static int
@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
u8 *dpcd = nv_encoder->dp.dpcd;
int ret = NOUVEAU_DP_NONE, hpd;

/* If we've already read the DPCD on an eDP device, we don't need to
* reread it as it won't change
/* eDP ports don't support hotplugging - so there's no point in probing eDP ports unless we
* haven't probed them once before.
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
dpcd[DP_DPCD_REV] != 0)
return NOUVEAU_DP_SST;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
if (connector->status == connector_status_connected)
return NOUVEAU_DP_SST;
else if (connector->status == connector_status_disconnected)
return NOUVEAU_DP_NONE;
}

// Ensure that the aux bus is enabled for probing
drm_dp_dpcd_set_powered(&nv_connector->aux, true);

mutex_lock(&nv_encoder->dp.hpd_irq_lock);
if (mstm) {

@@ -293,6 +299,13 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
nv50_mstm_remove(mstm);

/* GSP doesn't like when we try to do aux transactions on a port it considers disconnected,
* and since we don't really have a usecase for that anyway - just disable the aux bus here
* if we've decided the connector is disconnected
*/
if (ret == NOUVEAU_DP_NONE)
drm_dp_dpcd_set_powered(&nv_connector->aux, false);

mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
return ret;
}
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;

/* Already mapped? */
if (refcount_inc_not_zero(&iobj->maps))
if (refcount_inc_not_zero(&iobj->maps)) {
/* read barrier match the wmb on refcount set */
smp_rmb();
return iobj->map;
}

/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.

@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
/* barrier to ensure the ptrs are written before refcount is set */
smp_wmb();
refcount_set(&iobj->maps, 1);
}
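The two instobj hunks pair an smp_wmb() before refcount_set() with an smp_rmb() after refcount_inc_not_zero(), so the fast path never observes a published refcount without the data it guards. A toy publish/consume sketch of that pairing (hypothetical names, not the nouveau code):

#include <linux/refcount.h>
#include <asm/barrier.h>

struct my_obj {
        void *map;              /* payload published to readers */
        refcount_t maps;        /* zero means "not yet published" */
};

static void *my_acquire(struct my_obj *obj)
{
        if (refcount_inc_not_zero(&obj->maps)) {
                /* Pairs with the smp_wmb() in my_publish(). */
                smp_rmb();
                return obj->map;
        }
        return NULL;            /* slow path would map and publish here */
}

static void my_publish(struct my_obj *obj, void *map)
{
        obj->map = map;
        /* Make the payload visible before the refcount says "ready". */
        smp_wmb();
        refcount_set(&obj->maps, 1);
}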
@@ -614,8 +614,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);

mipi_dsi_detach(ctx->dsi);
mipi_dsi_device_unregister(ctx->dsi);

drm_panel_remove(&ctx->panel);
}
@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);

mipi_dsi_detach(ctx->dsi);
mipi_dsi_device_unregister(ctx->dsi);

drm_panel_remove(&ctx->panel);
}
@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
//size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
//size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
UCHAR ucNumDPMLevels;

//a index to the array of nonClockInfos

@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
UCHAR clockInfoIndex[1];
UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
} ATOM_PPLIB_STATE_V2;

typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;

ATOM_PPLIB_STATE_V2 states[1];
ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
}StateArray;

@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;

UCHAR clockInfo[1];
UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;

typedef struct _NonClockInfoArray{

@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;

ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;

typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
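The pptable.h hunks turn one-element trailing arrays into C99 flexible arrays annotated with __counted_by(), matching the struct_size() note in the comment. A small, hypothetical example of how such a struct is typically declared and allocated:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_state_array {
        u8 num_entries;                         /* how many entries follow */
        u8 entries[] __counted_by(num_entries); /* bounds known to FORTIFY/UBSAN */
};

static struct my_state_array *my_state_array_alloc(u8 n)
{
        struct my_state_array *a;

        /* struct_size() computes sizeof(*a) + n * sizeof(a->entries[0]) without overflow. */
        a = kzalloc(struct_size(a, entries, n), GFP_KERNEL);
        if (a)
                a->num_entries = n;
        return a;
}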
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;

for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
ATOM_CONNECTOR_INFO_I2C ci;

if (frev > 1)
ci = supported_devices->info_2d1.asConnInfo[i];
else
ci = supported_devices->info.asConnInfo[i];

bios_connectors[i].valid = false;
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];

if (pool->use_dma32)
return &global_dma32_write_combined[order];

return &global_write_combined[order];
case ttm_uncached:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];

if (pool->use_dma32)
return &global_dma32_uncached[order];

@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;

if (use_dma_alloc || nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j < NR_PAGE_ORDERS; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;

/* Initialize only pool types which are actually used */
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;

ttm_pool_type_init(pt, pool, i, j);
}
}
}
EXPORT_SYMBOL(ttm_pool_init);

@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;

if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j < NR_PAGE_ORDERS; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;

pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;

ttm_pool_type_fini(pt);
}
}

/* We removed the pool types from the LRU, but we need to also make sure
@@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_BIN];

file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
file->jobs_sent[V3D_BIN]++;
v3d->queue[V3D_BIN].jobs_sent++;

@@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_RENDER];

file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
file->jobs_sent[V3D_RENDER]++;
v3d->queue[V3D_RENDER].jobs_sent++;

@@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_CSD];

file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
file->jobs_sent[V3D_CSD]++;
v3d->queue[V3D_CSD].jobs_sent++;

@@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_TFU];

file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
file->jobs_sent[V3D_TFU]++;
v3d->queue[V3D_TFU].jobs_sent++;
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
struct vmw_bo_blit_line_data d;
struct vmw_bo_blit_line_data d = {0};
int ret = 0;
struct page **dst_pages = NULL;
struct page **src_pages = NULL;

/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))

@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}

if (!src->ttm->pages && src->ttm->sg) {
src_pages = kvmalloc_array(src->ttm->num_pages,
sizeof(struct page *), GFP_KERNEL);
if (!src_pages)
return -ENOMEM;
ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
src->ttm->num_pages);
if (ret)
goto out;
}
if (!dst->ttm->pages && dst->ttm->sg) {
dst_pages = kvmalloc_array(dst->ttm->num_pages,
sizeof(struct page *), GFP_KERNEL);
if (!dst_pages) {
ret = -ENOMEM;
goto out;
}
ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
dst->ttm->num_pages);
if (ret)
goto out;
}

d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);

@@ -504,6 +529,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
if (src_pages)
kvfree(src_pages);
if (dst_pages)
kvfree(dst_pages);

return ret;
}
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
.no_wait_gpu = false
.no_wait_gpu = false,
.resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;

@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,

vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
&vmw_bo->placement, 0, &ctx, NULL,
NULL, destroy);
&vmw_bo->placement, 0, &ctx,
params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;
@@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type;
size_t size;
bool pin;
struct dma_resv *resv;
struct sg_table *sg;
};

/**
@@ -1628,6 +1628,7 @@ static const struct drm_driver driver = {

.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
.gem_prime_import_sg_table = vmw_prime_import_sg_table,

.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
@@ -1130,6 +1130,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *table);

/*
* MemoryOBject management - vmwgfx_mob.c
@@ -149,6 +149,38 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
return ret;
}

struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *table)
{
int ret;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_gem_object *gem = NULL;
struct vmw_bo *vbo;
struct vmw_bo_params params = {
.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
.resv = attach->dmabuf->resv,
.sg = table,

};

dma_resv_lock(params.resv, NULL);

ret = vmw_bo_create(dev_priv, &params, &vbo);
if (ret != 0)
goto out_no_bo;

vbo->tbo.base.funcs = &vmw_gem_object_funcs;

gem = &vbo->tbo.base;
out_no_bo:
dma_resv_unlock(params.resv);
return gem;
}

int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
@@ -933,6 +933,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);

@@ -940,9 +941,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);

/* We always want to have an active plane with an active CRTC */
if (has_primary != new_state->enable)
return -EINVAL;
/*
* This is fine in general, but broken userspace might expect
* some actual rendering so give a clue as why it's blank.
*/
if (new_state->enable && !has_primary)
drm_dbg_driver(&vmw->drm,
"CRTC without a primary plane will be blank.\n");

if (new_state->connector_mask != connector_mask &&
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {

static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
};

static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret = ttm_prime_fd_to_handle(tfile, fd, handle);

return ttm_prime_fd_to_handle(tfile, fd, handle);
if (ret)
ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);

return ret;
}

int vmw_prime_handle_to_fd(struct drm_device *dev,

@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
int ret;

if (handle > VMWGFX_NUM_MOB)
ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
else
ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);

return ret;
}
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment(
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
if (ret)
goto out_sg_alloc_fail;
if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
vsgt->sgt = vmw_tt->dma_ttm.sg;
} else {
vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
dma_get_max_seg_size(dev_priv->drm.dev),
GFP_KERNEL);
if (ret)
goto out_sg_alloc_fail;
}

ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))

@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;

out_map_fail:
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
drm_warn(&dev_priv->drm, "VSG table map failed!");
sg_free_table(vsgt->sgt);
vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}

@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
int ret;
bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

/* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;

ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
if (external && ttm->sg)
return drm_prime_sg_to_dma_addr_array(ttm->sg,
ttm->dma_address,
ttm->num_pages);

return ret;
return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void vmw_ttm_unpopulate(struct ttm_device *bdev,

@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

if (external)
return;

vmw_ttm_unbind(bdev, ttm);

@@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
bool external = bo->type == ttm_bo_type_sg;

vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)

@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;

if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
if (external)
page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
@@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,

ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret)
return ret;
goto err;

if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
/*

@@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm);
return -EINVAL;
ret = -EINVAL;
goto err;
}
bo->flags |= XE_BO_SCANOUT_BIT;
}
ttm_bo_unreserve(&bo->ttm);
return 0;

err:
xe_bo_put(bo);
return ret;
}
@@ -1577,6 +1577,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe->usm.num_vm_in_fault_mode--;
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe->usm.num_vm_in_non_fault_mode--;

if (vm->usm.asid) {
void *lookup;

xe_assert(xe, xe->info.has_asid);
xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));

lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
xe_assert(xe, lookup == vm);
}
mutex_unlock(&xe->usm.lock);

for_each_tile(tile, xe, id)

@@ -1592,24 +1602,15 @@ static void vm_destroy_work_func(struct work_struct *w)
struct xe_device *xe = vm->xe;
struct xe_tile *tile;
u8 id;
void *lookup;

/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);

mutex_destroy(&vm->snap_mutex);

if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe_device_mem_access_put(xe);

if (xe->info.has_asid && vm->usm.asid) {
mutex_lock(&xe->usm.lock);
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
xe_assert(xe, lookup == vm);
mutex_unlock(&xe->usm.lock);
}
}

for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]);
@@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}

static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
enum ib_cm_state old_state)
{
struct cm_id_private *cm_id_priv;

cm_id_priv = container_of(cm_id, struct cm_id_private, id);
pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
enum ib_cm_state old_state;
struct cm_work *work;
int ret;

cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:

@@ -1151,7 +1154,7 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */
cm_destroy_id_wait_timeout(cm_id);
cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret);

while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
!mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)

if (rxe->tfm)
crypto_free_shash(rxe->tfm);

mutex_destroy(&rxe->usdev_lock);
}

/* initialize rxe device parameters */
@@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
select IOMMUFD_DRIVER
default n
help
This is dangerous, do not enable unless running
@@ -1883,14 +1883,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,

static int mt753x_mirror_port_get(unsigned int id, u32 val)
{
return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
MIRROR_PORT(val);
return (id == ID_MT7531 || id == ID_MT7988) ?
MT7531_MIRROR_PORT_GET(val) :
MIRROR_PORT(val);
}

static int mt753x_mirror_port_set(unsigned int id, u32 val)
{
return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
MIRROR_PORT(val);
return (id == ID_MT7531 || id == ID_MT7988) ?
MT7531_MIRROR_PORT_SET(val) :
MIRROR_PORT(val);
}

static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,

@@ -2480,6 +2482,9 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}

/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);

/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)

@@ -2591,6 +2596,9 @@ mt7531_setup_common(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}

/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);

/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -32,6 +32,10 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)

/* Register for ARL global control */
#define MT753X_AGC 0xc
#define LOCAL_EN BIT(7)

/* Registers to mac forward control for unknown frames */
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
* - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
* - Tunnel flag (present if tunnel)
*/
if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
lkups_cnt++;

if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;

@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);

if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
ice_rule_add_src_vsi_metadata(&list[i]);
i++;
}

rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);

@@ -772,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;

if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}

@@ -820,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)

/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
rule_info.src_vsi = vsi->idx;

ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {

@@ -1481,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
@@ -689,6 +689,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,

if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
u32 val;

flow_rule_match_control(rule, &match);
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {

@@ -697,12 +698,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}

if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
if (ntohs(flow_spec->etype) == ETH_P_IP) {
flow_spec->ip_flag = IPV4_FLAG_MORE;
flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
flow_spec->next_header = IPPROTO_FRAGMENT;
flow_spec->next_header = val ?
IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {
@@ -1074,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);

wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);

if (!mtk_wed_get_rx_capa(dev))
return;

@@ -1093,7 +1093,6 @@ static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
mtk_wed_stop(dev);
mtk_wed_dma_disable(dev);

wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |

@@ -2605,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
if (!dev->running)
return;

mtk_wed_set_ext_int(dev, !!mask);
wed_w32(dev, MTK_WED_INT_MASK, mask);
}
@@ -108,7 +108,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
rtnl_lock();
mlx5e_activate_txqsq(sq);
rtnl_unlock();

if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
else

@@ -179,12 +182,16 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);

rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
rtnl_unlock();

mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);

rtnl_lock();
mlx5e_activate_priv_channels(priv);
rtnl_unlock();

/* return carrier back if needed */
if (carrier_ok)
@ -46,6 +46,10 @@ struct arfs_table {
|
|||
struct hlist_head rules_hash[ARFS_HASH_SIZE];
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5E_ARFS_STATE_ENABLED,
|
||||
};
|
||||
|
||||
enum arfs_type {
|
||||
ARFS_IPV4_TCP,
|
||||
ARFS_IPV6_TCP,
|
||||
|
@ -60,6 +64,7 @@ struct mlx5e_arfs_tables {
|
|||
spinlock_t arfs_lock;
|
||||
int last_filter_id;
|
||||
struct workqueue_struct *wq;
|
||||
unsigned long state;
|
||||
};
|
||||
|
||||
struct arfs_tuple {
|
||||
|
@ -170,6 +175,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
|
|||
return err;
|
||||
}
|
||||
}
|
||||
set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -455,6 +462,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
|
|||
int i;
|
||||
int j;
|
||||
|
||||
clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
|
||||
|
||||
spin_lock_bh(&arfs->arfs_lock);
|
||||
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
|
||||
hlist_del_init(&rule->hlist);
|
||||
|
@ -627,17 +636,8 @@ static void arfs_handle_work(struct work_struct *work)
|
|||
struct mlx5_flow_handle *rule;
|
||||
|
||||
arfs = mlx5e_fs_get_arfs(priv->fs);
|
||||
mutex_lock(&priv->state_lock);
|
||||
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
|
||||
spin_lock_bh(&arfs->arfs_lock);
|
||||
hlist_del(&arfs_rule->hlist);
|
||||
spin_unlock_bh(&arfs->arfs_lock);
|
||||
|
||||
mutex_unlock(&priv->state_lock);
|
||||
kfree(arfs_rule);
|
||||
goto out;
|
||||
}
|
||||
mutex_unlock(&priv->state_lock);
|
||||
if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
|
||||
return;
|
||||
|
||||
if (!arfs_rule->rule) {
|
||||
rule = arfs_add_rule(priv, arfs_rule);
|
||||
|
@ -753,6 +753,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
|
|||
return -EPROTONOSUPPORT;
|
||||
|
||||
spin_lock_bh(&arfs->arfs_lock);
|
||||
if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
|
||||
spin_unlock_bh(&arfs->arfs_lock);
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
arfs_rule = arfs_find_rule(arfs_t, &fk);
|
||||
if (arfs_rule) {
|
||||
if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
|
||||
|
|
|
@ -589,12 +589,12 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
|
|||
static void
|
||||
mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
|
||||
{
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
int tc;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < priv->channels.num; ++i) {
|
||||
struct mlx5e_channel *c = priv->channels.c[i];
|
||||
struct mlx5_core_dev *mdev = c->mdev;
|
||||
|
||||
for (tc = 0; tc < c->num_tc; tc++) {
|
||||
mlx5_core_modify_cq_moderation(mdev,
|
||||
|
@ -608,11 +608,11 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
|
|||
static void
|
||||
mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
|
||||
{
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < priv->channels.num; ++i) {
|
||||
struct mlx5e_channel *c = priv->channels.c[i];
|
||||
struct mlx5_core_dev *mdev = c->mdev;
|
||||
|
||||
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
|
||||
coal->rx_coalesce_usecs,
|
||||
|
|
|
@@ -209,8 +209,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
*data,
mlx5e_devcom_event_mpv,
priv);
if (IS_ERR_OR_NULL(priv->devcom))
return -EOPNOTSUPP;
if (IS_ERR(priv->devcom))
return PTR_ERR(priv->devcom);

if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -3060,7 +3060,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
key,
mlx5_esw_offloads_devcom_event,
esw);
if (IS_ERR_OR_NULL(esw->devcom))
if (IS_ERR(esw->devcom))
return;

mlx5_devcom_send_event(esw->devcom,
@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}

if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_lag_port_sel_destroy(ldev);
ldev->buckets = 1;
}
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
@@ -220,7 +220,7 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp *comp;

if (IS_ERR_OR_NULL(devc))
return NULL;
return ERR_PTR(-EINVAL);

mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, key, handler);
@@ -213,8 +213,8 @@ static int sd_register(struct mlx5_core_dev *dev)
sd = mlx5_get_sd(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
sd->group_id, NULL, dev);
if (!devcom)
return -ENOMEM;
if (IS_ERR(devcom))
return PTR_ERR(devcom);

sd->devcom = devcom;
@@ -956,7 +956,7 @@ static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
mlx5_query_nic_system_image_guid(dev),
NULL, dev);
if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
if (IS_ERR(dev->priv.hca_devcom_comp))
mlx5_core_err(dev, "Failed to register devcom HCA component\n");
}
|||
err = mlx5_devlink_params_register(priv_to_devlink(dev));
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
|
||||
goto query_hca_caps_err;
|
||||
goto params_reg_err;
|
||||
}
|
||||
|
||||
devl_unlock(devlink);
|
||||
return 0;
|
||||
|
||||
params_reg_err:
|
||||
devl_unregister(devlink);
|
||||
devl_unlock(devlink);
|
||||
query_hca_caps_err:
|
||||
devl_unregister(devlink);
|
||||
devl_unlock(devlink);
|
||||
|
|
|
@@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto peer_devlink_set_err;
}

devlink_register(devlink);
return 0;

peer_devlink_set_err:
@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
|
|||
u16 l3_proto; /* protocol specified in the template */
|
||||
};
|
||||
|
||||
/* SparX-5 VCAP fragment types:
|
||||
* 0 = no fragment, 1 = initial fragment,
|
||||
* 2 = suspicious fragment, 3 = valid follow-up fragment
|
||||
*/
|
||||
enum { /* key / mask */
|
||||
FRAG_NOT = 0x03, /* 0 / 3 */
|
||||
FRAG_SOME = 0x11, /* 1 / 1 */
|
||||
FRAG_FIRST = 0x13, /* 1 / 3 */
|
||||
FRAG_LATER = 0x33, /* 3 / 3 */
|
||||
FRAG_INVAL = 0xff, /* invalid */
|
||||
};
|
||||
|
||||
/* Flower fragment flag to VCAP fragment type mapping */
|
||||
static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
|
||||
{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
|
||||
{ FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
|
||||
{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
|
||||
{ FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
|
||||
/* 0/0 0/1 1/0 1/1 <-- first_frag */
|
||||
};
|
||||
|
||||
static int
|
||||
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
|
||||
{
|
||||
|
@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
|
|||
flow_rule_match_control(st->frule, &mt);
|
||||
|
||||
if (mt.mask->flags) {
|
||||
if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
|
||||
if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
|
||||
value = 1; /* initial fragment */
|
||||
mask = 0x3;
|
||||
} else {
|
||||
if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
|
||||
value = 3; /* follow up fragment */
|
||||
mask = 0x3;
|
||||
} else {
|
||||
value = 0; /* no fragment */
|
||||
mask = 0x3;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
|
||||
value = 3; /* follow up fragment */
|
||||
mask = 0x3;
|
||||
} else {
|
||||
value = 0; /* no fragment */
|
||||
mask = 0x3;
|
||||
}
|
||||
u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
|
||||
u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
|
||||
u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
|
||||
|
||||
u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
|
||||
u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
|
||||
u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
|
||||
|
||||
/* Lookup verdict based on the 2 + 2 input bits */
|
||||
u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
|
||||
|
||||
if (vdt == FRAG_INVAL) {
|
||||
NL_SET_ERR_MSG_MOD(st->fco->common.extack,
|
||||
"Match on invalid fragment flag combination");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Extract VCAP fragment key and mask from verdict */
|
||||
value = (vdt >> 4) & 0x3;
|
||||
mask = vdt & 0x3;
|
||||
|
||||
err = vcap_rule_add_key_u32(st->vrule,
|
||||
VCAP_KF_L3_FRAGMENT_TYPE,
|
||||
value, mask);
|
||||
|
|
|
@ -769,25 +769,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
|
|||
dma_addr_t dma_addr;
|
||||
int rx_packets = 0;
|
||||
u8 desc_status;
|
||||
u16 pkt_len;
|
||||
u16 desc_len;
|
||||
u8 die_dt;
|
||||
int entry;
|
||||
int limit;
|
||||
int i;
|
||||
|
||||
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
|
||||
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
|
||||
stats = &priv->stats[q];
|
||||
|
||||
desc = &priv->rx_ring[q].desc[entry];
|
||||
for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
|
||||
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
|
||||
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
|
||||
desc = &priv->rx_ring[q].desc[entry];
|
||||
if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
|
||||
break;
|
||||
|
||||
/* Descriptor type must be checked before all other reads */
|
||||
dma_rmb();
|
||||
desc_status = desc->msc;
|
||||
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
|
||||
desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
|
||||
|
||||
/* We use 0-byte descriptors to mark the DMA mapping errors */
|
||||
if (!pkt_len)
|
||||
if (!desc_len)
|
||||
continue;
|
||||
|
||||
if (desc_status & MSC_MC)
|
||||
|
@ -808,25 +811,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
|
|||
switch (die_dt) {
|
||||
case DT_FSINGLE:
|
||||
skb = ravb_get_skb_gbeth(ndev, entry, desc);
|
||||
skb_put(skb, pkt_len);
|
||||
skb_put(skb, desc_len);
|
||||
skb->protocol = eth_type_trans(skb, ndev);
|
||||
if (ndev->features & NETIF_F_RXCSUM)
|
||||
ravb_rx_csum_gbeth(skb);
|
||||
napi_gro_receive(&priv->napi[q], skb);
|
||||
rx_packets++;
|
||||
stats->rx_bytes += pkt_len;
|
||||
stats->rx_bytes += desc_len;
|
||||
break;
|
||||
case DT_FSTART:
|
||||
priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
|
||||
skb_put(priv->rx_1st_skb, pkt_len);
|
||||
skb_put(priv->rx_1st_skb, desc_len);
|
||||
break;
|
||||
case DT_FMID:
|
||||
skb = ravb_get_skb_gbeth(ndev, entry, desc);
|
||||
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
|
||||
priv->rx_1st_skb->len,
|
||||
skb->data,
|
||||
pkt_len);
|
||||
skb_put(priv->rx_1st_skb, pkt_len);
|
||||
desc_len);
|
||||
skb_put(priv->rx_1st_skb, desc_len);
|
||||
dev_kfree_skb(skb);
|
||||
break;
|
||||
case DT_FEND:
|
||||
|
@ -834,23 +837,20 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
|
|||
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
|
||||
priv->rx_1st_skb->len,
|
||||
skb->data,
|
||||
pkt_len);
|
||||
skb_put(priv->rx_1st_skb, pkt_len);
|
||||
desc_len);
|
||||
skb_put(priv->rx_1st_skb, desc_len);
|
||||
dev_kfree_skb(skb);
|
||||
priv->rx_1st_skb->protocol =
|
||||
eth_type_trans(priv->rx_1st_skb, ndev);
|
||||
if (ndev->features & NETIF_F_RXCSUM)
|
||||
ravb_rx_csum_gbeth(skb);
|
||||
ravb_rx_csum_gbeth(priv->rx_1st_skb);
|
||||
stats->rx_bytes += priv->rx_1st_skb->len;
|
||||
napi_gro_receive(&priv->napi[q],
|
||||
priv->rx_1st_skb);
|
||||
rx_packets++;
|
||||
stats->rx_bytes += pkt_len;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
|
||||
desc = &priv->rx_ring[q].desc[entry];
|
||||
}
|
||||
|
||||
/* Refill the RX ring buffers. */
|
||||
|
@ -891,30 +891,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
|
|||
{
|
||||
struct ravb_private *priv = netdev_priv(ndev);
|
||||
const struct ravb_hw_info *info = priv->info;
|
||||
int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
|
||||
int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
|
||||
priv->cur_rx[q];
|
||||
struct net_device_stats *stats = &priv->stats[q];
|
||||
struct ravb_ex_rx_desc *desc;
|
||||
unsigned int limit, i;
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t dma_addr;
|
||||
struct timespec64 ts;
|
||||
int rx_packets = 0;
|
||||
u8 desc_status;
|
||||
u16 pkt_len;
|
||||
int limit;
|
||||
int entry;
|
||||
|
||||
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
|
||||
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
|
||||
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
|
||||
desc = &priv->rx_ring[q].ex_desc[entry];
|
||||
if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
|
||||
break;
|
||||
|
||||
boguscnt = min(boguscnt, *quota);
|
||||
limit = boguscnt;
|
||||
desc = &priv->rx_ring[q].ex_desc[entry];
|
||||
while (desc->die_dt != DT_FEMPTY) {
|
||||
/* Descriptor type must be checked before all other reads */
|
||||
dma_rmb();
|
||||
desc_status = desc->msc;
|
||||
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
|
||||
|
||||
if (--boguscnt < 0)
|
||||
break;
|
||||
|
||||
/* We use 0-byte descriptors to mark the DMA mapping errors */
|
||||
if (!pkt_len)
|
||||
continue;
|
||||
|
@ -960,12 +959,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
|
|||
if (ndev->features & NETIF_F_RXCSUM)
|
||||
ravb_rx_csum(skb);
|
||||
napi_gro_receive(&priv->napi[q], skb);
|
||||
stats->rx_packets++;
|
||||
rx_packets++;
|
||||
stats->rx_bytes += pkt_len;
|
||||
}
|
||||
|
||||
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
|
||||
desc = &priv->rx_ring[q].ex_desc[entry];
|
||||
}
|
||||
|
||||
/* Refill the RX ring buffers. */
|
||||
|
@ -995,9 +991,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
|
|||
desc->die_dt = DT_FEMPTY;
|
||||
}
|
||||
|
||||
*quota -= limit - (++boguscnt);
|
||||
|
||||
return boguscnt <= 0;
|
||||
stats->rx_packets += rx_packets;
|
||||
*quota -= rx_packets;
|
||||
return *quota == 0;
|
||||
}
|
||||
|
||||
/* Packet receive function for Ethernet AVB */
|
||||
|
|
|
@@ -553,6 +553,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;

struct mac_link {
u32 caps;
u32 speed_mask;
u32 speed10;
u32 speed100;
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;

mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
/* The loopback bit seems to be re-set when link change
* Simply mask it each time
* Speed 10/100/1000 are set in BIT(2)/BIT(3)
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
dev_info(priv->device, "\tDWMAC100\n");

mac->pcsr = priv->ioaddr;
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;
@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
|
|||
|
||||
static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
|
||||
{
|
||||
priv->phylink_config.mac_capabilities |= MAC_2500FD;
|
||||
if (priv->plat->tx_queues_to_use > 1)
|
||||
priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
|
||||
else
|
||||
priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
|
||||
}
|
||||
|
||||
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
|
||||
|
@ -1378,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
|
|||
if (mac->multicast_filter_bins)
|
||||
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
|
||||
|
||||
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
|
||||
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
|
||||
mac->link.duplex = GMAC_CONFIG_DM;
|
||||
mac->link.speed10 = GMAC_CONFIG_PS;
|
||||
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
|
||||
|
|
|
@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
|
|||
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
|
||||
}
|
||||
|
||||
static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
|
||||
{
|
||||
priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
|
||||
MAC_10000FD | MAC_25000FD |
|
||||
MAC_40000FD | MAC_50000FD |
|
||||
MAC_100000FD;
|
||||
}
|
||||
|
||||
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
|
||||
{
|
||||
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
|
||||
|
@ -1540,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
|
|||
|
||||
const struct stmmac_ops dwxgmac210_ops = {
|
||||
.core_init = dwxgmac2_core_init,
|
||||
.phylink_get_caps = xgmac_phylink_get_caps,
|
||||
.set_mac = dwxgmac2_set_mac,
|
||||
.rx_ipc = dwxgmac2_rx_ipc,
|
||||
.rx_queue_enable = dwxgmac2_rx_queue_enable,
|
||||
|
@ -1601,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
|
|||
|
||||
const struct stmmac_ops dwxlgmac2_ops = {
|
||||
.core_init = dwxgmac2_core_init,
|
||||
.phylink_get_caps = xgmac_phylink_get_caps,
|
||||
.set_mac = dwxgmac2_set_mac,
|
||||
.rx_ipc = dwxgmac2_rx_ipc,
|
||||
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
|
||||
|
@ -1661,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
|
|||
if (mac->multicast_filter_bins)
|
||||
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
|
||||
|
||||
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
|
||||
MAC_1000FD | MAC_2500FD | MAC_5000FD |
|
||||
MAC_10000FD;
|
||||
mac->link.duplex = 0;
|
||||
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
|
||||
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
|
||||
|
@ -1698,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
|
|||
if (mac->multicast_filter_bins)
|
||||
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
|
||||
|
||||
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
|
||||
MAC_1000FD | MAC_2500FD | MAC_5000FD |
|
||||
MAC_10000FD | MAC_25000FD |
|
||||
MAC_40000FD | MAC_50000FD |
|
||||
MAC_100000FD;
|
||||
mac->link.duplex = 0;
|
||||
mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
|
||||
mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
|
||||
|
|
|
@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void stmmac_set_half_duplex(struct stmmac_priv *priv)
|
||||
{
|
||||
/* Half-Duplex can only work with single tx queue */
|
||||
if (priv->plat->tx_queues_to_use > 1)
|
||||
priv->phylink_config.mac_capabilities &=
|
||||
~(MAC_10HD | MAC_100HD | MAC_1000HD);
|
||||
else
|
||||
priv->phylink_config.mac_capabilities |=
|
||||
(MAC_10HD | MAC_100HD | MAC_1000HD);
|
||||
}
|
||||
|
||||
static int stmmac_phy_setup(struct stmmac_priv *priv)
|
||||
{
|
||||
struct stmmac_mdio_bus_data *mdio_bus_data;
|
||||
|
@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
|
|||
xpcs_get_interfaces(priv->hw->xpcs,
|
||||
priv->phylink_config.supported_interfaces);
|
||||
|
||||
priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
|
||||
MAC_10FD | MAC_100FD |
|
||||
MAC_1000FD;
|
||||
|
||||
stmmac_set_half_duplex(priv);
|
||||
|
||||
/* Get the MAC specific capabilities */
|
||||
stmmac_mac_phylink_get_caps(priv);
|
||||
|
||||
priv->phylink_config.mac_capabilities = priv->hw->link.caps;
|
||||
|
||||
max_speed = priv->plat->max_speed;
|
||||
if (max_speed)
|
||||
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
|
||||
|
@ -7342,6 +7327,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
|
|||
{
|
||||
struct stmmac_priv *priv = netdev_priv(dev);
|
||||
int ret = 0, i;
|
||||
int max_speed;
|
||||
|
||||
if (netif_running(dev))
|
||||
stmmac_release(dev);
|
||||
|
@ -7355,7 +7341,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
|
|||
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
|
||||
rx_cnt);
|
||||
|
||||
stmmac_set_half_duplex(priv);
|
||||
stmmac_mac_phylink_get_caps(priv);
|
||||
|
||||
priv->phylink_config.mac_capabilities = priv->hw->link.caps;
|
||||
|
||||
max_speed = priv->plat->max_speed;
|
||||
if (max_speed)
|
||||
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
|
||||
|
||||
stmmac_napi_add(dev);
|
||||
|
||||
if (netif_running(dev))
|
||||
|
|
|
@ -2793,6 +2793,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
|
|||
|
||||
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
|
||||
{
|
||||
struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
|
||||
struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
|
||||
struct device *dev = common->dev;
|
||||
struct am65_cpsw_port *port;
|
||||
int ret = 0, i;
|
||||
|
@ -2805,6 +2807,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* The DMA Channels are not guaranteed to be in a clean state.
|
||||
* Reset and disable them to ensure that they are back to the
|
||||
* clean state and ready to be used.
|
||||
*/
|
||||
for (i = 0; i < common->tx_ch_num; i++) {
|
||||
k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
|
||||
am65_cpsw_nuss_tx_cleanup);
|
||||
k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
|
||||
}
|
||||
|
||||
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
|
||||
k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
|
||||
am65_cpsw_nuss_rx_cleanup, !!i);
|
||||
|
||||
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
|
||||
|
||||
ret = am65_cpsw_nuss_register_devlink(common);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
|
|
@ -2125,14 +2125,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|||
tun_is_little_endian(tun), true,
|
||||
vlan_hlen)) {
|
||||
struct skb_shared_info *sinfo = skb_shinfo(skb);
|
||||
pr_err("unexpected GSO type: "
|
||||
"0x%x, gso_size %d, hdr_len %d\n",
|
||||
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
|
||||
tun16_to_cpu(tun, gso.hdr_len));
|
||||
print_hex_dump(KERN_ERR, "tun: ",
|
||||
DUMP_PREFIX_NONE,
|
||||
16, 1, skb->head,
|
||||
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
|
||||
|
||||
if (net_ratelimit()) {
|
||||
netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
|
||||
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
|
||||
tun16_to_cpu(tun, gso.hdr_len));
|
||||
print_hex_dump(KERN_ERR, "tun: ",
|
||||
DUMP_PREFIX_NONE,
|
||||
16, 1, skb->head,
|
||||
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
|
||||
}
|
||||
WARN_ON_ONCE(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -1317,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
|
||||
netif_set_tso_max_size(dev->net, 16384);
|
||||
|
||||
ax88179_reset(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1695,7 +1697,6 @@ static const struct driver_info ax88179_info = {
|
|||
.unbind = ax88179_unbind,
|
||||
.status = ax88179_status,
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
|
@ -1708,7 +1709,6 @@ static const struct driver_info ax88178a_info = {
|
|||
.unbind = ax88179_unbind,
|
||||
.status = ax88179_status,
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
|
|
|
@@ -1431,6 +1431,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
{QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */

/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
}
},
{
.ident = "Framework Laptop 13 (Phoenix)",
.driver_data = &quirk_spurious_8042,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
}
},
{}
};
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
tee-if.o spc.o
tee-if.o spc.o pmf-quirks.o
@@ -343,7 +343,10 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
if (err)
return err;

pdev->supported_func = output.supported_functions;
/* only set if not already set by a quirk */
if (!pdev->supported_func)
pdev->supported_func = output.supported_functions;

dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
output.supported_functions, output.notification_mask, output.version);

@@ -437,7 +440,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, apmf_walk_resources, pmf_dev);
if (ACPI_FAILURE(status)) {
dev_err(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
dev_dbg(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
return -EINVAL;
}
@@ -445,6 +445,7 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);

amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
drivers/platform/x86/amd/pmf/pmf-quirks.c (new file, 51 lines)

@@ -0,0 +1,51 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Platform Management Framework Driver Quirks
 *
 * Copyright (c) 2024, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Mario Limonciello <mario.limonciello@amd.com>
 */

#include <linux/dmi.h>

#include "pmf.h"

struct quirk_entry {
	u32 supported_func;
};

static struct quirk_entry quirk_no_sps_bug = {
	.supported_func = 0x4003,
};

static const struct dmi_system_id fwbug_list[] = {
	{
		.ident = "ROG Zephyrus G14",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "GA403UV"),
		},
		.driver_data = &quirk_no_sps_bug,
	},
	{}
};

void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
{
	const struct dmi_system_id *dmi_id;
	struct quirk_entry *quirks;

	dmi_id = dmi_first_match(fwbug_list);
	if (!dmi_id)
		return;

	quirks = dmi_id->driver_data;
	if (quirks->supported_func) {
		dev->supported_func = quirks->supported_func;
		pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
			dmi_id->ident);
	}
}
@@ -720,4 +720,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);

/* Quirk infrastructure */
void amd_pmf_quirks_init(struct amd_pmf_dev *dev);

#endif /* PMF_H */
@@ -719,6 +719,7 @@ static struct miscdevice isst_if_char_driver = {
};

static const struct x86_cpu_id hpm_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
@@ -29,7 +29,7 @@
#include "uncore-frequency-common.h"

#define UNCORE_MAJOR_VERSION 0
#define UNCORE_MINOR_VERSION 1
#define UNCORE_MINOR_VERSION 2
#define UNCORE_HEADER_INDEX 0
#define UNCORE_FABRIC_CLUSTER_OFFSET 8

@@ -329,7 +329,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
goto remove_clusters;
}

if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION)
if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->ufs_header_ver));
@@ -172,7 +172,6 @@ struct pwm_chip *dwc_pwm_alloc(struct device *dev)
dwc->clk_ns = 10;
chip->ops = &dwc_pwm_ops;

dev_set_drvdata(dev, chip);
return chip;
}
EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
@ -31,26 +31,34 @@ static const struct dwc_pwm_info ehl_pwm_info = {
|
|||
.size = 0x1000,
|
||||
};
|
||||
|
||||
static int dwc_pwm_init_one(struct device *dev, void __iomem *base, unsigned int offset)
|
||||
static int dwc_pwm_init_one(struct device *dev, struct dwc_pwm_drvdata *ddata, unsigned int idx)
|
||||
{
|
||||
struct pwm_chip *chip;
|
||||
struct dwc_pwm *dwc;
|
||||
int ret;
|
||||
|
||||
chip = dwc_pwm_alloc(dev);
|
||||
if (IS_ERR(chip))
|
||||
return PTR_ERR(chip);
|
||||
|
||||
dwc = to_dwc_pwm(chip);
|
||||
dwc->base = base + offset;
|
||||
dwc->base = ddata->io_base + (ddata->info->size * idx);
|
||||
|
||||
return devm_pwmchip_add(dev, chip);
|
||||
ret = devm_pwmchip_add(dev, chip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ddata->chips[idx] = chip;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
|
||||
{
|
||||
const struct dwc_pwm_info *info;
|
||||
struct device *dev = &pci->dev;
|
||||
int i, ret;
|
||||
struct dwc_pwm_drvdata *ddata;
|
||||
unsigned int idx;
|
||||
int ret;
|
||||
|
||||
ret = pcim_enable_device(pci);
|
||||
if (ret)
|
||||
|
@ -63,17 +71,25 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
|
|||
return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
|
||||
|
||||
info = (const struct dwc_pwm_info *)id->driver_data;
|
||||
ddata = devm_kzalloc(dev, struct_size(ddata, chips, info->nr), GFP_KERNEL);
|
||||
if (!ddata)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < info->nr; i++) {
|
||||
/*
|
||||
* No need to check for pcim_iomap_table() failure,
|
||||
* pcim_iomap_regions() already does it for us.
|
||||
*/
|
||||
ret = dwc_pwm_init_one(dev, pcim_iomap_table(pci)[0], i * info->size);
|
||||
/*
|
||||
* No need to check for pcim_iomap_table() failure,
|
||||
* pcim_iomap_regions() already does it for us.
|
||||
*/
|
||||
ddata->io_base = pcim_iomap_table(pci)[0];
|
||||
ddata->info = info;
|
||||
|
||||
for (idx = 0; idx < ddata->info->nr; idx++) {
|
||||
ret = dwc_pwm_init_one(dev, ddata, idx);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_set_drvdata(dev, ddata);
|
||||
|
||||
pm_runtime_put(dev);
|
||||
pm_runtime_allow(dev);
|
||||
|
||||
|
@ -88,19 +104,24 @@ static void dwc_pwm_remove(struct pci_dev *pci)
|
|||
|
||||
static int dwc_pwm_suspend(struct device *dev)
|
||||
{
|
||||
struct pwm_chip *chip = dev_get_drvdata(dev);
|
||||
struct dwc_pwm *dwc = to_dwc_pwm(chip);
|
||||
int i;
|
||||
struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
|
||||
unsigned int idx;
|
||||
|
||||
for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
|
||||
if (chip->pwms[i].state.enabled) {
|
||||
dev_err(dev, "PWM %u in use by consumer (%s)\n",
|
||||
i, chip->pwms[i].label);
|
||||
return -EBUSY;
|
||||
for (idx = 0; idx < ddata->info->nr; idx++) {
|
||||
struct pwm_chip *chip = ddata->chips[idx];
|
||||
struct dwc_pwm *dwc = to_dwc_pwm(chip);
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
|
||||
if (chip->pwms[i].state.enabled) {
|
||||
dev_err(dev, "PWM %u in use by consumer (%s)\n",
|
||||
i, chip->pwms[i].label);
|
||||
return -EBUSY;
|
||||
}
|
||||
dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
|
||||
dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
|
||||
dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
|
||||
}
|
||||
dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
|
||||
dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
|
||||
dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -108,14 +129,19 @@ static int dwc_pwm_suspend(struct device *dev)
|
|||
|
||||
static int dwc_pwm_resume(struct device *dev)
|
||||
{
|
||||
struct pwm_chip *chip = dev_get_drvdata(dev);
|
||||
struct dwc_pwm *dwc = to_dwc_pwm(chip);
|
||||
int i;
|
||||
struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
|
||||
unsigned int idx;
|
||||
|
||||
for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
|
||||
dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
|
||||
dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
|
||||
dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
|
||||
for (idx = 0; idx < ddata->info->nr; idx++) {
|
||||
struct pwm_chip *chip = ddata->chips[idx];
|
||||
struct dwc_pwm *dwc = to_dwc_pwm(chip);
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
|
||||
dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
|
||||
dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
|
||||
dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -38,6 +38,12 @@ struct dwc_pwm_info {
|
|||
unsigned int size;
|
||||
};
|
||||
|
||||
struct dwc_pwm_drvdata {
|
||||
const struct dwc_pwm_info *info;
|
||||
void __iomem *io_base;
|
||||
struct pwm_chip *chips[];
|
||||
};
|
||||
|
||||
struct dwc_pwm_ctx {
|
||||
u32 cnt;
|
||||
u32 cnt2;
|
||||
|
|
|
@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
|
|||
|
||||
spin_lock_irq(cdev->ccwlock);
|
||||
ret = ccw_device_online(cdev);
|
||||
spin_unlock_irq(cdev->ccwlock);
|
||||
if (ret == 0)
|
||||
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
|
||||
else {
|
||||
if (ret) {
|
||||
spin_unlock_irq(cdev->ccwlock);
|
||||
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
|
||||
"device 0.%x.%04x\n",
|
||||
ret, cdev->private->dev_id.ssid,
|
||||
|
@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
|
|||
put_device(&cdev->dev);
|
||||
return ret;
|
||||
}
|
||||
spin_lock_irq(cdev->ccwlock);
|
||||
/* Wait until a final state is reached */
|
||||
while (!dev_fsm_final_state(cdev)) {
|
||||
spin_unlock_irq(cdev->ccwlock);
|
||||
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
|
||||
spin_lock_irq(cdev->ccwlock);
|
||||
}
|
||||
/* Check if online processing was successful */
|
||||
if ((cdev->private->state != DEV_STATE_ONLINE) &&
|
||||
(cdev->private->state != DEV_STATE_W4SENSE)) {
|
||||
|
|
|
@ -504,6 +504,11 @@ void ccw_device_verify_done(struct ccw_device *cdev, int err)
|
|||
ccw_device_done(cdev, DEV_STATE_ONLINE);
|
||||
/* Deliver fake irb to device driver, if needed. */
|
||||
if (cdev->private->flags.fake_irb) {
|
||||
CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n",
|
||||
cdev->private->dev_id.ssid,
|
||||
cdev->private->dev_id.devno,
|
||||
cdev->private->intparm,
|
||||
cdev->private->flags.fake_irb);
|
||||
create_fake_irb(&cdev->private->dma_area->irb,
|
||||
cdev->private->flags.fake_irb);
|
||||
cdev->private->flags.fake_irb = 0;
|
||||
|
|
|
@ -208,6 +208,10 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
|
|||
if (!cdev->private->flags.fake_irb) {
|
||||
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
|
||||
cdev->private->intparm = intparm;
|
||||
CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
|
||||
cdev->private->dev_id.ssid,
|
||||
cdev->private->dev_id.devno, intparm,
|
||||
cdev->private->flags.fake_irb);
|
||||
return 0;
|
||||
} else
|
||||
/* There's already a fake I/O around. */
|
||||
|
@ -551,6 +555,10 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
|
|||
if (!cdev->private->flags.fake_irb) {
|
||||
cdev->private->flags.fake_irb = FAKE_TM_IRB;
|
||||
cdev->private->intparm = intparm;
|
||||
CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
|
||||
cdev->private->dev_id.ssid,
|
||||
cdev->private->dev_id.devno, intparm,
|
||||
cdev->private->flags.fake_irb);
|
||||
return 0;
|
||||
} else
|
||||
/* There's already a fake I/O around. */
|
||||
|
|
|
@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
|
|||
lgr_info_log();
|
||||
}
|
||||
|
||||
static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
|
||||
int dstat)
|
||||
static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
|
||||
int dstat, int dcc)
|
||||
{
|
||||
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
|
||||
|
||||
|
@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
|
|||
goto error;
|
||||
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
|
||||
goto error;
|
||||
if (dcc == 1)
|
||||
return -EAGAIN;
|
||||
if (!(dstat & DEV_STAT_DEV_END))
|
||||
goto error;
|
||||
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
|
||||
return;
|
||||
return 0;
|
||||
|
||||
error:
|
||||
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
|
||||
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
|
||||
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* qdio interrupt handler */
|
||||
|
@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
|
|||
{
|
||||
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
|
||||
struct subchannel_id schid;
|
||||
int cstat, dstat;
|
||||
int cstat, dstat, rc, dcc;
|
||||
|
||||
if (!intparm || !irq_ptr) {
|
||||
ccw_device_get_schid(cdev, &schid);
|
||||
|
@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
|
|||
qdio_irq_check_sense(irq_ptr, irb);
|
||||
cstat = irb->scsw.cmd.cstat;
|
||||
dstat = irb->scsw.cmd.dstat;
|
||||
dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
|
||||
rc = 0;
|
||||
|
||||
switch (irq_ptr->state) {
|
||||
case QDIO_IRQ_STATE_INACTIVE:
|
||||
qdio_establish_handle_irq(irq_ptr, cstat, dstat);
|
||||
rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
|
||||
break;
|
||||
case QDIO_IRQ_STATE_CLEANUP:
|
||||
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
|
||||
|
@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
|
|||
if (cstat || dstat)
|
||||
qdio_handle_activate_check(irq_ptr, intparm, cstat,
|
||||
dstat);
|
||||
else if (dcc == 1)
|
||||
rc = -EAGAIN;
|
||||
break;
|
||||
case QDIO_IRQ_STATE_STOPPED:
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
}
|
||||
|
||||
if (rc == -EAGAIN) {
|
||||
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
|
||||
rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
|
||||
if (!rc)
|
||||
return;
|
||||
DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
|
||||
DBF_ERROR("rc:%4x", rc);
|
||||
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
|
||||
}
|
||||
|
||||
wake_up(&cdev->private->wait_q);
|
||||
}
|
||||
|
||||
|
|
|
@ -292,13 +292,16 @@ static int ism_read_local_gid(struct ism_dev *ism)
|
|||
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
|
||||
{
|
||||
clear_bit(dmb->sba_idx, ism->sba_bitmap);
|
||||
dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
|
||||
dmb->cpu_addr, dmb->dma_addr);
|
||||
dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
|
||||
DMA_FROM_DEVICE);
|
||||
folio_put(virt_to_folio(dmb->cpu_addr));
|
||||
}
|
||||
|
||||
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
|
||||
{
|
||||
struct folio *folio;
|
||||
unsigned long bit;
|
||||
int rc;
|
||||
|
||||
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
|
||||
return -EINVAL;
|
||||
|
@ -315,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
|
|||
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
|
||||
return -EINVAL;
|
||||
|
||||
dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
|
||||
&dmb->dma_addr,
|
||||
GFP_KERNEL | __GFP_NOWARN |
|
||||
__GFP_NOMEMALLOC | __GFP_NORETRY);
|
||||
if (!dmb->cpu_addr)
|
||||
clear_bit(dmb->sba_idx, ism->sba_bitmap);
|
||||
folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
|
||||
__GFP_NORETRY, get_order(dmb->dmb_len));
|
||||
|
||||
return dmb->cpu_addr ? 0 : -ENOMEM;
|
||||
if (!folio) {
|
||||
rc = -ENOMEM;
|
||||
goto out_bit;
|
||||
}
|
||||
|
||||
dmb->cpu_addr = folio_address(folio);
|
||||
dmb->dma_addr = dma_map_page(&ism->pdev->dev,
|
||||
virt_to_page(dmb->cpu_addr), 0,
|
||||
dmb->dmb_len, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
|
||||
rc = -ENOMEM;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
kfree(dmb->cpu_addr);
|
||||
out_bit:
|
||||
clear_bit(dmb->sba_idx, ism->sba_bitmap);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
|
||||
|
|
|
@@ -635,10 +635,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->q->disk);

if (!blk_rq_is_passthrough(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
cmd->flags &= ~SCMD_INITIALIZED;
}
WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
!(cmd->flags & SCMD_INITIALIZED));
cmd->flags = 0;

/*
* Calling rcu_barrier() is not necessary here because the
@@ -616,6 +616,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tze->trip_stats[trip_id].timestamp = now;
tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
tze->trip_stats[trip_id].count++;
tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
(temperature - tze->trip_stats[trip_id].avg) /
tze->trip_stats[trip_id].count;
@ -47,7 +47,7 @@ enum {
|
|||
TSTBUS_MAX,
|
||||
};
|
||||
|
||||
#define QCOM_UFS_MAX_GEAR 4
|
||||
#define QCOM_UFS_MAX_GEAR 5
|
||||
#define QCOM_UFS_MAX_LANE 2
|
||||
|
||||
enum {
|
||||
|
@ -67,26 +67,32 @@ static const struct __ufs_qcom_bw_table {
|
|||
[MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
|
||||
[MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
|
||||
[MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
|
||||
[MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 },
|
||||
[MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
|
||||
[MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
|
||||
[MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
|
||||
[MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
|
||||
[MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 },
|
||||
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
|
||||
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
|
||||
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
|
||||
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
|
||||
[MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
|
||||
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
|
||||
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
|
||||
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
|
||||
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
|
||||
[MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
|
||||
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
|
||||
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
|
||||
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
|
||||
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
|
||||
[MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
|
||||
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
|
||||
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
|
||||
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
|
||||
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
|
||||
[MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
|
||||
[MODE_MAX][0][0] = { 7643136, 307200 },
|
||||
};
|
||||
|
||||
|
|
|
@@ -68,7 +68,6 @@ static int vmgenid_add(struct acpi_device *device)
static void vmgenid_notify(struct acpi_device *device, u32 event)
{
struct vmgenid_state *state = acpi_driver_data(device);
char *envp[] = { "NEW_VMGENID=1", NULL };
u8 old_id[VMGENID_SIZE];

memcpy(old_id, state->this_id, sizeof(old_id));

@@ -76,7 +75,6 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
if (!memcmp(old_id, state->this_id, sizeof(old_id)))
return;
add_vmfork_randomness(state->this_id, sizeof(state->this_id));
kobject_uevent_env(&device->dev.kobj, KOBJ_CHANGE, envp);
}

static const struct acpi_device_id vmgenid_ids[] = {
@ -49,9 +49,6 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
|
|||
static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags,
|
||||
unsigned int s_cache, unsigned int f_flags)
|
||||
{
|
||||
if (fid->qid.type != P9_QTFILE)
|
||||
return;
|
||||
|
||||
if ((!s_cache) ||
|
||||
((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) ||
|
||||
(s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) {
|
||||
|
|
|
@ -520,6 +520,7 @@ const struct file_operations v9fs_file_operations = {
|
|||
.splice_read = v9fs_file_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.fsync = v9fs_file_fsync,
|
||||
.setlease = simple_nosetlease,
|
||||
};
|
||||
|
||||
const struct file_operations v9fs_file_operations_dotl = {
|
||||
|
@ -534,4 +535,5 @@ const struct file_operations v9fs_file_operations_dotl = {
|
|||
.splice_read = v9fs_file_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.fsync = v9fs_file_fsync_dotl,
|
||||
.setlease = simple_nosetlease,
|
||||
};
|
||||
|
|
|
@ -83,7 +83,7 @@ static int p9mode2perm(struct v9fs_session_info *v9ses,
|
|||
int res;
|
||||
int mode = stat->mode;
|
||||
|
||||
res = mode & S_IALLUGO;
|
||||
res = mode & 0777; /* S_IRWXUGO */
|
||||
if (v9fs_proto_dotu(v9ses)) {
|
||||
if ((mode & P9_DMSETUID) == P9_DMSETUID)
|
||||
res |= S_ISUID;
|
||||
|
@ -178,6 +178,9 @@ int v9fs_uflags2omode(int uflags, int extended)
|
|||
break;
|
||||
}
|
||||
|
||||
if (uflags & O_TRUNC)
|
||||
ret |= P9_OTRUNC;
|
||||
|
||||
if (extended) {
|
||||
if (uflags & O_EXCL)
|
||||
ret |= P9_OEXCL;
|
||||
|
@ -1061,8 +1064,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
|
|||
struct v9fs_session_info *v9ses = sb->s_fs_info;
|
||||
struct v9fs_inode *v9inode = V9FS_I(inode);
|
||||
|
||||
set_nlink(inode, 1);
|
||||
|
||||
inode_set_atime(inode, stat->atime, 0);
|
||||
inode_set_mtime(inode, stat->mtime, 0);
|
||||
inode_set_ctime(inode, stat->mtime, 0);
|
||||
|
|
|
@ -244,6 +244,21 @@ static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
return res;
|
||||
}
|
||||
|
||||
static int v9fs_drop_inode(struct inode *inode)
|
||||
{
|
||||
struct v9fs_session_info *v9ses;
|
||||
|
||||
v9ses = v9fs_inode2v9ses(inode);
|
||||
if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
|
||||
return generic_drop_inode(inode);
|
||||
/*
|
||||
* in case of non cached mode always drop the
|
||||
* inode because we want the inode attribute
|
||||
* to always match that on the server.
|
||||
*/
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int v9fs_write_inode(struct inode *inode,
|
||||
struct writeback_control *wbc)
|
||||
{
|
||||
|
@ -268,6 +283,7 @@ static const struct super_operations v9fs_super_ops = {
|
|||
.alloc_inode = v9fs_alloc_inode,
|
||||
.free_inode = v9fs_free_inode,
|
||||
.statfs = simple_statfs,
|
||||
.drop_inode = v9fs_drop_inode,
|
||||
.evict_inode = v9fs_evict_inode,
|
||||
.show_options = v9fs_show_options,
|
||||
.umount_begin = v9fs_umount_begin,
|
||||
|
@ -278,6 +294,7 @@ static const struct super_operations v9fs_super_ops_dotl = {
|
|||
.alloc_inode = v9fs_alloc_inode,
|
||||
.free_inode = v9fs_free_inode,
|
||||
.statfs = v9fs_statfs,
|
||||
.drop_inode = v9fs_drop_inode,
|
||||
.evict_inode = v9fs_evict_inode,
|
||||
.show_options = v9fs_show_options,
|
||||
.umount_begin = v9fs_umount_begin,
|
||||
|
|
|
@ -3464,6 +3464,14 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
|
|||
if (root_id != BTRFS_TREE_LOG_OBJECTID) {
|
||||
struct btrfs_ref generic_ref = { 0 };
|
||||
|
||||
/*
|
||||
* Assert that the extent buffer is not cleared due to
|
||||
* EXTENT_BUFFER_ZONED_ZEROOUT. Please refer
|
||||
* btrfs_clear_buffer_dirty() and btree_csum_one_bio() for
|
||||
* detail.
|
||||
*/
|
||||
ASSERT(btrfs_header_bytenr(buf) != 0);
|
||||
|
||||
btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
|
||||
buf->start, buf->len, parent,
|
||||
btrfs_header_owner(buf));
|
||||
|
|
|
@ -681,31 +681,21 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
|
|||
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
|
||||
gfp_t extra_gfp)
|
||||
{
|
||||
const gfp_t gfp = GFP_NOFS | extra_gfp;
|
||||
unsigned int allocated;
|
||||
|
||||
for (allocated = 0; allocated < nr_pages;) {
|
||||
unsigned int last = allocated;
|
||||
|
||||
allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
|
||||
nr_pages, page_array);
|
||||
|
||||
if (allocated == nr_pages)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* During this iteration, no page could be allocated, even
|
||||
* though alloc_pages_bulk_array() falls back to alloc_page()
|
||||
* if it could not bulk-allocate. So we must be out of memory.
|
||||
*/
|
||||
if (allocated == last) {
|
||||
allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
|
||||
if (unlikely(allocated == last)) {
|
||||
/* No progress, fail and do cleanup. */
|
||||
for (int i = 0; i < allocated; i++) {
|
||||
__free_page(page_array[i]);
|
||||
page_array[i] = NULL;
|
||||
}
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memalloc_retry_wait(GFP_NOFS);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -4154,7 +4144,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
|
|||
* The actual zeroout of the buffer will happen later in
|
||||
* btree_csum_one_bio.
|
||||
*/
|
||||
if (btrfs_is_zoned(fs_info)) {
|
||||
if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
|
||||
set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
|
||||
return;
|
||||
}
|
||||
|
@ -4193,6 +4183,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
|
|||
num_folios = num_extent_folios(eb);
|
||||
WARN_ON(atomic_read(&eb->refs) == 0);
|
||||
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
|
||||
WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
|
||||
|
||||
if (!was_dirty) {
|
||||
bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
|
||||
|
|
Some files were not shown because too many files have changed in this diff.