Skip to content

Commit 9b04368

Browse files
committed
Merge tag 'drm-fixes-2026-01-02' of https://gitlab.freedesktop.org/drm/kernel
Pull drm fixes from Dave Airlie:
 "Happy New Year, jetlagged fixes from me, still pretty quiet, xe is
  most of this, with i915/nouveau/imagination fixes and some shmem
  cleanups.

  shmem:
   - docs and MODULE_LICENSE fix

  xe:
   - Ensure svm device memory is idle before migration completes
   - Fix a SVM debug printout
   - Use READ_ONCE() / WRITE_ONCE() for g2h_fence

  i915:
   - Fix eb_lookup_vmas() failure path

  nouveau:
   - fix prepare_fb warnings

  imagination:
   - prevent export of protected objects"

* tag 'drm-fixes-2026-01-02' of https://gitlab.freedesktop.org/drm/kernel:
  drm/i915/gem: Zero-initialize the eb.vma array in i915_gem_do_execbuffer
  drm/xe/guc: READ/WRITE_ONCE g2h_fence->done
  drm/pagemap, drm/xe: Ensure that the devmem allocation is idle before use
  drm/xe/svm: Fix a debug printout
  drm/gem-shmem: Fix the MODULE_LICENSE() string
  drm/gem-shmem: Fix typos in documentation
  drm/nouveau/dispnv50: Don't call drm_atomic_get_crtc_state() in prepare_fb
  drm/imagination: Disallow exporting of PM/FW protected objects
2 parents e3a97ab + 7be19f9 commit 9b04368

11 files changed

Lines changed: 144 additions & 54 deletions

File tree

drivers/gpu/drm/drm_gem_shmem_helper.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,8 @@ static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_obj
9696
/**
9797
* drm_gem_shmem_init - Initialize an allocated object.
9898
* @dev: DRM device
99-
* @obj: The allocated shmem GEM object.
99+
* @shmem: The allocated shmem GEM object.
100+
* @size: Buffer size in bytes
100101
*
101102
* Returns:
102103
* 0 on success, or a negative error code on failure.
@@ -895,4 +896,4 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
895896

896897
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
897898
MODULE_IMPORT_NS("DMA_BUF");
898-
MODULE_LICENSE("GPL v2");
899+
MODULE_LICENSE("GPL");

drivers/gpu/drm/drm_pagemap.c

Lines changed: 13 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
* Copyright © 2024-2025 Intel Corporation
44
*/
55

6+
#include <linux/dma-fence.h>
67
#include <linux/dma-mapping.h>
78
#include <linux/migrate.h>
89
#include <linux/pagemap.h>
@@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
408409
drm_pagemap_get_devmem_page(page, zdd);
409410
}
410411

411-
err = ops->copy_to_devmem(pages, pagemap_addr, npages);
412+
err = ops->copy_to_devmem(pages, pagemap_addr, npages,
413+
devmem_allocation->pre_migrate_fence);
412414
if (err)
413415
goto err_finalize;
414416

417+
dma_fence_put(devmem_allocation->pre_migrate_fence);
418+
devmem_allocation->pre_migrate_fence = NULL;
419+
415420
/* Upon success bind devmem allocation to range and zdd */
416421
devmem_allocation->timeslice_expiration = get_jiffies_64() +
417422
msecs_to_jiffies(timeslice_ms);
@@ -596,7 +601,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
596601
for (i = 0; i < npages; ++i)
597602
pages[i] = migrate_pfn_to_page(src[i]);
598603

599-
err = ops->copy_to_ram(pages, pagemap_addr, npages);
604+
err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
600605
if (err)
601606
goto err_finalize;
602607

@@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
732737
for (i = 0; i < npages; ++i)
733738
pages[i] = migrate_pfn_to_page(migrate.src[i]);
734739

735-
err = ops->copy_to_ram(pages, pagemap_addr, npages);
740+
err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
736741
if (err)
737742
goto err_finalize;
738743

@@ -813,18 +818,22 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
813818
* @ops: Pointer to the operations structure for GPU SVM device memory
814819
* @dpagemap: The struct drm_pagemap we're allocating from.
815820
* @size: Size of device memory allocation
821+
* @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
822+
* (May be NULL).
816823
*/
817824
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
818825
struct device *dev, struct mm_struct *mm,
819826
const struct drm_pagemap_devmem_ops *ops,
820-
struct drm_pagemap *dpagemap, size_t size)
827+
struct drm_pagemap *dpagemap, size_t size,
828+
struct dma_fence *pre_migrate_fence)
821829
{
822830
init_completion(&devmem_allocation->detached);
823831
devmem_allocation->dev = dev;
824832
devmem_allocation->mm = mm;
825833
devmem_allocation->ops = ops;
826834
devmem_allocation->dpagemap = dpagemap;
827835
devmem_allocation->size = size;
836+
devmem_allocation->pre_migrate_fence = pre_migrate_fence;
828837
}
829838
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
830839

drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c

Lines changed: 17 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -951,13 +951,13 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
951951
vma = eb_lookup_vma(eb, eb->exec[i].handle);
952952
if (IS_ERR(vma)) {
953953
err = PTR_ERR(vma);
954-
goto err;
954+
return err;
955955
}
956956

957957
err = eb_validate_vma(eb, &eb->exec[i], vma);
958958
if (unlikely(err)) {
959959
i915_vma_put(vma);
960-
goto err;
960+
return err;
961961
}
962962

963963
err = eb_add_vma(eb, &current_batch, i, vma);
@@ -966,30 +966,15 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
966966

967967
if (i915_gem_object_is_userptr(vma->obj)) {
968968
err = i915_gem_object_userptr_submit_init(vma->obj);
969-
if (err) {
970-
if (i + 1 < eb->buffer_count) {
971-
/*
972-
* Execbuffer code expects last vma entry to be NULL,
973-
* since we already initialized this entry,
974-
* set the next value to NULL or we mess up
975-
* cleanup handling.
976-
*/
977-
eb->vma[i + 1].vma = NULL;
978-
}
979-
969+
if (err)
980970
return err;
981-
}
982971

983972
eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
984973
eb->args->flags |= __EXEC_USERPTR_USED;
985974
}
986975
}
987976

988977
return 0;
989-
990-
err:
991-
eb->vma[i].vma = NULL;
992-
return err;
993978
}
994979

995980
static int eb_lock_vmas(struct i915_execbuffer *eb)
@@ -3375,7 +3360,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
33753360

33763361
eb.exec = exec;
33773362
eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
3378-
eb.vma[0].vma = NULL;
3363+
memset(eb.vma, 0, (args->buffer_count + 1) * sizeof(struct eb_vma));
3364+
33793365
eb.batch_pool = NULL;
33803366

33813367
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
@@ -3584,7 +3570,18 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
35843570
if (err)
35853571
return err;
35863572

3587-
/* Allocate extra slots for use by the command parser */
3573+
/*
3574+
* Allocate extra slots for use by the command parser.
3575+
*
3576+
* Note that this allocation handles two different arrays (the
3577+
* exec2_list array, and the eventual eb.vma array introduced in
3578+
* i915_gem_do_execbuffer()), that reside in virtually contiguous
3579+
* memory. Also note that the allocation intentionally doesn't fill the
3580+
* area with zeros, because the exec2_list part doesn't need to be, as
3581+
* it's immediately overwritten by user data a few lines below.
3582+
* However, the eb.vma part is explicitly zeroed later in
3583+
* i915_gem_do_execbuffer().
3584+
*/
35883585
exec2_list = kvmalloc_array(count + 2, eb_element_size(),
35893586
__GFP_NOWARN | GFP_KERNEL);
35903587
if (exec2_list == NULL) {

drivers/gpu/drm/imagination/pvr_gem.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,16 @@ static void pvr_gem_object_free(struct drm_gem_object *obj)
2828
drm_gem_shmem_object_free(obj);
2929
}
3030

31+
static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
32+
{
33+
struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj);
34+
35+
if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT)
36+
return ERR_PTR(-EPERM);
37+
38+
return drm_gem_prime_export(obj, flags);
39+
}
40+
3141
static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
3242
{
3343
struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
@@ -42,6 +52,7 @@ static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *v
4252
static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
4353
.free = pvr_gem_object_free,
4454
.print_info = drm_gem_shmem_object_print_info,
55+
.export = pvr_gem_export,
4556
.pin = drm_gem_shmem_object_pin,
4657
.unpin = drm_gem_shmem_object_unpin,
4758
.get_sg_table = drm_gem_shmem_object_get_sg_table,

drivers/gpu/drm/nouveau/dispnv50/atom.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -152,8 +152,21 @@ static inline struct nv50_head_atom *
152152
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
153153
{
154154
struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
155+
155156
if (IS_ERR(statec))
156157
return (void *)statec;
158+
159+
return nv50_head_atom(statec);
160+
}
161+
162+
static inline struct nv50_head_atom *
163+
nv50_head_atom_get_new(struct drm_atomic_state *state, struct drm_crtc *crtc)
164+
{
165+
struct drm_crtc_state *statec = drm_atomic_get_new_crtc_state(state, crtc);
166+
167+
if (!statec)
168+
return NULL;
169+
157170
return nv50_head_atom(statec);
158171
}
159172

drivers/gpu/drm/nouveau/dispnv50/wndw.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -583,7 +583,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
583583
asyw->image.offset[0] = nvbo->offset;
584584

585585
if (wndw->func->prepare) {
586-
asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
586+
asyh = nv50_head_atom_get_new(asyw->state.state, asyw->state.crtc);
587587
if (IS_ERR(asyh))
588588
return PTR_ERR(asyh);
589589

drivers/gpu/drm/xe/xe_guc_ct.c

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,9 @@ static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
104104
{
105105
g2h_fence->cancel = true;
106106
g2h_fence->fail = true;
107-
g2h_fence->done = true;
107+
108+
/* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
109+
WRITE_ONCE(g2h_fence->done, true);
108110
}
109111

110112
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
@@ -1203,10 +1205,13 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
12031205
return ret;
12041206
}
12051207

1206-
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
1208+
/* READ_ONCEs pairs with WRITE_ONCEs in parse_g2h_response
1209+
* and g2h_fence_cancel.
1210+
*/
1211+
ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
12071212
if (!ret) {
12081213
LNL_FLUSH_WORK(&ct->g2h_worker);
1209-
if (g2h_fence.done) {
1214+
if (READ_ONCE(g2h_fence.done)) {
12101215
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
12111216
g2h_fence.seqno, action[0]);
12121217
ret = 1;
@@ -1454,7 +1459,8 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
14541459

14551460
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
14561461

1457-
g2h_fence->done = true;
1462+
/* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
1463+
WRITE_ONCE(g2h_fence->done, true);
14581464
smp_mb();
14591465

14601466
wake_up_all(&ct->g2h_fence_wq);

drivers/gpu/drm/xe/xe_migrate.c

Lines changed: 20 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2062,6 +2062,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
20622062
unsigned long sram_offset,
20632063
struct drm_pagemap_addr *sram_addr,
20642064
u64 vram_addr,
2065+
struct dma_fence *deps,
20652066
const enum xe_migrate_copy_dir dir)
20662067
{
20672068
struct xe_gt *gt = m->tile->primary_gt;
@@ -2150,6 +2151,14 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
21502151

21512152
xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
21522153

2154+
if (deps && !dma_fence_is_signaled(deps)) {
2155+
dma_fence_get(deps);
2156+
err = drm_sched_job_add_dependency(&job->drm, deps);
2157+
if (err)
2158+
dma_fence_wait(deps, false);
2159+
err = 0;
2160+
}
2161+
21532162
mutex_lock(&m->job_mutex);
21542163
xe_sched_job_arm(job);
21552164
fence = dma_fence_get(&job->drm.s_fence->finished);
@@ -2175,6 +2184,8 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
21752184
* @npages: Number of pages to migrate.
21762185
* @src_addr: Array of DMA information (source of migrate)
21772186
* @dst_addr: Device physical address of VRAM (destination of migrate)
2187+
* @deps: struct dma_fence representing the dependencies that need
2188+
* to be signaled before migration.
21782189
*
21792190
* Copy from an array dma addresses to a VRAM device physical address
21802191
*
@@ -2184,10 +2195,11 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
21842195
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
21852196
unsigned long npages,
21862197
struct drm_pagemap_addr *src_addr,
2187-
u64 dst_addr)
2198+
u64 dst_addr,
2199+
struct dma_fence *deps)
21882200
{
21892201
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
2190-
XE_MIGRATE_COPY_TO_VRAM);
2202+
deps, XE_MIGRATE_COPY_TO_VRAM);
21912203
}
21922204

21932205
/**
@@ -2196,6 +2208,8 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
21962208
* @npages: Number of pages to migrate.
21972209
* @src_addr: Device physical address of VRAM (source of migrate)
21982210
* @dst_addr: Array of DMA information (destination of migrate)
2211+
* @deps: struct dma_fence representing the dependencies that need
2212+
* to be signaled before migration.
21992213
*
22002214
* Copy from a VRAM device physical address to an array dma addresses
22012215
*
@@ -2205,10 +2219,11 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
22052219
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
22062220
unsigned long npages,
22072221
u64 src_addr,
2208-
struct drm_pagemap_addr *dst_addr)
2222+
struct drm_pagemap_addr *dst_addr,
2223+
struct dma_fence *deps)
22092224
{
22102225
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
2211-
XE_MIGRATE_COPY_TO_SRAM);
2226+
deps, XE_MIGRATE_COPY_TO_SRAM);
22122227
}
22132228

22142229
static void xe_migrate_dma_unmap(struct xe_device *xe,
@@ -2384,7 +2399,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
23842399
__fence = xe_migrate_vram(m, current_bytes,
23852400
(unsigned long)buf & ~PAGE_MASK,
23862401
&pagemap_addr[current_page],
2387-
vram_addr, write ?
2402+
vram_addr, NULL, write ?
23882403
XE_MIGRATE_COPY_TO_VRAM :
23892404
XE_MIGRATE_COPY_TO_SRAM);
23902405
if (IS_ERR(__fence)) {

drivers/gpu/drm/xe/xe_migrate.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -116,12 +116,14 @@ int xe_migrate_init(struct xe_migrate *m);
116116
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
117117
unsigned long npages,
118118
struct drm_pagemap_addr *src_addr,
119-
u64 dst_addr);
119+
u64 dst_addr,
120+
struct dma_fence *deps);
120121

121122
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
122123
unsigned long npages,
123124
u64 src_addr,
124-
struct drm_pagemap_addr *dst_addr);
125+
struct drm_pagemap_addr *dst_addr,
126+
struct dma_fence *deps);
125127

126128
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
127129
struct xe_bo *src_bo,

0 commit comments

Comments
 (0)