drivers/gpu/drm/msm/msm_submitqueue.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

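/*
 * Release function for the submitqueue reference count: called once the
 * last reference is dropped, at which point the queue can be freed.
 */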
void msm_submitqueue_destroy(struct kref *kref)
{
        struct msm_gpu_submitqueue *queue = container_of(kref,
                struct msm_gpu_submitqueue, ref);

        kfree(queue);
}

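/*
 * Look up a submitqueue by id on the per-file list. On success an extra
 * reference is taken on the queue; the caller is expected to drop it with
 * msm_submitqueue_put().
 */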
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
                u32 id)
{
        struct msm_gpu_submitqueue *entry;

        if (!ctx)
                return NULL;

        read_lock(&ctx->queuelock);

        list_for_each_entry(entry, &ctx->submitqueues, node) {
                if (entry->id == id) {
                        kref_get(&entry->ref);
                        read_unlock(&ctx->queuelock);

                        return entry;
                }
        }

        read_unlock(&ctx->queuelock);
        return NULL;
}

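/*
 * Called when the file is closed: drop the reference held by the per-file
 * list for every remaining submitqueue so they can be freed.
 */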
void msm_submitqueue_close(struct msm_file_private *ctx)
{
        struct msm_gpu_submitqueue *entry, *tmp;

        if (!ctx)
                return;

        /*
         * No lock needed in close and there won't
         * be any more user ioctls coming our way
         */
        list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
                msm_submitqueue_put(entry);
}

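/*
 * Create a new submitqueue: allocate it, validate the requested priority
 * against the number of rings exposed by the GPU, assign the next per-file
 * queue id and add it to the file's list under the write lock. The new id
 * is returned through @id when the caller passes a non-NULL pointer.
 */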
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
                u32 prio, u32 flags, u32 *id)
{
        struct msm_drm_private *priv = drm->dev_private;
        struct msm_gpu_submitqueue *queue;

        if (!ctx)
                return -ENODEV;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);

        if (!queue)
                return -ENOMEM;

        kref_init(&queue->ref);
        queue->flags = flags;

        if (priv->gpu) {
                if (prio >= priv->gpu->nr_rings) {
                        /* Don't leak the freshly allocated queue on error */
                        kfree(queue);
                        return -EINVAL;
                }

                queue->prio = prio;
        }

        write_lock(&ctx->queuelock);

        queue->id = ctx->queueid++;

        if (id)
                *id = queue->id;

        list_add_tail(&queue->node, &ctx->submitqueues);

        write_unlock(&ctx->queuelock);

        return 0;
}

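/*
 * Per-file init: set up the submitqueue list and its lock, then create the
 * default submitqueue (id 0), which userspace cannot remove.
 */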
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
        struct msm_drm_private *priv = drm->dev_private;
        int default_prio;

        if (!ctx)
                return 0;

        /*
         * Select priority 2 as the "default priority"; if fewer rings are
         * available, clamp to the lowest-priority ring (nr_rings - 1).
         */
        default_prio = priv->gpu ?
                clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;

        INIT_LIST_HEAD(&ctx->submitqueues);

        rwlock_init(&ctx->queuelock);

        return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

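/*
 * Copy the queue's fault counter out to userspace. A zero length acts as a
 * size probe: args->len is set to the expected size and nothing is copied.
 * Otherwise at most sizeof(queue->faults) bytes are written and args->len
 * reports how much was actually copied.
 */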
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
                struct drm_msm_submitqueue_query *args)
{
        size_t size = min_t(size_t, args->len, sizeof(queue->faults));
        int ret;

        /* If a zero length was passed in, return the data size we expect */
        if (!args->len) {
                args->len = sizeof(queue->faults);
                return 0;
        }

        /* Set the length to the actual size of the data */
        args->len = size;

        ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

        return ret ? -EFAULT : 0;
}

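/*
 * Handle a submitqueue query request: look up the queue, dispatch on the
 * requested parameter (currently only MSM_SUBMITQUEUE_PARAM_FAULTS is
 * supported) and drop the reference taken by msm_submitqueue_get().
 */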
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
                struct drm_msm_submitqueue_query *args)
{
        struct msm_gpu_submitqueue *queue;
        int ret = -EINVAL;

        if (args->pad)
                return -EINVAL;

        queue = msm_submitqueue_get(ctx, args->id);
        if (!queue)
                return -ENOENT;

        if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
                ret = msm_submitqueue_query_faults(queue, args);

        msm_submitqueue_put(queue);

        return ret;
}

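/*
 * Remove the submitqueue with the given id from the per-file list and drop
 * the list's reference to it. The default queue (id 0) can't be removed
 * from userspace.
 */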
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
        struct msm_gpu_submitqueue *entry;

        if (!ctx)
                return 0;

        /*
         * id 0 is the "default" queue and can't be destroyed
         * by the user
         */
        if (!id)
                return -ENOENT;

        write_lock(&ctx->queuelock);

        list_for_each_entry(entry, &ctx->submitqueues, node) {
                if (entry->id == id) {
                        list_del(&entry->node);
                        write_unlock(&ctx->queuelock);

                        msm_submitqueue_put(entry);
                        return 0;
                }
        }

        write_unlock(&ctx->queuelock);
        return -ENOENT;
}