/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of the given GPA
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
			  ~GENMASK(3, 0);
	return gpa - gttmmio_gpa;
}
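
/*
 * The helpers below classify an offset within the vGPU's BAR0: the first
 * device_info.mmio_size bytes hold the register file, and the GGTT begins
 * at device_info.gtt_start_offset.  Illustrative example: with BAR0
 * programmed to 0xf0000000, a guest access to GPA 0xf0002000 translates
 * to MMIO offset 0x2000.
 */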
#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

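	/*
	 * If any guest page is currently write-protected (e.g. a guest
	 * page table shadowed by GVT), a read that hits such a page is
	 * served straight from guest memory through the hypervisor
	 * instead of being decoded as a register access.
	 */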
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
					p_data, bytes);
			if (ret) {
				gvt_err("vgpu%d: guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}
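
	/*
	 * An access that lands in neither the register range nor the GTT
	 * range is unexpected; fall back to a plain guest-memory read at
	 * the faulting address rather than failing the access outright.
	 */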
	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;
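
	/*
	 * Registers without a tracked mmio_info fall through to the
	 * default read below.  Such reads are logged until the guest
	 * touches 0x206c, which usually means a full gfx reset is in
	 * progress; from then on the warnings are suppressed to avoid
	 * flooding the log.
	 */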
	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack) {
		gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
				vgpu->id, offset, bytes, *(u32 *)p_data);

		if (offset == 0x206c) {
			gvt_err("------------------------------------------\n");
			gvt_err("vgpu%d: likely triggers a gfx reset\n",
					vgpu->id);
			gvt_err("------------------------------------------\n");
			vgpu->mmio.disable_warn_untrack = true;
		}
	}

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}
		ret = mmio->read(vgpu, offset, p_data, bytes);
	} else
		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

	if (ret)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
			vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
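
/*
 * Usage sketch (illustrative only, not part of this file): a hypervisor
 * backend is expected to forward trapped BAR0 accesses to the two
 * emulation entry points in this file.  The trap handler below and its
 * signature are hypothetical.
 *
 *	static int sample_mmio_trap(struct intel_vgpu *vgpu, u64 gpa,
 *				    void *val, unsigned int len, bool is_write)
 *	{
 *		if (is_write)
 *			return intel_vgpu_emulate_mmio_write(vgpu, gpa,
 *							     val, len);
 *		return intel_vgpu_emulate_mmio_read(vgpu, gpa, val, len);
 *	}
 */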

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	u32 old_vreg = 0, old_sreg = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

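	/*
	 * A write that hits a write-protected guest page is routed to the
	 * page's registered handler (e.g. to keep a shadow page table in
	 * sync with the guest's update) instead of being decoded as a
	 * register access.
	 */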
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret) {
				gvt_err("vgpu%d: guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack)
		gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
				vgpu->id, offset, bytes, *(u32 *)p_data);

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		u64 ro_mask = mmio->ro_mask;

		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}

		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (!ro_mask)
			ret = mmio->write(vgpu, offset, p_data, bytes);
		else {
			/* Protect RO bits like HW */
			u64 data = 0;

			/* all register bits are RO. */
			if (ro_mask == ~(u64)0) {
				gvt_err("vgpu%d: try to write RO reg %x\n",
						vgpu->id, offset);
				ret = 0;
				goto out;
			}
			/* keep the RO bits in the virtual register */
			memcpy(&data, p_data, bytes);
			data &= ~mmio->ro_mask;
			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
			ret = mmio->write(vgpu, offset, &data, bytes);
		}
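
		/*
		 * Worked example for the RO merge above (illustrative
		 * values): with ro_mask 0xffff0000, a guest write of
		 * 0x12345678 to a vreg holding 0xaaaa0000 reaches the
		 * handler as 0xaaaa5678; only the writable low bits come
		 * from the guest.
		 */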

		/* higher 16bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
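
		/*
		 * Example of the masked-write semantics restored above
		 * (illustrative values): writing 0x00010001 to a mode
		 * control register sets bit 0, writing 0x00010000 clears
		 * it, and any bit whose mask half is 0 keeps its old
		 * value in both vreg and sreg.
		 */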
	} else
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
				bytes);
	if (ret)
		goto err;
out:
	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
			vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}