diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index dca88f9fdf29a10b574ed5d123eabf4df7ac7a06..d7f2a53589002e6aee3bdcf43bc8132c20415092 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
 #include <linux/dmar.h>
 #include <linux/interrupt.h>
 #include <linux/mm_types.h>
+#include <linux/ioasid.h>
 #include <asm/page.h>
 
 #include "intel-pasid.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
 
-int intel_svm_init(struct intel_iommu *iommu)
-{
-       if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
-                       !cap_fl1gp_support(iommu->cap))
-               return -EINVAL;
-
-       if (cpu_feature_enabled(X86_FEATURE_LA57) &&
-                       !cap_5lp_support(iommu->cap))
-               return -EINVAL;
-
-       return 0;
-}
-
 #define PRQ_ORDER 0
 
 int intel_svm_enable_prq(struct intel_iommu *iommu)
@@ -99,6 +87,33 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
        return 0;
 }
 
+static inline bool intel_svm_capable(struct intel_iommu *iommu)
+{
+       return iommu->flags & VTD_FLAG_SVM_CAPABLE;
+}
+
+void intel_svm_check(struct intel_iommu *iommu)
+{
+       if (!pasid_supported(iommu))
+               return;
+
+       if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
+           !cap_fl1gp_support(iommu->cap)) {
+               pr_err("%s SVM disabled, incompatible 1GB page capability\n",
+                      iommu->name);
+               return;
+       }
+
+       if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+           !cap_5lp_support(iommu->cap)) {
+               pr_err("%s SVM disabled, incompatible paging mode\n",
+                      iommu->name);
+               return;
+       }
+
+       iommu->flags |= VTD_FLAG_SVM_CAPABLE;
+}
+
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
                                unsigned long address, unsigned long pages, int ih)
 {
@@ -207,6 +222,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
 static DEFINE_MUTEX(pasid_mutex);
 static LIST_HEAD(global_svm_list);
 
+#define for_each_svm_dev(sdev, svm, d)                 \
+       list_for_each_entry((sdev), &(svm)->devs, list) \
+               if ((d) != (sdev)->dev) {} else
+
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 {
        struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
@@ -220,6 +239,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
        if (!iommu || dmar_disabled)
                return -EINVAL;
 
+       if (!intel_svm_capable(iommu))
+               return -ENOTSUPP;
+
        if (dev_is_pci(dev)) {
                pasid_max = pci_max_pasids(to_pci_dev(dev));
                if (pasid_max < 0)
@@ -252,15 +274,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                                goto out;
                        }
 
-                       list_for_each_entry(sdev, &svm->devs, list) {
-                               if (dev == sdev->dev) {
-                                       if (sdev->ops != ops) {
-                                               ret = -EBUSY;
-                                               goto out;
-                                       }
-                                       sdev->users++;
-                                       goto success;
+                       /* Find the matching device in svm list */
+                       for_each_svm_dev(sdev, svm, dev) {
+                               if (sdev->ops != ops) {
+                                       ret = -EBUSY;
+                                       goto out;
                                }
+                               sdev->users++;
+                               goto success;
                        }
 
                        break;
@@ -314,16 +335,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                if (pasid_max > intel_pasid_max_id)
                        pasid_max = intel_pasid_max_id;
 
-               /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
-               ret = intel_pasid_alloc_id(svm,
-                                          !!cap_caching_mode(iommu->cap),
-                                          pasid_max - 1, GFP_KERNEL);
-               if (ret < 0) {
+               /* Do not use PASID 0, reserved for RID to PASID */
+               svm->pasid = ioasid_alloc(NULL, PASID_MIN,
+                                         pasid_max - 1, svm);
+               if (svm->pasid == INVALID_IOASID) {
                        kfree(svm);
                        kfree(sdev);
+                       ret = -ENOSPC;
                        goto out;
                }
-               svm->pasid = ret;
                svm->notifier.ops = &intel_mmuops;
                svm->mm = mm;
                svm->flags = flags;
@@ -333,7 +353,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                if (mm) {
                        ret = mmu_notifier_register(&svm->notifier, mm);
                        if (ret) {
-                               intel_pasid_free_id(svm->pasid);
+                               ioasid_free(svm->pasid);
                                kfree(svm);
                                kfree(sdev);
                                goto out;
@@ -344,12 +364,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                ret = intel_pasid_setup_first_level(iommu, dev,
                                mm ? mm->pgd : init_mm.pgd,
                                svm->pasid, FLPT_DEFAULT_DID,
-                               mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+                               (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+                               (cpu_feature_enabled(X86_FEATURE_LA57) ?
+                                PASID_FLAG_FL5LP : 0));
                spin_unlock(&iommu->lock);
                if (ret) {
                        if (mm)
                                mmu_notifier_unregister(&svm->notifier, mm);
-                       intel_pasid_free_id(svm->pasid);
+                       ioasid_free(svm->pasid);
                        kfree(svm);
                        kfree(sdev);
                        goto out;
@@ -365,7 +387,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                ret = intel_pasid_setup_first_level(iommu, dev,
                                                mm ? mm->pgd : init_mm.pgd,
                                                svm->pasid, FLPT_DEFAULT_DID,
-                                               mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+                                               (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+                                               (cpu_feature_enabled(X86_FEATURE_LA57) ?
+                                               PASID_FLAG_FL5LP : 0));
                spin_unlock(&iommu->lock);
                if (ret) {
                        kfree(sdev);
@@ -397,44 +421,45 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
        if (!iommu)
                goto out;
 
-       svm = intel_pasid_lookup_id(pasid);
+       svm = ioasid_find(NULL, pasid, NULL);
        if (!svm)
                goto out;
 
-       list_for_each_entry(sdev, &svm->devs, list) {
-               if (dev == sdev->dev) {
-                       ret = 0;
-                       sdev->users--;
-                       if (!sdev->users) {
-                               list_del_rcu(&sdev->list);
-                               /* Flush the PASID cache and IOTLB for this device.
-                                * Note that we do depend on the hardware *not* using
-                                * the PASID any more. Just as we depend on other
-                                * devices never using PASIDs that they have no right
-                                * to use. We have a *shared* PASID table, because it's
-                                * large and has to be physically contiguous. So it's
-                                * hard to be as defensive as we might like. */
-                               intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-                               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
-                               kfree_rcu(sdev, rcu);
-
-                               if (list_empty(&svm->devs)) {
-                                       intel_pasid_free_id(svm->pasid);
-                                       if (svm->mm)
-                                               mmu_notifier_unregister(&svm->notifier, svm->mm);
-
-                                       list_del(&svm->list);
-
-                                       /* We mandate that no page faults may be outstanding
-                                        * for the PASID when intel_svm_unbind_mm() is called.
-                                        * If that is not obeyed, subtle errors will happen.
-                                        * Let's make them less subtle... */
-                                       memset(svm, 0x6b, sizeof(*svm));
-                                       kfree(svm);
-                               }
+       if (IS_ERR(svm)) {
+               ret = PTR_ERR(svm);
+               goto out;
+       }
+
+       for_each_svm_dev(sdev, svm, dev) {
+               ret = 0;
+               sdev->users--;
+               if (!sdev->users) {
+                       list_del_rcu(&sdev->list);
+                       /* Flush the PASID cache and IOTLB for this device.
+                        * Note that we do depend on the hardware *not* using
+                        * the PASID any more. Just as we depend on other
+                        * devices never using PASIDs that they have no right
+                        * to use. We have a *shared* PASID table, because it's
+                        * large and has to be physically contiguous. So it's
+                        * hard to be as defensive as we might like. */
+                       intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
+                       intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+                       kfree_rcu(sdev, rcu);
+
+                       if (list_empty(&svm->devs)) {
+                               ioasid_free(svm->pasid);
+                               if (svm->mm)
+                                       mmu_notifier_unregister(&svm->notifier, svm->mm);
+                               list_del(&svm->list);
+                               /* We mandate that no page faults may be outstanding
+                                * for the PASID when intel_svm_unbind_mm() is called.
+                                * If that is not obeyed, subtle errors will happen.
+                                * Let's make them less subtle... */
+                               memset(svm, 0x6b, sizeof(*svm));
+                               kfree(svm);
                        }
-                       break;
                }
+               break;
        }
  out:
        mutex_unlock(&pasid_mutex);
@@ -454,10 +479,14 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
        if (!iommu)
                goto out;
 
-       svm = intel_pasid_lookup_id(pasid);
+       svm = ioasid_find(NULL, pasid, NULL);
        if (!svm)
                goto out;
 
+       if (IS_ERR(svm)) {
+               ret = PTR_ERR(svm);
+               goto out;
+       }
        /* init_mm is used in this case */
        if (!svm->mm)
                ret = 1;
@@ -564,13 +593,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 
                if (!svm || svm->pasid != req->pasid) {
                        rcu_read_lock();
-                       svm = intel_pasid_lookup_id(req->pasid);
+                       svm = ioasid_find(NULL, req->pasid, NULL);
                        /* It *can't* go away, because the driver is not permitted
                         * to unbind the mm while any page faults are outstanding.
                         * So we only need RCU to protect the internal idr code. */
                        rcu_read_unlock();
-
-                       if (!svm) {
+                       if (IS_ERR_OR_NULL(svm)) {
                                pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
                                       iommu->name, req->pasid, ((unsigned long long *)req)[0],
                                       ((unsigned long long *)req)[1]);
@@ -654,11 +682,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                        if (req->priv_data_present)
                                memcpy(&resp.qw2, req->priv_data,
                                       sizeof(req->priv_data));
+                       resp.qw2 = 0;
+                       resp.qw3 = 0;
+                       qi_submit_sync(&resp, iommu);
                }
-               resp.qw2 = 0;
-               resp.qw3 = 0;
-               qi_submit_sync(&resp, iommu);
-
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }