IB/core: Remove ib_device.dma_device
author Bart Van Assche <bart.vanassche@sandisk.com>
Fri, 20 Jan 2017 21:04:36 +0000 (13:04 -0800)
committer Doug Ledford <dledford@redhat.com>
Tue, 24 Jan 2017 17:26:17 +0000 (12:26 -0500)
Add code in ib_register_device() that copies the DMA masks. Use
&ib_device.dev in DMA mapping operations instead of dma_device.
Remove ib_device.dma_device because, after this and the previous
patches, it is no longer used.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/core/device.c
include/rdma/ib_verbs.h
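
In essence, the first hunk makes ib_register_device() inherit any DMA
parameter that was not set explicitly from the parent device. As a rough
standalone sketch of that inheritance pattern (toy_device and its fields
are simplified stand-ins, not the kernel's real struct device):

#include <stdint.h>

struct toy_device {
	const void *dma_ops;		/* bus-specific DMA callbacks */
	uint64_t *dma_mask;		/* streaming DMA mask */
	uint64_t coherent_dma_mask;	/* coherent allocation mask */
	struct toy_device *parent;
};

/*
 * Same pattern as the new code in ib_register_device(): every DMA
 * parameter the child leaves unset is copied from the parent, so later
 * DMA operations on the child behave like operations on the parent.
 */
static void inherit_dma_params(struct toy_device *dev)
{
	const struct toy_device *parent = dev->parent;

	if (!dev->dma_ops)
		dev->dma_ops = parent->dma_ops;
	if (!dev->dma_mask)
		dev->dma_mask = parent->dma_mask;
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = parent->coherent_dma_mask;
}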

index d543c43904474036dc406a1320c8c0b10febf85f..cac1518de36e29ff6276e8eb7e7860f1e459d571 100644 (file)
@@ -333,14 +333,15 @@ int ib_register_device(struct ib_device *device,
        int ret;
        struct ib_client *client;
        struct ib_udata uhw = {.outlen = 0, .inlen = 0};
-
-       WARN_ON_ONCE(!device->dev.parent && !device->dma_device);
-       WARN_ON_ONCE(device->dev.parent && device->dma_device
-                    && device->dev.parent != device->dma_device);
-       if (!device->dev.parent)
-               device->dev.parent = device->dma_device;
-       if (!device->dma_device)
-               device->dma_device = device->dev.parent;
+       struct device *parent = device->dev.parent;
+
+       WARN_ON_ONCE(!parent);
+       if (!device->dev.dma_ops)
+               device->dev.dma_ops = parent->dma_ops;
+       if (!device->dev.dma_mask)
+               device->dev.dma_mask = parent->dma_mask;
+       if (!device->dev.coherent_dma_mask)
+               device->dev.coherent_dma_mask = parent->coherent_dma_mask;
 
        mutex_lock(&device_mutex);
 
index 694e39e4f1ffd83f56f47ef52071aa27685f7329..a20a15f81936f43d25917c2fe1de2363f22d7337 100644 (file)
@@ -1841,8 +1841,6 @@ struct ib_port_immutable {
 };
 
 struct ib_device {
-       struct device                *dma_device;
-
        char                          name[IB_DEVICE_NAME_MAX];
 
        struct list_head              event_handler_list;
@@ -2969,7 +2967,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
-       return dma_mapping_error(dev->dma_device, dma_addr);
+       return dma_mapping_error(&dev->dev, dma_addr);
 }
 
 /**
@@ -2985,7 +2983,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
 {
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
-       return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+       return dma_map_single(&dev->dev, cpu_addr, size, direction);
 }
 
 /**
@@ -3002,7 +3000,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
        if (dev->dma_ops)
                dev->dma_ops->unmap_single(dev, addr, size, direction);
        else
-               dma_unmap_single(dev->dma_device, addr, size, direction);
+               dma_unmap_single(&dev->dev, addr, size, direction);
 }
 
 /**
@@ -3021,7 +3019,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
 {
        if (dev->dma_ops)
                return dev->dma_ops->map_page(dev, page, offset, size, direction);
-       return dma_map_page(dev->dma_device, page, offset, size, direction);
+       return dma_map_page(&dev->dev, page, offset, size, direction);
 }
 
 /**
@@ -3038,7 +3036,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
        if (dev->dma_ops)
                dev->dma_ops->unmap_page(dev, addr, size, direction);
        else
-               dma_unmap_page(dev->dma_device, addr, size, direction);
+               dma_unmap_page(&dev->dev, addr, size, direction);
 }
 
 /**
@@ -3054,7 +3052,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
 {
        if (dev->dma_ops)
                return dev->dma_ops->map_sg(dev, sg, nents, direction);
-       return dma_map_sg(dev->dma_device, sg, nents, direction);
+       return dma_map_sg(&dev->dev, sg, nents, direction);
 }
 
 /**
@@ -3071,7 +3069,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
        if (dev->dma_ops)
                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
        else
-               dma_unmap_sg(dev->dma_device, sg, nents, direction);
+               dma_unmap_sg(&dev->dev, sg, nents, direction);
 }
 
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3082,9 +3080,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
        if (dev->dma_ops)
                return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
                                                  dma_attrs);
-       else
-               return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
-                                       dma_attrs);
+       return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3096,8 +3092,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
                                                  dma_attrs);
        else
-               dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
-                                  dma_attrs);
+               dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3142,7 +3137,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
-               dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+               dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
 }
 
 /**
@@ -3160,7 +3155,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
-               dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+               dma_sync_single_for_device(&dev->dev, addr, size, dir);
 }
 
 /**
@@ -3183,7 +3178,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                *dma_handle = handle;
                return ret;
        }
-       return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+       return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
 }
 
 /**
@@ -3200,7 +3195,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
-               dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+               dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
 }
 
 /**
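
None of the hunks change the ib_dma_*() wrapper signatures, so
upper-layer callers keep working unchanged. As a hypothetical example
(not part of this commit), a caller relying only on the wrapper
signatures shown above:

#include <rdma/ib_verbs.h>

/* Hypothetical ULP-side helper, illustrative only: it uses just the
 * ib_dma_*() wrappers visible in the hunks above. */
static int map_and_unmap(struct ib_device *dev, void *buf, size_t len)
{
	u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, addr))
		return -ENOMEM;
	/* ... hand 'addr' to the HCA, e.g. in a send work request ... */
	ib_dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}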