/* drivers/infiniband/hw/hns/hns_roce_db.c — doorbell (DB) record management for the HiSilicon RoCE driver */
1 /* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
2 /*
3  * Copyright (c) 2017 Hisilicon Limited.
4  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
5  */
6
7 #include <linux/platform_device.h>
8 #include <rdma/ib_umem.h>
9 #include "hns_roce_device.h"
10
11 int hns_roce_db_map_user(struct hns_roce_ucontext *context,
12                          struct ib_udata *udata, unsigned long virt,
13                          struct hns_roce_db *db)
14 {
15         unsigned long page_addr = virt & PAGE_MASK;
16         struct hns_roce_user_db_page *page;
17         unsigned int offset;
18         int ret = 0;
19
20         mutex_lock(&context->page_mutex);
21
22         list_for_each_entry(page, &context->page_list, list)
23                 if (page->user_virt == page_addr)
24                         goto found;
25
26         page = kmalloc(sizeof(*page), GFP_KERNEL);
27         if (!page) {
28                 ret = -ENOMEM;
29                 goto out;
30         }
31
32         refcount_set(&page->refcount, 1);
33         page->user_virt = page_addr;
34         page->umem = ib_umem_get(context->ibucontext.device, page_addr,
35                                  PAGE_SIZE, 0);
36         if (IS_ERR(page->umem)) {
37                 ret = PTR_ERR(page->umem);
38                 kfree(page);
39                 goto out;
40         }
41
42         list_add(&page->list, &context->page_list);
43
44 found:
45         offset = virt - page_addr;
46         db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
47         db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
48         db->u.user_page = page;
49         refcount_inc(&page->refcount);
50
51 out:
52         mutex_unlock(&context->page_mutex);
53
54         return ret;
55 }
56
57 void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
58                             struct hns_roce_db *db)
59 {
60         mutex_lock(&context->page_mutex);
61
62         refcount_dec(&db->u.user_page->refcount);
63         if (refcount_dec_if_one(&db->u.user_page->refcount)) {
64                 list_del(&db->u.user_page->list);
65                 ib_umem_release(db->u.user_page->umem);
66                 kfree(db->u.user_page);
67         }
68
69         mutex_unlock(&context->page_mutex);
70 }
71
72 static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
73                                         struct device *dma_device)
74 {
75         struct hns_roce_db_pgdir *pgdir;
76
77         pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
78         if (!pgdir)
79                 return NULL;
80
81         bitmap_fill(pgdir->order1,
82                     HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
83         pgdir->bits[0] = pgdir->order0;
84         pgdir->bits[1] = pgdir->order1;
85         pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
86                                          &pgdir->db_dma, GFP_KERNEL);
87         if (!pgdir->page) {
88                 kfree(pgdir);
89                 return NULL;
90         }
91
92         return pgdir;
93 }
94
/*
 * Try to carve a doorbell record of the requested order out of @pgdir.
 *
 * Two-level buddy allocator over bitmaps: bits[1] tracks free order-1
 * (paired) slots, bits[0] free order-0 (single) slots; a set bit means
 * "free".  An order-0 request may split a free order-1 slot, returning
 * the unused buddy half to the order-0 bitmap.
 *
 * Return: 0 on success, -ENOMEM when the page has no suitable slot.
 * Caller must hold the pgdir mutex.
 */
static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
					struct hns_roce_db *db, int order)
{
	int o;
	int i;

	/* Search the requested order first, then larger (order-1) slots. */
	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
		if (i < HNS_ROCE_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	/* Convert the order-o slot number into an order-0 record index. */
	i <<= o;

	/* Split case: hand the unused buddy back at the requested order. */
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir	= pgdir;
	db->index	= i;
	db->db_record	= pgdir->page + db->index;
	db->dma		= pgdir->db_dma  + db->index * HNS_ROCE_DB_UNIT_SIZE;
	db->order	= order;

	return 0;
}
125
126 int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
127                       int order)
128 {
129         struct hns_roce_db_pgdir *pgdir;
130         int ret = 0;
131
132         mutex_lock(&hr_dev->pgdir_mutex);
133
134         list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)
135                 if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
136                         goto out;
137
138         pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);
139         if (!pgdir) {
140                 ret = -ENOMEM;
141                 goto out;
142         }
143
144         list_add(&pgdir->list, &hr_dev->pgdir_list);
145
146         /* This should never fail -- we just allocated an empty page: */
147         WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));
148
149 out:
150         mutex_unlock(&hr_dev->pgdir_mutex);
151
152         return ret;
153 }
154
/*
 * hns_roce_free_db() - return a kernel doorbell record to its pgdir.
 * @hr_dev: the hns RoCE device.
 * @db: doorbell descriptor filled by hns_roce_alloc_db().
 *
 * Buddy free: marks the slot free again, coalescing an order-0 slot
 * with its free buddy back into an order-1 slot.  When every order-1
 * slot is free the whole DMA page is unused and is released.
 */
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
{
	int o;
	int i;

	mutex_lock(&hr_dev->pgdir_mutex);

	o = db->order;
	i = db->index;

	/* If the order-0 buddy slot is also free, merge back to order 1. */
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}

	/* Convert the order-0 record index back to a slot number at order o. */
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	/* All order-1 slots free again: the entire page is now unused. */
	if (bitmap_full(db->u.pgdir->order1,
			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
				  db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&hr_dev->pgdir_mutex);
}