// SPDX-License-Identifier: GPL-2.0
/*
 * Simple benchmark program that uses the various features of io_uring
 * to provide fast random access to a device/file. It has various
 * options that control how we use io_uring, see the OPTIONS section
 * below. This uses the raw io_uring interface.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 */
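/*
 * Example invocation (the target is illustrative; pass one or more
 * files or block devices that can be opened with O_DIRECT):
 *
 *	$ ./io_uring /dev/nvme0n1
 */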
#define _GNU_SOURCE		/* for O_NOATIME, O_DIRECT, *rand48_r(), syscall() */
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <inttypes.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <linux/fs.h>
#include <linux/io_uring.h>	/* assumes kernel uapi headers with io_uring (5.1+) */
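/*
 * Raw syscall plumbing. In the original tree these wrappers and the memory
 * barriers come from fio's arch/OS headers; the sketch below is a portable
 * stand-in. The syscall numbers are the x86-64 ones, and __sync_synchronize()
 * is a stronger (full) barrier than strictly required on x86.
 */
#ifndef __NR_io_uring_setup
#define __NR_io_uring_setup	425
#define __NR_io_uring_enter	426
#define __NR_io_uring_register	427
#endif

#define read_barrier()	__sync_synchronize()
#define write_barrier()	__sync_synchronize()

static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	return syscall(__NR_io_uring_setup, entries, p);
}

static int io_uring_enter(int fd, unsigned to_submit, unsigned min_complete,
			  unsigned flags, sigset_t *sig)
{
	/* sig is always NULL in this program; _NSIG / 8 is the kernel sigset size */
	return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
			flags, sig, _NSIG / 8);
}

static int io_uring_register(int fd, unsigned opcode, void *arg,
			     unsigned nr_args)
{
	return syscall(__NR_io_uring_register, fd, opcode, arg, nr_args);
}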
/*
 * Per-completion cache-hit flag, set by an experimental kernel patch;
 * stock headers don't define it, hence the local fallback.
 */
#ifndef IOCQE_FLAG_CACHEHIT
#define IOCQE_FLAG_CACHEHIT	(1U << 0)
#endif

#define min(a, b)		(((a) < (b)) ? (a) : (b))
/*
 * Userspace views of the kernel SQ and CQ rings. Each member points into
 * the mmap()'ed ring regions set up in setup_ring().
 */
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};
#define DEPTH			128	/* queue depth */

#define BATCH_SUBMIT		32	/* max SQEs prepped per submit call */
#define BATCH_COMPLETE		32	/* max completions waited for at once */

#define BS			4096	/* block size per IO */

#define MAX_FDS			16

static unsigned sq_ring_mask, cq_ring_mask;
struct file {
	unsigned long max_blocks;
	unsigned pending_ios;
	int real_fd;
	int fixed_fd;
};
struct submitter {
	pthread_t thread;
	int ring_fd;
	struct drand48_data rand;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec iovecs[DEPTH];
	struct io_cq_ring cq_ring;
	int inflight;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;
	unsigned long cachehit, cachemiss;
	volatile int finish;

	__s32 *fds;

	struct file files[MAX_FDS];
	unsigned nr_files;
	unsigned cur_file;
};
static struct submitter submitters[1];
static volatile int finish;
/*
 * OPTIONS: Set these to test the various features of io_uring.
 */
static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int register_files = 1;	/* use fixed files */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */
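/*
 * Note on combinations, from how the options are used below: polled IO
 * needs O_DIRECT, so polled=1 generally requires buffered=0, and
 * fixedbufs=1 pins DEPTH buffers, which is why main() raises
 * RLIMIT_MEMLOCK before registering them.
 */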
static int io_uring_register_buffers(struct submitter *s)
{
	if (do_nop)
		return 0;

	return io_uring_register(s->ring_fd, IORING_REGISTER_BUFFERS, s->iovecs,
					DEPTH);
}
static int io_uring_register_files(struct submitter *s)
{
	unsigned i;

	if (do_nop)
		return 0;

	s->fds = calloc(s->nr_files, sizeof(__s32));
	for (i = 0; i < s->nr_files; i++) {
		s->fds[i] = s->files[i].real_fd;
		s->files[i].fixed_fd = i;
	}

	return io_uring_register(s->ring_fd, IORING_REGISTER_FILES, s->fds,
					s->nr_files);
}
static int gettid(void)
{
	return syscall(__NR_gettid);
}
static unsigned file_depth(struct submitter *s)
{
	/* per-file share of the queue depth, rounded up */
	return (DEPTH + s->nr_files - 1) / s->nr_files;
}
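/*
 * file_depth() example (illustrative numbers): with DEPTH=128 and three
 * files, each file may have at most (128 + 3 - 1) / 3 = 43 pending IOs.
 */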
static void init_io(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index];
	unsigned long offset;
	struct file *f;
	long r;

	if (do_nop) {
		sqe->opcode = IORING_OP_NOP;
		return;
	}

	if (s->nr_files == 1) {
		f = &s->files[0];
	} else {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			s->cur_file++;
			if (s->cur_file == s->nr_files)
				s->cur_file = 0;
			f = &s->files[s->cur_file];
		}
	}
	f->pending_ios++;

	lrand48_r(&s->rand, &r);
	offset = (r % (f->max_blocks - 1)) * BS;

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
	} else {
		sqe->flags = 0;
		sqe->fd = f->real_fd;
	}
	if (fixedbufs) {
		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = BS;
		sqe->buf_index = index;
	} else {
		sqe->opcode = IORING_OP_READV;
		sqe->addr = (unsigned long) &s->iovecs[index];
		sqe->len = 1;
		sqe->buf_index = 0;
	}
	sqe->ioprio = 0;
	sqe->off = offset;
	sqe->user_data = (unsigned long) f;
}
static int prep_more_ios(struct submitter *s, unsigned max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		read_barrier();
		if (next_tail == *ring->head)
			break;	/* SQ ring is full */

		index = tail & sq_ring_mask;
		init_io(s, index);
		ring->array[index] = index;
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (*ring->tail != tail) {
		/* order tail store with writes to sqes above */
		write_barrier();
		*ring->tail = tail;
		write_barrier();
	}
	return prepped;
}
static int get_file_size(struct file *f)
{
	struct stat st;

	if (fstat(f->real_fd, &st) < 0)
		return -1;
	if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		f->max_blocks = bytes / BS;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / BS;
		return 0;
	}

	return -1;
}
static int reap_events(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		struct file *f;

		read_barrier();
		if (head == *ring->tail)
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			f = (struct file *) (uintptr_t) cqe->user_data;
			f->pending_ios--;
			if (cqe->res != BS) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem doesn't support poll\n");
				return -1;
			}
		}
		if (cqe->flags & IOCQE_FLAG_CACHEHIT)
			s->cachehit++;
		else
			s->cachemiss++;
		reaped++;
		head++;
	} while (1);

	s->inflight -= reaped;
	/* publish the new head so the kernel can reuse the CQEs */
	*ring->head = head;
	write_barrier();
	return reaped;
}
static void *submitter_fn(void *data)
{
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
	int ret, prepped;

	printf("submitter=%d\n", gettid());

	srand48_r(pthread_self(), &s->rand);

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap, to_prep;

		if (!prepped && s->inflight < DEPTH) {
			to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT);
			prepped = prep_more_ios(s, to_prep);
		}
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		if (to_submit && (s->inflight + to_submit <= DEPTH))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) {
			unsigned flags = 0;

			if (to_wait)
				flags = IORING_ENTER_GETEVENTS;
			if ((*ring->flags & IORING_SQ_NEED_WAKEUP))
				flags |= IORING_ENTER_SQ_WAKEUP;
			ret = io_uring_enter(s->ring_fd, to_submit, to_wait,
						flags, NULL);
			s->calls++;
		} else {
			/* with SQPOLL, the kernel thread picks up the SQEs */
			ret = to_submit;
		}

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
		this_reap = 0;
		do {
			int r;

			r = reap_events(s);
			if (r == -1) {
				s->finish = 1;
				break;
			} else if (r > 0)
				this_reap += r;
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				/* partial submission; retry the remainder */
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				to_submit = 0;
				goto submit;
			}
			printf("io_uring_enter: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);

	finish = 1;
	return NULL;
}
static void sig_int(int sig)
{
	printf("Exiting on signal %d\n", sig);
	submitters[0].finish = 1;
	finish = 1;
}
static void arm_sig_int(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);
}
static int setup_ring(struct submitter *s)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;
	int ret, fd;
	void *ptr;

	memset(&p, 0, sizeof(p));

	if (polled && !do_nop)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;
		}
	}

	fd = io_uring_setup(DEPTH, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	s->ring_fd = fd;

	if (fixedbufs) {
		ret = io_uring_register_buffers(s);
		if (ret < 0) {
			perror("io_uring_register_buffers");
			return 1;
		}
	}

	if (register_files) {
		ret = io_uring_register_files(s);
		if (ret < 0) {
			perror("io_uring_register_files");
			return 1;
		}
	}

	/* map the SQ ring: head/tail indices, flags, and the SQE index array */
	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	printf("sq_ring ptr = %p\n", ptr);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	/* map the SQE array itself */
	s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);
	printf("sqes ptr    = %p\n", s->sqes);

	/* map the CQ ring, which contains the CQEs inline */
	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	printf("cq_ring ptr = %p\n", ptr);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;
	return 0;
}
static void file_depths(char *buf)
{
	struct submitter *s = &submitters[0];
	unsigned i;
	char *p;

	buf[0] = '\0';
	p = buf;
	for (i = 0; i < s->nr_files; i++) {
		struct file *f = &s->files[i];

		if (i + 1 == s->nr_files)
			p += sprintf(p, "%d", f->pending_ios);
		else
			p += sprintf(p, "%d, ", f->pending_ios);
	}
}
int main(int argc, char *argv[])
{
	struct submitter *s = &submitters[0];
	unsigned long done, calls, reap, cache_hit, cache_miss;
	int err, i, flags, fd;
	char *fdepths;
	void *ret;

	if (!do_nop && argc < 2) {
		printf("%s: filename\n", argv[0]);
		return 1;
	}

	flags = O_RDONLY | O_NOATIME;
	if (!buffered)
		flags |= O_DIRECT;

	i = 1;
	while (!do_nop && i < argc) {
		struct file *f;

		if (s->nr_files == MAX_FDS) {
			printf("Max number of files (%d) reached\n", MAX_FDS);
			break;
		}
		fd = open(argv[i], flags);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		f = &s->files[s->nr_files];
		f->real_fd = fd;
		if (get_file_size(f)) {
			printf("failed getting size of device/file\n");
			return 1;
		}
		if (f->max_blocks <= 1) {
			printf("Zero file/device size?\n");
			return 1;
		}
		f->max_blocks--;

		printf("Added file %s\n", argv[i]);
		s->nr_files++;
		i++;
	}
	if (fixedbufs) {
		struct rlimit rlim;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
			perror("setrlimit");
			return 1;
		}
	}

	arm_sig_int();

	for (i = 0; i < DEPTH; i++) {
		void *buf;

		if (posix_memalign(&buf, BS, BS)) {
			printf("failed alloc\n");
			return 1;
		}
		s->iovecs[i].iov_base = buf;
		s->iovecs[i].iov_len = BS;
	}
	err = setup_ring(s);
	if (err) {
		printf("ring setup failed: %s, %d\n", strerror(errno), err);
		return 1;
	}
	printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
	printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);

	pthread_create(&s->thread, NULL, submitter_fn, s);

	fdepths = malloc(8 * s->nr_files);
	cache_hit = cache_miss = reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long this_cache_hit = 0;
		unsigned long this_cache_miss = 0;
		unsigned long rpc = 0, ipc = 0;
		double hit = 0.0;

		sleep(1);
		this_done += s->done;
		this_call += s->calls;
		this_reap += s->reaps;
		this_cache_hit += s->cachehit;
		this_cache_miss += s->cachemiss;
		if (this_cache_hit && this_cache_miss) {
			unsigned long hits, total;

			hits = this_cache_hit - cache_hit;
			total = hits + this_cache_miss - cache_miss;
			hit = (double) hits / (double) total;
			hit *= 100.0;
		}
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		} else
			rpc = ipc = -1;
		file_depths(fdepths);
		printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n",
				this_done - done, rpc, ipc, s->inflight,
				fdepths, hit);
		done = this_done;
		calls = this_call;
		reap = this_reap;
		cache_hit = s->cachehit;
		cache_miss = s->cachemiss;
	} while (!finish);

	pthread_join(s->thread, &ret);
	close(s->ring_fd);
	free(fdepths);
	return 0;
}
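/*
 * Build sketch (an assumption, not from the original build system):
 *
 *	$ gcc -O2 -o io_uring io_uring.c -lpthread
 *
 * Requires a 5.1+ kernel and uapi headers providing <linux/io_uring.h>.
 */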