// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
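/* Lookups in the hash table are keyed by guest CID and run under the
 * RCU read lock; insertions and removals are serialized by
 * vhost_vsock_mutex.
 */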

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (!other_cid)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

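/* Fill the guest's RX virtqueue with packets queued on send_pkt_list.
 * Runs from the send_pkt worker and from the RX kick handler, so a
 * guest posting fresh RX buffers can resume a transfer that ran out
 * of descriptors.
 */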
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr))
			payload_len = iov_len - sizeof(pkt->hdr);

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

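/* Worker entry point: drains send_pkt_list into the guest's RX
 * virtqueue from the vhost worker thread.
 */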
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

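/* Queue a packet for delivery to the guest and kick the worker.
 * Takes ownership of pkt; returns the packet length on success or
 * -ENODEV if no vhost_vsock instance owns the destination CID.
 */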
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

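/* Drop all packets still queued for vsk and, if replies were among
 * them, let the TX virtqueue resume if it had been throttled.
 */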
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

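/* Build a virtio_vsock_pkt from the descriptor chain the guest placed
 * in the TX virtqueue.  The chain must be host-readable only (no "in"
 * buffers) and carry at least a complete packet header.
 */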
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

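/* The vhost transport reuses the common virtio transport operations
 * and only provides its own send_pkt to move packets into the guest.
 */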
static struct virtio_transport vhost_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

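/* The guest kicked the TX virtqueue: pop each descriptor chain,
 * rebuild the packet and hand it to the core transport, throttling
 * when too many replies are already queued.
 */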
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

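/* Called on VHOST_VSOCK_SET_RUNNING.  A non-NULL vq->private_data
 * marks a virtqueue as running; on error, any virtqueue already
 * started is rolled back before returning.
 */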
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

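/* Device ioctls: VHOST_VSOCK_SET_GUEST_CID assigns the guest CID,
 * VHOST_VSOCK_SET_RUNNING starts or stops the virtqueues, feature
 * negotiation is handled here, and everything else is delegated to
 * the generic vhost ioctl handlers.
 */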
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek         = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");