xen/pvcalls: implement poll command
author     Stefano Stabellini <sstabellini@kernel.org>
           Thu, 6 Jul 2017 18:01:07 +0000 (11:01 -0700)
committer  Boris Ostrovsky <boris.ostrovsky@oracle.com>
           Thu, 31 Aug 2017 13:45:55 +0000 (09:45 -0400)
Implement poll on passive sockets by requesting a delayed response with
mappass->reqcopy, and reply back when there is data on the passive
socket.
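
The deferred-reply mechanism is small enough to model outside the kernel. The
following is a minimal userspace C sketch of the pattern, under simplifying
assumptions: the struct layouts, the helper names and the PVCALLS_POLL value
here are illustrative stand-ins, not the driver's real definitions. The point
is the two-phase flow: the poll handler parks the request in reqcopy, and the
data-ready callback answers it once a connection is queued.

#include <stdio.h>

#define PVCALLS_POLL 9  /* stand-in value, for illustration only */

struct request { int cmd; int req_id; };
struct sockpass_mapping { struct request reqcopy; };

/* Poll arrives but nothing is queued yet: park it for a delayed reply. */
static void handle_poll(struct sockpass_mapping *m, const struct request *req)
{
	m->reqcopy = *req;
}

/* Data shows up on the passive socket: answer the parked request. */
static void data_ready(struct sockpass_mapping *m)
{
	if (m->reqcopy.cmd != PVCALLS_POLL)
		return;
	printf("reply to req_id=%d, ret=0\n", m->reqcopy.req_id);
	m->reqcopy.cmd = 0;  /* the slot is free for the next poll/accept */
}

int main(void)
{
	struct sockpass_mapping m = { { 0, 0 } };
	struct request req = { PVCALLS_POLL, 42 };

	handle_poll(&m, &req);  /* no data yet: response is deferred */
	data_ready(&m);         /* connection queued: reply goes out now */
	return 0;
}

In the real driver the same hand-off is protected by mappass->copy_lock, and
the reply is written straight onto the shared ring rather than printed.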

Poll on active sockets is left unimplemented, as per the spec: the frontend
should just wait for events and check the indexes on the indexes page.

Only one outstanding poll (or accept) request is supported per passive
socket at any given time.
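
That limitation is enforced through the same reqcopy field: a non-zero
reqcopy.cmd means a poll or accept is already parked on this socket, so a
new one is refused with -EINTR. Reusing the simplified stand-in types from
the sketch above, the guard amounts to:

static int claim_reqcopy(struct sockpass_mapping *m, const struct request *req)
{
	if (m->reqcopy.cmd != 0)
		return -1;  /* a request is already outstanding */
	m->reqcopy = *req;  /* claim the slot; cleared again when replied to */
	return 0;
}

In the driver this check and the assignment happen under copy_lock, so the
data-ready callback cannot race with a newly arriving poll request.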

[ boris: fixed long lines ]

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 9a4bdc5e5d349bd2bd9b190de68d59ae1276e155..06bd95d4d552173550d3096be8f28e9553211f20 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -355,11 +355,34 @@ static void __pvcalls_back_accept(struct work_struct *work)
 static void pvcalls_pass_sk_data_ready(struct sock *sock)
 {
        struct sockpass_mapping *mappass = sock->sk_user_data;
+       struct pvcalls_fedata *fedata;
+       struct xen_pvcalls_response *rsp;
+       unsigned long flags;
+       int notify;
 
        if (mappass == NULL)
                return;
 
-       queue_work(mappass->wq, &mappass->register_work);
+       fedata = mappass->fedata;
+       spin_lock_irqsave(&mappass->copy_lock, flags);
+       if (mappass->reqcopy.cmd == PVCALLS_POLL) {
+               rsp = RING_GET_RESPONSE(&fedata->ring,
+                                       fedata->ring.rsp_prod_pvt++);
+               rsp->req_id = mappass->reqcopy.req_id;
+               rsp->u.poll.id = mappass->reqcopy.u.poll.id;
+               rsp->cmd = mappass->reqcopy.cmd;
+               rsp->ret = 0;
+
+               mappass->reqcopy.cmd = 0;
+               spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+               RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
+               if (notify)
+                       notify_remote_via_irq(mappass->fedata->irq);
+       } else {
+               spin_unlock_irqrestore(&mappass->copy_lock, flags);
+               queue_work(mappass->wq, &mappass->register_work);
+       }
 }
 
 static int pvcalls_back_bind(struct xenbus_device *dev,
@@ -502,6 +525,56 @@ static int pvcalls_back_accept(struct xenbus_device *dev,
 static int pvcalls_back_poll(struct xenbus_device *dev,
                             struct xen_pvcalls_request *req)
 {
+       struct pvcalls_fedata *fedata;
+       struct sockpass_mapping *mappass;
+       struct xen_pvcalls_response *rsp;
+       struct inet_connection_sock *icsk;
+       struct request_sock_queue *queue;
+       unsigned long flags;
+       int ret;
+       bool data;
+
+       fedata = dev_get_drvdata(&dev->dev);
+
+       down(&fedata->socket_lock);
+       mappass = radix_tree_lookup(&fedata->socketpass_mappings,
+                                   req->u.poll.id);
+       up(&fedata->socket_lock);
+       if (mappass == NULL)
+               return -EINVAL;
+
+       /*
+        * Limitation of the current implementation: only support one
+        * concurrent accept or poll call on one socket.
+        */
+       spin_lock_irqsave(&mappass->copy_lock, flags);
+       if (mappass->reqcopy.cmd != 0) {
+               ret = -EINTR;
+               goto out;
+       }
+
+       mappass->reqcopy = *req;
+       icsk = inet_csk(mappass->sock->sk);
+       queue = &icsk->icsk_accept_queue;
+       data = queue->rskq_accept_head != NULL;
+       if (data) {
+               mappass->reqcopy.cmd = 0;
+               ret = 0;
+               goto out;
+       }
+       spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+       /* Tell the caller we don't need to send back a notification yet */
+       return -1;
+
+out:
+       spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+       rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+       rsp->req_id = req->req_id;
+       rsp->cmd = req->cmd;
+       rsp->u.poll.id = req->u.poll.id;
+       rsp->ret = ret;
        return 0;
 }