/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"
struct vmbus_rescind_work {
	struct work_struct work;
	struct vmbus_channel *channel;
};
/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @negop: Pointer to negotiate message structure
 * @buf: Raw buffer channel data
 * @fw_version: Framework version we can support
 * @srv_version: Service version we can support
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * @negop is of type &struct icmsg_negotiate.
 * Set up and fill in default negotiate response message.
 *
 * The fw_version specifies the framework version that
 * we can support and srv_version specifies the service
 * version we can support.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
			       struct icmsg_negotiate *negop, u8 *buf,
			       int fw_version, int srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i;
	bool found_match = false;

	icmsghdrp->icmsgsize = 0x10;
	fw_major = (fw_version >> 16);
	fw_minor = (fw_version & 0xFFFF);

	srv_major = (srv_version >> 16);
	srv_minor = (srv_version & 0xFFFF);

	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
	for (i = 0; i < negop->icframe_vercnt; i++) {
		if ((negop->icversion_data[i].major == fw_major) &&
		    (negop->icversion_data[i].minor == fw_minor)) {
			icframe_major = negop->icversion_data[i].major;
			icframe_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = negop->icframe_vercnt;
	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
		if ((negop->icversion_data[i].major == srv_major) &&
		    (negop->icversion_data[i].minor == srv_minor)) {
			icmsg_major = negop->icversion_data[i].major;
			icmsg_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */
fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;

	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
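
/*
 * Example usage (illustrative sketch only, not code from this file): an
 * integration-component driver's channel callback typically reads the host
 * message and, when it is a negotiate request, lets the helper above fill
 * in the response before sending it back. UTIL_FW_VERSION and SRV_VERSION
 * stand in for whatever version constants the calling driver supports.
 *
 *	vmbus_recvpacket(channel, buf, sizeof(buf), &recvlen, &requestid);
 *	icmsghdrp = (struct icmsg_hdr *)&buf[sizeof(struct vmbuspipe_hdr)];
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, negop, buf,
 *					  UTIL_FW_VERSION, SRV_VERSION);
 *	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
 *	vmbus_sendpacket(channel, buf, recvlen, requestid,
 *			 VM_PKT_DATA_INBAND, 0);
 */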
static void vmbus_sc_creation_cb(struct work_struct *work)
{
	struct vmbus_channel *newchannel = container_of(work,
							struct vmbus_channel,
							work);
	struct vmbus_channel *primary_channel = newchannel->primary_channel;

	/*
	 * On entry sc_creation_callback has already been verified to
	 * be non-NULL.
	 */
	primary_channel->sc_creation_callback(newchannel);
}
/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	static atomic_t chan_num = ATOMIC_INIT(0);
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	channel->id = atomic_inc_return(&chan_num);
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);
	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	channel->controlwq = alloc_workqueue("hv_vmbus_ctl/%d", WQ_MEM_RECLAIM,
					     1, channel->id);
	if (!channel->controlwq) {
		kfree(channel);
		return NULL;
	}
	return channel;
}
/*
 * release_channel - Release the vmbus channel object itself
 */
static void release_channel(struct work_struct *work)
{
	struct vmbus_channel *channel = container_of(work,
			struct vmbus_channel, work);

	destroy_workqueue(channel->controlwq);
	kfree(channel);
}
/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	/*
	 * We have to release the channel's workqueue/thread in the vmbus's
	 * workqueue/thread context, i.e. we can't destroy ourselves.
	 */
	INIT_WORK(&channel->work, release_channel);
	queue_work(vmbus_connection.work_queue, &channel->work);
}
static void process_rescind_fn(struct work_struct *work)
{
	struct vmbus_rescind_work *rc_work;
	struct vmbus_channel *channel;
	struct device *dev;

	rc_work = container_of(work, struct vmbus_rescind_work, work);
	channel = rc_work->channel;

	/*
	 * We have already acquired a reference on the channel
	 * and so it cannot vanish underneath us.
	 * It is possible (while very unlikely) that we may
	 * get here while the processing of the initial offer
	 * is still not complete. Deal with this situation by
	 * just waiting until the channel is in the correct state.
	 */
	while (channel->work.func != release_channel)
		msleep(1000);

	if (channel->device_obj) {
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	}
	kfree(rc_work);
}
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	int cpu = smp_processor_id();

	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	struct vmbus_channel_relid_released msg;
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));

	if (channel == NULL)
		return;

	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}

	if (channel->primary_channel == NULL) {
		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
		list_del(&channel->listentry);
		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}
	free_channel(channel);
}
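
/*
 * Note on the handshake above: CHANNELMSG_RELID_RELEASED is posted to the
 * host unconditionally, even when no matching channel object exists
 * (channel == NULL, as in the rescind path), so the host learns that the
 * relid is no longer in use by the guest.
 */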
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel;

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		vmbus_device_unregister(channel->device_obj);
		free_channel(channel);
	}
}
static void vmbus_do_device_register(struct work_struct *work)
{
	struct hv_device *device_obj;
	int ret;
	unsigned long flags;
	struct vmbus_channel *newchannel = container_of(work,
						struct vmbus_channel, work);

	ret = vmbus_device_register(newchannel->device_obj);
	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
			newchannel->offermsg.child_relid);
		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
		list_del(&newchannel->listentry);
		device_obj = newchannel->device_obj;
		newchannel->device_obj = NULL;
		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

		if (newchannel->target_cpu != get_cpu()) {
			put_cpu();
			smp_call_function_single(newchannel->target_cpu,
					percpu_channel_deq, newchannel, true);
		} else {
			percpu_channel_deq(newchannel);
			put_cpu();
		}

		kfree(device_obj);
		if (!newchannel->rescind) {
			free_channel(newchannel);
			return;
		}
	}
	/*
	 * The next state for this channel is to be freed.
	 */
	INIT_WORK(&newchannel->work, release_channel);
}
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;

	/* Make sure this is a new offer */
	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

	if (fnew) {
		if (newchannel->target_cpu != get_cpu()) {
			put_cpu();
			smp_call_function_single(newchannel->target_cpu,
					percpu_channel_enq, newchannel, true);
		} else {
			percpu_channel_enq(newchannel);
			put_cpu();
		}
	} else {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			spin_unlock_irqrestore(&channel->lock, flags);

			if (newchannel->target_cpu != get_cpu()) {
				put_cpu();
				smp_call_function_single(newchannel->target_cpu,
					percpu_channel_enq, newchannel, true);
			} else {
				percpu_channel_enq(newchannel);
				put_cpu();
			}

			newchannel->state = CHANNEL_OPEN_STATE;
			channel->num_sc++;
			if (channel->sc_creation_callback != NULL) {
				/*
				 * We need to invoke the sub-channel creation
				 * callback; invoke this in a separate work
				 * context since we are currently running on
				 * the global work context in which we handle
				 * messages from the host.
				 */
				INIT_WORK(&newchannel->work,
					  vmbus_sc_creation_cb);
				queue_work(newchannel->controlwq,
					   &newchannel->work);
			}
			return;
		}

		goto err_free_chan;
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 * Invoke this call on the per-channel work context.
	 * Until we return from this function, rescind offer message
	 * cannot be processed as we are running on the global message
	 * handling work context.
	 */
	INIT_WORK(&newchannel->work, vmbus_do_device_register);
	queue_work(newchannel->controlwq, &newchannel->work);
	return;

err_deq_chan:
	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
	list_del(&newchannel->listentry);
	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}

err_free_chan:
	free_channel(newchannel);
}
enum { IDE = 0, SCSI, NIC, ND_NIC, MAX_PERF_CHN };

/*
 * This is an array of device_ids (device types) that are performance critical.
 * We attempt to distribute the interrupt load for these devices across
 * all available CPUs.
 */
static const struct hv_vmbus_device_id hp_devs[] = {
	{ HV_IDE_GUID, },	/* IDE */
	{ HV_SCSI_GUID, },	/* Storage - SCSI */
	{ HV_NIC_GUID, },	/* Network */
	{ HV_ND_GUID, },	/* NetworkDirect Guest RDMA */
};

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static u32 next_vp;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU. We
 * implement here a simple round robin scheme for distributing
 * the interrupt load.
 * We will bind channels that are not performance critical to cpu 0 and
 * performance critical channels (IDE, SCSI and Network) will be uniformly
 * distributed across all available CPUs.
 */
static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
{
	u32 cur_cpu;
	int i;
	bool perf_chn = false;
	u32 max_cpus = num_online_cpus();

	for (i = IDE; i < MAX_PERF_CHN; i++) {
		if (!memcmp(type_guid->b, hp_devs[i].guid,
			    sizeof(uuid_le))) {
			perf_chn = true;
			break;
		}
	}
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->target_cpu = 0;
		channel->target_vp = 0;
		return;
	}
	cur_cpu = (++next_vp % max_cpus);
	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}
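
/*
 * Worked example (illustrative): with 4 online CPUs and next_vp starting
 * at 0, successive performance-critical offers are bound to CPUs 1, 2, 3,
 * 0, 1, ... while all other offers (and all offers on pre-Win8 hosts)
 * stay on CPU 0.
 */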
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 *
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we setup state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				&newchannel->sig_buf,
				HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	init_vp_index(newchannel, &offer->offer.if_type);

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct vmbus_rescind_work *rc_work;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;
	channel = relid2channel(rescind->child_relid, true);

	if (channel == NULL) {
		hv_process_channel_removal(NULL, rescind->child_relid);
		return;
	}

	/*
	 * We have acquired a reference on the channel and have posted
	 * the rescind state. Perform further cleanup in a work context
	 * that is different from the global work context in which
	 * we process messages from the host (we are currently executing
	 * on that global context).
	 */
	rc_work = kzalloc(sizeof(struct vmbus_rescind_work), GFP_KERNEL);
	if (!rc_work) {
		pr_err("Unable to allocate memory for rescind processing\n");
		return;
	}
	rc_work->channel = channel;
	INIT_WORK(&rc_work->work, process_rescind_fn);
	schedule_work(&rc_work->work);
}
/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we received a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result, result,
				       sizeof(struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			0, NULL},
	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		0, NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
	{CHANNELMSG_GPADL_HEADER,		0, NULL},
	{CHANNELMSG_GPADL_BODY,			0, NULL},
	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		0, NULL},
	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			0, NULL},
};
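
/*
 * The table above is indexed directly by message type. vmbus_onmessage()
 * below validates hdr->msgtype against CHANNELMSG_COUNT before dispatching;
 * message types with a NULL handler are reported as unhandled.
 */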
/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
			hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}
/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;
	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_message_header));
	if (ret != 0)
		pr_err("Unable to request offers - %d\n", ret);

	kfree(msginfo);
	return ret;
}
/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;
	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
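
/*
 * Example usage (illustrative sketch only): a multi-channel driver picks
 * the outgoing channel immediately before posting each request, e.g.:
 *
 *	outgoing = vmbus_get_outgoing_channel(device->channel);
 *	vmbus_sendpacket(outgoing, req, req_size, (unsigned long)req,
 *			 VM_PKT_DATA_INBAND,
 *			 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */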
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
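
/*
 * Example usage (illustrative sketch; handle_sc_creation() is a
 * hypothetical driver callback): a driver that wants to use sub-channels
 * registers the callback on the primary channel before asking the host,
 * via its own protocol message, to open them; the callback then typically
 * calls vmbus_open() on each newly offered sub-channel:
 *
 *	vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
 */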
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);
	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);