// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
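
/*
 * Note: this table must stay in dev_type order (HV_IDE first, HV_UNKNOWN
 * last): hv_get_dev_type() below walks it by indexing with those values.
 */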
static const struct vmbus_device vmbus_devs[] = {
        /* IDE */
        { .dev_type = HV_IDE,
          HV_IDE_GUID,
          .perf_device = true,
        },

        /* SCSI */
        { .dev_type = HV_SCSI,
          HV_SCSI_GUID,
          .perf_device = true,
        },

        /* Fibre Channel */
        { .dev_type = HV_FC,
          HV_SYNTHFC_GUID,
          .perf_device = true,
        },

        /* Synthetic NIC */
        { .dev_type = HV_NIC,
          HV_NIC_GUID,
          .perf_device = true,
        },

        /* Network Direct */
        { .dev_type = HV_ND,
          HV_ND_GUID,
          .perf_device = true,
        },

        /* PCIE */
        { .dev_type = HV_PCIE,
          HV_PCIE_GUID,
          .perf_device = false,
        },

        /* Synthetic Frame Buffer */
        { .dev_type = HV_FB,
          HV_SYNTHVID_GUID,
          .perf_device = false,
        },

        /* Synthetic Keyboard */
        { .dev_type = HV_KBD,
          HV_KBD_GUID,
          .perf_device = false,
        },

        /* Synthetic MOUSE */
        { .dev_type = HV_MOUSE,
          HV_MOUSE_GUID,
          .perf_device = false,
        },

        /* KVP */
        { .dev_type = HV_KVP,
          HV_KVP_GUID,
          .perf_device = false,
        },

        /* Time Synch */
        { .dev_type = HV_TS,
          HV_TS_GUID,
          .perf_device = false,
        },

        /* Heartbeat */
        { .dev_type = HV_HB,
          HV_HEART_BEAT_GUID,
          .perf_device = false,
        },

        /* Shutdown */
        { .dev_type = HV_SHUTDOWN,
          HV_SHUTDOWN_GUID,
          .perf_device = false,
        },

        /* File copy */
        { .dev_type = HV_FCOPY,
          HV_FCOPY_GUID,
          .perf_device = false,
        },

        /* Backup */
        { .dev_type = HV_BACKUP,
          HV_VSS_GUID,
          .perf_device = false,
        },

        /* Dynamic Memory */
        { .dev_type = HV_DM,
          HV_DM_GUID,
          .perf_device = false,
        },

        /* Unknown GUID */
        { .dev_type = HV_UNKNOWN,
          .perf_device = false,
        },
};

static const struct {
        guid_t guid;
} vmbus_unsupported_devs[] = {
        { HV_AVMA1_GUID },
        { HV_AVMA2_GUID },
        { HV_RDV_GUID   },
};

/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
        struct vmbus_channel_msginfo *msginfo;
        unsigned long flags;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        channel->rescind = true;
        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                                msglistentry) {

                if (msginfo->waiting_channel == channel) {
                        complete(&msginfo->waitevent);
                        break;
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

static bool is_unsupported_vmbus_devs(const guid_t *guid)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
                if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
                        return true;
        return false;
}

static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
        const guid_t *guid = &channel->offermsg.offer.if_type;
        u16 i;

        if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
                return HV_UNKNOWN;

        for (i = HV_IDE; i < HV_UNKNOWN; i++) {
                if (guid_equal(guid, &vmbus_devs[i].guid))
                        return i;
        }
        pr_info("Unknown GUID: %pUl\n", guid);
        /* The loop ran to completion, so i == HV_UNKNOWN here. */
        return i;
}

/**
 * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support.
 * @fw_vercnt: The number of entries in @fw_version.
 * @srv_version: The service versions we can support.
 * @srv_vercnt: The number of entries in @srv_version.
 * @nego_fw_version: The selected framework version.
 * @nego_srv_version: The selected service version.
 *
 * Note: Versions are given in decreasing order.
 *
 * Set up and fill in the default negotiate response message.
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
                                u8 *buf, const int *fw_version, int fw_vercnt,
                                const int *srv_version, int srv_vercnt,
                                int *nego_fw_version, int *nego_srv_version)
{
        int icframe_major, icframe_minor;
        int icmsg_major, icmsg_minor;
        int fw_major, fw_minor;
        int srv_major, srv_minor;
        int i, j;
        bool found_match = false;
        struct icmsg_negotiate *negop;

        icmsghdrp->icmsgsize = 0x10;
        negop = (struct icmsg_negotiate *)&buf[
                sizeof(struct vmbuspipe_hdr) +
                sizeof(struct icmsg_hdr)];

        icframe_major = negop->icframe_vercnt;
        icframe_minor = 0;

        icmsg_major = negop->icmsg_vercnt;
        icmsg_minor = 0;

        /*
         * Select the framework version number we will
         * support.
         */

        for (i = 0; i < fw_vercnt; i++) {
                fw_major = (fw_version[i] >> 16);
                fw_minor = (fw_version[i] & 0xFFFF);

                for (j = 0; j < negop->icframe_vercnt; j++) {
                        if ((negop->icversion_data[j].major == fw_major) &&
                            (negop->icversion_data[j].minor == fw_minor)) {
                                icframe_major = negop->icversion_data[j].major;
                                icframe_minor = negop->icversion_data[j].minor;
                                found_match = true;
                                break;
                        }
                }

                if (found_match)
                        break;
        }

        if (!found_match)
                goto fw_error;

        found_match = false;

        for (i = 0; i < srv_vercnt; i++) {
                srv_major = (srv_version[i] >> 16);
                srv_minor = (srv_version[i] & 0xFFFF);

                for (j = negop->icframe_vercnt;
                        (j < negop->icframe_vercnt + negop->icmsg_vercnt);
                        j++) {

                        if ((negop->icversion_data[j].major == srv_major) &&
                                (negop->icversion_data[j].minor == srv_minor)) {

                                icmsg_major = negop->icversion_data[j].major;
                                icmsg_minor = negop->icversion_data[j].minor;
                                found_match = true;
                                break;
                        }
                }

                if (found_match)
                        break;
        }

        /*
         * Respond with the framework and service
         * version numbers we can support.
         */

fw_error:
        if (!found_match) {
                negop->icframe_vercnt = 0;
                negop->icmsg_vercnt = 0;
        } else {
                negop->icframe_vercnt = 1;
                negop->icmsg_vercnt = 1;
        }

        if (nego_fw_version)
                *nego_fw_version = (icframe_major << 16) | icframe_minor;

        if (nego_srv_version)
                *nego_srv_version = (icmsg_major << 16) | icmsg_minor;

        negop->icversion_data[0].major = icframe_major;
        negop->icversion_data[0].minor = icframe_minor;
        negop->icversion_data[1].major = icmsg_major;
        negop->icversion_data[1].minor = icmsg_minor;
        return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
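
/*
 * Example (a minimal sketch; the UTIL_FW_VERSION/SRV_VERSION_* values are
 * hypothetical): an integration-component driver's channel callback might
 * negotiate like this. Each array entry encodes a version as
 * (major << 16) | minor, listed in decreasing order of preference:
 *
 *	static const int fw_versions[] = { UTIL_FW_VERSION };
 *	static const int srv_versions[] = { SRV_VERSION_3, SRV_VERSION_1 };
 *	int srv_version;
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, buf,
 *					  fw_versions, ARRAY_SIZE(fw_versions),
 *					  srv_versions, ARRAY_SIZE(srv_versions),
 *					  NULL, &srv_version);
 */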

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
        struct vmbus_channel *channel;

        channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
        if (!channel)
                return NULL;

        spin_lock_init(&channel->lock);
        init_completion(&channel->rescind_event);

        INIT_LIST_HEAD(&channel->sc_list);
        INIT_LIST_HEAD(&channel->percpu_list);

        tasklet_init(&channel->callback_event,
                     vmbus_on_event, (unsigned long)channel);

        hv_ringbuffer_pre_init(channel);

        return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
        tasklet_kill(&channel->callback_event);
        vmbus_remove_channel_attr_group(channel);

        kobject_put(&channel->kobj);
}
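
/*
 * percpu_channel_enq()/percpu_channel_deq() must run on the channel's
 * target CPU: the callers below either call them directly when already on
 * that CPU or dispatch them via smp_call_function_single(), which is why
 * the per-cpu channel list itself needs no extra locking here.
 */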
static void percpu_channel_enq(void *arg)
{
        struct vmbus_channel *channel = arg;
        struct hv_per_cpu_context *hv_cpu
                = this_cpu_ptr(hv_context.cpu_context);

        list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
}

static void percpu_channel_deq(void *arg)
{
        struct vmbus_channel *channel = arg;

        list_del_rcu(&channel->percpu_list);
}

static void vmbus_release_relid(u32 relid)
{
        struct vmbus_channel_relid_released msg;
        int ret;

        memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
        msg.child_relid = relid;
        msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
        ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
                             true);

        trace_vmbus_release_relid(&msg, ret);
}

void hv_process_channel_removal(struct vmbus_channel *channel)
{
        struct vmbus_channel *primary_channel;
        unsigned long flags;

        BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
        BUG_ON(!channel->rescind);

        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu,
                                         percpu_channel_deq, channel, true);
        } else {
                percpu_channel_deq(channel);
                put_cpu();
        }

        if (channel->primary_channel == NULL) {
                list_del(&channel->listentry);

                primary_channel = channel;
        } else {
                primary_channel = channel->primary_channel;
                spin_lock_irqsave(&primary_channel->lock, flags);
                list_del(&channel->sc_list);
                spin_unlock_irqrestore(&primary_channel->lock, flags);
        }

        /*
         * We need to free the bit for init_vp_index() to work in the case
         * of sub-channels, when we reload drivers like hv_netvsc.
         */
        if (channel->affinity_policy == HV_LOCALIZED)
                cpumask_clear_cpu(channel->target_cpu,
                                  &primary_channel->alloced_cpus_in_node);

        vmbus_release_relid(channel->offermsg.child_relid);

        free_channel(channel);
}

void vmbus_free_channels(void)
{
        struct vmbus_channel *channel, *tmp;

        list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
                listentry) {
                /* hv_process_channel_removal() needs this */
                channel->rescind = true;

                vmbus_device_unregister(channel->device_obj);
        }
}

/* Note: the function can run concurrently for primary/sub channels. */
static void vmbus_add_channel_work(struct work_struct *work)
{
        struct vmbus_channel *newchannel =
                container_of(work, struct vmbus_channel, add_channel_work);
        struct vmbus_channel *primary_channel = newchannel->primary_channel;
        unsigned long flags;
        u16 dev_type;
        int ret;

        dev_type = hv_get_dev_type(newchannel);

        init_vp_index(newchannel, dev_type);

        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
                                         percpu_channel_enq,
                                         newchannel, true);
        } else {
                percpu_channel_enq(newchannel);
                put_cpu();
        }

        /*
         * This state is used to indicate a successful open
         * so that when we do close the channel normally, we
         * can clean up properly.
         */
        newchannel->state = CHANNEL_OPEN_STATE;

        if (primary_channel != NULL) {
                /* newchannel is a sub-channel. */
                struct hv_device *dev = primary_channel->device_obj;

                if (vmbus_add_channel_kobj(dev, newchannel))
                        goto err_deq_chan;

                if (primary_channel->sc_creation_callback != NULL)
                        primary_channel->sc_creation_callback(newchannel);

                newchannel->probe_done = true;
                return;
        }

        /*
         * Start the process of binding the primary channel to the driver.
         */
        newchannel->device_obj = vmbus_device_create(
                &newchannel->offermsg.offer.if_type,
                &newchannel->offermsg.offer.if_instance,
                newchannel);
        if (!newchannel->device_obj)
                goto err_deq_chan;

        newchannel->device_obj->device_id = dev_type;
        /*
         * Add the new device to the bus. This will kick off device-driver
         * binding which eventually invokes the device driver's AddDevice()
         * method.
         */
        ret = vmbus_device_register(newchannel->device_obj);

        if (ret != 0) {
                pr_err("unable to add child device object (relid %d)\n",
                        newchannel->offermsg.child_relid);
                kfree(newchannel->device_obj);
                goto err_deq_chan;
        }

        newchannel->probe_done = true;
        return;

err_deq_chan:
        mutex_lock(&vmbus_connection.channel_mutex);

        /*
         * We need to set the flag, otherwise
         * vmbus_onoffer_rescind() can be blocked.
         */
        newchannel->probe_done = true;

        if (primary_channel == NULL) {
                list_del(&newchannel->listentry);
        } else {
                spin_lock_irqsave(&primary_channel->lock, flags);
                list_del(&newchannel->sc_list);
                spin_unlock_irqrestore(&primary_channel->lock, flags);
        }

        mutex_unlock(&vmbus_connection.channel_mutex);

        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
                                         percpu_channel_deq,
                                         newchannel, true);
        } else {
                percpu_channel_deq(newchannel);
                put_cpu();
        }

        vmbus_release_relid(newchannel->offermsg.child_relid);

        free_channel(newchannel);
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
        struct vmbus_channel *channel;
        struct workqueue_struct *wq;
        unsigned long flags;
        bool fnew = true;

        mutex_lock(&vmbus_connection.channel_mutex);

        /*
         * Now that we have acquired the channel_mutex,
         * we can release the potentially racing rescind thread.
         */
        atomic_dec(&vmbus_connection.offer_in_progress);

        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (guid_equal(&channel->offermsg.offer.if_type,
                               &newchannel->offermsg.offer.if_type) &&
                    guid_equal(&channel->offermsg.offer.if_instance,
                               &newchannel->offermsg.offer.if_instance)) {
                        fnew = false;
                        break;
                }
        }

        if (fnew)
                list_add_tail(&newchannel->listentry,
                              &vmbus_connection.chn_list);
        else {
                /*
                 * Check to see if this is a valid sub-channel.
                 */
                if (newchannel->offermsg.offer.sub_channel_index == 0) {
                        mutex_unlock(&vmbus_connection.channel_mutex);
                        /*
                         * Don't call free_channel(), because newchannel->kobj
                         * is not initialized yet.
                         */
                        kfree(newchannel);
                        WARN_ON_ONCE(1);
                        return;
                }
                /*
                 * Process the sub-channel.
                 */
                newchannel->primary_channel = channel;
                spin_lock_irqsave(&channel->lock, flags);
                list_add_tail(&newchannel->sc_list, &channel->sc_list);
                spin_unlock_irqrestore(&channel->lock, flags);
        }

        mutex_unlock(&vmbus_connection.channel_mutex);

        /*
         * vmbus_process_offer() mustn't call channel->sc_creation_callback()
         * directly for sub-channels, because sc_creation_callback() ->
         * vmbus_open() may never get the host's response to the
         * OPEN_CHANNEL message (the host may rescind a channel at any time,
         * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
         * may not wake up the vmbus_open() as it's blocked due to a non-zero
         * vmbus_connection.offer_in_progress, and finally we have a deadlock.
         *
         * The above is also true for primary channels, if the related device
         * drivers use sync probing mode by default.
         *
         * And, usually the handling of primary channels and sub-channels can
         * depend on each other, so we should offload them to different
         * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
         * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
         * rtnl_lock(), causing a deadlock: the former gets the rtnl_lock
         * and waits for all the sub-channels to appear, but the latter
         * can't get the rtnl_lock and this blocks the handling of
         * sub-channels.
         */
        INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
        wq = fnew ? vmbus_connection.handle_primary_chan_wq :
                    vmbus_connection.handle_sub_chan_wq;
        queue_work(wq, &newchannel->add_channel_work);
}

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;
/*
 * init_vp_index() accesses global variables like next_numa_node_id, and
 * it can run concurrently for primary channels and sub-channels: see
 * vmbus_process_offer(), so we need the lock to protect the global
 * variables.
 */
static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to a VCPU.
 * We distribute the interrupt loads to one or more NUMA nodes based on
 * the channel's affinity_policy.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
        u32 cur_cpu;
        bool perf_chn = vmbus_devs[dev_type].perf_device;
        struct vmbus_channel *primary = channel->primary_channel;
        int next_node;
        cpumask_var_t available_mask;
        struct cpumask *alloced_mask;

        if ((vmbus_proto_version == VERSION_WS2008) ||
            (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
            !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
                /*
                 * Prior to win8, all channel interrupts are
                 * delivered on cpu 0.
                 * Also if the channel is not a performance critical
                 * channel, bind it to cpu 0.
                 * In case alloc_cpumask_var() fails, bind it to cpu 0.
                 */
                channel->numa_node = 0;
                channel->target_cpu = 0;
                channel->target_vp = hv_cpu_number_to_vp_number(0);
                return;
        }

        spin_lock(&bind_channel_to_cpu_lock);

        /*
         * Based on the channel affinity policy, we will assign the NUMA
         * nodes.
         */

        if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
                while (true) {
                        next_node = next_numa_node_id++;
                        if (next_node == nr_node_ids) {
                                next_node = next_numa_node_id = 0;
                                continue;
                        }
                        if (cpumask_empty(cpumask_of_node(next_node)))
                                continue;
                        break;
                }
                channel->numa_node = next_node;
                primary = channel;
        }
        alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

        if (cpumask_weight(alloced_mask) ==
            cpumask_weight(cpumask_of_node(primary->numa_node))) {
                /*
                 * We have cycled through all the CPUs in the node;
                 * reset the alloced map.
                 */
                cpumask_clear(alloced_mask);
        }

        cpumask_xor(available_mask, alloced_mask,
                    cpumask_of_node(primary->numa_node));

        cur_cpu = -1;

        if (primary->affinity_policy == HV_LOCALIZED) {
                /*
                 * Normally the Hyper-V host doesn't create more subchannels
                 * than there are VCPUs on the node, but it is possible when
                 * not all present VCPUs on the node are initialized by the
                 * guest. Clear the alloced_cpus_in_node to start over.
                 */
                if (cpumask_equal(&primary->alloced_cpus_in_node,
                                  cpumask_of_node(primary->numa_node)))
                        cpumask_clear(&primary->alloced_cpus_in_node);
        }

        while (true) {
                cur_cpu = cpumask_next(cur_cpu, available_mask);
                if (cur_cpu >= nr_cpu_ids) {
                        cur_cpu = -1;
                        cpumask_copy(available_mask,
                                     cpumask_of_node(primary->numa_node));
                        continue;
                }

                if (primary->affinity_policy == HV_LOCALIZED) {
                        /*
                         * NOTE: in the case of a sub-channel, the
                         * sub-channel related bit(s) in
                         * primary->alloced_cpus_in_node are cleared in
                         * hv_process_channel_removal(), so when we reload
                         * drivers like hv_netvsc in an SMP guest, we're
                         * able to re-allocate the bit from
                         * primary->alloced_cpus_in_node here.
                         */
                        if (!cpumask_test_cpu(cur_cpu,
                                              &primary->alloced_cpus_in_node)) {
                                cpumask_set_cpu(cur_cpu,
                                                &primary->alloced_cpus_in_node);
                                cpumask_set_cpu(cur_cpu, alloced_mask);
                                break;
                        }
                } else {
                        cpumask_set_cpu(cur_cpu, alloced_mask);
                        break;
                }
        }

        channel->target_cpu = cur_cpu;
        channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);

        spin_unlock(&bind_channel_to_cpu_lock);

        free_cpumask_var(available_mask);
}
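
/*
 * For example (illustrative only): on a guest with two NUMA nodes of four
 * CPUs each, successive perf-critical primary channels round-robin across
 * the nodes via next_numa_node_id, and within the chosen node each channel
 * takes the next CPU not yet set in hv_numa_map (the map is reset once
 * every CPU in the node has been used).
 */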

static void vmbus_wait_for_unload(void)
{
        int cpu;
        void *page_addr;
        struct hv_message *msg;
        struct vmbus_channel_message_header *hdr;
        u32 message_type;

        /*
         * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
         * used for initial contact or to CPU0 depending on host version. When
         * we're crashing on a different CPU, let's hope that the IRQ handler
         * on the CPU which receives CHANNELMSG_UNLOAD_RESPONSE is still
         * functional and vmbus_unload_response() will complete
         * vmbus_connection.unload_event. If not, the last thing we can do is
         * read message pages for all CPUs directly.
         */
        while (1) {
                if (completion_done(&vmbus_connection.unload_event))
                        break;

                for_each_online_cpu(cpu) {
                        struct hv_per_cpu_context *hv_cpu
                                = per_cpu_ptr(hv_context.cpu_context, cpu);

                        page_addr = hv_cpu->synic_message_page;
                        msg = (struct hv_message *)page_addr
                                + VMBUS_MESSAGE_SINT;

                        message_type = READ_ONCE(msg->header.message_type);
                        if (message_type == HVMSG_NONE)
                                continue;

                        hdr = (struct vmbus_channel_message_header *)
                                msg->u.payload;

                        if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
                                complete(&vmbus_connection.unload_event);

                        vmbus_signal_eom(msg, message_type);
                }

                mdelay(10);
        }

        /*
         * We're crashing and already got the UNLOAD_RESPONSE; clean up all
         * possibly pending messages on all CPUs to be able to receive new
         * messages after we reconnect.
         */
        for_each_online_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                page_addr = hv_cpu->synic_message_page;
                msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
                msg->header.message_type = HVMSG_NONE;
        }
}

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
        /*
         * This is a global event; just wake up the waiting thread.
         * Once we successfully unload, we can clean up the monitor state.
         */
        complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(bool crash)
{
        struct vmbus_channel_message_header hdr;

        /* Pre-Win2012R2 hosts don't support reconnect */
        if (vmbus_proto_version < VERSION_WIN8_1)
                return;

        init_completion(&vmbus_connection.unload_event);
        memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
        hdr.msgtype = CHANNELMSG_UNLOAD;
        vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
                       !crash);

        /*
         * vmbus_initiate_unload() is also called on crash, and a crash can
         * happen in interrupt context, where scheduling is impossible.
         */
        if (!crash)
                wait_for_completion(&vmbus_connection.unload_event);
        else
                vmbus_wait_for_unload();
}

/*
 * find_primary_channel_by_offer - Get the channel object given the new offer.
 * This is only used in the resume path of hibernation.
 */
static struct vmbus_channel *
find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
{
        struct vmbus_channel *channel = NULL, *iter;
        const guid_t *inst1, *inst2;

        /* Ignore sub-channel offers. */
        if (offer->offer.sub_channel_index != 0)
                return NULL;

        mutex_lock(&vmbus_connection.channel_mutex);

        list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
                inst1 = &iter->offermsg.offer.if_instance;
                inst2 = &offer->offer.if_instance;

                if (guid_equal(inst1, inst2)) {
                        channel = iter;
                        break;
                }
        }

        mutex_unlock(&vmbus_connection.channel_mutex);

        return channel;
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_offer_channel *offer;
        struct vmbus_channel *oldchannel, *newchannel;
        size_t offer_sz;

        offer = (struct vmbus_channel_offer_channel *)hdr;

        trace_vmbus_onoffer(offer);

        oldchannel = find_primary_channel_by_offer(offer);

        if (oldchannel != NULL) {
                atomic_dec(&vmbus_connection.offer_in_progress);

                /*
                 * We're resuming from hibernation: we expect the host to send
                 * exactly the same offers that we had before the hibernation.
                 */
                offer_sz = sizeof(*offer);
                if (memcmp(offer, &oldchannel->offermsg, offer_sz) == 0)
                        return;

                pr_debug("Mismatched offer from the host (relid=%d)\n",
                         offer->child_relid);

                print_hex_dump_debug("Old vmbus offer: ", DUMP_PREFIX_OFFSET,
                                     16, 4, &oldchannel->offermsg, offer_sz,
                                     false);
                print_hex_dump_debug("New vmbus offer: ", DUMP_PREFIX_OFFSET,
                                     16, 4, offer, offer_sz, false);
                return;
        }

        /* Allocate the channel object and save this offer. */
        newchannel = alloc_channel();
        if (!newchannel) {
                vmbus_release_relid(offer->child_relid);
                atomic_dec(&vmbus_connection.offer_in_progress);
                pr_err("Unable to allocate channel object\n");
                return;
        }

        /*
         * Set up state for signalling the host.
         */
        newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID;

        if (vmbus_proto_version != VERSION_WS2008) {
                newchannel->is_dedicated_interrupt =
                                (offer->is_dedicated_interrupt != 0);
                newchannel->sig_event = offer->connection_id;
        }

        memcpy(&newchannel->offermsg, offer,
               sizeof(struct vmbus_channel_offer_channel));
        newchannel->monitor_grp = (u8)offer->monitorid / 32;
        newchannel->monitor_bit = (u8)offer->monitorid % 32;

        vmbus_process_offer(newchannel);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_rescind_offer *rescind;
        struct vmbus_channel *channel;
        struct device *dev;

        rescind = (struct vmbus_channel_rescind_offer *)hdr;

        trace_vmbus_onoffer_rescind(rescind);

        /*
         * The offer msg and the corresponding rescind msg
         * from the host are guaranteed to be ordered -
         * offer comes in first and then the rescind.
         * Since we process these events in work elements,
         * and with preemption, we may end up processing
         * the events out of order. Given that we handle these
         * work elements on the same CPU, this is possible only
         * in the case of preemption. In any case wait here
         * until the offer processing has moved beyond the
         * point where the channel is discoverable.
         */

        while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
                /*
                 * Wait here while any channel offer is still being
                 * processed.
                 */
                msleep(1);
        }

        mutex_lock(&vmbus_connection.channel_mutex);
        channel = relid2channel(rescind->child_relid);
        mutex_unlock(&vmbus_connection.channel_mutex);

        if (channel == NULL) {
                /*
                 * We failed in processing the offer message;
                 * we would have cleaned up the relid in that
                 * failure path.
                 */
                return;
        }

        /*
         * Before setting channel->rescind in vmbus_rescind_cleanup(), we
         * should make sure the channel callback is not running any more.
         */
        vmbus_reset_channel_cb(channel);

        /*
         * Now wait for offer handling to complete.
         */
        vmbus_rescind_cleanup(channel);
        while (READ_ONCE(channel->probe_done) == false) {
                /*
                 * Wait here until the offer handling (device probing)
                 * has completed.
                 */
                msleep(1);
        }

        /*
         * At this point, the rescind handling can proceed safely.
         */

        if (channel->device_obj) {
                if (channel->chn_rescind_callback) {
                        channel->chn_rescind_callback(channel);
                        return;
                }
                /*
                 * We will have to unregister this device from the
                 * driver core.
                 */
                dev = get_device(&channel->device_obj->device);
                if (dev) {
                        vmbus_device_unregister(channel->device_obj);
                        put_device(dev);
                }
        }
        if (channel->primary_channel != NULL) {
                /*
                 * Sub-channel is being rescinded. Following is the channel
                 * close sequence when initiated from the driver (refer to
                 * vmbus_close() for details):
                 * 1. Close all sub-channels first
                 * 2. Then close the primary channel.
                 */
                mutex_lock(&vmbus_connection.channel_mutex);
                if (channel->state == CHANNEL_OPEN_STATE) {
                        /*
                         * CHANNEL_OPEN_STATE means the channel has only been
                         * offered and is not actually opened by a driver
                         * (that would be CHANNEL_OPENED_STATE), so it is
                         * safe for us to clean up the channel here.
                         */
                        hv_process_channel_removal(channel);
                } else {
                        complete(&channel->rescind_event);
                }
                mutex_unlock(&vmbus_connection.channel_mutex);
        }
}

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
        BUG_ON(!is_hvsock_channel(channel));

        /* We always get a rescind msg when a connection is closed. */
        while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
                msleep(1);

        vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);

/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
                        struct vmbus_channel_message_header *hdr)
{
}

/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_open_result *result;
        struct vmbus_channel_msginfo *msginfo;
        struct vmbus_channel_message_header *requestheader;
        struct vmbus_channel_open_channel *openmsg;
        unsigned long flags;

        result = (struct vmbus_channel_open_result *)hdr;

        trace_vmbus_onopen_result(result);

        /*
         * Find the open msg, copy the result and signal/unblock the wait
         * event.
         */
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                                msglistentry) {
                requestheader =
                        (struct vmbus_channel_message_header *)msginfo->msg;

                if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
                        openmsg =
                        (struct vmbus_channel_open_channel *)msginfo->msg;
                        if (openmsg->child_relid == result->child_relid &&
                            openmsg->openid == result->openid) {
                                memcpy(&msginfo->response.open_result,
                                       result,
                                       sizeof(
                                        struct vmbus_channel_open_result));
                                complete(&msginfo->waitevent);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_gpadl_created *gpadlcreated;
        struct vmbus_channel_msginfo *msginfo;
        struct vmbus_channel_message_header *requestheader;
        struct vmbus_channel_gpadl_header *gpadlheader;
        unsigned long flags;

        gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

        trace_vmbus_ongpadl_created(gpadlcreated);

        /*
         * Find the establish msg, copy the result and signal/unblock the wait
         * event.
         */
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                                msglistentry) {
                requestheader =
                        (struct vmbus_channel_message_header *)msginfo->msg;

                if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
                        gpadlheader =
                        (struct vmbus_channel_gpadl_header *)requestheader;

                        if ((gpadlcreated->child_relid ==
                             gpadlheader->child_relid) &&
                            (gpadlcreated->gpadl == gpadlheader->gpadl)) {
                                memcpy(&msginfo->response.gpadl_created,
                                       gpadlcreated,
                                       sizeof(
                                        struct vmbus_channel_gpadl_created));
                                complete(&msginfo->waitevent);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
                        struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_gpadl_torndown *gpadl_torndown;
        struct vmbus_channel_msginfo *msginfo;
        struct vmbus_channel_message_header *requestheader;
        struct vmbus_channel_gpadl_teardown *gpadl_teardown;
        unsigned long flags;

        gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

        trace_vmbus_ongpadl_torndown(gpadl_torndown);

        /*
         * Find the teardown msg, copy the result and signal/unblock the wait
         * event.
         */
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                                msglistentry) {
                requestheader =
                        (struct vmbus_channel_message_header *)msginfo->msg;

                if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
                        gpadl_teardown =
                        (struct vmbus_channel_gpadl_teardown *)requestheader;

                        if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
                                memcpy(&msginfo->response.gpadl_torndown,
                                       gpadl_torndown,
                                       sizeof(
                                        struct vmbus_channel_gpadl_torndown));
                                complete(&msginfo->waitevent);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler.
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
                struct vmbus_channel_message_header *hdr)
{
        struct vmbus_channel_msginfo *msginfo;
        struct vmbus_channel_message_header *requestheader;
        struct vmbus_channel_version_response *version_response;
        unsigned long flags;

        version_response = (struct vmbus_channel_version_response *)hdr;

        trace_vmbus_onversion_response(version_response);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                                msglistentry) {
                requestheader =
                        (struct vmbus_channel_message_header *)msginfo->msg;

                if (requestheader->msgtype ==
                    CHANNELMSG_INITIATE_CONTACT) {
                        memcpy(&msginfo->response.version_response,
                              version_response,
                              sizeof(struct vmbus_channel_version_response));
                        complete(&msginfo->waitevent);
                }
        }
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/* Channel message dispatch table */
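/*
 * The middle field is the handler type: entries marked 1 have handlers
 * that must not sleep and are run directly from the message DPC, while
 * entries marked 0 may block and are therefore deferred to a work queue
 * (see vmbus_on_msg_dpc() in vmbus_drv.c).
 */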
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
        { CHANNELMSG_INVALID,                   0, NULL },
        { CHANNELMSG_OFFERCHANNEL,              0, vmbus_onoffer },
        { CHANNELMSG_RESCIND_CHANNELOFFER,      0, vmbus_onoffer_rescind },
        { CHANNELMSG_REQUESTOFFERS,             0, NULL },
        { CHANNELMSG_ALLOFFERS_DELIVERED,       1, vmbus_onoffers_delivered },
        { CHANNELMSG_OPENCHANNEL,               0, NULL },
        { CHANNELMSG_OPENCHANNEL_RESULT,        1, vmbus_onopen_result },
        { CHANNELMSG_CLOSECHANNEL,              0, NULL },
        { CHANNELMSG_GPADL_HEADER,              0, NULL },
        { CHANNELMSG_GPADL_BODY,                0, NULL },
        { CHANNELMSG_GPADL_CREATED,             1, vmbus_ongpadl_created },
        { CHANNELMSG_GPADL_TEARDOWN,            0, NULL },
        { CHANNELMSG_GPADL_TORNDOWN,            1, vmbus_ongpadl_torndown },
        { CHANNELMSG_RELID_RELEASED,            0, NULL },
        { CHANNELMSG_INITIATE_CONTACT,          0, NULL },
        { CHANNELMSG_VERSION_RESPONSE,          1, vmbus_onversion_response },
        { CHANNELMSG_UNLOAD,                    0, NULL },
        { CHANNELMSG_UNLOAD_RESPONSE,           1, vmbus_unload_response },
        { CHANNELMSG_18,                        0, NULL },
        { CHANNELMSG_19,                        0, NULL },
        { CHANNELMSG_20,                        0, NULL },
        { CHANNELMSG_TL_CONNECT_REQUEST,        0, NULL },
};

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
        struct hv_message *msg = context;
        struct vmbus_channel_message_header *hdr;
        int size;

        hdr = (struct vmbus_channel_message_header *)msg->u.payload;
        size = msg->header.payload_size;

        trace_vmbus_on_message(hdr);

        if (hdr->msgtype >= CHANNELMSG_COUNT) {
                pr_err("Received invalid channel message type %d size %d\n",
                           hdr->msgtype, size);
                print_hex_dump_bytes("", DUMP_PREFIX_NONE,
                                     (unsigned char *)msg->u.payload, size);
                return;
        }

        if (channel_message_table[hdr->msgtype].message_handler)
                channel_message_table[hdr->msgtype].message_handler(hdr);
        else
                pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}

/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
        struct vmbus_channel_message_header *msg;
        struct vmbus_channel_msginfo *msginfo;
        int ret;

        msginfo = kmalloc(sizeof(*msginfo) +
                          sizeof(struct vmbus_channel_message_header),
                          GFP_KERNEL);
        if (!msginfo)
                return -ENOMEM;

        msg = (struct vmbus_channel_message_header *)msginfo->msg;

        msg->msgtype = CHANNELMSG_REQUESTOFFERS;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
                             true);

        trace_vmbus_request_offers(ret);

        if (ret != 0)
                pr_err("Unable to request offers - %d\n", ret);

        kfree(msginfo);

        return ret;
}

static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
        struct list_head *cur, *tmp;
        struct vmbus_channel *cur_channel;

        if (primary_channel->sc_creation_callback == NULL)
                return;

        list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

                primary_channel->sc_creation_callback(cur_channel);
        }
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
                                void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
        primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
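
/*
 * Example (a hypothetical driver sketch; my_sc_open() is not a real
 * helper): a driver that requests sub-channels registers a creation
 * callback on the primary channel, and the callback typically opens each
 * new sub-channel, e.g. via vmbus_open():
 *
 *	static void my_sc_create_cb(struct vmbus_channel *new_sc)
 *	{
 *		my_sc_open(new_sc);
 *	}
 *	...
 *	vmbus_set_sc_create_callback(primary, my_sc_create_cb);
 */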

bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
        bool ret;

        ret = !list_empty(&primary->sc_list);

        if (ret) {
                /*
                 * Invoke the callback on sub-channel creation.
                 * This will present a uniform interface to the
                 * clients.
                 */
                invoke_sc_cb(primary);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
                void (*chn_rescind_cb)(struct vmbus_channel *))
{
        channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
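
/*
 * Note: if a driver registers a rescind callback with
 * vmbus_set_chn_rescind_callback(), vmbus_onoffer_rescind() above invokes
 * that callback instead of unregistering the device itself, so the driver
 * then owns the teardown of the rescinded channel.
 */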