1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
4 AudioScience HPI driver
5 Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
8 Extended Message Function With Response Caching
10 (C) Copyright AudioScience Inc. 2002
11 *****************************************************************************/
12 #define SOURCEFILE_NAME "hpimsgx.c"
13 #include "hpi_internal.h"
14 #include "hpi_version.h"
15 #include "hpimsginit.h"
20 static const struct pci_device_id asihpi_pci_tbl[] = {
/* Serializes access to the open-state bookkeeping arrays below
 * (outstream_user_open / instream_user_open and the cached responses). */
24 static struct hpios_spinlock msgx_lock;
/* Per-adapter message handler, filled in by HPIMSGX__init() when an
 * adapter is created; NULL entries mean "no adapter at this index". */
26 static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
/* Non-zero enables debug logging of messages/responses in this module. */
27 static int logging_enabled = 1;
/*
 * Find the HPI handler function for a PCI device.
 *
 * Walks asihpi_pci_tbl (terminated by a zero vendor entry) and matches
 * vendor/device/subvendor/subdevice against the device described by
 * pci_info, treating PCI_ANY_ID as a wildcard in each field.  On a match,
 * the table's driver_data is returned cast to an hpi_handler_func pointer.
 * NOTE(review): the non-matching branches presumably 'continue' to the
 * next table row, and a no-match fall-through presumably returns NULL —
 * those lines are not visible in this view; confirm against the full file.
 */
29 static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
35 	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
36 		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
37 			&& asihpi_pci_tbl[i].vendor !=
38 			pci_info->pci_dev->vendor)
40 		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
41 			&& asihpi_pci_tbl[i].device !=
42 			pci_info->pci_dev->device)
44 		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
45 			&& asihpi_pci_tbl[i].subvendor !=
46 			pci_info->pci_dev->subsystem_vendor)
48 		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
49 			&& asihpi_pci_tbl[i].subdevice !=
50 			pci_info->pci_dev->subsystem_device)
53 		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
54 		   asihpi_pci_tbl[i].driver_data); */
55 		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
/*
 * Dispatch a message to the adapter's registered handler.
 *
 * If phm->adapter_index is in range and an entry point is registered,
 * the handler fills in *phr.  Otherwise (the elided 'else' path) the
 * response is initialized with HPI_ERROR_PROCESSING_MESSAGE.
 */
61 static inline void hw_entry_point(struct hpi_message *phm,
62 	struct hpi_response *phr)
64 	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
65 		&& hpi_entry_points[phm->adapter_index])
66 		hpi_entry_points[phm->adapter_index] (phm, phr);
68 		hpi_init_response(phr, phm->object, phm->function,
69 			HPI_ERROR_PROCESSING_MESSAGE);
/* Forward declarations for the per-object open/close handlers and the
 * module lifecycle helpers defined later in this file.  The stream
 * open/close variants take an extra h_owner token (visible below) so
 * closes can be matched to the opener. */
72 static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
73 static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);
75 static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
76 static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);
78 static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
80 static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
82 static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
84 static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
87 static void HPIMSGX__reset(u16 adapter_index);
89 static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
90 static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
/* Cached-response types: each pairs the common response header with the
 * object-specific payload.  Packed (pragma pack(1) unless disabled) so
 * they match the wire/ioctl layout of struct hpi_response. */
92 #ifndef DISABLE_PRAGMA_PACK1
96 struct hpi_subsys_response {
97 	struct hpi_response_header h;
98 	struct hpi_subsys_res s;
101 struct hpi_adapter_response {
102 	struct hpi_response_header h;
103 	struct hpi_adapter_res a;
106 struct hpi_mixer_response {
107 	struct hpi_response_header h;
108 	struct hpi_mixer_res m;
111 struct hpi_stream_response {
112 	struct hpi_response_header h;
113 	struct hpi_stream_res d;
/* Per-adapter stream counts and type, captured by adapter_prepare()
 * from HPI_ADAPTER_GET_INFO (fields elided in this view). */
116 struct adapter_info {
/* Open-state record (fields elided here; open_flag and h_owner are
 * referenced throughout the stream open/close code below). */
122 struct asi_open_state {
127 #ifndef DISABLE_PRAGMA_PACK1
/* Cached "open" responses, pre-computed once per adapter by
 * adapter_prepare() and replayed for subsequent user-mode opens. */
132 static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];
134 static struct hpi_stream_response
135 	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
137 static struct hpi_stream_response
138 	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
140 static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];
142 static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];
144 /* use these to keep track of opens from user mode apps/DLLs */
145 static struct asi_open_state
146 	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
148 static struct asi_open_state
149 	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
/*
 * Handle all HPI_OBJ_SUBSYSTEM messages.
 *
 * Subsystem messages carry no valid adapter index, so anything other
 * than HPI_ADAPTER_INDEX_INVALID is logged as suspicious.  OPEN/CLOSE
 * are answered locally and never propagated; DRIVER_LOAD initializes
 * this module's lock, entry-point table and cached responses;
 * CREATE_ADAPTER goes through HPIMSGX__init().  Unrecognized functions
 * get HPI_ERROR_INVALID_FUNC (the per-case 'break's are elided in this
 * view of the file).
 */
151 static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
154 	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
155 		HPI_DEBUG_LOG(WARNING,
156 			"suspicious adapter index %d in subsys message 0x%x.\n",
157 			phm->adapter_index, phm->function);
159 	switch (phm->function) {
160 	case HPI_SUBSYS_GET_VERSION:
161 		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
162 			HPI_SUBSYS_GET_VERSION, 0);
163 		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
164 		phr->u.s.data = HPI_VER;	/* return major.minor.release */
166 	case HPI_SUBSYS_OPEN:
167 		/*do not propagate the message down the chain */
168 		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
170 	case HPI_SUBSYS_CLOSE:
171 		/*do not propagate the message down the chain */
172 		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
		/* Release every stream/resource this owner still holds. */
174 		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
176 	case HPI_SUBSYS_DRIVER_LOAD:
177 		/* Initialize this module's internal state */
178 		hpios_msgxlock_init(&msgx_lock);
179 		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
180 		/* Init subsys_findadapters response to no-adapters */
181 		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
182 		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
183 			HPI_SUBSYS_DRIVER_LOAD, 0);
184 		/* individual HPIs dont implement driver load */
185 		HPI_COMMON(phm, phr);
187 	case HPI_SUBSYS_DRIVER_UNLOAD:
188 		HPI_COMMON(phm, phr);
189 		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
190 		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
191 			HPI_SUBSYS_DRIVER_UNLOAD, 0);
194 	case HPI_SUBSYS_GET_NUM_ADAPTERS:
195 	case HPI_SUBSYS_GET_ADAPTER:
196 		HPI_COMMON(phm, phr);
199 	case HPI_SUBSYS_CREATE_ADAPTER:
200 		HPIMSGX__init(phm, phr);
204 		/* Must explicitly handle every subsys message in this switch */
205 		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
206 			HPI_ERROR_INVALID_FUNC);
/*
 * Handle HPI_OBJ_ADAPTER messages.
 *
 * OPEN/CLOSE are answered from the local cache/state.  DELETE first
 * cleans up this owner's state, then sends a locally-built message
 * (function elided in this view — presumably the delete itself) to the
 * hardware before forwarding the original.  Everything else goes
 * straight to the adapter's entry point.
 */
211 static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
214 	switch (phm->function) {
215 	case HPI_ADAPTER_OPEN:
216 		adapter_open(phm, phr);
218 	case HPI_ADAPTER_CLOSE:
219 		adapter_close(phm, phr);
221 	case HPI_ADAPTER_DELETE:
222 		HPIMSGX__cleanup(phm->adapter_index, h_owner);
224 			struct hpi_message hm;
225 			struct hpi_response hr;
226 			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
228 			hm.adapter_index = phm->adapter_index;
229 			hw_entry_point(&hm, &hr);
231 		hw_entry_point(phm, phr);
/* default: forward unhandled adapter functions to the hardware. */
235 		hw_entry_point(phm, phr);
/*
 * Handle HPI_OBJ_MIXER messages: OPEN replays the cached open response,
 * CLOSE is answered locally, everything else is forwarded to hardware.
 */
240 static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
242 	switch (phm->function) {
244 		mixer_open(phm, phr);
246 	case HPI_MIXER_CLOSE:
247 		mixer_close(phm, phr);
250 		hw_entry_point(phm, phr);
/*
 * Handle HPI_OBJ_OSTREAM messages.
 *
 * Rejects stream indices beyond the adapter's reported outstream count
 * (the early 'return' after the error init is elided in this view),
 * tracks OPEN/CLOSE ownership via h_owner, and forwards everything
 * else to the hardware entry point.
 */
255 static void outstream_message(struct hpi_message *phm,
256 	struct hpi_response *phr, void *h_owner)
258 	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
259 		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
260 			HPI_ERROR_INVALID_OBJ_INDEX);
264 	switch (phm->function) {
265 	case HPI_OSTREAM_OPEN:
266 		outstream_open(phm, phr, h_owner);
268 	case HPI_OSTREAM_CLOSE:
269 		outstream_close(phm, phr, h_owner);
272 		hw_entry_point(phm, phr);
/*
 * Handle HPI_OBJ_ISTREAM messages; mirror image of outstream_message()
 * using the instream count, open-state table and handlers.
 */
277 static void instream_message(struct hpi_message *phm,
278 	struct hpi_response *phr, void *h_owner)
280 	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
281 		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
282 			HPI_ERROR_INVALID_OBJ_INDEX);
286 	switch (phm->function) {
287 	case HPI_ISTREAM_OPEN:
288 		instream_open(phm, phr, h_owner);
290 	case HPI_ISTREAM_CLOSE:
291 		instream_close(phm, phr, h_owner);
294 		hw_entry_point(phm, phr);
299 /* NOTE: HPI_Message() must be defined in the driver as a wrapper for
300  * HPI_MessageEx so that functions in hpifunc.c compile.
/*
 * Top-level message dispatch for this module.
 *
 * Validates the message type and adapter index, then routes by object
 * type to the subsystem/adapter/mixer/stream handlers above; unknown
 * objects fall through to the raw hardware entry point.  On a serious
 * error (DSP communication failure or worse) the debug level is raised
 * so subsequent traffic is logged.
 */
302 void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
307 	HPI_DEBUG_MESSAGE(DEBUG, phm);
309 	if (phm->type != HPI_TYPE_REQUEST) {
310 		hpi_init_response(phr, phm->object, phm->function,
311 			HPI_ERROR_INVALID_TYPE);
315 	if (phm->adapter_index >= HPI_MAX_ADAPTERS
316 		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
317 		hpi_init_response(phr, phm->object, phm->function,
318 			HPI_ERROR_BAD_ADAPTER_NUMBER);
322 	switch (phm->object) {
323 	case HPI_OBJ_SUBSYSTEM:
324 		subsys_message(phm, phr, h_owner);
327 	case HPI_OBJ_ADAPTER:
328 		adapter_message(phm, phr, h_owner);
/* (HPI_OBJ_MIXER case label elided in this view) */
332 		mixer_message(phm, phr);
335 	case HPI_OBJ_OSTREAM:
336 		outstream_message(phm, phr, h_owner);
339 	case HPI_OBJ_ISTREAM:
340 		instream_message(phm, phr, h_owner);
344 		hw_entry_point(phm, phr);
349 	HPI_DEBUG_RESPONSE(phr);
351 	if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
352 		hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
/* Answer HPI_ADAPTER_OPEN from the cached response built by
 * adapter_prepare(); the hardware is not contacted again. */
357 static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
359 	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
360 	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
361 		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
/* HPI_ADAPTER_CLOSE is a no-op at the hardware level here: the cached
 * open is kept and a success response is synthesized locally. */
364 static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
366 	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
367 	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
/* Answer HPI_MIXER_OPEN from the response cached by adapter_prepare(). */
370 static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
372 	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
373 		sizeof(rESP_HPI_MIXER_OPEN[0]));
/* HPI_MIXER_CLOSE: synthesize a success response; the mixer stays open
 * at the hardware level (it was pre-opened by adapter_prepare()). */
376 static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
378 	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
/*
 * Open an input stream on behalf of a user-mode owner.
 *
 * Under msgx_lock: fail with HPI_ERROR_OBJ_ALREADY_OPEN if another
 * owner holds the stream; otherwise, if the cached pre-open response
 * recorded an error, an HPI_ISTREAM_RESET is sent to the hardware to
 * retry, and the stream is claimed (open_flag/h_owner) only on success.
 * On the clean-cache path the cached open response is replayed and the
 * stream claimed directly.  NOTE(review): several branch/else lines are
 * elided in this view; the description above follows the visible
 * lock/copy/flag sequence — confirm against the full file.
 */
381 static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
385 	struct hpi_message hm;
386 	struct hpi_response hr;
388 	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);
390 	hpios_msgxlock_lock(&msgx_lock);
392 	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
393 		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
394 	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
395 		[phm->obj_index].h.error)
397 			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
399 			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
401 		instream_user_open[phm->adapter_index][phm->
402 			obj_index].open_flag = 1;
403 		hpios_msgxlock_unlock(&msgx_lock);
/* Issue a reset to the hardware outside the spinlock. */
406 		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
408 		hm.adapter_index = phm->adapter_index;
409 		hm.obj_index = phm->obj_index;
410 		hw_entry_point(&hm, &hr);
412 		hpios_msgxlock_lock(&msgx_lock);
/* On hardware error, release the claim and report the error;
 * otherwise record ownership and replay the cached open response. */
414 			instream_user_open[phm->adapter_index][phm->
415 				obj_index].open_flag = 0;
416 			phr->error = hr.error;
418 			instream_user_open[phm->adapter_index][phm->
419 				obj_index].open_flag = 1;
420 			instream_user_open[phm->adapter_index][phm->
421 				obj_index].h_owner = h_owner;
423 				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
425 				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
428 	hpios_msgxlock_unlock(&msgx_lock);
/*
 * Close an input stream for a user-mode owner.
 *
 * Only the recorded owner may close: ownership is cleared under
 * msgx_lock, the hardware reset (HPI_ISTREAM_RESET, built locally) is
 * issued outside the lock, then on hardware error ownership is restored
 * and the error reported, otherwise the open_flag/h_owner are cleared.
 * A mismatched owner gets HPI_ERROR_OBJ_NOT_OPEN plus a warning log.
 * NOTE(review): the 'if (h_owner == ...)' condition head and some
 * else lines are elided in this view.
 */
431 static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
435 	struct hpi_message hm;
436 	struct hpi_response hr;
438 	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);
440 	hpios_msgxlock_lock(&msgx_lock);
442 		instream_user_open[phm->adapter_index][phm->
443 		obj_index].h_owner) {
444 		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
445 		   "instream %d owned by %p\n",
446 		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
447 		instream_user_open[phm->adapter_index][phm->
448 			obj_index].h_owner = NULL;
449 		hpios_msgxlock_unlock(&msgx_lock);
451 		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
453 		hm.adapter_index = phm->adapter_index;
454 		hm.obj_index = phm->obj_index;
455 		hw_entry_point(&hm, &hr);
456 		hpios_msgxlock_lock(&msgx_lock);
/* Hardware refused the close: restore ownership and report. */
458 			instream_user_open[phm->adapter_index][phm->
459 				obj_index].h_owner = h_owner;
460 			phr->error = hr.error;
462 			instream_user_open[phm->adapter_index][phm->
463 				obj_index].open_flag = 0;
464 			instream_user_open[phm->adapter_index][phm->
465 				obj_index].h_owner = NULL;
468 		HPI_DEBUG_LOG(WARNING,
469 			"%p trying to close %d instream %d owned by %p\n",
470 			h_owner, phm->adapter_index, phm->obj_index,
471 			instream_user_open[phm->adapter_index][phm->
473 		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
475 	hpios_msgxlock_unlock(&msgx_lock);
/*
 * Open an output stream on behalf of a user-mode owner.
 * Mirror image of instream_open(): reject double opens, retry via a
 * locally-built hardware message when the cached pre-open response
 * recorded an error, otherwise replay the cached open response and
 * record ownership — all state changes under msgx_lock, the hardware
 * call outside it.  NOTE(review): branch/else lines elided in this view.
 */
478 static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
482 	struct hpi_message hm;
483 	struct hpi_response hr;
485 	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);
487 	hpios_msgxlock_lock(&msgx_lock);
489 	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
490 		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
491 	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
492 		[phm->obj_index].h.error)
494 			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
496 			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
498 		outstream_user_open[phm->adapter_index][phm->
499 			obj_index].open_flag = 1;
500 		hpios_msgxlock_unlock(&msgx_lock);
/* Issue the hardware retry outside the spinlock. */
503 		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
505 		hm.adapter_index = phm->adapter_index;
506 		hm.obj_index = phm->obj_index;
507 		hw_entry_point(&hm, &hr);
509 		hpios_msgxlock_lock(&msgx_lock);
/* On hardware error release the claim; else record ownership and
 * replay the cached open response. */
511 			outstream_user_open[phm->adapter_index][phm->
512 				obj_index].open_flag = 0;
513 			phr->error = hr.error;
515 			outstream_user_open[phm->adapter_index][phm->
516 				obj_index].open_flag = 1;
517 			outstream_user_open[phm->adapter_index][phm->
518 				obj_index].h_owner = h_owner;
520 				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
522 				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
525 	hpios_msgxlock_unlock(&msgx_lock);
/*
 * Close an output stream for a user-mode owner.
 * Mirror image of instream_close(): only the recorded owner may close;
 * ownership is dropped under msgx_lock, the locally-built hardware
 * message sent outside it, ownership restored if the hardware reports
 * an error, and a mismatched owner gets HPI_ERROR_OBJ_NOT_OPEN with a
 * warning.  NOTE(review): condition-head and else lines elided here.
 */
528 static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
532 	struct hpi_message hm;
533 	struct hpi_response hr;
535 	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);
537 	hpios_msgxlock_lock(&msgx_lock);
540 		outstream_user_open[phm->adapter_index][phm->
541 		obj_index].h_owner) {
542 		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
543 		   "outstream %d owned by %p\n",
544 		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
545 		outstream_user_open[phm->adapter_index][phm->
546 			obj_index].h_owner = NULL;
547 		hpios_msgxlock_unlock(&msgx_lock);
549 		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
551 		hm.adapter_index = phm->adapter_index;
552 		hm.obj_index = phm->obj_index;
553 		hw_entry_point(&hm, &hr);
554 		hpios_msgxlock_lock(&msgx_lock);
/* Hardware refused the close: restore ownership and report. */
556 			outstream_user_open[phm->adapter_index][phm->
557 				obj_index].h_owner = h_owner;
558 			phr->error = hr.error;
560 			outstream_user_open[phm->adapter_index][phm->
561 				obj_index].open_flag = 0;
562 			outstream_user_open[phm->adapter_index][phm->
563 				obj_index].h_owner = NULL;
566 		HPI_DEBUG_LOG(WARNING,
567 			"%p trying to close %d outstream %d owned by %p\n",
568 			h_owner, phm->adapter_index, phm->obj_index,
569 			outstream_user_open[phm->adapter_index][phm->
571 		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
573 	hpios_msgxlock_unlock(&msgx_lock);
/*
 * Pre-open an adapter and populate the response caches.
 *
 * Opens the adapter, queries HPI_ADAPTER_GET_INFO for the stream counts
 * and type, pre-opens every outstream/instream and the mixer, and
 * stores each hardware response in the rESP_* caches so later user-mode
 * opens can be answered without touching the hardware.  All per-stream
 * open_flag/h_owner state is reset.  NOTE(review): the early-error
 * return paths and the final return value are elided in this view.
 */
576 static u16 adapter_prepare(u16 adapter)
578 	struct hpi_message hm;
579 	struct hpi_response hr;
581 	/* Open the adapter and streams */
584 	/* call to HPI_ADAPTER_OPEN */
585 	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
587 	hm.adapter_index = adapter;
588 	hw_entry_point(&hm, &hr);
589 	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
590 		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
594 	/* call to HPI_ADAPTER_GET_INFO */
595 	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
596 		HPI_ADAPTER_GET_INFO);
597 	hm.adapter_index = adapter;
598 	hw_entry_point(&hm, &hr);
602 	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
603 	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
604 	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;
606 	/* call to HPI_OSTREAM_OPEN */
607 	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
608 		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
610 		hm.adapter_index = adapter;
612 		hw_entry_point(&hm, &hr);
613 		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
614 			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
615 		outstream_user_open[adapter][i].open_flag = 0;
616 		outstream_user_open[adapter][i].h_owner = NULL;
619 	/* call to HPI_ISTREAM_OPEN */
620 	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
621 		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
623 		hm.adapter_index = adapter;
625 		hw_entry_point(&hm, &hr);
626 		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
627 			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
628 		instream_user_open[adapter][i].open_flag = 0;
629 		instream_user_open[adapter][i].h_owner = NULL;
632 	/* call to HPI_MIXER_OPEN */
633 	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
634 	hm.adapter_index = adapter;
635 	hw_entry_point(&hm, &hr);
636 	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
637 		sizeof(rESP_HPI_MIXER_OPEN[0]));
/*
 * Reset the cached responses to the "no adapter present" state.
 *
 * With HPIMSGX_ALLADAPTERS, every adapter's cached ADAPTER_OPEN is set
 * to HPI_ERROR_BAD_ADAPTER and every mixer/stream open to
 * HPI_ERROR_INVALID_OBJ.  With a single valid index, only the error
 * fields of that adapter's cached responses are overwritten (the
 * cached payloads are left in place).
 */
642 static void HPIMSGX__reset(u16 adapter_index)
646 	struct hpi_response hr;
648 	if (adapter_index == HPIMSGX_ALLADAPTERS) {
649 		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {
651 			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
652 				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
653 			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
654 				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));
656 			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
657 				HPI_ERROR_INVALID_OBJ);
658 			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
659 				sizeof(rESP_HPI_MIXER_OPEN[adapter]));
661 			for (i = 0; i < HPI_MAX_STREAMS; i++) {
662 				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
664 					HPI_ERROR_INVALID_OBJ);
665 				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
667 					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
669 				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
671 					HPI_ERROR_INVALID_OBJ);
672 				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
674 					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
678 	} else if (adapter_index < HPI_MAX_ADAPTERS) {
679 		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
680 			HPI_ERROR_BAD_ADAPTER;
681 		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
682 			HPI_ERROR_INVALID_OBJ;
683 		for (i = 0; i < HPI_MAX_STREAMS; i++) {
684 			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
685 				HPI_ERROR_INVALID_OBJ;
686 			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
687 				HPI_ERROR_INVALID_OBJ;
/*
 * Handle HPI_SUBSYS_CREATE_ADAPTER: create an adapter and register its
 * entry point.
 *
 * Looks up the handler for the PCI device described in the message; if
 * none is found the response is left as HPI_ERROR_INVALID_OBJ (the
 * early 'return' lines around the HPI_ERROR_PROCESSING_MESSAGE path
 * are elided in this view).  On successful creation the handler is
 * recorded in hpi_entry_points[] and adapter_prepare() pre-opens the
 * adapter's streams/mixer; finally the handler's response is copied to
 * the caller.
 */
692 static u16 HPIMSGX__init(struct hpi_message *phm,
693 	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
694 	/* resource list or NULL=find all */
695 	struct hpi_response *phr
696 	/* response from HPI_ADAPTER_GET_INFO */
699 	hpi_handler_func *entry_point_func;
700 	struct hpi_response hr;
702 	/* Init response here so we can pass in previous adapter list */
703 	hpi_init_response(&hr, phm->object, phm->function,
704 		HPI_ERROR_INVALID_OBJ);
707 		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);
709 	if (entry_point_func) {
710 		HPI_DEBUG_MESSAGE(DEBUG, phm);
711 		entry_point_func(phm, &hr);
713 		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
717 	/* the adapter was created successfully
718 	   save the mapping for future use */
719 	hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
720 	/* prepare adapter (pre-open streams etc.) */
722 		"HPI_SUBSYS_CREATE_ADAPTER successful,"
723 		" preparing adapter\n");
724 	adapter_prepare(hr.u.s.adapter_index);
726 	memcpy(phr, &hr, hr.size);
730 static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
732 int i, adapter, adapter_limit;
737 if (adapter_index == HPIMSGX_ALLADAPTERS) {
739 adapter_limit = HPI_MAX_ADAPTERS;
741 adapter = adapter_index;
742 adapter_limit = adapter + 1;
745 for (; adapter < adapter_limit; adapter++) {
746 /* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
747 for (i = 0; i < HPI_MAX_STREAMS; i++) {
749 outstream_user_open[adapter][i].h_owner) {
750 struct hpi_message hm;
751 struct hpi_response hr;
754 "Close adapter %d ostream %d\n",
757 hpi_init_message_response(&hm, &hr,
758 HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
759 hm.adapter_index = (u16)adapter;
760 hm.obj_index = (u16)i;
761 hw_entry_point(&hm, &hr);
763 hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
764 hw_entry_point(&hm, &hr);
766 hm.function = HPI_OSTREAM_GROUP_RESET;
767 hw_entry_point(&hm, &hr);
769 outstream_user_open[adapter][i].open_flag = 0;
770 outstream_user_open[adapter][i].h_owner =
773 if (h_owner == instream_user_open[adapter][i].h_owner) {
774 struct hpi_message hm;
775 struct hpi_response hr;
778 "Close adapter %d istream %d\n",
781 hpi_init_message_response(&hm, &hr,
782 HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
783 hm.adapter_index = (u16)adapter;
784 hm.obj_index = (u16)i;
785 hw_entry_point(&hm, &hr);
787 hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
788 hw_entry_point(&hm, &hr);
790 hm.function = HPI_ISTREAM_GROUP_RESET;
791 hw_entry_point(&hm, &hr);
793 instream_user_open[adapter][i].open_flag = 0;
794 instream_user_open[adapter][i].h_owner = NULL;