1 /******************************************************************************
3 AudioScience HPI driver
4 Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of version 2 of the GNU General Public License as
8 published by the Free Software Foundation;
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 Extended Message Function With Response Cacheing
21 (C) Copyright AudioScience Inc. 2002
22 *****************************************************************************/
23 #define SOURCEFILE_NAME "hpimsgx.c"
24 #include "hpi_internal.h"
25 #include "hpimsginit.h"
/* PCI id table used only for handler lookup (not for probing here);
 * terminated by a zero-vendor entry.  Each entry's driver_data holds
 * that adapter family's HPI entry-point function.
 * NOTE(review): the table's initializer entries are elided in this view. */
30 static struct pci_device_id asihpi_pci_tbl[] = {
/* Serializes access to the cached open responses and the per-stream
 * user-open bookkeeping tables declared below. */
34 static struct hpios_spinlock msgx_lock;
/* Message handler registered per adapter index by HPIMSGX__init(). */
36 static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
/*
 * Find the HPI entry-point function for a PCI adapter by matching its
 * vendor/device/subsystem ids against asihpi_pci_tbl.  Table fields set
 * to PCI_ANY_ID act as wildcards.  Returns the matching entry's
 * driver_data cast to hpi_handler_func *.
 * NOTE(review): the per-field "continue" statements, the function's
 * braces and the no-match return path are elided in this view.
 */
38 static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
44 for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
45 if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
46 && asihpi_pci_tbl[i].vendor !=
47 pci_info->pci_dev->vendor)
49 if (asihpi_pci_tbl[i].device != PCI_ANY_ID
50 && asihpi_pci_tbl[i].device !=
51 pci_info->pci_dev->device)
53 if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
54 && asihpi_pci_tbl[i].subvendor !=
55 pci_info->pci_dev->subsystem_vendor)
57 if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
58 && asihpi_pci_tbl[i].subdevice !=
59 pci_info->pci_dev->subsystem_device)
62 /* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
63 asihpi_pci_tbl[i].driver_data); */
64 return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
/*
 * Dispatch a message to the adapter's registered handler.  When the
 * adapter index is out of range or no handler is registered, the
 * response is instead initialised with HPI_ERROR_PROCESSING_MESSAGE.
 * NOTE(review): the function braces and the "else" keyword between the
 * dispatch and the error path are elided in this view.
 */
70 static inline void hw_entry_point(struct hpi_message *phm,
71 struct hpi_response *phr)
73 if ((phm->adapter_index < HPI_MAX_ADAPTERS)
74 && hpi_entry_points[phm->adapter_index])
75 hpi_entry_points[phm->adapter_index] (phm, phr);
77 hpi_init_response(phr, phm->object, phm->function,
78 HPI_ERROR_PROCESSING_MESSAGE);
/* Forward declarations: per-object open/close handlers that serve
 * responses from the local cache instead of hitting the hardware. */
81 static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
82 static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);
84 static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
85 static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);
/* Stream variants also take the opener handle (h_owner) so user-mode
 * ownership can be tracked and cleaned up per owner.
 * NOTE(review): the trailing "void *h_owner);" of each prototype is
 * elided in this view. */
87 static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
89 static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
91 static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
93 static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
/* Reset, initialise or clean up the cached state for one adapter, or
 * for every adapter when passed HPIMSGX_ALLADAPTERS. */
96 static void HPIMSGX__reset(u16 adapter_index);
98 static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
99 static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
/* The cached response structs below mirror the HPI wire layout, hence
 * byte packing (disabled by defining DISABLE_PRAGMA_PACK1). */
101 #ifndef DISABLE_PRAGMA_PACK1
102 #pragma pack(push, 1)
/* Response header plus per-object payload, one struct per object type.
 * NOTE(review): struct closing braces are elided in this view. */
105 struct hpi_subsys_response {
106 struct hpi_response_header h;
107 struct hpi_subsys_res s;
110 struct hpi_adapter_response {
111 struct hpi_response_header h;
112 struct hpi_adapter_res a;
115 struct hpi_mixer_response {
116 struct hpi_response_header h;
117 struct hpi_mixer_res m;
120 struct hpi_stream_response {
121 struct hpi_response_header h;
122 struct hpi_stream_res d;
/* Per-adapter stream counts and type, filled in by adapter_prepare().
 * NOTE(review): member declarations are elided in this view. */
125 struct adapter_info {
/* Tracks whether a stream is open from user mode and by which owner.
 * NOTE(review): member declarations are elided in this view. */
131 struct asi_open_state {
/* Matching #pragma pack(pop) — the pop line itself is elided here. */
136 #ifndef DISABLE_PRAGMA_PACK1
/* Cached "open" responses, replayed by the *_open() handlers so repeated
 * opens never touch the hardware. */
141 static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];
143 static struct hpi_stream_response
144 rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
146 static struct hpi_stream_response
147 rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
149 static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];
151 static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];
153 /* use these to keep track of opens from user mode apps/DLLs */
154 static struct asi_open_state
155 outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
157 static struct asi_open_state
158 instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
/*
 * Handle HPI_OBJ_SUBSYSTEM messages locally: version query, open/close
 * (not propagated), driver load/unload (module state init/teardown via
 * HPI_COMMON), adapter enumeration, and adapter creation (HPIMSGX__init).
 * Unknown functions get HPI_ERROR_INVALID_FUNC.
 * NOTE(review): the "break;" at the end of each case, the closing
 * braces, and the "default:" label are elided in this view.
 */
160 static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
/* Subsystem messages should not target a specific adapter; warn if one does. */
163 if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
164 HPI_DEBUG_LOG(WARNING,
165 "suspicious adapter index %d in subsys message 0x%x.\n",
166 phm->adapter_index, phm->function);
168 switch (phm->function) {
169 case HPI_SUBSYS_GET_VERSION:
170 hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
171 HPI_SUBSYS_GET_VERSION, 0);
172 phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
173 phr->u.s.data = HPI_VER;	/* return major.minor.release */
175 case HPI_SUBSYS_OPEN:
176 /*do not propagate the message down the chain */
177 hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
179 case HPI_SUBSYS_CLOSE:
180 /*do not propagate the message down the chain */
181 hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
/* Release every adapter resource held by this owner. */
183 HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
185 case HPI_SUBSYS_DRIVER_LOAD:
186 /* Initialize this module's internal state */
187 hpios_msgxlock_init(&msgx_lock);
188 memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
189 hpios_locked_mem_init();
190 /* Init subsys_findadapters response to no-adapters */
191 HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
192 hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
193 HPI_SUBSYS_DRIVER_LOAD, 0);
194 /* individual HPIs dont implement driver load */
195 HPI_COMMON(phm, phr);
197 case HPI_SUBSYS_DRIVER_UNLOAD:
198 HPI_COMMON(phm, phr);
199 HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
200 hpios_locked_mem_free_all();
/* Report unload success regardless of HPI_COMMON's response. */
201 hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
202 HPI_SUBSYS_DRIVER_UNLOAD, 0);
205 case HPI_SUBSYS_GET_NUM_ADAPTERS:
206 case HPI_SUBSYS_GET_ADAPTER:
207 HPI_COMMON(phm, phr);
210 case HPI_SUBSYS_CREATE_ADAPTER:
211 HPIMSGX__init(phm, phr);
215 /* Must explicitly handle every subsys message in this switch */
216 hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
217 HPI_ERROR_INVALID_FUNC);
/*
 * Handle HPI_OBJ_ADAPTER messages.  Open/close are answered from the
 * local cache; delete cleans up this owner's state and then sends a
 * real message down to the hardware; everything else is forwarded
 * straight to the adapter's entry point.
 * NOTE(review): case "break;" statements, the HPI_ADAPTER_DELETE
 * function argument, and the closing braces are elided in this view.
 */
222 static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
225 switch (phm->function) {
226 case HPI_ADAPTER_OPEN:
227 adapter_open(phm, phr);
229 case HPI_ADAPTER_CLOSE:
230 adapter_close(phm, phr);
232 case HPI_ADAPTER_DELETE:
/* Drop cached open state for this adapter before deleting it. */
233 HPIMSGX__cleanup(phm->adapter_index, h_owner);
235 struct hpi_message hm;
236 struct hpi_response hr;
237 hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
239 hm.adapter_index = phm->adapter_index;
240 hw_entry_point(&hm, &hr);
/* Forward the original delete message to the hardware. */
242 hw_entry_point(phm, phr);
/* Default: pass any other adapter function through unchanged. */
246 hw_entry_point(phm, phr);
/*
 * Handle HPI_OBJ_MIXER messages: open/close are served from the cache,
 * all other mixer functions are forwarded to the adapter handler.
 * NOTE(review): the HPI_MIXER_OPEN case label, "break;" statements and
 * closing braces are elided in this view.
 */
251 static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
253 switch (phm->function) {
255 mixer_open(phm, phr);
257 case HPI_MIXER_CLOSE:
258 mixer_close(phm, phr);
261 hw_entry_point(phm, phr);
/*
 * Handle HPI_OBJ_OSTREAM messages.  Rejects stream indices beyond the
 * adapter's reported outstream count, serves open/close from the
 * ownership-tracking helpers, and forwards everything else.
 * NOTE(review): the early "return;", case "break;" statements and
 * closing braces are elided in this view.
 */
266 static void outstream_message(struct hpi_message *phm,
267 struct hpi_response *phr, void *h_owner)
269 if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
270 hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
271 HPI_ERROR_INVALID_OBJ_INDEX);
275 switch (phm->function) {
276 case HPI_OSTREAM_OPEN:
277 outstream_open(phm, phr, h_owner);
279 case HPI_OSTREAM_CLOSE:
280 outstream_close(phm, phr, h_owner);
283 hw_entry_point(phm, phr);
/*
 * Handle HPI_OBJ_ISTREAM messages — mirror image of outstream_message():
 * bounds-check the stream index against num_instreams, serve open/close
 * via the ownership helpers, forward the rest to the hardware handler.
 * NOTE(review): the early "return;", case "break;" statements and
 * closing braces are elided in this view.
 */
288 static void instream_message(struct hpi_message *phm,
289 struct hpi_response *phr, void *h_owner)
291 if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
292 hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
293 HPI_ERROR_INVALID_OBJ_INDEX);
297 switch (phm->function) {
298 case HPI_ISTREAM_OPEN:
299 instream_open(phm, phr, h_owner);
301 case HPI_ISTREAM_CLOSE:
302 instream_close(phm, phr, h_owner);
305 hw_entry_point(phm, phr);
310 /* NOTE: HPI_Message() must be defined in the driver as a wrapper for
311 * HPI_MessageEx so that functions in hpifunc.c compile.
/*
 * Top-level message dispatcher: validates type and adapter index, then
 * routes by object to the subsystem/adapter/mixer/stream handlers,
 * falling back to the raw hardware entry point for other objects.
 * NOTE(review): the early "return;" statements after the validation
 * errors, case "break;" statements, the HPI_OBJ_MIXER case label and
 * closing braces are elided in this view.
 */
313 void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
316 HPI_DEBUG_MESSAGE(DEBUG, phm);
318 if (phm->type != HPI_TYPE_MESSAGE) {
319 hpi_init_response(phr, phm->object, phm->function,
320 HPI_ERROR_INVALID_TYPE);
/* HPIMSGX_ALLADAPTERS is the one out-of-range index allowed through
 * (used by subsystem-wide operations). */
324 if (phm->adapter_index >= HPI_MAX_ADAPTERS
325 && phm->adapter_index != HPIMSGX_ALLADAPTERS) {
326 hpi_init_response(phr, phm->object, phm->function,
327 HPI_ERROR_BAD_ADAPTER_NUMBER);
331 switch (phm->object) {
332 case HPI_OBJ_SUBSYSTEM:
333 subsys_message(phm, phr, h_owner);
336 case HPI_OBJ_ADAPTER:
337 adapter_message(phm, phr, h_owner);
341 mixer_message(phm, phr);
344 case HPI_OBJ_OSTREAM:
345 outstream_message(phm, phr, h_owner);
348 case HPI_OBJ_ISTREAM:
349 instream_message(phm, phr, h_owner);
353 hw_entry_point(phm, phr);
356 HPI_DEBUG_RESPONSE(phr);
/*
 * Serve HPI_ADAPTER_OPEN from the response cached by adapter_prepare(),
 * so opening an adapter never touches the hardware.
 * NOTE(review): function braces are elided in this view.
 */
360 static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
362 HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
363 memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
364 sizeof(rESP_HPI_ADAPTER_OPEN[0]));
/*
 * HPI_ADAPTER_CLOSE always succeeds locally; nothing is forwarded to
 * the hardware because opens are cached, not real.
 * NOTE(review): function braces are elided in this view.
 */
367 static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
369 HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
370 hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
/*
 * Serve HPI_MIXER_OPEN from the response cached by adapter_prepare().
 * NOTE(review): function braces are elided in this view.
 */
373 static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
375 memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
376 sizeof(rESP_HPI_MIXER_OPEN[0]));
/*
 * HPI_MIXER_CLOSE always succeeds locally (mixer open is cached).
 * NOTE(review): function braces are elided in this view.
 */
379 static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
381 hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
/*
 * Open an input stream on behalf of h_owner.  Under msgx_lock: fail
 * with HPI_ERROR_OBJ_ALREADY_OPEN if another owner holds the stream,
 * replay the cached error if the prepare-time open failed; otherwise
 * tentatively mark the stream open, drop the lock, send a real
 * HPI_ISTREAM_OPEN to the hardware, then under the lock again either
 * roll back the open flag (on error) or record h_owner and return the
 * cached open response.
 * NOTE(review): several lines — the error-replay memcpy target, the
 * else branches, message function arguments and braces — are elided in
 * this view.
 */
384 static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
388 struct hpi_message hm;
389 struct hpi_response hr;
391 hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);
393 hpios_msgxlock_lock(&msgx_lock);
395 if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
396 phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
397 else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
398 [phm->obj_index].h.error)
400 &rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
402 sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
/* Tentatively claim the stream before releasing the lock, so a
 * concurrent open sees it as busy while we talk to the hardware. */
404 instream_user_open[phm->adapter_index][phm->
405 obj_index].open_flag = 1;
406 hpios_msgxlock_unlock(&msgx_lock);
409 hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
411 hm.adapter_index = phm->adapter_index;
412 hm.obj_index = phm->obj_index;
413 hw_entry_point(&hm, &hr);
415 hpios_msgxlock_lock(&msgx_lock);
/* Hardware open failed: release the tentative claim. */
417 instream_user_open[phm->adapter_index][phm->
418 obj_index].open_flag = 0;
419 phr->error = hr.error;
/* Hardware open succeeded: record the owner and replay the cached
 * open response to the caller. */
421 instream_user_open[phm->adapter_index][phm->
422 obj_index].open_flag = 1;
423 instream_user_open[phm->adapter_index][phm->
424 obj_index].h_owner = h_owner;
426 &rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
428 sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
431 hpios_msgxlock_unlock(&msgx_lock);
/*
 * Close an input stream.  Only the recorded owner may close it: the
 * owner handle is cleared, the lock dropped, a real HPI_ISTREAM_CLOSE
 * sent to the hardware, and on hardware failure ownership is restored;
 * on success both open_flag and h_owner are cleared.  A non-owner
 * close attempt is logged and answered with HPI_ERROR_OBJ_NOT_OPEN.
 * NOTE(review): the owner-comparison condition, else branches, message
 * function arguments and braces are partly elided in this view.
 */
434 static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
438 struct hpi_message hm;
439 struct hpi_response hr;
441 hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);
443 hpios_msgxlock_lock(&msgx_lock);
445 instream_user_open[phm->adapter_index][phm->
446 obj_index].h_owner) {
447 /* HPI_DEBUG_LOG(INFO,"closing adapter %d "
448 "instream %d owned by %p\n",
449 phm->wAdapterIndex, phm->wObjIndex, hOwner); */
/* Clear ownership before unlocking so a concurrent close by the
 * same owner cannot race us. */
450 instream_user_open[phm->adapter_index][phm->
451 obj_index].h_owner = NULL;
452 hpios_msgxlock_unlock(&msgx_lock);
454 hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
456 hm.adapter_index = phm->adapter_index;
457 hm.obj_index = phm->obj_index;
458 hw_entry_point(&hm, &hr);
459 hpios_msgxlock_lock(&msgx_lock);
/* Hardware close failed: restore ownership and report the error. */
461 instream_user_open[phm->adapter_index][phm->
462 obj_index].h_owner = h_owner;
463 phr->error = hr.error;
/* Hardware close succeeded: stream is fully released. */
465 instream_user_open[phm->adapter_index][phm->
466 obj_index].open_flag = 0;
467 instream_user_open[phm->adapter_index][phm->
468 obj_index].h_owner = NULL;
471 HPI_DEBUG_LOG(WARNING,
472 "%p trying to close %d instream %d owned by %p\n",
473 h_owner, phm->adapter_index, phm->obj_index,
474 instream_user_open[phm->adapter_index][phm->
476 phr->error = HPI_ERROR_OBJ_NOT_OPEN;
478 hpios_msgxlock_unlock(&msgx_lock);
/*
 * Open an output stream on behalf of h_owner — mirror image of
 * instream_open().  Under msgx_lock: reject if already open, replay a
 * cached prepare-time error, otherwise tentatively claim the stream,
 * send a real HPI_OSTREAM_OPEN to the hardware outside the lock, then
 * either roll back (hardware error) or record the owner and return the
 * cached open response.
 * NOTE(review): several lines — else branches, message function
 * arguments and braces — are elided in this view.
 */
481 static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
485 struct hpi_message hm;
486 struct hpi_response hr;
488 hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);
490 hpios_msgxlock_lock(&msgx_lock);
492 if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
493 phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
494 else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
495 [phm->obj_index].h.error)
497 &rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
499 sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
/* Tentatively claim the stream before releasing the lock. */
501 outstream_user_open[phm->adapter_index][phm->
502 obj_index].open_flag = 1;
503 hpios_msgxlock_unlock(&msgx_lock);
506 hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
508 hm.adapter_index = phm->adapter_index;
509 hm.obj_index = phm->obj_index;
510 hw_entry_point(&hm, &hr);
512 hpios_msgxlock_lock(&msgx_lock);
/* Hardware open failed: release the tentative claim. */
514 outstream_user_open[phm->adapter_index][phm->
515 obj_index].open_flag = 0;
516 phr->error = hr.error;
/* Hardware open succeeded: record owner, replay cached response. */
518 outstream_user_open[phm->adapter_index][phm->
519 obj_index].open_flag = 1;
520 outstream_user_open[phm->adapter_index][phm->
521 obj_index].h_owner = h_owner;
523 &rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
525 sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
528 hpios_msgxlock_unlock(&msgx_lock);
/*
 * Close an output stream — mirror image of instream_close().  Only the
 * recorded owner may close: ownership is cleared, a real
 * HPI_OSTREAM_CLOSE sent to the hardware, and ownership restored if
 * that fails.  Non-owner attempts log a warning and get
 * HPI_ERROR_OBJ_NOT_OPEN.
 * NOTE(review): the owner-comparison condition, else branches, message
 * function arguments and braces are partly elided in this view.
 */
531 static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
535 struct hpi_message hm;
536 struct hpi_response hr;
538 hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);
540 hpios_msgxlock_lock(&msgx_lock);
543 outstream_user_open[phm->adapter_index][phm->
544 obj_index].h_owner) {
545 /* HPI_DEBUG_LOG(INFO,"closing adapter %d "
546 "outstream %d owned by %p\n",
547 phm->wAdapterIndex, phm->wObjIndex, hOwner); */
/* Clear ownership before unlocking to block concurrent closes. */
548 outstream_user_open[phm->adapter_index][phm->
549 obj_index].h_owner = NULL;
550 hpios_msgxlock_unlock(&msgx_lock);
552 hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
554 hm.adapter_index = phm->adapter_index;
555 hm.obj_index = phm->obj_index;
556 hw_entry_point(&hm, &hr);
557 hpios_msgxlock_lock(&msgx_lock);
/* Hardware close failed: restore ownership, report the error. */
559 outstream_user_open[phm->adapter_index][phm->
560 obj_index].h_owner = h_owner;
561 phr->error = hr.error;
/* Hardware close succeeded: stream fully released. */
563 outstream_user_open[phm->adapter_index][phm->
564 obj_index].open_flag = 0;
565 outstream_user_open[phm->adapter_index][phm->
566 obj_index].h_owner = NULL;
569 HPI_DEBUG_LOG(WARNING,
570 "%p trying to close %d outstream %d owned by %p\n",
571 h_owner, phm->adapter_index, phm->obj_index,
572 outstream_user_open[phm->adapter_index][phm->
574 phr->error = HPI_ERROR_OBJ_NOT_OPEN;
576 hpios_msgxlock_unlock(&msgx_lock);
/*
 * Pre-open an adapter and populate the response caches: performs the
 * real HPI_ADAPTER_OPEN, HPI_ADAPTER_GET_INFO, every HPI_OSTREAM_OPEN
 * and HPI_ISTREAM_OPEN, and HPI_MIXER_OPEN, storing each response so
 * later user opens can be answered without touching the hardware.
 * Also resets the per-stream user-open bookkeeping.
 * NOTE(review): error-return checks after the hardware calls, loop
 * variable declaration, message function arguments and braces are
 * partly elided in this view.
 */
579 static u16 adapter_prepare(u16 adapter)
581 struct hpi_message hm;
582 struct hpi_response hr;
584 /* Open the adapter and streams */
587 /* call to HPI_ADAPTER_OPEN */
588 hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
590 hm.adapter_index = adapter;
591 hw_entry_point(&hm, &hr);
592 memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
593 sizeof(rESP_HPI_ADAPTER_OPEN[0]));
597 /* call to HPI_ADAPTER_GET_INFO */
598 hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
599 HPI_ADAPTER_GET_INFO);
600 hm.adapter_index = adapter;
601 hw_entry_point(&hm, &hr);
/* Cache the stream counts used for the bounds checks in
 * outstream_message()/instream_message(). */
605 aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
606 aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
607 aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;
609 /* call to HPI_OSTREAM_OPEN */
610 for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
611 hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
613 hm.adapter_index = adapter;
615 hw_entry_point(&hm, &hr);
616 memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
617 sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
618 outstream_user_open[adapter][i].open_flag = 0;
619 outstream_user_open[adapter][i].h_owner = NULL;
622 /* call to HPI_ISTREAM_OPEN */
623 for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
624 hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
626 hm.adapter_index = adapter;
628 hw_entry_point(&hm, &hr);
629 memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
630 sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
631 instream_user_open[adapter][i].open_flag = 0;
632 instream_user_open[adapter][i].h_owner = NULL;
635 /* call to HPI_MIXER_OPEN */
636 hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
637 hm.adapter_index = adapter;
638 hw_entry_point(&hm, &hr);
639 memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
640 sizeof(rESP_HPI_MIXER_OPEN[0]));
/*
 * Reset the cached open responses for one adapter, or for every
 * adapter when adapter_index == HPIMSGX_ALLADAPTERS.  The all-adapters
 * path rewrites whole cached responses (adapter open ->
 * HPI_ERROR_BAD_ADAPTER, mixer/stream opens -> HPI_ERROR_INVALID_OBJ);
 * the single-adapter path only poisons the error fields.
 * NOTE(review): loop/variable declarations, some hpi_init_response
 * arguments and closing braces are elided in this view.
 */
645 static void HPIMSGX__reset(u16 adapter_index)
649 struct hpi_response hr;
651 if (adapter_index == HPIMSGX_ALLADAPTERS) {
652 for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {
654 hpi_init_response(&hr, HPI_OBJ_ADAPTER,
655 HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
656 memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
657 sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));
659 hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
660 HPI_ERROR_INVALID_OBJ);
661 memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
662 sizeof(rESP_HPI_MIXER_OPEN[adapter]));
664 for (i = 0; i < HPI_MAX_STREAMS; i++) {
665 hpi_init_response(&hr, HPI_OBJ_OSTREAM,
667 HPI_ERROR_INVALID_OBJ);
668 memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
670 sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
672 hpi_init_response(&hr, HPI_OBJ_ISTREAM,
674 HPI_ERROR_INVALID_OBJ);
675 memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
677 sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
681 } else if (adapter_index < HPI_MAX_ADAPTERS) {
682 rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
683 HPI_ERROR_BAD_ADAPTER;
684 rESP_HPI_MIXER_OPEN[adapter_index].h.error =
685 HPI_ERROR_INVALID_OBJ;
686 for (i = 0; i < HPI_MAX_STREAMS; i++) {
687 rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
688 HPI_ERROR_INVALID_OBJ;
689 rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
690 HPI_ERROR_INVALID_OBJ;
/*
 * Create a new adapter: look up the entry-point function for the PCI
 * ids in the HPI_SUBSYS_CREATE_ADAPTER message, invoke it, and on
 * success register it in hpi_entry_points[] and pre-open the adapter's
 * streams/mixer via adapter_prepare().  The handler's response is
 * copied back to the caller.  Without a matching entry point the
 * caller gets HPI_ERROR_PROCESSING_MESSAGE.
 * NOTE(review): the entry_point_func assignment's left-hand side, the
 * error-check branches, return statements and braces are partly elided
 * in this view.
 */
695 static u16 HPIMSGX__init(struct hpi_message *phm,
696 /* HPI_SUBSYS_CREATE_ADAPTER structure with */
697 /* resource list or NULL=find all */
698 struct hpi_response *phr
699 /* response from HPI_ADAPTER_GET_INFO */
702 hpi_handler_func *entry_point_func;
703 struct hpi_response hr;
705 /* Init response here so we can pass in previous adapter list */
706 hpi_init_response(&hr, phm->object, phm->function,
707 HPI_ERROR_INVALID_OBJ);
710 hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);
712 if (entry_point_func) {
713 HPI_DEBUG_MESSAGE(DEBUG, phm);
714 entry_point_func(phm, &hr);
/* No handler matched the PCI ids: report a processing failure. */
716 phr->error = HPI_ERROR_PROCESSING_MESSAGE;
720 /* the adapter was created successfully
721 save the mapping for future use */
722 hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
723 /* prepare adapter (pre-open streams etc.) */
725 "HPI_SUBSYS_CREATE_ADAPTER successful,"
726 " preparing adapter\n");
727 adapter_prepare(hr.u.s.adapter_index);
/* Copy only the valid part of the handler's response back. */
729 memcpy(phr, &hr, hr.size);
733 static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
735 int i, adapter, adapter_limit;
740 if (adapter_index == HPIMSGX_ALLADAPTERS) {
742 adapter_limit = HPI_MAX_ADAPTERS;
744 adapter = adapter_index;
745 adapter_limit = adapter + 1;
748 for (; adapter < adapter_limit; adapter++) {
749 /* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
750 for (i = 0; i < HPI_MAX_STREAMS; i++) {
752 outstream_user_open[adapter][i].h_owner) {
753 struct hpi_message hm;
754 struct hpi_response hr;
757 "Close adapter %d ostream %d\n",
760 hpi_init_message_response(&hm, &hr,
761 HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
762 hm.adapter_index = (u16)adapter;
763 hm.obj_index = (u16)i;
764 hw_entry_point(&hm, &hr);
766 hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
767 hw_entry_point(&hm, &hr);
769 hm.function = HPI_OSTREAM_GROUP_RESET;
770 hw_entry_point(&hm, &hr);
772 outstream_user_open[adapter][i].open_flag = 0;
773 outstream_user_open[adapter][i].h_owner =
776 if (h_owner == instream_user_open[adapter][i].h_owner) {
777 struct hpi_message hm;
778 struct hpi_response hr;
781 "Close adapter %d istream %d\n",
784 hpi_init_message_response(&hm, &hr,
785 HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
786 hm.adapter_index = (u16)adapter;
787 hm.obj_index = (u16)i;
788 hw_entry_point(&hm, &hr);
790 hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
791 hw_entry_point(&hm, &hr);
793 hm.function = HPI_ISTREAM_GROUP_RESET;
794 hw_entry_point(&hm, &hr);
796 instream_user_open[adapter][i].open_flag = 0;
797 instream_user_open[adapter][i].h_owner = NULL;