/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2011  AudioScience Inc. <support@audioscience.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"

static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{
	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		break;
	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;
	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}
/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;
	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;
	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;
	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;
	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
	HPI_DEBUG_RESPONSE(phr);
}
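/*
 * Illustrative sketch only (an assumption, not code from this file): a
 * driver-side caller that has no per-client owner handle is expected to
 * wrap hpi_send_recv_ex() with a fixed kernel-owner value, along the lines
 * of the wrapper below. The constant name HOWNER_KERNEL_EXAMPLE is
 * hypothetical; the real owner value lives in the OS-specific layer.
 *
 *	#define HOWNER_KERNEL_EXAMPLE ((void *)-1)
 *
 *	void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr)
 *	{
 *		hpi_send_recv_ex(phm, phr, HOWNER_KERNEL_EXAMPLE);
 *	}
 */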
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;