/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/chpid.h>

#include "cio_debug.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
EXPORT_SYMBOL_GPL(chsc_error_from_response);
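/*
 * Illustrative sketch (not part of the original source): every CHSC caller
 * in this file follows the same pattern -- issue the instruction, map a
 * nonzero condition code to -ENODEV (cc 3) or -EBUSY, and only translate
 * the response code with chsc_error_from_response() once the command was
 * accepted.  The hypothetical helper below just spells that pattern out in
 * one place; the #if 0 keeps it out of the build.
 */
#if 0
static int example_issue_chsc(void *chsc_area, struct chsc_header *response)
{
	int ccode = chsc(chsc_area);	/* condition code of the instruction */

	if (ccode > 0)			/* command was not accepted */
		return (ccode == 3) ? -ENODEV : -EBUSY;
	/* command accepted: translate the response code into an errno */
	return chsc_error_from_response(response->code);
}
#endif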
struct chsc_ssd_area {
	struct chsc_header request;
	u16 f_sch;		/* first subchannel */
	u16 l_sch;		/* last subchannel */
	struct chsc_header response;
	u8 st : 3;		/* subchannel type */
	u8 unit_addr;		/* unit address */
	u16 devno;		/* device number */
	u16 sch;		/* subchannel */
	u8 chpid[8];		/* chpids 0-7 */
	u16 fla[8];		/* full link addresses 0-7 */
} __attribute__ ((packed));
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
	struct chsc_ssd_area *ssd_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
	ret = chsc_error_from_response(ssd_area->response.code);
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
	if (!ssd_area->sch_valid) {
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	spin_unlock_irq(&chsc_page_lock);
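/*
 * Illustrative sketch (not part of the original source): the loop above
 * walks the eight possible paths MSB first, i.e. assuming bit 0x80 of
 * path_mask/fla_valid_mask corresponds to chpid[0]/fla[0].  Under that
 * assumption, a hypothetical helper that picks the first path with a valid
 * full link address from a filled-in struct chsc_ssd_info could look like
 * this; the #if 0 keeps it out of the build.
 */
#if 0
static int example_first_fla(struct chsc_ssd_info *ssd, u16 *fla)
{
	int i;

	for (i = 0; i < 8; i++) {
		u8 mask = 0x80 >> i;	/* MSB-first path numbering */

		if ((ssd->path_mask & mask) && (ssd->fla_valid_mask & mask)) {
			*fla = ssd->fla[i];
			return 0;
		}
	}
	return -ENODEV;		/* no path with a valid full link address */
}
#endif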
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
	spin_unlock_irq(sch->lock);

	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);

void chsc_chp_offline(struct chp_id chpid)
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
	memset(&link, 0, sizeof(struct chp_link));
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
	/*
	 * We don't know the device yet, but since a path to it may now
	 * be available, we'll have to do recognition again.
	 * Since we have no idea which chpid the device may be on, we'll
	 * have to do an stsch on all devices, grr...
	 */
	if (stsch_err(schid, &schib))

	/* Put it on the slow path. */
	css_schedule_eval(schid);

static int __s390_process_res_acc(struct subchannel *sch, void *data)
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

static void s390_process_res_acc(struct chp_link *link)
	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
__get_chpid_from_lir(void *data)
		/* incident-node descriptor */
		/* attached-node descriptor */
		/* incident-specific information */
	} __attribute__ ((packed)) *lir;

		/* NULL link incident record */
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
	return (u16) (lir->indesc[0] & 0x000000ff);
struct chsc_sei_nt0_area {
	u8 vf;				/* validity flags */
	u8 rs;				/* reporting source */
	u8 cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	/* ccdf has to be big enough for a link-incident record */
	u8 ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */

struct chsc_sei_nt2_area {
	u8 flags;			/* p and v bit */
	u8 cc;				/* content code */
	u8 ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */

#define CHSC_SEI_NT0	0ULL
#define CHSC_SEI_NT2	(1ULL << 61)

	struct chsc_header request;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
	id = __get_chpid_from_lir(sei_area->ccdf);
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	chsc_chp_offline(chpid);

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
	struct chp_link link;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	memset(&link, 0, sizeof(struct chp_link));
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
			link.fla_mask = 0xff00;
	s390_process_res_acc(&link);

static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
	struct channel_path *chp;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
	data = sei_area->ccdf;
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);

struct chp_config_data {

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
	struct chp_config_data *data;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
	data = (struct chp_config_data *) &(sei_area->ccdf);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
			chp_cfg_schedule(chpid, 1);
			chp_cfg_schedule(chpid, 0);
			chp_cfg_cancel_deconfigure(chpid);

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
	ret = scm_update_information();
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
	switch (sei_area->cc) {
		zpci_event_error(sei_area->ccdf);
		zpci_event_availability(sei_area->ccdf);
		CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;

		if (sei->response.code == 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei successful\n");

			/* Check if we might have lost some information. */
			if (sei->u.nt0_area.flags & 0x40) {
				CIO_CRW_EVENT(2, "chsc: event overflow\n");
				css_schedule_eval_all();
				chsc_process_sei_nt0(&sei->u.nt0_area);
				chsc_process_sei_nt2(&sei->u.nt2_area);
				CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
					      sei->ntsm);
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
	} while (sei->u.nt0_area.flags & 0x80);
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
	struct chsc_sei *sei;

		css_schedule_eval_all();
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	/* Access to sei_page is serialized through the machine check
	 * handler thread, so no additional locking is needed. */
	CIO_TRACE_EVENT(2, "prcss");

	/*
	 * The ntsm does not allow us to select NT0 and NT2 together. We
	 * need to check for NT2 first, then additionally for NT0...
	 */
	if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
		__chsc_process_crw(sei, CHSC_SEI_NT0);
void chsc_chp_online(struct chp_id chpid)
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);

__s390_vary_chpid_on(struct subchannel_id schid, void *data)
	if (stsch_err(schid, &schib))

	/* Put it on the slow path. */
	css_schedule_eval(schid);
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to.
	 */
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
chsc_remove_cmg_attr(struct channel_subsystem *css)
	for (i = 0; i <= __MAX_CHPID; i++) {
		chp_remove_cmg_attr(css->chps[i]);

chsc_add_cmg_attr(struct channel_subsystem *css)
	for (i = 0; i <= __MAX_CHPID; i++) {
		ret = chp_add_cmg_attr(css->chps[i]);
	for (--i; i >= 0; i--) {
		chp_remove_cmg_attr(css->chps[i]);
int __chsc_do_secm(struct channel_subsystem *css, int enable)
		struct chsc_header request;
		u32 operation_code : 2;
		struct chsc_header response;
	} __attribute__ ((packed)) *secm_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
		ret = (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
		ret = chsc_error_from_response(secm_area->response.code);
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
	spin_unlock_irq(&chsc_page_lock);

chsc_secm(struct channel_subsystem *css, int enable)
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
	ret = __chsc_do_secm(css, enable);
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
				__chsc_do_secm(css, 0);
			chsc_remove_cmg_attr(css);
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
	struct chsc_scpd *scpd_area;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
	if ((rfmt == 2) && !css_general_characteristics.cib)

	memset(page, 0, PAGE_SIZE);
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
	spin_unlock_irqrestore(&chsc_page_lock, flags);

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
	spin_unlock_irq(&chsc_page_lock);
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
	struct cmg_chars *cmg_chars;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
			cmg_chars->values[i] = chars->values[i];
			cmg_chars->values[i] = 0;

int chsc_get_channel_measurement_chars(struct channel_path *chp)
	struct cmg_chars *cmg_chars;
		struct chsc_header request;
		struct chsc_header response;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
		ret = (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scmc_area->response.code);
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	if (scmc_area->not_valid) {

	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
	spin_unlock_irq(&chsc_page_lock);
int __init chsc_init(void)
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);

void __init chsc_init_cleanup(void)
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);

int chsc_enable_facility(int operation_code)
		struct chsc_header request;
		u32 operation_data_area[252];
		struct chsc_header response;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
		ret = (ret == 3) ? -ENODEV : -EBUSY;

	switch (sda_area->response.code) {
		ret = chsc_error_from_response(sda_area->response.code);
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
	spin_unlock_irqrestore(&chsc_page_lock, flags);
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

chsc_determine_css_characteristics(void)
		struct chsc_header request;
		struct chsc_header response;
		u32 general_char[510];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
		result = (result == 3) ? -ENODEV : -EBUSY;

	result = chsc_error_from_response(scsc_area->response.code);
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
	spin_unlock_irq(&chsc_page_lock);

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
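/*
 * Illustrative sketch (not part of the original source): code elsewhere in
 * this file consults the cached css_general_characteristics bits before
 * issuing an operation (see the fcs/cib checks in
 * chsc_determine_channel_path_desc()).  A hypothetical caller could gate an
 * optional request format the same way; the #if 0 keeps it out of the build.
 */
#if 0
static bool example_rfmt_supported(int rfmt)
{
	if (rfmt == 1)
		return css_general_characteristics.fcs != 0;
	if (rfmt == 2)
		return css_general_characteristics.cib != 0;
	return rfmt == 0;	/* the base format needs no facility bit */
}
#endif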
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
		struct chsc_header request;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;

	memset(page, 0, PAGE_SIZE);
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;

int chsc_sstpi(void *page, void *result, size_t size)
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
	} __attribute__ ((packed)) *rr;

	memset(page, 0, PAGE_SIZE);
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
int chsc_siosl(struct subchannel_id schid)
		struct chsc_header request;
		struct subchannel_id sid;
		struct chsc_header response;
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);

	rc = chsc_error_from_response(siosl_area->response.code);
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
	spin_unlock_irqrestore(&chsc_page_lock, flags);

EXPORT_SYMBOL_GPL(chsc_siosl);
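/*
 * Illustrative sketch (not part of the original source): chsc_siosl() takes
 * the ID of the subchannel to act on.  A hypothetical caller holding a
 * struct subchannel would pass sch->schid and report a failure in the same
 * message style as above; the #if 0 keeps it out of the build.
 */
#if 0
static void example_trigger_logging(struct subchannel *sch)
{
	int rc = chsc_siosl(sch->schid);

	if (rc)
		pr_notice("Logging for subchannel 0.%x.%04x failed (rc=%d)\n",
			  sch->schid.ssid, sch->schid.sch_no, rc);
}
#endif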
/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
		ret = (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scm_area->response.code);
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
EXPORT_SYMBOL_GPL(chsc_scm_info);