2 * ms_block.c - Sony MemoryStick (legacy) storage support
4 * Copyright (C) 2012 Maxim Levitsky <maximlevitsky@gmail.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * Minor portions of the driver were copied from mspro_block.c which is
11 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
14 #define DRIVER_NAME "ms_block"
15 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
17 #include <linux/module.h>
18 #include <linux/blkdev.h>
19 #include <linux/memstick.h>
20 #include <linux/idr.h>
21 #include <linux/hdreg.h>
22 #include <linux/delay.h>
23 #include <linux/slab.h>
24 #include <linux/random.h>
25 #include <linux/bitmap.h>
26 #include <linux/scatterlist.h>
27 #include <linux/jiffies.h>
28 #include <linux/workqueue.h>
29 #include <linux/mutex.h>
33 static int cache_flush_timeout = 1000;
34 static bool verify_writes;
/*
 * Copies a section of 'sg_from', starting at byte 'offset' and spanning
 * 'len' bytes, into the scatterlist 'sg_to' of at most 'to_nents' entries.
 * No payload data is moved - only page/offset/length descriptors are set.
 * Returns the number of bytes described by 'sg_to' (may be less than
 * 'len' if either list runs out first - callers check for a short copy).
 */
40 static size_t msb_sg_copy(struct scatterlist *sg_from, struct scatterlist *sg_to,
41 int to_nents, size_t offset, size_t len)
/* Skip whole source entries that lie entirely before 'offset' */
46 if (offset >= sg_from->length) {
47 if (sg_is_last(sg_from))
50 offset -= sg_from->length;
51 sg_from = sg_next(sg_from);
/* First destination entry may start mid-way into a source entry */
55 copied = min(len, sg_from->length - offset);
56 sg_set_page(sg_to, sg_page(sg_from),
57 copied, sg_from->offset + offset);
62 if (sg_is_last(sg_from) || !len)
65 sg_to = sg_next(sg_to);
67 sg_from = sg_next(sg_from);
/* Middle entries are mirrored whole while 'sg_to' still has room */
70 while (len > sg_from->length && to_nents--) {
71 len -= sg_from->length;
72 copied += sg_from->length;
74 sg_set_page(sg_to, sg_page(sg_from),
75 sg_from->length, sg_from->offset);
77 if (sg_is_last(sg_from) || !len)
80 sg_from = sg_next(sg_from);
81 sg_to = sg_next(sg_to);
/* Final partial entry, if bytes and room remain */
84 if (len && to_nents) {
85 sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
/*
 * Compares a section of scatterlist 'sg', starting at byte 'offset' and
 * spanning 'len' bytes, against the linear buffer 'buffer' of length 'len'.
 * Returns 0 if the contents are equal and -1 otherwise.
 */
98 static int msb_sg_compare_to_buffer(struct scatterlist *sg,
99 size_t offset, u8 *buffer, size_t len)
101 int retval = 0, cmplen;
102 struct sg_mapping_iter miter;
/* SG_MITER_ATOMIC: pages are mapped atomically - no sleeping in the loop */
104 sg_miter_start(&miter, sg, sg_nents(sg),
105 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
107 while (sg_miter_next(&miter) && len > 0) {
/* Skip mapped chunks lying entirely before 'offset' */
108 if (offset >= miter.length) {
109 offset -= miter.length;
113 cmplen = min(miter.length - offset, len);
114 retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
126 sg_miter_stop(&miter);
/* Get the zone in which the block with logical address 'lba' lives.
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks, out of which in the first
 * zone 494 are used and 496 are used in all following zones.
 * Therefore zone #0 hosts lbas 0-493, zone #1 lbas 494-989, etc...
 */
137 static int msb_get_zone_from_lba(int lba)
/* Zones past the first each hold 496 logical blocks */
141 return ((lba - 494) / 496) + 1;
/* Get zone of physical block: each zone spans MS_BLOCKS_IN_ZONE pbas. Trivial */
145 static int msb_get_zone_from_pba(int pba)
147 return pba / MS_BLOCKS_IN_ZONE;
150 /* Debug test to validate free block counts */
152 static int msb_validate_used_block_bitmap(struct msb_data *msb)
154 int total_free_blocks = 0;
/* Sum the per-zone free counters */
157 for (i = 0 ; i < msb->zone_count ; i++)
158 total_free_blocks += msb->free_block_count[i];
/* Clear bits in the used-blocks bitmap must equal the per-zone sum */
160 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
161 msb->block_count) == total_free_blocks)
/* Mismatch means FTL accounting is corrupt - stop all further writes */
164 pr_err("BUG: free block counts don't match the bitmap");
165 msb->read_only = true;
170 /* Mark physical block as used */
171 static void msb_mark_block_used(struct msb_data *msb, int pba)
173 int zone = msb_get_zone_from_pba(pba);
/* Marking an already-used block means FTL state is corrupt - go read-only */
175 if (test_bit(pba, msb->used_blocks_bitmap)) {
177 "BUG: attempt to mark already used pba %d as used", pba);
178 msb->read_only = true;
/* Cross-check counters against the bitmap (debug aid) */
183 if (msb_validate_used_block_bitmap(msb))
186 /* No races because all IO is single threaded */
187 __set_bit(pba, msb->used_blocks_bitmap);
188 msb->free_block_count[zone]--;
191 /* Mark physical block as free */
192 static void msb_mark_block_unused(struct msb_data *msb, int pba)
194 int zone = msb_get_zone_from_pba(pba);
/* Freeing an already-free block means FTL state is corrupt - go read-only */
196 if (!test_bit(pba, msb->used_blocks_bitmap)) {
197 pr_err("BUG: attempt to mark "
198 "already unused pba %d as unused" , pba);
199 msb->read_only = true;
/* Cross-check counters against the bitmap (debug aid) */
204 if (msb_validate_used_block_bitmap(msb))
207 /* No races because all IO is single threaded */
208 __clear_bit(pba, msb->used_blocks_bitmap);
209 msb->free_block_count[zone]++;
212 /* Invalidate current register window */
213 static void msb_invalidate_reg_window(struct msb_data *msb)
/* Point the window at the ID register and clear addr_valid so the next
   msb_read_regs()/msb_write_regs() re-sends MS_TPC_SET_RW_REG_ADRS */
215 msb->reg_addr.w_offset = offsetof(struct ms_register, id);
216 msb->reg_addr.w_length = sizeof(struct ms_id_register);
217 msb->reg_addr.r_offset = offsetof(struct ms_register, id);
218 msb->reg_addr.r_length = sizeof(struct ms_id_register);
219 msb->addr_valid = false;
222 /* Start a state machine */
223 static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
224 (struct memstick_dev *card, struct memstick_request **req))
226 struct memstick_dev *card = msb->card;
/* msb->state == -1 means "no state machine currently running" */
228 WARN_ON(msb->state != -1);
229 msb->int_polling = false;
233 memset(&card->current_mrq, 0, sizeof(card->current_mrq));
/* state_func is re-invoked per request until it calls
   msb_exit_state_machine(), which completes mrq_complete */
235 card->next_request = state_func;
236 memstick_new_req(card->host);
237 wait_for_completion(&card->mrq_complete);
239 WARN_ON(msb->state != -1);
/* Result recorded by msb_exit_state_machine() */
240 return msb->exit_error;
243 /* State machines call that to exit */
244 static int msb_exit_state_machine(struct msb_data *msb, int error)
246 WARN_ON(msb->state == -1);
/* Record the result and park the request handler */
249 msb->exit_error = error;
250 msb->card->next_request = h_msb_default_bad;
252 /* Invalidate reg window on errors */
254 msb_invalidate_reg_window(msb);
/* Wake up the waiter in msb_run_state_machine() */
256 complete(&msb->card->mrq_complete);
260 /* read INT register */
261 static int msb_read_int_reg(struct msb_data *msb, long timeout)
263 struct memstick_request *mrq = &msb->card->current_mrq;
265 WARN_ON(msb->state == -1);
/* First poll of a sequence: arm the timeout (500 ms when timeout == -1) */
267 if (!msb->int_polling) {
268 msb->int_timeout = jiffies +
269 msecs_to_jiffies(timeout == -1 ? 500 : timeout);
270 msb->int_polling = true;
271 } else if (time_after(jiffies, msb->int_timeout)) {
/* Poll timed out: fake a CMDNAK so the caller aborts the command */
272 mrq->data[0] = MEMSTICK_INT_CMDNAK;
/* Hosts with AUTO_GET_INT already latched the INT value into
   mrq->int_reg - reuse it instead of issuing a GET_INT TPC */
276 if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
277 mrq->need_card_int && !mrq->error) {
278 mrq->data[0] = mrq->int_reg;
279 mrq->need_card_int = false;
282 memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
287 /* Read a register */
288 static int msb_read_regs(struct msb_data *msb, int offset, int len)
290 struct memstick_request *req = &msb->card->current_mrq;
/* Register window mismatch: first issue SET_RW_REG_ADRS; the state
   machine stays in its current state and calls us again afterwards */
292 if (msb->reg_addr.r_offset != offset ||
293 msb->reg_addr.r_length != len || !msb->addr_valid) {
295 msb->reg_addr.r_offset = offset;
296 msb->reg_addr.r_length = len;
297 msb->addr_valid = true;
299 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
300 &msb->reg_addr, sizeof(msb->reg_addr));
/* Window already matches: issue the actual register read */
304 memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
308 /* Write a card register */
309 static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
311 struct memstick_request *req = &msb->card->current_mrq;
/* Register window mismatch: first issue SET_RW_REG_ADRS; the state
   machine stays in its current state and calls us again afterwards */
313 if (msb->reg_addr.w_offset != offset ||
314 msb->reg_addr.w_length != len || !msb->addr_valid) {
316 msb->reg_addr.w_offset = offset;
317 msb->reg_addr.w_length = len;
318 msb->addr_valid = true;
320 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
321 &msb->reg_addr, sizeof(msb->reg_addr));
/* Window already matches: issue the actual register write */
325 memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
329 /* Handler for absence of IO */
/* Installed as card->next_request by msb_exit_state_machine(); any
   request that reaches it indicates a driver bug */
330 static int h_msb_default_bad(struct memstick_dev *card,
331 struct memstick_request **mrq)
/*
 * This function is a handler for reads of one page from device.
 * Writes output to msb->current_sg, takes sector address from msb->regs.param
 * Can also be used to read extra data only. Set params accordingly.
 */
341 static int h_msb_read_page(struct memstick_dev *card,
342 struct memstick_request **out_mrq)
344 struct msb_data *msb = memstick_get_drvdata(card);
345 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
346 struct scatterlist sg[2];
350 dbg("read_page, unknown error");
351 return msb_exit_state_machine(msb, mrq->error);
354 switch (msb->state) {
355 case MSB_RP_SEND_BLOCK_ADDRESS:
356 /* msb_write_regs sometimes "fails" because it needs to update
357 the reg window, and thus it returns request for that.
358 Then we stay in this state and retry */
359 if (!msb_write_regs(msb,
360 offsetof(struct ms_register, param),
361 sizeof(struct ms_param_register),
362 (unsigned char *)&msb->regs.param))
365 msb->state = MSB_RP_SEND_READ_COMMAND;
368 case MSB_RP_SEND_READ_COMMAND:
369 command = MS_CMD_BLOCK_READ;
370 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
371 msb->state = MSB_RP_SEND_INT_REQ;
374 case MSB_RP_SEND_INT_REQ:
375 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
376 /* If dont actually need to send the int read request (only in
377 serial mode), then just fall through */
378 if (msb_read_int_reg(msb, -1))
382 case MSB_RP_RECEIVE_INT_REQ_RESULT:
383 intreg = mrq->data[0];
384 msb->regs.status.interrupt = intreg;
386 if (intreg & MEMSTICK_INT_CMDNAK)
387 return msb_exit_state_machine(msb, -EIO);
/* Command not finished yet - keep polling the INT register */
389 if (!(intreg & MEMSTICK_INT_CED)) {
390 msb->state = MSB_RP_SEND_INT_REQ;
394 msb->int_polling = false;
/* On INT_ERR, detour through the status register to classify the error */
395 msb->state = (intreg & MEMSTICK_INT_ERR) ?
396 MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
399 case MSB_RP_SEND_READ_STATUS_REG:
400 /* read the status register to understand source of the INT_ERR */
401 if (!msb_read_regs(msb,
402 offsetof(struct ms_register, status),
403 sizeof(struct ms_status_register)))
/* BUGFIX: the next state must be MSB_RP_RECIVE_STATUS_REG, which copies
   the status register out of mrq->data; jumping straight to
   MSB_RP_RECEIVE_OOB_READ misparsed the status bytes as OOB data and
   left msb->regs.status stale */
406 msb->state = MSB_RP_RECIVE_STATUS_REG;
409 case MSB_RP_RECIVE_STATUS_REG:
410 msb->regs.status = *(struct ms_status_register *)mrq->data;
411 msb->state = MSB_RP_SEND_OOB_READ;
414 case MSB_RP_SEND_OOB_READ:
415 if (!msb_read_regs(msb,
416 offsetof(struct ms_register, extra_data),
417 sizeof(struct ms_extra_data_register)))
420 msb->state = MSB_RP_RECEIVE_OOB_READ;
423 case MSB_RP_RECEIVE_OOB_READ:
424 msb->regs.extra_data =
425 *(struct ms_extra_data_register *) mrq->data;
426 msb->state = MSB_RP_SEND_READ_DATA;
429 case MSB_RP_SEND_READ_DATA:
430 /* Skip that state if we only read the oob */
431 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
432 msb->state = MSB_RP_RECEIVE_READ_DATA;
/* Map one page worth of the caller's sg for the data transfer */
436 sg_init_table(sg, ARRAY_SIZE(sg));
437 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
438 msb->current_sg_offset,
441 memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
442 msb->state = MSB_RP_RECEIVE_READ_DATA;
445 case MSB_RP_RECEIVE_READ_DATA:
446 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
447 msb->current_sg_offset += msb->page_size;
448 return msb_exit_state_machine(msb, 0);
451 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
452 dbg("read_page: uncorrectable error");
453 return msb_exit_state_machine(msb, -EBADMSG);
/* Corrected ECC error: data was advanced and is usable */
456 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
457 dbg("read_page: correctable error");
458 msb->current_sg_offset += msb->page_size;
459 return msb_exit_state_machine(msb, -EUCLEAN);
461 dbg("read_page: INT error, but no status error bits");
462 return msb_exit_state_machine(msb, -EIO);
/*
 * Handler of writes of exactly one block.
 * Takes address from msb->regs.param.
 * Writes the same extra data to all pages, also taken
 * from msb->regs.extra_data.
 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
 * device refuses to take the command or something else.
 */
477 static int h_msb_write_block(struct memstick_dev *card,
478 struct memstick_request **out_mrq)
480 struct msb_data *msb = memstick_get_drvdata(card);
481 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
482 struct scatterlist sg[2];
486 return msb_exit_state_machine(msb, mrq->error);
489 switch (msb->state) {
/* HACK: Jmicron handling of TPCs between 8 and
 * sizeof(memstick_request.data) is broken due to hardware
 * bug in PIO mode that is used for these TPCs.
 * Therefore split the write into param / OOB register writes. */
497 case MSB_WB_SEND_WRITE_PARAMS:
498 if (!msb_write_regs(msb,
499 offsetof(struct ms_register, param),
500 sizeof(struct ms_param_register),
504 msb->state = MSB_WB_SEND_WRITE_OOB;
507 case MSB_WB_SEND_WRITE_OOB:
508 if (!msb_write_regs(msb,
509 offsetof(struct ms_register, extra_data),
510 sizeof(struct ms_extra_data_register),
511 &msb->regs.extra_data))
513 msb->state = MSB_WB_SEND_WRITE_COMMAND;
517 case MSB_WB_SEND_WRITE_COMMAND:
518 command = MS_CMD_BLOCK_WRITE;
519 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
520 msb->state = MSB_WB_SEND_INT_REQ;
523 case MSB_WB_SEND_INT_REQ:
524 msb->state = MSB_WB_RECEIVE_INT_REQ;
525 if (msb_read_int_reg(msb, -1))
529 case MSB_WB_RECEIVE_INT_REQ:
530 intreg = mrq->data[0];
531 msb->regs.status.interrupt = intreg;
533 /* errors mean out of here, and fast... */
534 if (intreg & (MEMSTICK_INT_CMDNAK))
535 return msb_exit_state_machine(msb, -EIO);
537 if (intreg & MEMSTICK_INT_ERR)
538 return msb_exit_state_machine(msb, -EBADMSG);
541 /* for last page we need to poll CED */
542 if (msb->current_page == msb->pages_in_block) {
543 if (intreg & MEMSTICK_INT_CED)
544 return msb_exit_state_machine(msb, 0);
545 msb->state = MSB_WB_SEND_INT_REQ;
550 /* for non-last page we need BREQ before writing next chunk */
551 if (!(intreg & MEMSTICK_INT_BREQ)) {
552 msb->state = MSB_WB_SEND_INT_REQ;
556 msb->int_polling = false;
557 msb->state = MSB_WB_SEND_WRITE_DATA;
560 case MSB_WB_SEND_WRITE_DATA:
561 sg_init_table(sg, ARRAY_SIZE(sg));
/* Map one page worth of the caller's sg; a short map is fatal */
563 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
564 msb->current_sg_offset,
565 msb->page_size) < msb->page_size)
566 return msb_exit_state_machine(msb, -EIO);
568 memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
/* Ask the host to latch INT with the transfer (parallel mode) */
569 mrq->need_card_int = 1;
570 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
573 case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
/* Page accepted - advance to the next page of the block */
575 msb->current_sg_offset += msb->page_size;
576 msb->state = MSB_WB_SEND_INT_REQ;
/*
 * This function is used to send simple IO requests to device that consist
 * of a register write + command (command value and OOB taken from msb).
 */
589 static int h_msb_send_command(struct memstick_dev *card,
590 struct memstick_request **out_mrq)
592 struct msb_data *msb = memstick_get_drvdata(card);
593 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
597 dbg("send_command: unknown error");
598 return msb_exit_state_machine(msb, mrq->error);
601 switch (msb->state) {
603 /* HACK: see h_msb_write_block */
604 case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
605 if (!msb_write_regs(msb,
606 offsetof(struct ms_register, param),
607 sizeof(struct ms_param_register),
610 msb->state = MSB_SC_SEND_WRITE_OOB;
613 case MSB_SC_SEND_WRITE_OOB:
/* OOB write is optional - controlled by msb->command_need_oob */
614 if (!msb->command_need_oob) {
615 msb->state = MSB_SC_SEND_COMMAND;
619 if (!msb_write_regs(msb,
620 offsetof(struct ms_register, extra_data),
621 sizeof(struct ms_extra_data_register),
622 &msb->regs.extra_data))
625 msb->state = MSB_SC_SEND_COMMAND;
628 case MSB_SC_SEND_COMMAND:
629 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
630 msb->state = MSB_SC_SEND_INT_REQ;
633 case MSB_SC_SEND_INT_REQ:
634 msb->state = MSB_SC_RECEIVE_INT_REQ;
635 if (msb_read_int_reg(msb, -1))
639 case MSB_SC_RECEIVE_INT_REQ:
640 intreg = mrq->data[0];
642 if (intreg & MEMSTICK_INT_CMDNAK)
643 return msb_exit_state_machine(msb, -EIO);
644 if (intreg & MEMSTICK_INT_ERR)
645 return msb_exit_state_machine(msb, -EBADMSG);
/* Keep polling until the card signals command end (CED) */
647 if (!(intreg & MEMSTICK_INT_CED)) {
648 msb->state = MSB_SC_SEND_INT_REQ;
652 return msb_exit_state_machine(msb, 0);
658 /* Small handler for card reset */
659 static int h_msb_reset(struct memstick_dev *card,
660 struct memstick_request **out_mrq)
662 u8 command = MS_CMD_RESET;
663 struct msb_data *msb = memstick_get_drvdata(card);
664 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
667 return msb_exit_state_machine(msb, mrq->error);
669 switch (msb->state) {
671 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
/* Do not request INT for the reset command; NOTE(review): presumably
   the card cannot answer GET_INT while resetting - verify */
672 mrq->need_card_int = 0;
673 msb->state = MSB_RS_CONFIRM;
676 return msb_exit_state_machine(msb, 0);
681 /* This handler is used to do serial->parallel switch */
682 static int h_msb_parallel_switch(struct memstick_dev *card,
683 struct memstick_request **out_mrq)
685 struct msb_data *msb = memstick_get_drvdata(card);
686 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
687 struct memstick_host *host = card->host;
/* On any failure, roll back the PAM bit we may have set below */
690 dbg("parallel_switch: error");
691 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
692 return msb_exit_state_machine(msb, mrq->error);
695 switch (msb->state) {
696 case MSB_PS_SEND_SWITCH_COMMAND:
697 /* Set the parallel interface on memstick side */
698 msb->regs.param.system |= MEMSTICK_SYS_PAM;
700 if (!msb_write_regs(msb,
701 offsetof(struct ms_register, param),
703 (unsigned char *)&msb->regs.param))
706 msb->state = MSB_PS_SWICH_HOST;
709 case MSB_PS_SWICH_HOST:
710 /* Set parallel interface on our side + send a dummy request
711 to see if card responds */
712 host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
713 memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
714 msb->state = MSB_PS_CONFIRM;
/* Dummy GET_INT succeeded - the card talks parallel now */
718 return msb_exit_state_machine(msb, 0);
724 static int msb_switch_to_parallel(struct msb_data *msb);
/* Reset the card, to guard against hw errors being treated as bad blocks */
727 static int msb_reset(struct msb_data *msb, bool full)
730 bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
731 struct memstick_dev *card = msb->card;
732 struct memstick_host *host = card->host;
/* After reset the card is back in serial mode; BAMD is the default */
736 msb->regs.param.system = MEMSTICK_SYS_BAMD;
/* Full reset: power-cycle the card and drop the host back to serial */
739 error = host->set_param(host,
740 MEMSTICK_POWER, MEMSTICK_POWER_OFF);
744 msb_invalidate_reg_window(msb);
746 error = host->set_param(host,
747 MEMSTICK_POWER, MEMSTICK_POWER_ON);
751 error = host->set_param(host,
752 MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
/* Host refuses to reset - nothing more we can do; stop writes */
755 dbg("Failed to reset the host controller");
756 msb->read_only = true;
/* Send MS_CMD_RESET to the card itself */
761 error = msb_run_state_machine(msb, h_msb_reset);
763 dbg("Failed to reset the card");
764 msb->read_only = true;
768 /* Set parallel mode */
770 msb_switch_to_parallel(msb);
774 /* Attempts to switch interface to parallel mode */
775 static int msb_switch_to_parallel(struct msb_data *msb)
779 error = msb_run_state_machine(msb, h_msb_parallel_switch);
781 pr_err("Switch to parallel failed");
/* Roll back to serial and reset so the card is in a known state */
782 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
783 msb_reset(msb, true);
/* In parallel mode the host returns INT with each transfer
   (consumed via mrq->int_reg in msb_read_int_reg) */
787 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
791 /* Changes overwrite flag on a page */
792 static int msb_set_overwrite_flag(struct msb_data *msb,
793 u16 pba, u8 page, u8 flag)
/* MEMSTICK_CP_OVERWRITE: program only the overwrite byte of the OOB */
798 msb->regs.param.block_address = cpu_to_be16(pba);
799 msb->regs.param.page_address = page;
800 msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
801 msb->regs.extra_data.overwrite_flag = flag;
802 msb->command_value = MS_CMD_BLOCK_WRITE;
803 msb->command_need_oob = true;
805 dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
807 return msb_run_state_machine(msb, h_msb_send_command);
/* Mark a whole physical block bad by clearing BKST in its overwrite flag */
810 static int msb_mark_bad(struct msb_data *msb, int pba)
812 pr_notice("marking pba %d as bad", pba);
/* Reset first so a confused card doesn't corrupt the operation */
813 msb_reset(msb, true);
814 return msb_set_overwrite_flag(
815 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
/* Mark a single page bad by clearing PGST0 in its overwrite flag */
818 static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
820 dbg("marking page %d of pba %d as bad", page, pba);
/* Reset first so a confused card doesn't corrupt the operation */
821 msb_reset(msb, true);
822 return msb_set_overwrite_flag(msb,
823 pba, page, ~MEMSTICK_OVERWRITE_PGST0);
826 /* Erases one physical block */
827 static int msb_erase_block(struct msb_data *msb, u16 pba)
833 dbg_verbose("erasing pba %d", pba);
/* Up to two attempts, resetting the card between failed tries */
835 for (try = 1 ; try < 3 ; try++) {
836 msb->regs.param.block_address = cpu_to_be16(pba);
837 msb->regs.param.page_address = 0;
838 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
839 msb->command_value = MS_CMD_BLOCK_ERASE;
840 msb->command_need_oob = false;
843 error = msb_run_state_machine(msb, h_msb_send_command);
/* Success, or a failed reset (no point retrying then) */
844 if (!error || msb_reset(msb, true))
849 pr_err("erase failed, marking pba %d as bad", pba);
850 msb_mark_bad(msb, pba);
853 dbg_verbose("erase success, marking pba %d as unused", pba);
854 msb_mark_block_unused(msb, pba);
/* Remember erase state so msb_write_block can skip read-back verify */
855 __set_bit(pba, msb->erased_blocks_bitmap);
859 /* Reads one page from device */
860 static int msb_read_page(struct msb_data *msb,
861 u16 pba, u8 page, struct ms_extra_data_register *extra,
862 struct scatterlist *sg, int offset)
/* Unmapped lba: synthesize an all-0xFF page instead of touching media */
866 if (pba == MS_BLOCK_INVALID) {
868 struct sg_mapping_iter miter;
869 size_t len = msb->page_size;
871 dbg_verbose("read unmapped sector. returning 0xFF");
/* IRQs off for the atomic sg mapping iterator */
873 local_irq_save(flags);
874 sg_miter_start(&miter, sg, sg_nents(sg),
875 SG_MITER_ATOMIC | SG_MITER_TO_SG);
877 while (sg_miter_next(&miter) && len > 0) {
/* Skip mapped chunks lying entirely before 'offset' */
881 if (offset && offset >= miter.length) {
882 offset -= miter.length;
886 chunklen = min(miter.length - offset, len);
887 memset(miter.addr + offset, 0xFF, chunklen);
892 sg_miter_stop(&miter);
893 local_irq_restore(flags);
/* Erased OOB reads back as all-0xFF too */
899 memset(extra, 0xFF, sizeof(*extra));
903 if (pba >= msb->block_count) {
904 pr_err("BUG: attempt to read beyond"
905 " the end of the card at pba %d", pba);
/* Up to two attempts, with a full reset between failures */
909 for (try = 1 ; try < 3 ; try++) {
910 msb->regs.param.block_address = cpu_to_be16(pba);
911 msb->regs.param.page_address = page;
912 msb->regs.param.cp = MEMSTICK_CP_PAGE;
914 msb->current_sg = sg;
915 msb->current_sg_offset = offset;
916 error = msb_run_state_machine(msb, h_msb_read_page);
/* -EUCLEAN = corrected ECC error: data is valid, just log it */
919 if (error == -EUCLEAN) {
920 pr_notice("correctable error on pba %d, page %d",
926 *extra = msb->regs.extra_data;
928 if (!error || msb_reset(msb, true))
/* Record the page as bad only if its overwrite flag still claims
   it usable (PGST0 set) - otherwise it is already marked */
934 if (error == -EBADMSG) {
935 pr_err("uncorrectable error on read of pba %d, page %d",
938 if (msb->regs.extra_data.overwrite_flag &
939 MEMSTICK_OVERWRITE_PGST0)
940 msb_mark_page_bad(msb, pba, page);
945 pr_err("read of pba %d, page %d failed with error %d",
950 /* Reads oob of page only */
951 static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
952 struct ms_extra_data_register *extra)
/* MEMSTICK_CP_EXTRA: h_msb_read_page skips the data phase entirely */
957 msb->regs.param.block_address = cpu_to_be16(pba);
958 msb->regs.param.page_address = page;
959 msb->regs.param.cp = MEMSTICK_CP_EXTRA;
/* Valid pbas are 0..block_count-1: use >= (was '>', an off-by-one
   that let pba == block_count through; msb_read_page checks >=) */
961 if (pba >= msb->block_count) {
962 pr_err("BUG: attempt to read beyond"
963 " the end of card at pba %d", pba);
967 error = msb_run_state_machine(msb, h_msb_read_page);
968 *extra = msb->regs.extra_data;
/* Corrected ECC error: the OOB is valid, just log it */
970 if (error == -EUCLEAN) {
971 pr_notice("correctable error on pba %d, page %d",
980 /* Reads a block and compares it with data contained in scatterlist orig_sg */
981 static int msb_verify_block(struct msb_data *msb, u16 pba,
982 struct scatterlist *orig_sg, int offset)
984 struct scatterlist sg;
/* Read the whole block back page by page into block_buffer */
987 sg_init_one(&sg, msb->block_buffer, msb->block_size);
989 while (page < msb->pages_in_block) {
991 error = msb_read_page(msb, pba, page,
992 NULL, &sg, page * msb->page_size);
/* Compare read-back against the data the caller wanted written */
998 if (msb_sg_compare_to_buffer(orig_sg, offset,
999 msb->block_buffer, msb->block_size))
/* Writes exactly one block + oob */
1005 static int msb_write_block(struct msb_data *msb,
1006 u16 pba, u32 lba, struct scatterlist *sg, int offset)
1008 int error, current_try = 1;
1009 BUG_ON(sg->length < msb->page_size);
/* Sanity guards against FTL bugs: invalid pba, out-of-range addresses,
   lba/pba zone mismatch, or an attempt to clobber a boot block */
1014 if (pba == MS_BLOCK_INVALID) {
1016 "BUG: write: attempt to write MS_BLOCK_INVALID block");
1020 if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1022 "BUG: write: attempt to write beyond the end of device");
1026 if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1027 pr_err("BUG: write: lba zone mismatch");
1031 if (pba == msb->boot_block_locations[0] ||
1032 pba == msb->boot_block_locations[1]) {
1033 pr_err("BUG: write: attempt to write to boot blocks!");
/* Whole-block write; the lba is stored in the OOB of the pages */
1042 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1043 msb->regs.param.page_address = 0;
1044 msb->regs.param.block_address = cpu_to_be16(pba);
1046 msb->regs.extra_data.management_flag = 0xFF;
1047 msb->regs.extra_data.overwrite_flag = 0xF8;
1048 msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1050 msb->current_sg = sg;
1051 msb->current_sg_offset = offset;
1052 msb->current_page = 0;
1054 error = msb_run_state_machine(msb, h_msb_write_block);
1056 /* Sector we just wrote to is assumed erased since its pba
1057 was erased. If it wasn't erased, write will succeed
1058 and will just clear the bits that were set in the block
1059 thus test that what we have written,
1060 matches what we expect.
1061 We do trust the blocks that we erased */
1062 if (!error && (verify_writes ||
1063 !test_bit(pba, msb->erased_blocks_bitmap)))
1064 error = msb_verify_block(msb, pba, sg, offset);
/* On failure: one retry after erasing the block (unless reset fails) */
1069 if (current_try > 1 || msb_reset(msb, true))
1072 pr_err("write failed, trying to erase the pba %d", pba);
1073 error = msb_erase_block(msb, pba);
1082 /* Finds a free block for write replacement */
1083 static u16 msb_get_free_block(struct msb_data *msb, int zone)
/* Start scanning at the first pba of the requested zone */
1086 int pba = zone * MS_BLOCKS_IN_ZONE;
/* Random pick among the zone's free blocks - primitive wear leveling */
1089 get_random_bytes(&pos, sizeof(pos));
1091 if (!msb->free_block_count[zone]) {
1092 pr_err("NO free blocks in the zone %d, to use for a write, "
1093 "(media is WORN out) switching to RO mode", zone);
1094 msb->read_only = true;
1095 return MS_BLOCK_INVALID;
1098 pos %= msb->free_block_count[zone];
1100 dbg_verbose("have %d choices for a free block, selected randomally: %d",
1101 msb->free_block_count[zone], pos);
/* Advance to the pos-th clear bit at/after the start of the zone */
1103 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1104 msb->block_count, pba);
1105 for (i = 0 ; i < pos ; ++i)
1106 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1107 msb->block_count, pba + 1);
1109 dbg_verbose("result of the free blocks scan: pba %d", pba);
/* Scan ran past the zone: free counters disagree with the bitmap */
1111 if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1112 pr_err("BUG: cant get a free block");
1113 msb->read_only = true;
1114 return MS_BLOCK_INVALID;
1117 msb_mark_block_used(msb, pba);
/* Writes an updated version of logical block 'lba' to a fresh pba,
   then erases the old pba and updates the lba->pba mapping */
1121 static int msb_update_block(struct msb_data *msb, u16 lba,
1122 struct scatterlist *sg, int offset)
1127 pba = msb->lba_to_pba_table[lba];
1128 dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1130 if (pba != MS_BLOCK_INVALID) {
1131 dbg_verbose("setting the update flag on the block");
/* Clear UDST on the old block; NOTE(review): presumably so an
   interrupted update can be detected on the next scan - verify */
1132 msb_set_overwrite_flag(msb, pba, 0,
1133 0xFF & ~MEMSTICK_OVERWRITE_UDST);
/* Up to 3 attempts, each on a freshly allocated block of the zone */
1136 for (try = 0 ; try < 3 ; try++) {
1137 new_pba = msb_get_free_block(msb,
1138 msb_get_zone_from_lba(lba));
1140 if (new_pba == MS_BLOCK_INVALID) {
1145 dbg_verbose("block update: writing updated block to the pba %d",
1147 error = msb_write_block(msb, new_pba, lba, sg, offset);
/* Uncorrectable write error: retire this pba and try another */
1148 if (error == -EBADMSG) {
1149 msb_mark_bad(msb, new_pba);
1156 dbg_verbose("block update: erasing the old block");
1157 msb_erase_block(msb, pba);
1158 msb->lba_to_pba_table[lba] = new_pba;
1163 pr_err("block update error after %d tries, "
1164 "switching to r/o mode", try);
1165 msb->read_only = true;
/* Converts endianness in the boot block for easy use */
1171 static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
/* In-place big-endian -> CPU conversion of every multi-byte field.
   Fields keep their declared types, so this must run exactly once per
   boot page (it is called right after the page is read) */
1173 p->header.block_id = be16_to_cpu(p->header.block_id);
1174 p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1175 p->entry.disabled_block.start_addr
1176 = be32_to_cpu(p->entry.disabled_block.start_addr);
1177 p->entry.disabled_block.data_size
1178 = be32_to_cpu(p->entry.disabled_block.data_size);
1179 p->entry.cis_idi.start_addr
1180 = be32_to_cpu(p->entry.cis_idi.start_addr);
1181 p->entry.cis_idi.data_size
1182 = be32_to_cpu(p->entry.cis_idi.data_size);
1183 p->attr.block_size = be16_to_cpu(p->attr.block_size);
1184 p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1185 p->attr.number_of_effective_blocks
1186 = be16_to_cpu(p->attr.number_of_effective_blocks);
1187 p->attr.page_size = be16_to_cpu(p->attr.page_size);
1188 p->attr.memory_manufacturer_code
1189 = be16_to_cpu(p->attr.memory_manufacturer_code);
1190 p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1191 p->attr.implemented_capacity
1192 = be16_to_cpu(p->attr.implemented_capacity);
1193 p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1194 p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
/* Scans the first MS_BLOCK_MAX_BOOT_ADDR pbas for the (up to two)
   boot blocks and records their locations and contents */
1197 static int msb_read_boot_blocks(struct msb_data *msb)
1200 struct scatterlist sg;
1201 struct ms_extra_data_register extra;
1202 struct ms_boot_page *page;
1204 msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1205 msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1206 msb->boot_block_count = 0;
1208 dbg_verbose("Start of a scan for the boot blocks");
/* Lazily allocate room for both boot pages; kept for the device's life */
1210 if (!msb->boot_page) {
1211 page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
1215 msb->boot_page = page;
1217 page = msb->boot_page;
/* Real geometry is unknown yet - bound reads to the scan window */
1219 msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1221 for (pba = 0 ; pba < MS_BLOCK_MAX_BOOT_ADDR ; pba++) {
1223 sg_init_one(&sg, page, sizeof(*page));
1224 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1225 dbg("boot scan: can't read pba %d", pba);
/* SYSFLG set means "not a system (boot) block" - skip */
1229 if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1230 dbg("managment flag doesn't indicate boot block %d",
1235 if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1236 dbg("the pba at %d doesn' contain boot block ID", pba);
1240 msb_fix_boot_page_endianness(page);
1241 msb->boot_block_locations[msb->boot_block_count] = pba;
1244 msb->boot_block_count++;
/* At most two boot blocks exist (primary + backup) */
1246 if (msb->boot_block_count == 2)
1250 if (!msb->boot_block_count) {
1251 pr_err("media doesn't contain master page, aborting");
1255 dbg_verbose("End of scan for boot blocks");
/* Reads the factory bad-block table from boot block 'block_nr' and
   marks every listed block as used so the FTL never allocates it */
1259 static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1261 struct ms_boot_page *boot_block;
1262 struct scatterlist sg;
1266 int data_size, data_offset, page, page_offset, size_to_read;
/* Only two boot blocks exist (0 = primary, 1 = backup) */
1269 BUG_ON(block_nr > 1);
1270 boot_block = &msb->boot_page[block_nr];
1271 pba = msb->boot_block_locations[block_nr];
1273 if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
/* Table location is relative to the start of the boot page */
1276 data_size = boot_block->entry.disabled_block.data_size;
1277 data_offset = sizeof(struct ms_boot_page) +
1278 boot_block->entry.disabled_block.start_addr;
/* Round the read window up to whole pages */
1282 page = data_offset / msb->page_size;
1283 page_offset = data_offset % msb->page_size;
1285 DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1288 dbg("reading bad block of boot block at pba %d, offset %d len %d",
1289 pba, data_offset, data_size);
1291 buffer = kzalloc(size_to_read, GFP_KERNEL);
1295 /* Read the buffer */
1296 sg_init_one(&sg, buffer, size_to_read);
1298 while (offset < size_to_read) {
1299 error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1304 offset += msb->page_size;
1306 if (page == msb->pages_in_block) {
1308 "bad block table extends beyond the boot block");
1313 /* Process the bad block table */
/* NOTE(review): 'page_offset' is a byte offset yet is used here as a
   u16 element index into 'buffer' - looks suspicious, verify against
   the MemoryStick boot-block layout */
1314 for (i = page_offset ; i < data_size / sizeof(u16) ; i++) {
1316 u16 bad_block = be16_to_cpu(buffer[i]);
1318 if (bad_block >= msb->block_count) {
1319 dbg("bad block table contains invalid block %d",
1324 if (test_bit(bad_block, msb->used_blocks_bitmap)) {
1325 dbg("duplicate bad block %d in the table",
1330 dbg("block %d is marked as factory bad", bad_block);
1331 msb_mark_block_used(msb, bad_block);
/* Allocates and initializes the FTL tables: used/erased bitmaps,
   lba->pba mapping and per-zone free counters. Idempotent. */
1338 static int msb_ftl_initialize(struct msb_data *msb)
1342 if (msb->ftl_initialized)
1345 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
/* 496 logical blocks per zone, minus the 2 reserved for boot blocks */
1346 msb->logical_block_count = msb->zone_count * 496 - 2;
/* Use bitmap_zalloc(): the old kzalloc(block_count / 8) under-allocated
   when block_count is not a multiple of BITS_PER_LONG, because the
   bitmap API reads and writes whole unsigned longs */
1348 msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1349 msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1350 msb->lba_to_pba_table =
1351 kmalloc_array(msb->logical_block_count, sizeof(u16), GFP_KERNEL);
1353 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1354 !msb->erased_blocks_bitmap) {
1355 bitmap_free(msb->used_blocks_bitmap);
1356 kfree(msb->lba_to_pba_table);
1357 bitmap_free(msb->erased_blocks_bitmap);
/* NULL the pointers so a later cleanup cannot double-free */
msb->used_blocks_bitmap = NULL;
msb->lba_to_pba_table = NULL;
msb->erased_blocks_bitmap = NULL;
1361 for (i = 0 ; i < msb->zone_count ; i++)
1362 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
/* Byte-fill of 0xFF yields MS_BLOCK_INVALID in every u16 entry,
   i.e. all lbas start out unmapped */
1364 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1365 msb->logical_block_count * sizeof(u16));
1367 dbg("initial FTL tables created. Zone count = %d, "
1368 "Logical block count = %d",
1369 msb->zone_count, msb->logical_block_count);
1371 msb->ftl_initialized = true;
/*
 * msb_ftl_scan - full media scan that builds the LBA->PBA mapping.
 * Walks every physical block, reads its OOB area and classifies it
 * (boot/bad/system/temporary/free/mapped), resolving LBA collisions
 * via the "updating" (UDST) overwrite flag.
 * NOTE(review): listing elided — some continue/return lines missing.
 */
1375 static int msb_ftl_scan(struct msb_data *msb)
1377 	u16 pba, lba, other_block;
1378 	u8 overwrite_flag, managment_flag, other_overwrite_flag;
1380 	struct ms_extra_data_register extra;
/* Per-PBA cache of overwrite flags, needed later to arbitrate collisions. */
1381 	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1383 	if (!overwrite_flags)
1386 	dbg("Start of media scanning");
1387 	for (pba = 0 ; pba < msb->block_count ; pba++) {
/* Boot blocks are never part of the FTL — just mark them used. */
1389 		if (pba == msb->boot_block_locations[0] ||
1390 		    pba == msb->boot_block_locations[1]) {
1391 			dbg_verbose("pba %05d -> [boot block]", pba);
1392 			msb_mark_block_used(msb, pba);
/* Already marked used => listed in the factory bad block table. */
1396 		if (test_bit(pba, msb->used_blocks_bitmap)) {
1397 			dbg_verbose("pba %05d -> [factory bad]", pba);
1401 		memset(&extra, 0, sizeof(extra));
1402 		error = msb_read_oob(msb, pba, 0, &extra);
1404 		/* can't trust the page if we can't read the oob */
1405 		if (error == -EBADMSG) {
1407 	"oob of pba %d damaged, will try to erase it", pba);
1408 			msb_mark_block_used(msb, pba);
1409 			msb_erase_block(msb, pba);
1414 		lba = be16_to_cpu(extra.logical_address);
1415 		managment_flag = extra.management_flag;
1416 		overwrite_flag = extra.overwrite_flag;
1417 		overwrite_flags[pba] = overwrite_flag;
1419 		/* Skip bad blocks */
1420 		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1421 			dbg("pba %05d -> [BAD]", pba);
1422 			msb_mark_block_used(msb, pba);
1426 		/* Skip system/drm blocks */
1427 		if ((managment_flag & MEMSTICK_MANAGMENT_FLAG_NORMAL) !=
1428 			MEMSTICK_MANAGMENT_FLAG_NORMAL) {
1429 			dbg("pba %05d -> [reserved managment flag %02x]",
1430 							pba, managment_flag);
1431 			msb_mark_block_used(msb, pba);
1435 		/* Erase temporary tables */
1436 		if (!(managment_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1437 			dbg("pba %05d -> [temp table] - will erase", pba);
1439 			msb_mark_block_used(msb, pba);
1440 			msb_erase_block(msb, pba);
/* No logical address in OOB => free block, leave unmapped. */
1444 		if (lba == MS_BLOCK_INVALID) {
1445 			dbg_verbose("pba %05d -> [free]", pba);
1449 		msb_mark_block_used(msb, pba);
1451 		/* Block has LBA not according to zoning*/
1452 		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1453 			pr_notice("pba %05d -> [bad lba %05d] - will erase",
1455 			msb_erase_block(msb, pba);
1459 		/* No collisions - great */
1460 		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1461 			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1462 			msb->lba_to_pba_table[lba] = pba;
/* Two physical blocks claim the same LBA: keep the one whose UDST
 * ("update in progress") bit is clear, i.e. the stable copy. */
1466 		other_block = msb->lba_to_pba_table[lba];
1467 		other_overwrite_flag = overwrite_flags[other_block];
1469 		pr_notice("Collision between pba %d and pba %d",
1472 		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1473 			pr_notice("pba %d is marked as stable, use it", pba);
1474 			msb_erase_block(msb, other_block);
1475 			msb->lba_to_pba_table[lba] = pba;
1479 		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1480 			pr_notice("pba %d is marked as stable, use it",
1482 			msb_erase_block(msb, pba);
/* Neither copy stable: arbitrarily keep the newer pba, erase the old. */
1486 		pr_notice("collision between blocks %d and %d,"
1487 		" without stable flag set on both, erasing pba %d",
1488 				pba, other_block, other_block);
1490 		msb_erase_block(msb, other_block);
1491 		msb->lba_to_pba_table[lba] = pba;
1494 	dbg("End of media scanning");
1495 	kfree(overwrite_flags);
/*
 * msb_cache_flush_timer - timer callback (old setup_timer-style API,
 * context passed as unsigned long): request a cache flush and kick the
 * IO work item; the actual flush happens in msb_io_work().
 */
1499 static void msb_cache_flush_timer(unsigned long data)
1501 	struct msb_data *msb = (struct msb_data *)data;
1502 	msb->need_flush_cache = true;
1503 	queue_work(msb->io_queue, &msb->io_work);
/*
 * msb_cache_discard - drop the write cache without flushing it.
 * Stops the pending flush timer and invalidates the cached LBA and the
 * per-page validity bitmap. No-op when nothing is cached.
 */
1507 static void msb_cache_discard(struct msb_data *msb)
1509 	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1512 	del_timer_sync(&msb->cache_flush_timer);
1514 	dbg_verbose("Discarding the write cache");
1515 	msb->cache_block_lba  = MS_BLOCK_INVALID;
1516 	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
/*
 * msb_cache_init - set up the single-block write cache: flush timer,
 * one block-sized buffer, and an initially-discarded (invalid) state.
 * NOTE(review): listing elided — the allocation-failure return is not
 * visible here.
 */
1519 static int msb_cache_init(struct msb_data *msb)
1521 	setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
1522 		(unsigned long)msb);
1525 	msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1529 	msb_cache_discard(msb);
/*
 * msb_cache_flush - write the cached block back to the media.
 * Pages not present in the cache are first read from the current PBA
 * (read-modify-write of a whole block), then the block is rewritten via
 * msb_update_block() and any still-invalid pages are flagged as damaged
 * in their overwrite flags. Finally the cache is discarded.
 * NOTE(review): listing elided — several error-handling lines missing.
 */
1533 static int msb_cache_flush(struct msb_data *msb)
1535 	struct scatterlist sg;
1536 	struct ms_extra_data_register extra;
1537 	int page, offset, error;
1543 	if (msb->cache_block_lba == MS_BLOCK_INVALID)
1546 	lba = msb->cache_block_lba;
1547 	pba = msb->lba_to_pba_table[lba];
1549 	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1550 						pba, msb->cache_block_lba);
1552 	sg_init_one(&sg, msb->cache , msb->block_size);
1554 	/* Read all missing pages in cache */
1555 	for (page = 0 ; page < msb->pages_in_block ; page++) {
1557 		if (test_bit(page, &msb->valid_cache_bitmap))
1560 		offset = page * msb->page_size;
1562 		dbg_verbose("reading non-present sector %d of cache block %d",
1564 		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1566 		/* Bad pages are copied with 00 page status */
1567 		if (error == -EBADMSG) {
1568 			pr_err("read error on sector %d, contents probably"
/* Skip pages whose overwrite flag marks them bad on media. */
1576 		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1577 							MEMSTICK_OV_PG_NORMAL) {
1578 			dbg("page %d is marked as bad", page);
1582 		set_bit(page, &msb->valid_cache_bitmap);
1585 	/* Write the cache now */
1586 	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
/* Re-read the mapping: msb_update_block may have moved the LBA to a
 * different PBA. */
1587 	pba = msb->lba_to_pba_table[msb->cache_block_lba];
1589 	/* Mark invalid pages */
1591 	for (page = 0 ; page < msb->pages_in_block ; page++) {
1593 		if (test_bit(page, &msb->valid_cache_bitmap))
1596 		dbg("marking page %d as containing damaged data",
1598 		msb_set_overwrite_flag(msb,
1599 			pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1603 	msb_cache_discard(msb);
/*
 * msb_cache_write - place one page of @sg (at @offset) into the write
 * cache for @lba/@page. If a different LBA is currently cached, that
 * block is flushed first. @add_to_cache_only makes this a best-effort
 * opportunistic insert (used from the read path) that bails out instead
 * of flushing. Arms the delayed cache-flush timer for a new cache block.
 * NOTE(review): listing elided — some early returns are not visible.
 */
1607 static int msb_cache_write(struct msb_data *msb, int lba,
1608 	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1611 	struct scatterlist sg_tmp[10];
1616 	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1617 						lba != msb->cache_block_lba)
1618 		if (add_to_cache_only)
1621 	/* If we need to write different block */
1622 	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1623 						lba != msb->cache_block_lba) {
1624 		dbg_verbose("first flush the cache");
1625 		error = msb_cache_flush(msb);
1630 	if (msb->cache_block_lba  == MS_BLOCK_INVALID) {
1631 		msb->cache_block_lba  = lba;
/* Delayed flush: the timer fires after cache_flush_timeout msec. */
1632 		mod_timer(&msb->cache_flush_timer,
1633 			jiffies + msecs_to_jiffies(cache_flush_timeout));
1636 	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
/* Carve a page-sized window out of @sg and copy it into the cache. */
1638 	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1639 	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1641 	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1642 		msb->cache + page * msb->page_size, msb->page_size);
1644 	set_bit(page, &msb->valid_cache_bitmap);
/*
 * msb_cache_read - read one page of @lba into @sg at @offset, serving
 * it from the write cache when valid, otherwise from the device; a
 * device read is opportunistically added to the cache afterwards
 * (add_to_cache_only=true, so it never triggers a flush).
 * NOTE(review): listing elided — the cache-hit early return is missing.
 */
1648 static int msb_cache_read(struct msb_data *msb, int lba,
1649 				int page, struct scatterlist *sg, int offset)
1651 	int pba = msb->lba_to_pba_table[lba];
1652 	struct scatterlist sg_tmp[10];
1655 	if (lba == msb->cache_block_lba &&
1656 	    test_bit(page, &msb->valid_cache_bitmap)) {
1658 		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
/* Cache hit: copy straight out of the cache buffer into @sg. */
1661 		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1662 		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1663 		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1664 			msb->cache + msb->page_size * page,
1667 		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1670 		error = msb_read_page(msb, pba, page, NULL, sg, offset);
1674 		msb_cache_write(msb, lba, page, true, sg, offset);
1679 /* Emulated geometry table
1680  * This table content isn't that important,
1681  * One could put here different values, providing that they still
1683  * 64 MB entry is what windows reports for my 64M memstick */
1685 static const struct chs_entry chs_table[] = {
1686 /*        size sectors cylinders  heads */
1692 	{ 128,  16,    991,       16},
1696 /* Load information about the card */
/*
 * msb_init_card - probe-time card initialization: reset, read the boot
 * page, derive geometry (block/page sizes, CHS table lookup), allocate
 * the block buffer, switch to parallel mode when both host and card
 * support it, initialize the cache and FTL tables, read the bad block
 * table (primary, then backup) and finally scan the media.
 * NOTE(review): listing elided — several error returns are not visible.
 */
1697 static int msb_init_card(struct memstick_dev *card)
1699 	struct msb_data *msb = memstick_get_drvdata(card);
1700 	struct memstick_host *host = card->host;
1701 	struct ms_boot_page *boot_block;
1702 	int error = 0, i, raw_size_in_megs;
/* ROM-class cards are read-only by definition. */
1706 	if (card->id.class >= MEMSTICK_CLASS_ROM &&
1707 				card->id.class <= MEMSTICK_CLASS_ROM)
1708 		msb->read_only = true;
1711 	error = msb_reset(msb, false);
1715 	/* Due to a bug in Jmicron driver written by Alex Dubov,
1716 	 its serial mode barely works,
1717 	 so we switch to parallel mode right away */
1718 	if (host->caps & MEMSTICK_CAP_PAR4)
1719 		msb_switch_to_parallel(msb);
/* Provisional page size so the boot page itself can be read. */
1721 	msb->page_size = sizeof(struct ms_boot_page);
1723 	/* Read the boot page */
1724 	error = msb_read_boot_blocks(msb);
1728 	boot_block = &msb->boot_page[0];
1730 	/* Save interesting attributes from boot page */
1731 	msb->block_count = boot_block->attr.number_of_blocks;
1732 	msb->page_size = boot_block->attr.page_size;
/* block_size attribute is in kB; *2 converts to 512-byte pages —
 * TODO confirm units against the MS format spec. */
1734 	msb->pages_in_block = boot_block->attr.block_size * 2;
1735 	msb->block_size = msb->page_size * msb->pages_in_block;
1737 	if (msb->page_size > PAGE_SIZE) {
1738 		/* this isn't supported by linux at all, anyway */
1739 		dbg("device page %d size isn't supported", msb->page_size);
1743 	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1744 	if (!msb->block_buffer)
/* Look up emulated CHS geometry by raw capacity in megabytes. */
1747 	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1749 	for (i = 0 ; chs_table[i].size ; i++) {
1751 		if (chs_table[i].size != raw_size_in_megs)
1754 		msb->geometry.cylinders = chs_table[i].cyl;
1755 		msb->geometry.heads = chs_table[i].head;
1756 		msb->geometry.sectors = chs_table[i].sec;
1760 	if (boot_block->attr.transfer_supporting == 1)
1761 		msb->caps |= MEMSTICK_CAP_PAR4;
1763 	if (boot_block->attr.device_type & 0x03)
1764 		msb->read_only = true;
1766 	dbg("Total block count = %d", msb->block_count);
1767 	dbg("Each block consists of %d pages", msb->pages_in_block);
1768 	dbg("Page size = %d bytes", msb->page_size);
1769 	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1770 	dbg("Read only: %d", msb->read_only);
1773 	/* Now we can switch the interface */
1774 	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1775 		msb_switch_to_parallel(msb);
1778 	error = msb_cache_init(msb);
1782 	error = msb_ftl_initialize(msb);
1787 	/* Read the bad block table */
1788 	error = msb_read_bad_block_table(msb, 0);
1790 	if (error && error != -ENOMEM) {
1791 		dbg("failed to read bad block table from primary boot block,"
1792 							" trying from backup");
1793 		error = msb_read_bad_block_table(msb, 1);
1799 	/* *drum roll* Scan the media */
1800 	error = msb_ftl_scan(msb);
1802 		pr_err("Scan of media failed");
/*
 * msb_do_write_request - write @len bytes from @sg starting at
 * @lba/@page. Whole, block-aligned spans bypass the cache and go
 * through msb_update_block() directly (discarding a stale cache entry
 * for the same LBA); partial blocks go through the write cache.
 * Bytes actually written are reported via @sucessfuly_written.
 * NOTE(review): listing elided — LBA advance on block wrap is missing.
 */
1810 static int msb_do_write_request(struct msb_data *msb, int lba,
1811 	int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
1815 	*sucessfuly_written = 0;
1817 	while (offset < len) {
/* Fast path: a full aligned block, no read-modify-write needed. */
1818 		if (page == 0 && len - offset >= msb->block_size) {
1820 			if (msb->cache_block_lba == lba)
1821 				msb_cache_discard(msb);
1823 			dbg_verbose("Writing whole lba %d", lba);
1824 			error = msb_update_block(msb, lba, sg, offset);
1828 			offset += msb->block_size;
1829 			*sucessfuly_written += msb->block_size;
1834 		error = msb_cache_write(msb, lba, page, false, sg, offset);
1838 		offset += msb->page_size;
1839 		*sucessfuly_written += msb->page_size;
1842 		if (page == msb->pages_in_block) {
/*
 * msb_do_read_request - read @len bytes into @sg starting at
 * @lba/@page, one page at a time through the read/write cache.
 * Bytes actually read are reported via @sucessfuly_read.
 * NOTE(review): listing elided — LBA advance on block wrap is missing.
 */
1850 static int msb_do_read_request(struct msb_data *msb, int lba,
1851 		int page, struct scatterlist *sg, int len, int *sucessfuly_read)
1855 	*sucessfuly_read = 0;
1857 	while (offset < len) {
1859 		error = msb_cache_read(msb, lba, page, sg, offset);
1863 		offset += msb->page_size;
1864 		*sucessfuly_read += msb->page_size;
1867 		if (page == msb->pages_in_block) {
/*
 * msb_io_work - workqueue handler that drains the block request queue.
 * Handles a deferred cache flush first, then fetches requests, maps
 * them to a scatterlist, converts the starting sector to LBA/page and
 * dispatches to the read/write helpers, completing the request (fully
 * or one failing sector at a time) under the queue lock.
 * NOTE(review): listing elided — the outer loop and card-removed path
 * are not fully visible.
 */
1875 static void msb_io_work(struct work_struct *work)
1877 	struct msb_data *msb = container_of(work, struct msb_data, io_work);
1878 	int page, error, len;
1880 	unsigned long flags;
1881 	struct scatterlist *sg = msb->prealloc_sg;
1883 	dbg_verbose("IO: work started");
1886 		spin_lock_irqsave(&msb->q_lock, flags);
1888 		if (msb->need_flush_cache) {
1889 			msb->need_flush_cache = false;
1890 			spin_unlock_irqrestore(&msb->q_lock, flags);
1891 			msb_cache_flush(msb);
1896 			msb->req = blk_fetch_request(msb->queue);
1898 				dbg_verbose("IO: no more requests exiting");
1899 				spin_unlock_irqrestore(&msb->q_lock, flags);
1904 		spin_unlock_irqrestore(&msb->q_lock, flags);
1906 		/* If card was removed meanwhile */
1910 		/* process the request */
1911 		dbg_verbose("IO: processing new request");
1912 		blk_rq_map_sg(msb->queue, msb->req, sg);
1914 		lba = blk_rq_pos(msb->req);
/* Convert 512-byte sector to device page, then to LBA + page-in-block. */
1916 		sector_div(lba, msb->page_size / 512);
1917 		page = do_div(lba, msb->pages_in_block);
1919 		if (rq_data_dir(msb->req) == READ)
1920 			error = msb_do_read_request(msb, lba, page, sg,
1921 				blk_rq_bytes(msb->req), &len);
1923 			error = msb_do_write_request(msb, lba, page, sg,
1924 				blk_rq_bytes(msb->req), &len);
1926 		spin_lock_irqsave(&msb->q_lock, flags);
/* Complete the successfully-transferred prefix of the request. */
1929 			if (!__blk_end_request(msb->req, 0, len))
1932 		if (error && msb->req) {
1933 			dbg_verbose("IO: ending one sector "
1934 				"of the request with error");
/* On error, fail only one sector and let the rest be retried. */
1935 			if (!__blk_end_request(msb->req, error, msb->page_size))
1940 			dbg_verbose("IO: request still pending");
1942 		spin_unlock_irqrestore(&msb->q_lock, flags);
1946 static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
1947 static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
/*
 * msb_bd_open - block device .open: bump the usage count under
 * msb_disk_lock, but only while the card is still attached.
 * NOTE(review): listing elided — the usage_count increment line is
 * not visible here.
 */
1949 static int msb_bd_open(struct block_device *bdev, fmode_t mode)
1951 	struct gendisk *disk = bdev->bd_disk;
1952 	struct msb_data *msb = disk->private_data;
1954 	dbg_verbose("block device open");
1956 	mutex_lock(&msb_disk_lock);
1958 	if (msb && msb->card)
1961 	mutex_unlock(&msb_disk_lock);
/*
 * msb_data_clear - free the per-card buffers and FTL tables.
 * Caller holds msb_disk_lock where teardown races matter (see
 * msb_remove). kfree(NULL) is safe for never-allocated members.
 */
1965 static void msb_data_clear(struct msb_data *msb)
1967 	kfree(msb->boot_page);
1968 	kfree(msb->used_blocks_bitmap);
1969 	kfree(msb->lba_to_pba_table);
/*
 * msb_disk_release - drop one reference to the disk; when the count
 * reaches zero, detach the private data, release the disk id from the
 * idr and free the remaining state. Serialized by msb_disk_lock.
 * NOTE(review): listing elided — the decrement and kfree lines are
 * not visible here.
 */
1974 static int msb_disk_release(struct gendisk *disk)
1976 	struct msb_data *msb = disk->private_data;
1978 	dbg_verbose("block device release");
1979 	mutex_lock(&msb_disk_lock);
1982 		if (msb->usage_count)
1985 		if (!msb->usage_count) {
1987 			disk->private_data = NULL;
1988 			idr_remove(&msb_disk_idr, msb->disk_id);
1992 	mutex_unlock(&msb_disk_lock);
/* Block device .release: delegates to the refcounted disk release. */
1996 static int msb_bd_release(struct gendisk *disk, fmode_t mode)
1998 	return msb_disk_release(disk);
/*
 * msb_bd_getgeo - block device .getgeo: report the emulated CHS
 * geometry computed in msb_init_card() from chs_table.
 */
2001 static int msb_bd_getgeo(struct block_device *bdev,
2002 				 struct hd_geometry *geo)
2004 	struct msb_data *msb = bdev->bd_disk->private_data;
2005 	*geo = msb->geometry;
/*
 * msb_prepare_req - request-queue prep_rq hook: accept only filesystem
 * and block-PC requests, kill everything else; mark accepted requests
 * so the block layer does not prep them again.
 */
2009 static int msb_prepare_req(struct request_queue *q, struct request *req)
2011 	if (req->cmd_type != REQ_TYPE_FS &&
2012 				req->cmd_type != REQ_TYPE_BLOCK_PC) {
2013 		blk_dump_rq_flags(req, "MS unsupported request");
2014 		return BLKPREP_KILL;
2016 	req->cmd_flags |= REQ_DONTPREP;
/*
 * msb_submit_req - request_fn called by the block layer with q_lock
 * held: on a dead (removed) card, drain and fail all queued requests
 * with -ENODEV; otherwise just kick the IO work item, which does the
 * actual processing in process context.
 */
2020 static void msb_submit_req(struct request_queue *q)
2022 	struct memstick_dev *card = q->queuedata;
2023 	struct msb_data *msb = memstick_get_drvdata(card);
2024 	struct request *req = NULL;
2026 	dbg_verbose("Submit request");
2028 	if (msb->card_dead) {
2029 		dbg("Refusing requests on removed card");
2031 		WARN_ON(!msb->io_queue_stopped);
2033 		while ((req = blk_fetch_request(q)) != NULL)
2034 			__blk_end_request_all(req, -ENODEV);
2041 	if (!msb->io_queue_stopped)
2042 		queue_work(msb->io_queue, &msb->io_work);
/* Memstick core .check callback: nonzero while the card is alive. */
2045 static int msb_check_card(struct memstick_dev *card)
2047 	struct msb_data *msb = memstick_get_drvdata(card);
2048 	return (msb->card_dead == 0);
/*
 * msb_stop - memstick core .stop callback: halt the block queue, mark
 * the io workqueue stopped, cancel the cache-flush timer, wait for
 * in-flight work, then requeue any partially processed request.
 * NOTE(review): listing elided — the msb->req NULL-check/reset around
 * blk_requeue_request is not visible here.
 */
2051 static void msb_stop(struct memstick_dev *card)
2053 	struct msb_data *msb = memstick_get_drvdata(card);
2054 	unsigned long flags;
2056 	dbg("Stopping all msblock IO");
2058 	spin_lock_irqsave(&msb->q_lock, flags);
2059 	blk_stop_queue(msb->queue);
2060 	msb->io_queue_stopped = true;
2061 	spin_unlock_irqrestore(&msb->q_lock, flags);
2063 	del_timer_sync(&msb->cache_flush_timer);
2064 	flush_workqueue(msb->io_queue);
2067 		spin_lock_irqsave(&msb->q_lock, flags);
2068 		blk_requeue_request(msb->queue, msb->req);
2070 		spin_unlock_irqrestore(&msb->q_lock, flags);
/*
 * msb_start - memstick core .start callback: resume IO after msb_stop.
 * Invalidates the cached register window, restarts the block queue and
 * the io workqueue, and schedules an (always harmless) cache flush.
 * No-op when the queue was not stopped or the card is dead.
 */
2075 static void msb_start(struct memstick_dev *card)
2077 	struct msb_data *msb = memstick_get_drvdata(card);
2078 	unsigned long flags;
2080 	dbg("Resuming IO from msblock");
2082 	msb_invalidate_reg_window(msb);
2084 	spin_lock_irqsave(&msb->q_lock, flags);
2085 	if (!msb->io_queue_stopped || msb->card_dead) {
2086 		spin_unlock_irqrestore(&msb->q_lock, flags);
2089 	spin_unlock_irqrestore(&msb->q_lock, flags);
2091 	/* Kick cache flush anyway, its harmless */
2092 	msb->need_flush_cache = true;
2093 	msb->io_queue_stopped = false;
2095 	spin_lock_irqsave(&msb->q_lock, flags);
2096 	blk_start_queue(msb->queue);
2097 	spin_unlock_irqrestore(&msb->q_lock, flags);
2099 	queue_work(msb->io_queue, &msb->io_work);
/* Block device operations for the emulated disk. */
2103 static const struct block_device_operations msb_bdops = {
2104 	.open    = msb_bd_open,
2105 	.release = msb_bd_release,
2106 	.getgeo  = msb_bd_getgeo,
2107 	.owner   = THIS_MODULE
2110 /* Registers the block device */
/*
 * msb_init_disk - allocate a disk id (old idr_pre_get/idr_get_new API),
 * a gendisk and a request queue, configure queue limits from the card
 * geometry, set capacity, create the ordered io workqueue and add the
 * disk. On failure unwinds the disk and the idr entry.
 * NOTE(review): listing elided — several error-path labels/returns are
 * not visible here.
 */
2111 static int msb_init_disk(struct memstick_dev *card)
2113 	struct msb_data *msb = memstick_get_drvdata(card);
2114 	struct memstick_host *host = card->host;
2116 	u64 limit = BLK_BOUNCE_HIGH;
2117 	unsigned long capacity;
/* Honor the host controller's DMA addressing limit for bouncing. */
2119 	if (host->dev.dma_mask && *(host->dev.dma_mask))
2120 		limit = *(host->dev.dma_mask);
2122 	mutex_lock(&msb_disk_lock);
2123 	if (!idr_pre_get(&msb_disk_idr, GFP_KERNEL)) {
2124 		mutex_unlock(&msb_disk_lock);
2127 	rc = idr_get_new(&msb_disk_idr, card, &msb->disk_id);
2128 	mutex_unlock(&msb_disk_lock);
2133 	msb->disk = alloc_disk(0);
2136 		goto out_release_id;
2139 	msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
2145 	msb->queue->queuedata = card;
2146 	blk_queue_prep_rq(msb->queue, msb_prepare_req);
2148 	blk_queue_bounce_limit(msb->queue, limit);
2149 	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2150 	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2151 	blk_queue_max_segment_size(msb->queue,
2152 				   MS_BLOCK_MAX_PAGES * msb->page_size);
2153 	blk_queue_logical_block_size(msb->queue, msb->page_size);
2155 	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2156 	msb->disk->fops = &msb_bdops;
2157 	msb->disk->private_data = msb;
2158 	msb->disk->queue = msb->queue;
2159 	msb->disk->driverfs_dev = &card->dev;
2160 	msb->disk->flags |= GENHD_FL_EXT_DEVT;
/* Capacity in 512-byte sectors: pages * blocks * (page_size/512). */
2162 	capacity = msb->pages_in_block * msb->logical_block_count;
2163 	capacity *= (msb->page_size / 512);
2164 	set_capacity(msb->disk, capacity);
2165 	dbg("Set total disk size to %lu sectors", capacity);
2167 	msb->usage_count = 1;
2168 	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
2169 	INIT_WORK(&msb->io_work, msb_io_work);
2170 	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2173 		set_disk_ro(msb->disk, 1);
2176 	add_disk(msb->disk);
2181 	put_disk(msb->disk);
2183 	mutex_lock(&msb_disk_lock);
2184 	idr_remove(&msb_disk_idr, msb->disk_id);
2185 	mutex_unlock(&msb_disk_lock);
/*
 * msb_probe - memstick driver .probe: allocate per-card state, init
 * the card and disk, and install the check/stop/start callbacks.
 * On failure, detach drvdata and free everything via msb_data_clear.
 * NOTE(review): listing elided — intermediate error checks missing.
 */
2189 static int msb_probe(struct memstick_dev *card)
2191 	struct msb_data *msb;
2194 	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2197 	memstick_set_drvdata(card, msb);
2199 	spin_lock_init(&msb->q_lock);
2201 	rc = msb_init_card(card);
2205 	rc = msb_init_disk(card);
2207 		card->check = msb_check_card;
2208 		card->stop = msb_stop;
2209 		card->start = msb_start;
2213 	memstick_set_drvdata(card, NULL);
2214 	msb_data_clear(msb);
/*
 * msb_remove - memstick driver .remove: stop IO if still running, mark
 * the card dead (so msb_submit_req fails queued requests), briefly
 * restart the queue to drain it, tear down the gendisk and queue, free
 * the card state under msb_disk_lock, and drop the disk reference
 * (which may outlive us if the device node is still open).
 */
2219 static void msb_remove(struct memstick_dev *card)
2221 	struct msb_data *msb = memstick_get_drvdata(card);
2222 	unsigned long flags;
2224 	if (!msb->io_queue_stopped)
2227 	dbg("Removing the disk device");
2229 	/* Take care of unhandled + new requests from now on */
2230 	spin_lock_irqsave(&msb->q_lock, flags);
2231 	msb->card_dead = true;
2232 	blk_start_queue(msb->queue);
2233 	spin_unlock_irqrestore(&msb->q_lock, flags);
2235 	/* Remove the disk */
2236 	del_gendisk(msb->disk);
2237 	blk_cleanup_queue(msb->queue);
2240 	mutex_lock(&msb_disk_lock);
2241 	msb_data_clear(msb);
2242 	mutex_unlock(&msb_disk_lock);
2244 	msb_disk_release(msb->disk);
2245 	memstick_set_drvdata(card, NULL);
/* PM .suspend: body elided in this listing — presumably just stops IO
 * via msb_stop; TODO confirm against the full source. */
2250 static int msb_suspend(struct memstick_dev *card, pm_message_t state)
/*
 * msb_resume - PM .resume: re-probe the card into a temporary msb_data
 * and compare its boot page, block/logical-block counts, LBA table and
 * used-block bitmap against the pre-suspend state. Only if everything
 * matches is the card considered the same one (card_dead stays false);
 * otherwise IO is refused. The old state is always reinstalled and the
 * temporary copy freed.
 * NOTE(review): listing elided — goto labels and msb_start call at the
 * end are not visible here.
 */
2256 static int msb_resume(struct memstick_dev *card)
2258 	struct msb_data *msb = memstick_get_drvdata(card);
2259 	struct msb_data *new_msb = NULL;
2260 	bool card_dead = true;
/* Without CONFIG_MEMSTICK_UNSAFE_RESUME the card is always declared
 * dead across suspend. */
2262 #ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2263 	msb->card_dead = true;
2266 	mutex_lock(&card->host->lock);
2268 	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2272 	new_msb->card = card;
2273 	memstick_set_drvdata(card, new_msb);
2274 	spin_lock_init(&new_msb->q_lock);
2275 	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2277 	if (msb_init_card(card))
2280 	if (msb->block_size != new_msb->block_size)
2283 	if (memcmp(msb->boot_page, new_msb->boot_page,
2284 					sizeof(struct ms_boot_page)))
2287 	if (msb->logical_block_count != new_msb->logical_block_count ||
2288 		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2289 						msb->logical_block_count))
2292 	if (msb->block_count != new_msb->block_count ||
2293 		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2294 							msb->block_count / 8))
2300 		dbg("Card was removed/replaced during suspend");
2302 	msb->card_dead = card_dead;
2303 	memstick_set_drvdata(card, msb);
2306 		msb_data_clear(new_msb);
2311 	mutex_unlock(&card->host->lock);
/* Without CONFIG_PM the PM callbacks are simply absent. */
2316 #define msb_suspend NULL
2317 #define msb_resume NULL
2319 #endif /* CONFIG_PM */
/* Device match table: legacy storage cards (flash/ROM/...) and Duo. */
2321 static struct memstick_device_id msb_id_tbl[] = {
2322 	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2323 	 MEMSTICK_CLASS_FLASH},
2325 	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2326 	 MEMSTICK_CLASS_ROM},
2328 	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2331 	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2334 	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2335 	 MEMSTICK_CLASS_DUO},
2338 MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
/* Memstick driver registration structure.
 * NOTE(review): listing elided — .probe and the nested .driver = { }
 * initializer braces around name/owner are not visible here. */
2341 static struct memstick_driver msb_driver = {
2343 		.name  = DRIVER_NAME,
2344 		.owner = THIS_MODULE
2346 	.id_table = msb_id_tbl,
2348 	.remove   = msb_remove,
2349 	.suspend  = msb_suspend,
2350 	.resume   = msb_resume
/*
 * msb_init - module init: register a dynamic block major, then the
 * memstick driver; unregister the major if the latter fails.
 * NOTE(review): listing elided — 'major = rc' assignment not visible.
 */
2355 static int __init msb_init(void)
2357 	int rc = register_blkdev(0, DRIVER_NAME);
2360 		pr_err("failed to register major (error %d)\n", rc);
2365 	rc = memstick_register_driver(&msb_driver);
2367 		unregister_blkdev(major, DRIVER_NAME);
2368 		pr_err("failed to register memstick driver (error %d)\n", rc);
/*
 * msb_exit - module exit: unregister driver and major, then destroy
 * the (now empty) disk id idr.
 */
2374 static void __exit msb_exit(void)
2376 	memstick_unregister_driver(&msb_driver);
2377 	unregister_blkdev(major, DRIVER_NAME);
2378 	idr_destroy(&msb_disk_idr);
2381 module_init(msb_init);
2382 module_exit(msb_exit);
/* Module parameters: all read-only at runtime except 'debug'. */
2384 module_param(cache_flush_timeout, int, S_IRUGO);
2385 MODULE_PARM_DESC(cache_flush_timeout,
2386 				"Cache flush timeout in msec (1000 default)");
2387 module_param(debug, int, S_IRUGO | S_IWUSR);
2388 MODULE_PARM_DESC(debug, "Debug level (0-2)");
2390 module_param(verify_writes, bool, S_IRUGO);
2391 MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
2393 MODULE_LICENSE("GPL");
2394 MODULE_AUTHOR("Maxim Levitsky");
2395 MODULE_DESCRIPTION("Sony MemoryStick block device driver");