dump_table(dc);
for (i = PORT_MDM; i < MAX_PORT; i++) {
- kfifo_alloc(&dc->port[i].fifo_ul,
- FIFO_BUFFER_SIZE_UL, GFP_ATOMIC);
memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
}
goto err_free_sbuf;
}
+ for (i = PORT_MDM; i < MAX_PORT; i++) {
+ if (kfifo_alloc(&dc->port[i].fifo_ul,
+ FIFO_BUFFER_SIZE_UL, GFP_ATOMIC)) {
+ dev_err(&pdev->dev,
+ "Could not allocate kfifo buffer\n");
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
+ }
+
spin_lock_init(&dc->spin_mutex);
nozomi_setup_private_data(dc);
ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED,
NOZOMI_NAME, dc);
if (unlikely(ret)) {
dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
- goto err_free_sbuf;
+ goto err_free_kfifo;
}
DBG1("base_addr: %p", dc->base_addr);
dc->state = NOZOMI_STATE_ENABLED;
for (i = 0; i < MAX_PORT; i++) {
+ struct device *tty_dev;
+
mutex_init(&dc->port[i].tty_sem);
tty_port_init(&dc->port[i].port);
- tty_register_device(ntty_driver, dc->index_start + i,
+ tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
&pdev->dev);
+
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+ dev_err(&pdev->dev, "Could not allocate tty?\n");
+ goto err_free_tty;
+ }
}
+
return 0;
+err_free_tty:
+ for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
+ tty_unregister_device(ntty_driver, i);
+err_free_kfifo:
+ for (i = 0; i < MAX_PORT; i++)
+ kfifo_free(&dc->port[i].fifo_ul);
err_free_sbuf:
kfree(dc->send_buf);
iounmap(dc->base_addr);
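The new nozomi unwind path follows the usual allocate-then-release-in-reverse
idiom. A minimal sketch of that idiom (alloc_port_fifos() is a hypothetical
helper, not code from the driver), freeing only what was actually allocated:

	static int alloc_port_fifos(struct nozomi *dc)
	{
		int i, ret;

		for (i = PORT_MDM; i < MAX_PORT; i++) {
			ret = kfifo_alloc(&dc->port[i].fifo_ul,
					FIFO_BUFFER_SIZE_UL, GFP_ATOMIC);
			if (ret)
				goto err_unwind;
		}
		return 0;

	err_unwind:
		/* release only the fifos allocated so far, newest first */
		while (--i >= PORT_MDM)
			kfifo_free(&dc->port[i].fifo_ul);
		return ret;
	}

The patch itself frees all MAX_PORT fifos at err_free_kfifo; that appears
safe only because dc comes from kzalloc(), so kfifo_free() on a
never-allocated port boils down to kfree(NULL).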
kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
for (i = 0; i < skip_low + skip_high; i++)
- kfifo_out_locked(fifo, (unsigned char *) &entry,
- sizeof(u32), fifo_lock);
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry,
+ sizeof(u32), fifo_lock) != sizeof(u32))
+ break;
return 0;
}
/* fall through */
case MEYE_BUF_DONE:
meye.grab_buffer[*i].state = MEYE_BUF_UNUSED;
- kfifo_out_locked(&meye.doneq, (unsigned char *)&unused,
- sizeof(int), &meye.doneq_lock);
+ if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused,
+ sizeof(int), &meye.doneq_lock) != sizeof(int))
+ break;
}
*i = meye.grab_buffer[*i].size;
mutex_unlock(&meye.lock);
while (kfifo_len(&priv->event_fifo)) {
u32 event;
- kfifo_out(&priv->event_fifo, (unsigned char *) &event,
- sizeof(event));
+ if (kfifo_out(&priv->event_fifo,
+ (unsigned char *) &event, sizeof(event)) !=
+ sizeof(event))
+ break;
spin_unlock_irq(&priv->driver_lock);
lbs_process_event(priv, event);
spin_lock_irq(&priv->driver_lock);
}
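The meye and libertas conversions above share one shape: a short return from
kfifo_out()/kfifo_out_locked() means the fifo is empty (or its accounting is
off), so the drain loop stops instead of acting on stale bytes. A minimal
sketch, with handle_event() standing in for a hypothetical consumer:

	static void drain_events(struct kfifo *fifo, spinlock_t *lock)
	{
		u32 event;

		while (kfifo_len(fifo)) {
			if (kfifo_out_locked(fifo, (unsigned char *)&event,
					sizeof(event), lock) != sizeof(event))
				break;	/* short read: nothing usable left */
			handle_event(event);	/* hypothetical consumer */
		}
	}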
if (r2t == NULL) {
- kfifo_out(&tcp_task->r2tqueue,
- (void *)&tcp_task->r2t, sizeof(void *));
- r2t = tcp_task->r2t;
+ if (kfifo_out(&tcp_task->r2tqueue,
+ (void *)&tcp_task->r2t, sizeof(void *)) !=
+ sizeof(void *)) {
+ WARN_ONCE(1, "unexpected fifo state");
+ r2t = NULL;
+ } else {
+ r2t = tcp_task->r2t;
+ }
}
spin_unlock_bh(&session->lock);
{
struct iu_entry *iue = NULL;
- kfifo_out_locked(&target->iu_queue.queue, (void *) &iue,
- sizeof(void *), &target->iu_queue.lock);
+ if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue,
+ sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) {
+ WARN_ONCE(1, "unexpected fifo state");
+ return NULL;
+ }
if (!iue)
return iue;
iue->target = target;
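Both SCSI call sites pop raw pointers out of a fifo, so a short read would
leave the destination holding garbage; the only safe reaction is to warn and
hand back NULL. The pattern, distilled (pop_iu() is illustrative, not a
kernel API):

	static struct iu_entry *pop_iu(struct kfifo *q, spinlock_t *lock)
	{
		struct iu_entry *iue = NULL;

		if (kfifo_out_locked(q, (unsigned char *)&iue,
				sizeof(void *), lock) != sizeof(void *)) {
			WARN_ONCE(1, "unexpected fifo state");
			return NULL;
		}
		return iue;	/* may still be NULL if a NULL was queued */
	}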
extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
gfp_t gfp_mask);
extern void kfifo_free(struct kfifo *fifo);
-extern __must_check unsigned int kfifo_in(struct kfifo *fifo,
+extern unsigned int kfifo_in(struct kfifo *fifo,
const unsigned char *from, unsigned int len);
extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
unsigned char *to, unsigned int len);
* the FIFO depending on the free space, and returns the number of
* bytes copied.
*/
-static inline __must_check unsigned int kfifo_in_locked(struct kfifo *fifo,
+static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
const unsigned char *from, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
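The header changes encode an asymmetry: a short kfifo_in() only means the
fifo was full, which many producers can legitimately ignore, while the return
value of kfifo_out() is the only indication that the destination buffer holds
valid data, so it keeps __must_check. A minimal sketch of a caller under the
new annotations (produce_sample()/consume_sample() are hypothetical):

	static void produce_sample(struct kfifo *fifo, u32 val)
	{
		/* ignoring a short write is now warning-free: it just
		 * means the fifo was full and this sample is dropped */
		kfifo_in(fifo, (unsigned char *)&val, sizeof(val));
	}

	static bool consume_sample(struct kfifo *fifo, u32 *val)
	{
		/* kfifo_out() is still __must_check: a short read means
		 * *val was not completely filled */
		return kfifo_out(fifo, (unsigned char *)val, sizeof(*val))
				== sizeof(*val);
	}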