/*
* submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
*/
-int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
+int vpdma_submit_descs(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, int list_num)
{
- /* we always use the first list */
- int list_num = 0;
int list_size;
if (vpdma_list_busy(vpdma, list_num))
}
EXPORT_SYMBOL(vpdma_submit_descs);
+static void dump_dtd(struct vpdma_dtd *dtd);
+
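+/*
+ * update the DMA start address of an existing data descriptor and request
+ * descriptor write-back so the driver can tell when it completes; the
+ * descriptor buffer is unmapped while the CPU modifies it
+ */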
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, dma_addr_t dma_addr,
+ void *write_dtd, int drop, int idx)
+{
+ struct vpdma_dtd *dtd = list->buf.addr;
+ dma_addr_t write_desc_addr;
+ int offset;
+
+ dtd += idx;
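+ /* hand the descriptor buffer back to the CPU before modifying it */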
+ vpdma_unmap_desc_buf(vpdma, &list->buf);
+
+ dtd->start_addr = dma_addr;
+
+ /*
+ * Calculate the write address from the offset of write_dtd from the
+ * start of list->buf.
+ */
+ offset = (void *)write_dtd - list->buf.addr;
+ write_desc_addr = list->buf.dma_addr + offset;
+
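+ /* enable descriptor write-back; also drop the data when 'drop' is set */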
+ if (drop)
+ dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+ 1, 1, 0);
+ else
+ dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+ 1, 0, 0);
+
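+ /* map the buffer back so the VPDMA can see the updated descriptor */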
+ vpdma_map_desc_buf(vpdma, &list->buf);
+
+ dump_dtd(dtd);
+}
+EXPORT_SYMBOL(vpdma_update_dma_addr);
+
static void dump_cfd(struct vpdma_cfd *cfd)
{
int class;
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
enum vpdma_channel chan, u32 flags)
+{
+ vpdma_rawchan_add_out_dtd(list, width, c_rect, fmt, dma_addr,
+ chan_info[chan].num, flags);
+}
+EXPORT_SYMBOL(vpdma_add_out_dtd);
+
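+/*
+ * append an outbound data transfer descriptor to the given descriptor list,
+ * using a raw vpdma channel number rather than an enum vpdma_channel
+ */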
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+ const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int raw_vpdma_chan, u32 flags)
{
int priority = 0;
int field = 0;
int stride;
struct vpdma_dtd *dtd;
- channel = next_chan = chan_info[chan].num;
+ channel = next_chan = raw_vpdma_chan;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
fmt->data_type == DATA_TYPE_C420) {
dump_dtd(dtd);
}
-EXPORT_SYMBOL(vpdma_add_out_dtd);
+EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
/*
* append an inbound data transfer descriptor to the given descriptor list,
EXPORT_SYMBOL(vpdma_add_in_dtd);
/* set or clear the mask for list complete interrupt */
-void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
- bool enable)
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable)
{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
u32 val;
- val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
+ val = read_reg(vpdma, reg_addr);
if (enable)
val |= (1 << (list_num * 2));
else
val &= ~(1 << (list_num * 2));
- write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
+ write_reg(vpdma, reg_addr, val);
}
EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
+/* set or clear the mask for list notify interrupt */
+void vpdma_enable_list_notify_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
+ u32 val;
+
+ val = read_reg(vpdma, reg_addr);
+ if (enable)
+ val |= (1 << ((list_num * 2) + 1));
+ else
+ val &= ~(1 << ((list_num * 2) + 1));
+ write_reg(vpdma, reg_addr, val);
+}
+EXPORT_SYMBOL(vpdma_enable_list_notify_irq);
+
+/* get the LIST_STAT register */
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+ return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_stat);
+
+/* get the LIST_MASK register */
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
+
+ return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_mask);
+
/* clear previously occurred list interrupts in the LIST_STAT register */
-void vpdma_clear_list_stat(struct vpdma_data *vpdma)
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num)
{
- write_reg(vpdma, VPDMA_INT_LIST0_STAT,
- read_reg(vpdma, VPDMA_INT_LIST0_STAT));
+ u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+ write_reg(vpdma, reg_addr, read_reg(vpdma, reg_addr));
}
EXPORT_SYMBOL(vpdma_clear_list_stat);
VPE_CHAN_RGB_OUT,
};
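+/*
+ * offsets used to derive the VIP2 and port B channel numbers from the
+ * corresponding VIP1 port A channel numbers
+ */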
+#define VIP_CHAN_VIP2_OFFSET 70
+#define VIP_CHAN_MULT_PORTB_OFFSET 16
+#define VIP_CHAN_YUV_PORTB_OFFSET 2
+#define VIP_CHAN_RGB_PORTB_OFFSET 1
+
/* flags for VPDMA data descriptors */
#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0)
#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1)
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type);
void vpdma_reset_desc_list(struct vpdma_desc_list *list);
void vpdma_free_desc_list(struct vpdma_desc_list *list);
-int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list);
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list,
+ int list_num);
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num);
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, dma_addr_t dma_addr,
+ void *write_dtd, int drop, int idx);

/* helpers for creating vpdma descriptors */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
struct vpdma_buf *blk, u32 dest_offset);
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
enum vpdma_channel chan, u32 flags);
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+ const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int raw_vpdma_chan, u32 flags);
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
int frame_height, int start_h, int start_v);
/* vpdma list interrupt management */
-void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
- bool enable);
-void vpdma_clear_list_stat(struct vpdma_data *vpdma);
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable);
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num);
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num);
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num);
/* vpdma client configuration */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
#define VPDMA_INT_LIST0_STAT 0x88
#define VPDMA_INT_LIST0_MASK 0x8c
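+/* offset between the register sets of consecutive VPDMA interrupts */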
+#define VPDMA_INTX_OFFSET 0x50
+
#define VPDMA_PERFMON(i) (0x200 + i * 4)
-/* VPE specific client registers */
+/* VIP/VPE client registers */
#define VPDMA_DEI_CHROMA1_CSTAT 0x0300
#define VPDMA_DEI_LUMA1_CSTAT 0x0304
#define VPDMA_DEI_LUMA2_CSTAT 0x0308
#define VPDMA_DEI_CHROMA3_CSTAT 0x0314
#define VPDMA_DEI_MV_IN_CSTAT 0x0330
#define VPDMA_DEI_MV_OUT_CSTAT 0x033c
+#define VPDMA_VIP_LO_Y_CSTAT 0x0388
+#define VPDMA_VIP_LO_UV_CSTAT 0x038c
#define VPDMA_VIP_UP_Y_CSTAT 0x0390
#define VPDMA_VIP_UP_UV_CSTAT 0x0394
#define VPDMA_VPI_CTL_CSTAT 0x03d0
#define DATA_TYPE_MV 0x3
-/* VPDMA channel numbers(only VPE channels for now) */
+/* VPDMA channel numbers; some are shared by VIP and VPE and appear twice */
#define VPE_CHAN_NUM_LUMA1_IN 0
#define VPE_CHAN_NUM_CHROMA1_IN 1
#define VPE_CHAN_NUM_LUMA2_IN 2
#define VPE_CHAN_NUM_CHROMA3_IN 5
#define VPE_CHAN_NUM_MV_IN 12
#define VPE_CHAN_NUM_MV_OUT 15
+#define VIP1_CHAN_NUM_MULT_PORT_A_SRC0 38
+#define VIP1_CHAN_NUM_MULT_ANC_A_SRC0 70
#define VPE_CHAN_NUM_LUMA_OUT 102
#define VPE_CHAN_NUM_CHROMA_OUT 103
+#define VIP1_CHAN_NUM_PORT_A_LUMA 102
+#define VIP1_CHAN_NUM_PORT_A_CHROMA 103
#define VPE_CHAN_NUM_RGB_OUT 106
+#define VIP1_CHAN_NUM_PORT_A_RGB 106
+#define VIP1_CHAN_NUM_PORT_B_RGB 107

/*
* a VPDMA address data block payload for a configuration descriptor needs to
* have each sub block length as a multiple of 16 bytes. Therefore, the overall
write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
VPE_DS1_UV_ERROR_INT);
- vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
}
static void disable_irqs(struct vpe_ctx *ctx)
write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
- vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
}
/* device_run() - prepares and starts the device
enable_irqs(ctx);
vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
- vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
+ vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0);
}
static void dei_error(struct vpe_ctx *ctx)
if (irqst0) {
if (irqst0 & VPE_INT0_LIST0_COMPLETE)
- vpdma_clear_list_stat(ctx->dev->vpdma);
+ vpdma_clear_list_stat(ctx->dev->vpdma, 0);
irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
}