struct mid_q_entry *);
bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
/* setup request: allocate mid, sign message */
- int (*setup_request)(struct cifs_ses *, struct kvec *, unsigned int,
- struct mid_q_entry **);
+ struct mid_q_entry *(*setup_request)(struct cifs_ses *,
+ struct smb_rqst *);
/* setup async request: allocate mid, sign message */
- int (*setup_async_request)(struct TCP_Server_Info *, struct kvec *,
- unsigned int, struct mid_q_entry **);
+ struct mid_q_entry *(*setup_async_request)(struct TCP_Server_Info *,
+ struct smb_rqst *);
/* check response: verify signature, map error */
int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
bool);
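For reference, the smb_rqst container these hooks now take is assumed, at this point in the series, to be just a kvec array plus a count (a sketch only; later patches extend it with page-array fields):

struct smb_rqst {
	struct kvec	*rq_iov;	/* array of kvecs */
	unsigned int	rq_nvec;	/* number of kvecs in array */
};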
extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
extern void cifs_delete_mid(struct mid_q_entry *mid);
extern void cifs_wake_up_task(struct mid_q_entry *mid);
-extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
- unsigned int nvec, mid_receive_t *receive,
- mid_callback_t *callback, void *cbdata,
- const int flags);
+extern int cifs_call_async(struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
+ mid_receive_t *receive, mid_callback_t *callback,
+ void *cbdata, const int flags);
extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
int * /* bytes returned */ , const int);
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
char *in_buf, int flags);
-extern int cifs_setup_request(struct cifs_ses *, struct kvec *, unsigned int,
- struct mid_q_entry **);
-extern int cifs_setup_async_request(struct TCP_Server_Info *, struct kvec *,
- unsigned int, struct mid_q_entry **);
+extern struct mid_q_entry *cifs_setup_request(struct cifs_ses *,
+ struct smb_rqst *);
+extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
+ struct smb_rqst *);
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
ECHO_REQ *smb;
int rc = 0;
struct kvec iov;
+ struct smb_rqst rqst = { .rq_iov = &iov,
+ .rq_nvec = 1 };
cFYI(1, "In echo request");
iov.iov_base = smb;
iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
- rc = cifs_call_async(server, &iov, 1, NULL, cifs_echo_callback,
+ rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback,
server, CIFS_ASYNC_OP | CIFS_ECHO_OP);
if (rc)
cFYI(1, "Echo request failed: %d", rc);
READ_REQ *smb = NULL;
int wct;
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+ struct smb_rqst rqst = { .rq_iov = rdata->iov,
+ .rq_nvec = 1 };
cFYI(1, "%s: offset=%llu bytes=%u", __func__,
rdata->offset, rdata->bytes);
rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
kref_get(&rdata->refcount);
- rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
- cifs_readv_receive, cifs_readv_callback,
- rdata, 0);
+ rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive,
+ cifs_readv_callback, rdata, 0);
if (rc == 0)
cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
int wct;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct kvec *iov = NULL;
+ struct smb_rqst rqst = { };
if (tcon->ses->capabilities & CAP_LARGE_FILES) {
wct = 14;
goto async_writev_out;
/* 1 iov per page + 1 for header */
- iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS);
+ rqst.rq_nvec = wdata->nr_pages + 1;
+ iov = kzalloc(rqst.rq_nvec * sizeof(*iov), GFP_NOFS);
if (iov == NULL) {
rc = -ENOMEM;
goto async_writev_out;
}
+ rqst.rq_iov = iov;
smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
}
kref_get(&wdata->refcount);
- rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
- NULL, cifs_writev_callback, wdata, 0);
+ rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
+ cifs_writev_callback, wdata, 0);
if (rc == 0)
cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
struct smb2_echo_req *req;
int rc = 0;
struct kvec iov;
+ struct smb_rqst rqst = { .rq_iov = &iov,
+ .rq_nvec = 1 };
cFYI(1, "In echo request");
/* 4 for rfc1002 length field */
iov.iov_len = get_rfc1002_length(req) + 4;
- rc = cifs_call_async(server, &iov, 1, NULL, smb2_echo_callback, server,
+ rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
CIFS_ECHO_OP);
if (rc)
cFYI(1, "Echo request failed: %d", rc);
int rc;
struct smb2_hdr *buf;
struct cifs_io_parms io_parms;
+ struct smb_rqst rqst = { .rq_iov = rdata->iov,
+ .rq_nvec = 1 };
cFYI(1, "%s: offset=%llu bytes=%u", __func__,
rdata->offset, rdata->bytes);
rdata->iov[0].iov_len = get_rfc1002_length(rdata->iov[0].iov_base) + 4;
kref_get(&rdata->refcount);
- rc = cifs_call_async(io_parms.tcon->ses->server, rdata->iov, 1,
+ rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
cifs_readv_receive, smb2_readv_callback,
rdata, 0);
if (rc)
struct smb2_write_req *req = NULL;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct kvec *iov = NULL;
+ struct smb_rqst rqst;
rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
if (rc)
rc = -ENOMEM;
goto async_writev_out;
}
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = wdata->nr_pages + 1;
req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
kref_get(&wdata->refcount);
- rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
- NULL, smb2_writev_callback, wdata, 0);
+ rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
+ smb2_writev_callback, wdata, 0);
if (rc)
kref_put(&wdata->refcount, cifs_writedata_release);
extern int smb2_verify_signature(struct smb_rqst *, struct TCP_Server_Info *);
extern int smb2_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
-extern int smb2_setup_request(struct cifs_ses *ses, struct kvec *iov,
- unsigned int nvec, struct mid_q_entry **ret_mid);
-extern int smb2_setup_async_request(struct TCP_Server_Info *server,
- struct kvec *iov, unsigned int nvec,
- struct mid_q_entry **ret_mid);
+extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
+ struct smb_rqst *rqst);
+extern struct mid_q_entry *smb2_setup_async_request(
+ struct TCP_Server_Info *server, struct smb_rqst *rqst);
extern void smb2_echo_request(struct work_struct *work);
extern bool smb2_is_valid_oplock_break(char *buffer,
struct TCP_Server_Info *srv);
return map_smb2_to_linux_error(mid->resp_buf, log_error);
}
-int
-smb2_setup_request(struct cifs_ses *ses, struct kvec *iov,
- unsigned int nvec, struct mid_q_entry **ret_mid)
+struct mid_q_entry *
+smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
int rc;
- struct smb2_hdr *hdr = (struct smb2_hdr *)iov[0].iov_base;
+ struct smb2_hdr *hdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
- struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = nvec };
smb2_seq_num_into_buf(ses->server, hdr);
rc = smb2_get_mid_entry(ses, hdr, &mid);
if (rc)
- return rc;
- rc = smb2_sign_rqst(&rqst, ses->server);
- if (rc)
+ return ERR_PTR(rc);
+ rc = smb2_sign_rqst(rqst, ses->server);
+ if (rc) {
cifs_delete_mid(mid);
- *ret_mid = mid;
- return rc;
+ return ERR_PTR(rc);
+ }
+ return mid;
}
-int
-smb2_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
- unsigned int nvec, struct mid_q_entry **ret_mid)
+struct mid_q_entry *
+smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
- int rc = 0;
- struct smb2_hdr *hdr = (struct smb2_hdr *)iov[0].iov_base;
+ int rc;
+ struct smb2_hdr *hdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
- struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = nvec };
smb2_seq_num_into_buf(server, hdr);
mid = smb2_mid_entry_alloc(hdr, server);
if (mid == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- rc = smb2_sign_rqst(&rqst, server);
+ rc = smb2_sign_rqst(rqst, server);
if (rc) {
DeleteMidQEntry(mid);
- return rc;
+ return ERR_PTR(rc);
}
- *ret_mid = mid;
- return rc;
+ return mid;
}
return 0;
}
-int
-cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
- unsigned int nvec, struct mid_q_entry **ret_mid)
+struct mid_q_entry *
+cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
int rc;
- struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+ struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
/* enable signing if server requires it */
mid = AllocMidQEntry(hdr, server);
if (mid == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- rc = cifs_sign_smbv(iov, nvec, server, &mid->sequence_number);
+ rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
if (rc) {
DeleteMidQEntry(mid);
- return rc;
+ return ERR_PTR(rc);
}
- *ret_mid = mid;
- return 0;
+ return mid;
}
/*
* the result. Caller is responsible for dealing with timeouts.
*/
int
-cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
- unsigned int nvec, mid_receive_t *receive,
- mid_callback_t *callback, void *cbdata, const int flags)
+cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+ mid_receive_t *receive, mid_callback_t *callback,
+ void *cbdata, const int flags)
{
int rc, timeout, optype;
struct mid_q_entry *mid;
return rc;
mutex_lock(&server->srv_mutex);
- rc = server->ops->setup_async_request(server, iov, nvec, &mid);
- if (rc) {
+ mid = server->ops->setup_async_request(server, rqst);
+ if (IS_ERR(mid)) {
mutex_unlock(&server->srv_mutex);
add_credits(server, 1, optype);
wake_up(&server->request_q);
- return rc;
+ return PTR_ERR(mid);
}
mid->receive = receive;
cifs_in_send_inc(server);
- rc = smb_sendv(server, iov, nvec);
+ rc = smb_send_rqst(server, rqst);
cifs_in_send_dec(server);
cifs_save_when_sent(mid);
mutex_unlock(&server->srv_mutex);
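smb_send_rqst is assumed here to be the rqst-aware transmit primitive; if the legacy smb_sendv entry point is kept, it can simply wrap it along these lines (a sketch, not part of this hunk):

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	/* sketch: wrap the old iov/nvec interface around the new primitive */
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}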
return map_smb_to_linux_error(mid->resp_buf, log_error);
}
-int
-cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
- unsigned int nvec, struct mid_q_entry **ret_mid)
+struct mid_q_entry *
+cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
int rc;
- struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+ struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
rc = allocate_mid(ses, hdr, &mid);
if (rc)
- return rc;
- rc = cifs_sign_smbv(iov, nvec, ses->server, &mid->sequence_number);
- if (rc)
+ return ERR_PTR(rc);
+ rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
+ if (rc) {
cifs_delete_mid(mid);
- *ret_mid = mid;
- return rc;
+ return ERR_PTR(rc);
+ }
+ return mid;
}
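Synchronous callers must adopt the same ERR_PTR convention that cifs_call_async uses above; a minimal sketch of the pattern the following SendReceive2 hunk moves toward (cleanup details elided):

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		/* real path also releases the small buf and returns credits */
		return PTR_ERR(midQ);
	}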
int
struct mid_q_entry *midQ;
char *buf = iov[0].iov_base;
unsigned int credits = 1;
+ struct smb_rqst rqst = { .rq_iov = iov,
+ .rq_nvec = n_vec };
timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
mutex_lock(&ses->server->srv_mutex);
- rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
+ midQ = ses->server->ops->setup_request(ses, &rqst);
- if (rc) {
+ if (IS_ERR(midQ)) {
mutex_unlock(&ses->server->srv_mutex);
cifs_small_buf_release(buf);