
Merge branch 'sendmsg.cifs' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull cifs iovec cleanups from Al Viro.

* 'sendmsg.cifs' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  cifs: don't bother with kmap on read_pages side
  cifs_readv_receive: use cifs_read_from_socket()
  cifs: no need to wank with copying and advancing iovec on recvmsg side either
  cifs: quit playing games with draining iovecs
  cifs: merge the hash calculation helpers
Linus Torvalds 2016-05-18 10:17:56 -07:00
commit 442c9ac989
8 changed files with 172 additions and 388 deletions
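
The common thread of the series: instead of copying and advancing kvec arrays by hand across partial transfers, the transport code now describes each transfer with an iov_iter embedded in a struct msghdr and lets sock_recvmsg()/sock_sendmsg() track progress via msg_data_left(). A minimal receive-side sketch of that pattern, using the iov_iter calls as they appear in the diff below (READ | ITER_KVEC direction flag, three-argument sock_recvmsg()); illustrative only, not a drop-in kernel function:

/* read exactly to_read bytes into buf, or fail */
static int read_exact(struct socket *sock, void *buf, size_t to_read)
{
	struct msghdr msg = { };
	struct kvec iov = { .iov_base = buf, .iov_len = to_read };
	int ret;

	/* point msg.msg_iter at the single destination buffer */
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read);

	while (msg_data_left(&msg)) {
		/* sock_recvmsg() advances msg.msg_iter by what it read */
		ret = sock_recvmsg(sock, &msg, 0);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -ECONNABORTED;	/* peer closed mid-message */
	}
	return 0;
}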


@ -66,6 +66,60 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
return 0;
}
int __cifs_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server, char *signature,
struct shash_desc *shash)
{
int i;
int rc;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
cifs_dbg(VFS, "null iovec entry\n");
return -EIO;
}
/* The first entry includes a length field (which does not get
signed that occupies the first 4 bytes before the header */
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
rc = crypto_shash_update(shash,
iov[i].iov_base + 4, iov[i].iov_len - 4);
} else {
rc = crypto_shash_update(shash,
iov[i].iov_base, iov[i].iov_len);
}
if (rc) {
cifs_dbg(VFS, "%s: Could not update with payload\n",
__func__);
return rc;
}
}
/* now hash over the rq_pages array */
for (i = 0; i < rqst->rq_npages; i++) {
void *kaddr = kmap(rqst->rq_pages[i]);
size_t len = rqst->rq_pagesz;
if (i == rqst->rq_npages - 1)
len = rqst->rq_tailsz;
crypto_shash_update(shash, kaddr, len);
kunmap(rqst->rq_pages[i]);
}
rc = crypto_shash_final(shash, signature);
if (rc)
cifs_dbg(VFS, "%s: Could not generate hash\n", __func__);
return rc;
}
/*
* Calculate and return the CIFS signature based on the mac key and SMB PDU.
* The 16 byte signature must be allocated by the caller. Note we only use the
@ -76,12 +130,9 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
static int cifs_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server, char *signature)
{
int i;
int rc;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
if (iov == NULL || signature == NULL || server == NULL)
if (!rqst->rq_iov || !signature || !server)
return -EINVAL;
if (!server->secmech.sdescmd5) {
@ -105,48 +156,8 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
return rc;
}
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
cifs_dbg(VFS, "null iovec entry\n");
return -EIO;
}
/* The first entry includes a length field (which does not get
signed that occupies the first 4 bytes before the header */
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
rc =
crypto_shash_update(&server->secmech.sdescmd5->shash,
iov[i].iov_base + 4, iov[i].iov_len - 4);
} else {
rc =
crypto_shash_update(&server->secmech.sdescmd5->shash,
iov[i].iov_base, iov[i].iov_len);
}
if (rc) {
cifs_dbg(VFS, "%s: Could not update with payload\n",
__func__);
return rc;
}
}
/* now hash over the rq_pages array */
for (i = 0; i < rqst->rq_npages; i++) {
struct kvec p_iov;
cifs_rqst_page_to_kvec(rqst, i, &p_iov);
crypto_shash_update(&server->secmech.sdescmd5->shash,
p_iov.iov_base, p_iov.iov_len);
kunmap(rqst->rq_pages[i]);
}
rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
if (rc)
cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
return rc;
return __cifs_calc_signature(rqst, server, signature,
&server->secmech.sdescmd5->shash);
}
/* must be called with server->srv_mutex held */
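
With the iovec and page walks merged into __cifs_calc_signature(), each signing flavour reduces to initialising its own shash state and delegating. A hypothetical caller sketch; the sdescnewhash field is invented for illustration, the real callers in this series pass sdescmd5, sdeschmacsha256 and sdesccmacaes:

static int example_calc_signature(struct smb_rqst *rqst,
				  struct TCP_Server_Info *server,
				  char *signature)
{
	int rc;

	/* set up this flavour's own hash state ... */
	rc = crypto_shash_init(&server->secmech.sdescnewhash->shash);
	if (rc) {
		cifs_dbg(VFS, "%s: could not init hash\n", __func__);
		return rc;
	}

	/* ... then let the shared helper walk rq_iov and rq_pages */
	return __cifs_calc_signature(rqst, server, signature,
				     &server->secmech.sdescnewhash->shash);
}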


@ -615,8 +615,6 @@ struct TCP_Server_Info {
bool sec_mskerberos; /* supports legacy MS Kerberos */
bool large_buf; /* is current buffer large? */
struct delayed_work echo; /* echo ping workqueue job */
struct kvec *iov; /* reusable kvec array for receives */
unsigned int nr_iov; /* number of kvecs in array */
char *smallbuf; /* pointer to current "small" buffer */
char *bigbuf; /* pointer to current "big" buffer */
unsigned int total_read; /* total amount of data read in this pass */


@ -37,8 +37,6 @@ extern void cifs_buf_release(void *);
extern struct smb_hdr *cifs_small_buf_get(void);
extern void cifs_small_buf_release(void *);
extern void free_rsp_buf(int, void *);
extern void cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
struct kvec *iov);
extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
unsigned int /* length */);
extern unsigned int _get_xid(void);
@ -181,10 +179,9 @@ extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read);
extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
struct kvec *iov_orig, unsigned int nr_segs,
unsigned int to_read);
unsigned int to_read);
extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
struct page *page, unsigned int to_read);
extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb);
extern int cifs_match_super(struct super_block *, void *);
@ -512,4 +509,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
unsigned int *pbytes_written);
int __cifs_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server, char *signature,
struct shash_desc *shash);
#endif /* _CIFSPROTO_H */


@ -1447,10 +1447,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
HEADER_SIZE(server) + 1;
rdata->iov.iov_base = buf + HEADER_SIZE(server) - 1;
rdata->iov.iov_len = len;
length = cifs_readv_from_socket(server, &rdata->iov, 1, len);
length = cifs_read_from_socket(server,
buf + HEADER_SIZE(server) - 1, len);
if (length < 0)
return length;
server->total_read += length;
@ -1502,9 +1500,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
len = data_offset - server->total_read;
if (len > 0) {
/* read any junk before data into the rest of smallbuf */
rdata->iov.iov_base = buf + server->total_read;
rdata->iov.iov_len = len;
length = cifs_readv_from_socket(server, &rdata->iov, 1, len);
length = cifs_read_from_socket(server,
buf + server->total_read, len);
if (length < 0)
return length;
server->total_read += length;


@ -501,99 +501,34 @@ server_unresponsive(struct TCP_Server_Info *server)
return false;
}
/*
* kvec_array_init - clone a kvec array, and advance into it
* @new: pointer to memory for cloned array
* @iov: pointer to original array
* @nr_segs: number of members in original array
* @bytes: number of bytes to advance into the cloned array
*
* This function will copy the array provided in iov to a section of memory
* and advance the specified number of bytes into the new array. It returns
* the number of segments in the new array. "new" must be at least as big as
* the original iov array.
*/
static unsigned int
kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
size_t bytes)
{
size_t base = 0;
while (bytes || !iov->iov_len) {
int copy = min(bytes, iov->iov_len);
bytes -= copy;
base += copy;
if (iov->iov_len == base) {
iov++;
nr_segs--;
base = 0;
}
}
memcpy(new, iov, sizeof(*iov) * nr_segs);
new->iov_base += base;
new->iov_len -= base;
return nr_segs;
}
static struct kvec *
get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
{
struct kvec *new_iov;
if (server->iov && nr_segs <= server->nr_iov)
return server->iov;
/* not big enough -- allocate a new one and release the old */
new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
if (new_iov) {
kfree(server->iov);
server->iov = new_iov;
server->nr_iov = nr_segs;
}
return new_iov;
}
int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
unsigned int nr_segs, unsigned int to_read)
static int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
{
int length = 0;
int total_read;
unsigned int segs;
struct msghdr smb_msg;
struct kvec *iov;
iov = get_server_iovec(server, nr_segs);
if (!iov)
return -ENOMEM;
smb_msg->msg_control = NULL;
smb_msg->msg_controllen = 0;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
for (total_read = 0; to_read; total_read += length, to_read -= length) {
for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
try_to_freeze();
if (server_unresponsive(server)) {
total_read = -ECONNABORTED;
break;
if (server_unresponsive(server))
return -ECONNABORTED;
length = sock_recvmsg(server->ssocket, smb_msg, 0);
if (server->tcpStatus == CifsExiting)
return -ESHUTDOWN;
if (server->tcpStatus == CifsNeedReconnect) {
cifs_reconnect(server);
return -ECONNABORTED;
}
segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
length = kernel_recvmsg(server->ssocket, &smb_msg,
iov, segs, to_read, 0);
if (server->tcpStatus == CifsExiting) {
total_read = -ESHUTDOWN;
break;
} else if (server->tcpStatus == CifsNeedReconnect) {
cifs_reconnect(server);
total_read = -ECONNABORTED;
break;
} else if (length == -ERESTARTSYS ||
length == -EAGAIN ||
length == -EINTR) {
if (length == -ERESTARTSYS ||
length == -EAGAIN ||
length == -EINTR) {
/*
* Minimum sleep to prevent looping, allowing socket
* to clear and app threads to set tcpStatus
@ -602,12 +537,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
usleep_range(1000, 2000);
length = 0;
continue;
} else if (length <= 0) {
cifs_dbg(FYI, "Received no data or error: expecting %d\n"
"got %d", to_read, length);
}
if (length <= 0) {
cifs_dbg(FYI, "Received no data or error: %d\n", length);
cifs_reconnect(server);
total_read = -ECONNABORTED;
break;
return -ECONNABORTED;
}
}
return total_read;
@ -617,12 +552,21 @@ int
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read)
{
struct kvec iov;
struct msghdr smb_msg;
struct kvec iov = {.iov_base = buf, .iov_len = to_read};
iov_iter_kvec(&smb_msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read);
iov.iov_base = buf;
iov.iov_len = to_read;
return cifs_readv_from_socket(server, &smb_msg);
}
return cifs_readv_from_socket(server, &iov, 1, to_read);
int
cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
unsigned int to_read)
{
struct msghdr smb_msg;
struct bio_vec bv = {.bv_page = page, .bv_len = to_read};
iov_iter_bvec(&smb_msg.msg_iter, READ | ITER_BVEC, &bv, 1, to_read);
return cifs_readv_from_socket(server, &smb_msg);
}
static bool
@ -783,7 +727,6 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
}
kfree(server->hostname);
kfree(server->iov);
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
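
Usage sketch for the two receive helpers above: header bytes go into an ordinary buffer through the kvec-backed wrapper, payload goes straight into a page through the bio_vec-backed one, with no kmap on the caller's side. Simplified for illustration; the real callers are cifs_readv_receive() and the read_into_pages routines:

static int example_receive(struct TCP_Server_Info *server,
			   char *hdr_buf, unsigned int hdr_len,
			   struct page *page, unsigned int payload_len)
{
	int length;

	/* header lands in a plain kernel buffer (kvec-backed msghdr) */
	length = cifs_read_from_socket(server, hdr_buf, hdr_len);
	if (length < 0)
		return length;
	server->total_read += length;

	/* payload lands directly in the page (bio_vec-backed msghdr) */
	length = cifs_read_page_from_socket(server, page, payload_len);
	if (length < 0)
		return length;
	server->total_read += length;

	return 0;
}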


@ -2852,39 +2852,31 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
int result = 0;
unsigned int i;
unsigned int nr_pages = rdata->nr_pages;
struct kvec iov;
rdata->got_bytes = 0;
rdata->tailsz = PAGE_SIZE;
for (i = 0; i < nr_pages; i++) {
struct page *page = rdata->pages[i];
size_t n;
if (len >= PAGE_SIZE) {
/* enough data to fill the page */
iov.iov_base = kmap(page);
iov.iov_len = PAGE_SIZE;
cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
i, iov.iov_base, iov.iov_len);
len -= PAGE_SIZE;
} else if (len > 0) {
/* enough for partial page, fill and zero the rest */
iov.iov_base = kmap(page);
iov.iov_len = len;
cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
i, iov.iov_base, iov.iov_len);
memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
rdata->tailsz = len;
len = 0;
} else {
if (len <= 0) {
/* no need to hold page hostage */
rdata->pages[i] = NULL;
rdata->nr_pages--;
put_page(page);
continue;
}
result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
kunmap(page);
n = len;
if (len >= PAGE_SIZE) {
/* enough data to fill the page */
n = PAGE_SIZE;
len -= n;
} else {
zero_user(page, len, PAGE_SIZE - len);
rdata->tailsz = len;
len = 0;
}
result = cifs_read_page_from_socket(server, page, n);
if (result < 0)
break;
@ -3300,7 +3292,6 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
u64 eof;
pgoff_t eof_index;
unsigned int nr_pages = rdata->nr_pages;
struct kvec iov;
/* determine the eof that the server (probably) has */
eof = CIFS_I(rdata->mapping->host)->server_eof;
@ -3311,23 +3302,14 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
rdata->tailsz = PAGE_SIZE;
for (i = 0; i < nr_pages; i++) {
struct page *page = rdata->pages[i];
size_t n = PAGE_SIZE;
if (len >= PAGE_SIZE) {
/* enough data to fill the page */
iov.iov_base = kmap(page);
iov.iov_len = PAGE_SIZE;
cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
i, page->index, iov.iov_base, iov.iov_len);
len -= PAGE_SIZE;
} else if (len > 0) {
/* enough for partial page, fill and zero the rest */
iov.iov_base = kmap(page);
iov.iov_len = len;
cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
i, page->index, iov.iov_base, iov.iov_len);
memset(iov.iov_base + len,
'\0', PAGE_SIZE - len);
rdata->tailsz = len;
zero_user(page, len, PAGE_SIZE - len);
n = rdata->tailsz = len;
len = 0;
} else if (page->index > eof_index) {
/*
@ -3357,8 +3339,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
continue;
}
result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
kunmap(page);
result = cifs_read_page_from_socket(server, page, n);
if (result < 0)
break;
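
The per-page loops above now boil down to: work out how many bytes belong to this page, clear the unread tail of a short final page with zero_user(), and hand the page to cifs_read_page_from_socket(). A condensed sketch of that step (pages that would receive no data are dropped by the caller beforehand, as in the original loops):

static int example_fill_page(struct TCP_Server_Info *server,
			     struct page *page, unsigned int *remaining,
			     unsigned int *tailsz)
{
	unsigned int n = min_t(unsigned int, *remaining, PAGE_SIZE);

	if (n < PAGE_SIZE) {
		/* partial final page: zero the bytes we will not read */
		zero_user(page, n, PAGE_SIZE - n);
		*tailsz = n;
	}
	*remaining -= n;

	/* read n bytes from the socket straight into the page */
	return cifs_read_page_from_socket(server, page, n);
}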


@ -135,11 +135,10 @@ smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server)
int
smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
int i, rc;
int rc;
unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
unsigned char *sigptr = smb2_signature;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
@ -171,53 +170,11 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return rc;
}
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
cifs_dbg(VFS, "null iovec entry\n");
return -EIO;
}
/*
* The first entry includes a length field (which does not get
* signed that occupies the first 4 bytes before the header).
*/
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
rc =
crypto_shash_update(
&server->secmech.sdeschmacsha256->shash,
iov[i].iov_base + 4, iov[i].iov_len - 4);
} else {
rc =
crypto_shash_update(
&server->secmech.sdeschmacsha256->shash,
iov[i].iov_base, iov[i].iov_len);
}
if (rc) {
cifs_dbg(VFS, "%s: Could not update with payload\n",
__func__);
return rc;
}
}
rc = __cifs_calc_signature(rqst, server, sigptr,
&server->secmech.sdeschmacsha256->shash);
/* now hash over the rq_pages array */
for (i = 0; i < rqst->rq_npages; i++) {
struct kvec p_iov;
cifs_rqst_page_to_kvec(rqst, i, &p_iov);
crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
p_iov.iov_base, p_iov.iov_len);
kunmap(rqst->rq_pages[i]);
}
rc = crypto_shash_final(&server->secmech.sdeschmacsha256->shash,
sigptr);
if (rc)
cifs_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__);
memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE);
if (!rc)
memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE);
return rc;
}
@ -395,12 +352,10 @@ generate_smb311signingkey(struct cifs_ses *ses)
int
smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
int i;
int rc = 0;
unsigned char smb3_signature[SMB2_CMACAES_SIZE];
unsigned char *sigptr = smb3_signature;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
@ -431,54 +386,12 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
return rc;
}
rc = __cifs_calc_signature(rqst, server, sigptr,
&server->secmech.sdesccmacaes->shash);
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
cifs_dbg(VFS, "null iovec entry");
return -EIO;
}
/*
* The first entry includes a length field (which does not get
* signed that occupies the first 4 bytes before the header).
*/
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
rc =
crypto_shash_update(
&server->secmech.sdesccmacaes->shash,
iov[i].iov_base + 4, iov[i].iov_len - 4);
} else {
rc =
crypto_shash_update(
&server->secmech.sdesccmacaes->shash,
iov[i].iov_base, iov[i].iov_len);
}
if (rc) {
cifs_dbg(VFS, "%s: Couldn't update cmac aes with payload\n",
__func__);
return rc;
}
}
/* now hash over the rq_pages array */
for (i = 0; i < rqst->rq_npages; i++) {
struct kvec p_iov;
cifs_rqst_page_to_kvec(rqst, i, &p_iov);
crypto_shash_update(&server->secmech.sdesccmacaes->shash,
p_iov.iov_base, p_iov.iov_len);
kunmap(rqst->rq_pages[i]);
}
rc = crypto_shash_final(&server->secmech.sdesccmacaes->shash,
sigptr);
if (rc)
cifs_dbg(VFS, "%s: Could not generate cmac aes\n", __func__);
memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE);
if (!rc)
memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE);
return rc;
}


@ -124,41 +124,32 @@ cifs_delete_mid(struct mid_q_entry *mid)
/*
* smb_send_kvec - send an array of kvecs to the server
* @server: Server to send the data to
* @iov: Pointer to array of kvecs
* @n_vec: length of kvec array
* @smb_msg: Message to send
* @sent: amount of data sent on socket is stored here
*
* Our basic "send data to server" function. Should be called with srv_mutex
* held. The caller is responsible for handling the results.
*/
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
size_t *sent)
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
size_t *sent)
{
int rc = 0;
int i = 0;
struct msghdr smb_msg;
unsigned int remaining;
size_t first_vec = 0;
int retries = 0;
struct socket *ssocket = server->ssocket;
*sent = 0;
smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
smb_msg.msg_namelen = sizeof(struct sockaddr);
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
smb_msg->msg_namelen = sizeof(struct sockaddr);
smb_msg->msg_control = NULL;
smb_msg->msg_controllen = 0;
if (server->noblocksnd)
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
else
smb_msg.msg_flags = MSG_NOSIGNAL;
smb_msg->msg_flags = MSG_NOSIGNAL;
remaining = 0;
for (i = 0; i < n_vec; i++)
remaining += iov[i].iov_len;
i = 0;
while (remaining) {
while (msg_data_left(smb_msg)) {
/*
* If blocking send, we try 3 times, since each can block
* for 5 seconds. For nonblocking we have to try more
@ -177,35 +168,21 @@ smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
* after the retries we will kill the socket and
* reconnect which may clear the network problem.
*/
rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
n_vec - first_vec, remaining);
rc = sock_sendmsg(ssocket, smb_msg);
if (rc == -EAGAIN) {
i++;
if (i >= 14 || (!server->noblocksnd && (i > 2))) {
retries++;
if (retries >= 14 ||
(!server->noblocksnd && (retries > 2))) {
cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
ssocket);
rc = -EAGAIN;
break;
return -EAGAIN;
}
msleep(1 << i);
msleep(1 << retries);
continue;
}
if (rc < 0)
break;
/* send was at least partially successful */
*sent += rc;
if (rc == remaining) {
remaining = 0;
break;
}
if (rc > remaining) {
cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining);
break;
}
return rc;
if (rc == 0) {
/* should never happen, letting socket clear before
@ -215,59 +192,11 @@ smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
continue;
}
remaining -= rc;
/* the line below resets i */
for (i = first_vec; i < n_vec; i++) {
if (iov[i].iov_len) {
if (rc > iov[i].iov_len) {
rc -= iov[i].iov_len;
iov[i].iov_len = 0;
} else {
iov[i].iov_base += rc;
iov[i].iov_len -= rc;
first_vec = i;
break;
}
}
}
i = 0; /* in case we get ENOSPC on the next send */
rc = 0;
/* send was at least partially successful */
*sent += rc;
retries = 0; /* in case we get ENOSPC on the next send */
}
return rc;
}
/**
* rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
* @rqst: pointer to smb_rqst
* @idx: index into the array of the page
* @iov: pointer to struct kvec that will hold the result
*
* Helper function to convert a slot in the rqst->rq_pages array into a kvec.
* The page will be kmapped and the address placed into iov_base. The length
* will then be adjusted according to the ptailoff.
*/
void
cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
struct kvec *iov)
{
/*
* FIXME: We could avoid this kmap altogether if we used
* kernel_sendpage instead of kernel_sendmsg. That will only
* work if signing is disabled though as sendpage inlines the
* page directly into the fraglist. If userspace modifies the
* page after we calculate the signature, then the server will
* reject it and may break the connection. kernel_sendmsg does
* an extra copy of the data and avoids that issue.
*/
iov->iov_base = kmap(rqst->rq_pages[idx]);
/* if last page, don't send beyond this offset into page */
if (idx == (rqst->rq_npages - 1))
iov->iov_len = rqst->rq_tailsz;
else
iov->iov_len = rqst->rq_pagesz;
return 0;
}
static unsigned long
@ -299,8 +228,9 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
unsigned long send_length;
unsigned int i;
size_t total_len = 0, sent;
size_t total_len = 0, sent, size;
struct socket *ssocket = server->ssocket;
struct msghdr smb_msg;
int val = 1;
if (ssocket == NULL)
@ -321,7 +251,13 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
rc = smb_send_kvec(server, iov, n_vec, &sent);
size = 0;
for (i = 0; i < n_vec; i++)
size += iov[i].iov_len;
iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
goto uncork;
@ -329,11 +265,16 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
/* now walk the page array and send each page in it */
for (i = 0; i < rqst->rq_npages; i++) {
struct kvec p_iov;
cifs_rqst_page_to_kvec(rqst, i, &p_iov);
rc = smb_send_kvec(server, &p_iov, 1, &sent);
kunmap(rqst->rq_pages[i]);
size_t len = i == rqst->rq_npages - 1
? rqst->rq_tailsz
: rqst->rq_pagesz;
struct bio_vec bvec = {
.bv_page = rqst->rq_pages[i],
.bv_len = len
};
iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
&bvec, 1, len);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
break;
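
The same idea closes out the series on the send side: sock_sendmsg() consumes from msg_iter, so partial sends no longer require re-slicing a kvec array. A trimmed sketch of the new smb_send_kvec() shape, with the retry/backoff logic and the per-page bio_vec pass omitted; iov_iter flags as used in this diff:

static int example_send_all(struct socket *sock, struct kvec *iov,
			    unsigned int n_vec, size_t size, size_t *sent)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int rc;

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	*sent = 0;
	while (msg_data_left(&msg)) {
		rc = sock_sendmsg(sock, &msg);	/* advances msg.msg_iter */
		if (rc < 0)
			return rc;
		if (rc == 0)
			return -EAGAIN;	/* the real code sleeps and retries here */
		*sent += rc;
	}
	return 0;
}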