Diffstat (limited to 'fs/ubifs/io.c')
-rw-r--r--   fs/ubifs/io.c   47
1 file changed, 41 insertions, 6 deletions
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index fab29f899f91..0ad819c904df 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -331,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
 {
 	uint32_t crc;
 
-	ubifs_assert(c, pad >= 0 && !(pad & 7));
+	ubifs_assert(c, pad >= 0);
 
 	if (pad >= UBIFS_PAD_NODE_SZ) {
 		struct ubifs_ch *ch = buf;
@@ -728,6 +728,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 		 * write-buffer.
 		 */
 		memcpy(wbuf->buf + wbuf->used, buf, len);
+		if (aligned_len > len) {
+			ubifs_assert(c, aligned_len - len < 8);
+			ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
+		}
 
 		if (aligned_len == wbuf->avail) {
 			dbg_io("flush jhead %s wbuf to LEB %d:%d",
@@ -806,27 +810,58 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 	 */
 	n = aligned_len >> c->max_write_shift;
 	if (n) {
-		n <<= c->max_write_shift;
+		int m = n - 1;
+
 		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
 		       wbuf->offs);
-		err = ubifs_leb_write(c, wbuf->lnum, buf + written,
-				      wbuf->offs, n);
+
+		if (m) {
+			/* '(n-1)<<c->max_write_shift < len' is always true. */
+			m <<= c->max_write_shift;
+			err = ubifs_leb_write(c, wbuf->lnum, buf + written,
+					      wbuf->offs, m);
+			if (err)
+				goto out;
+			wbuf->offs += m;
+			aligned_len -= m;
+			len -= m;
+			written += m;
+		}
+
+		/*
+		 * The non-written len of buf may be less than 'n' because
+		 * parameter 'len' is not 8 bytes aligned, so here we read
+		 * min(len, n) bytes from buf.
+		 */
+		n = 1 << c->max_write_shift;
+		memcpy(wbuf->buf, buf + written, min(len, n));
+		if (n > len) {
+			ubifs_assert(c, n - len < 8);
+			ubifs_pad(c, wbuf->buf + len, n - len);
+		}
+
+		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
 		if (err)
 			goto out;
 		wbuf->offs += n;
 		aligned_len -= n;
-		len -= n;
+		len -= min(len, n);
 		written += n;
 	}
 
 	spin_lock(&wbuf->lock);
-	if (aligned_len)
+	if (aligned_len) {
 		/*
 		 * And now we have what's left and what does not take whole
 		 * max. write unit, so write it to the write-buffer and we are
 		 * done.
 		 */
 		memcpy(wbuf->buf, buf + written, len);
+		if (aligned_len > len) {
+			ubifs_assert(c, aligned_len - len < 8);
+			ubifs_pad(c, wbuf->buf + len, aligned_len - len);
+		}
+	}
 
 	if (c->leb_size - wbuf->offs >= c->max_write_size)
 		wbuf->size = c->max_write_size;
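
The common thread in all three hunks is that 'len' (the real node length) may be up to 7 bytes short of 'aligned_len' (len rounded up to 8 bytes), and that gap must now be filled by ubifs_pad() before the write-buffer contents reach flash, instead of flushing whatever stale bytes happen to follow the copied node. The fragment below is only an illustration of that arithmetic, not UBIFS code: ALIGN8 stands in for the kernel's ALIGN(len, 8), pad_tail() for ubifs_pad(), and the 0xCE filler stands in for UBIFS's padding byte.

#include <stdio.h>
#include <string.h>

/* Round len up to the next multiple of 8, like the kernel's ALIGN(len, 8). */
#define ALIGN8(len) (((len) + 7) & ~7)

/*
 * Stand-in for ubifs_pad(): fill the unused tail with a fixed filler byte so
 * no stale memory is written out. The real helper can also emit a whole
 * padding node when the gap is large enough.
 */
static void pad_tail(unsigned char *buf, int pad)
{
	memset(buf, 0xCE, pad);
}

int main(void)
{
	unsigned char wbuf[32];
	unsigned char node[9] = "node-123";	/* 9 bytes, not 8-byte aligned */
	int len = sizeof(node);
	int aligned_len = ALIGN8(len);		/* 16 */

	memcpy(wbuf, node, len);
	if (aligned_len > len)			/* pad the 1..7 byte tail */
		pad_tail(wbuf + len, aligned_len - len);

	printf("len=%d aligned_len=%d pad=%d\n", len, aligned_len, aligned_len - len);
	return 0;
}

The same invariant is what makes the split in the last hunk safe: because aligned_len - len < 8 and the max write unit is a multiple of 8, the first (n - 1) max-write units consist entirely of caller-supplied bytes and can be written straight from 'buf', while only the final unit has to be staged through the write-buffer and padded (the "'(n-1)<<c->max_write_shift < len' is always true" remark in the diff).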