Diffstat (limited to 'fs/yaffs2/yaffs_guts.c')
-rw-r--r--	fs/yaffs2/yaffs_guts.c	5164
1 file changed, 5164 insertions, 0 deletions
diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 00000000000..f4ae9deed72
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5164 @@
1/*
2 * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
3 *
4 * Copyright (C) 2002-2010 Aleph One Ltd.
5 * for Toby Churchill Ltd and Brightstar Engineering
6 *
7 * Created by Charles Manning <charles@aleph1.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include "yportenv.h"
15#include "yaffs_trace.h"
16
17#include "yaffs_guts.h"
18#include "yaffs_tagsvalidity.h"
19#include "yaffs_getblockinfo.h"
20
21#include "yaffs_tagscompat.h"
22
23#include "yaffs_nand.h"
24
25#include "yaffs_yaffs1.h"
26#include "yaffs_yaffs2.h"
27#include "yaffs_bitmap.h"
28#include "yaffs_verify.h"
29
30#include "yaffs_nand.h"
31#include "yaffs_packedtags2.h"
32
33#include "yaffs_nameval.h"
34#include "yaffs_allocator.h"
35
36#include "yaffs_attribs.h"
37
38/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
39#define YAFFS_GC_GOOD_ENOUGH 2
40#define YAFFS_GC_PASSIVE_THRESHOLD 4
41
42#include "yaffs_ecc.h"
43
44/* Forward declarations */
45
46static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
47 const u8 * buffer, int n_bytes, int use_reserve);
48
49
50
51/* Function to calculate chunk and offset */
52
53static void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
54 int *chunk_out, u32 * offset_out)
55{
56 int chunk;
57 u32 offset;
58
59 chunk = (u32) (addr >> dev->chunk_shift);
60
61 if (dev->chunk_div == 1) {
62 /* easy power of 2 case */
63 offset = (u32) (addr & dev->chunk_mask);
64 } else {
65 /* Non power-of-2 case */
66
67 loff_t chunk_base;
68
69 chunk /= dev->chunk_div;
70
71 chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
72 offset = (u32) (addr - chunk_base);
73 }
74
75 *chunk_out = chunk;
76 *offset_out = offset;
77}
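/*
 * Worked example (illustrative values only, not from any particular board):
 * with 2048-byte chunks (chunk_shift == 11, chunk_div == 1, chunk_mask == 0x7ff)
 * an address of 5000 maps to chunk 5000 >> 11 == 2 and offset 5000 & 0x7ff == 904.
 */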
78
79/* Function to return the number of shifts for a power of 2 greater than or
80 * equal to the given number
81 * Note we don't try to cater for all possible numbers and this does not have to
82 * be hellishly efficient.
83 */
84
85static u32 calc_shifts_ceiling(u32 x)
86{
87 int extra_bits;
88 int shifts;
89
90 shifts = extra_bits = 0;
91
92 while (x > 1) {
93 if (x & 1)
94 extra_bits++;
95 x >>= 1;
96 shifts++;
97 }
98
99 if (extra_bits)
100 shifts++;
101
102 return shifts;
103}
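/*
 * Illustrative traces (easily checked by hand):
 *   calc_shifts_ceiling(2048) == 11   (2048 is already 1 << 11)
 *   calc_shifts_ceiling(2100) == 12   (the extra set bits force rounding up to 1 << 12)
 */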
104
105/* Function to return the number of shifts to get a 1 in bit 0
106 */
107
108static u32 calc_shifts(u32 x)
109{
110 u32 shifts;
111
112 shifts = 0;
113
114 if (!x)
115 return 0;
116
117 while (!(x & 1)) {
118 x >>= 1;
119 shifts++;
120 }
121
122 return shifts;
123}
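/*
 * For example, calc_shifts(0x800) == 11 and calc_shifts(12) == 2 (12 is 0b1100,
 * two trailing zero bits); calc_shifts(0) is defined to return 0.
 */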
124
125/*
126 * Temporary buffer manipulations.
127 */
128
129static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
130{
131 int i;
132 u8 *buf = (u8 *) 1;
133
134 memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
135
136 for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
137 dev->temp_buffer[i].line = 0; /* not in use */
138 dev->temp_buffer[i].buffer = buf =
139 kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
140 }
141
142 return buf ? YAFFS_OK : YAFFS_FAIL;
143}
144
145u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev, int line_no)
146{
147 int i, j;
148
149 dev->temp_in_use++;
150 if (dev->temp_in_use > dev->max_temp)
151 dev->max_temp = dev->temp_in_use;
152
153 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
154 if (dev->temp_buffer[i].line == 0) {
155 dev->temp_buffer[i].line = line_no;
156 if ((i + 1) > dev->max_temp) {
157 dev->max_temp = i + 1;
158 for (j = 0; j <= i; j++)
159 dev->temp_buffer[j].max_line =
160 dev->temp_buffer[j].line;
161 }
162
163 return dev->temp_buffer[i].buffer;
164 }
165 }
166
167 yaffs_trace(YAFFS_TRACE_BUFFERS,
168 "Out of temp buffers at line %d, other held by lines:",
169 line_no);
170 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
171 yaffs_trace(YAFFS_TRACE_BUFFERS," %d", dev->temp_buffer[i].line);
172
173 /*
174 * If we got here then we have to allocate an unmanaged one
175 * This is not good.
176 */
177
178 dev->unmanaged_buffer_allocs++;
179 return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
180
181}
182
183void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no)
184{
185 int i;
186
187 dev->temp_in_use--;
188
189 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
190 if (dev->temp_buffer[i].buffer == buffer) {
191 dev->temp_buffer[i].line = 0;
192 return;
193 }
194 }
195
196 if (buffer) {
197 /* assume it is an unmanaged one. */
198 yaffs_trace(YAFFS_TRACE_BUFFERS,
199 "Releasing unmanaged temp buffer in line %d",
200 line_no);
201 kfree(buffer);
202 dev->unmanaged_buffer_deallocs++;
203 }
204
205}
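/*
 * Typical usage pattern, as seen throughout this file (sketch only):
 *
 *	u8 *buf = yaffs_get_temp_buffer(dev, __LINE__);
 *	... use buf as scratch space for one chunk ...
 *	yaffs_release_temp_buffer(dev, buf, __LINE__);
 *
 * The line number lets the trace above report which callers are holding
 * buffers when the pool runs dry.
 */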
206
207/*
208 * Determine if we have a managed buffer.
209 */
210int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 * buffer)
211{
212 int i;
213
214 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
215 if (dev->temp_buffer[i].buffer == buffer)
216 return 1;
217 }
218
219 for (i = 0; i < dev->param.n_caches; i++) {
220 if (dev->cache[i].data == buffer)
221 return 1;
222 }
223
224 if (buffer == dev->checkpt_buffer)
225 return 1;
226
227 yaffs_trace(YAFFS_TRACE_ALWAYS,
228 "yaffs: unmaged buffer detected.");
229 return 0;
230}
231
232/*
233 * Functions for robustisizing TODO
234 *
235 */
236
237static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
238 const u8 * data,
239 const struct yaffs_ext_tags *tags)
240{
241 dev = dev;
242 nand_chunk = nand_chunk;
243 data = data;
244 tags = tags;
245}
246
247static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
248 const struct yaffs_ext_tags *tags)
249{
250 dev = dev;
251 nand_chunk = nand_chunk;
252 tags = tags;
253}
254
255void yaffs_handle_chunk_error(struct yaffs_dev *dev,
256 struct yaffs_block_info *bi)
257{
258 if (!bi->gc_prioritise) {
259 bi->gc_prioritise = 1;
260 dev->has_pending_prioritised_gc = 1;
261 bi->chunk_error_strikes++;
262
263 if (bi->chunk_error_strikes > 3) {
 264			bi->needs_retiring = 1; /* Too many strikes, so retire this block */
265 yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs: Block struck out");
266
267 }
268 }
269}
270
271static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
272 int erased_ok)
273{
274 int flash_block = nand_chunk / dev->param.chunks_per_block;
275 struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
276
277 yaffs_handle_chunk_error(dev, bi);
278
279 if (erased_ok) {
280 /* Was an actual write failure, so mark the block for retirement */
281 bi->needs_retiring = 1;
282 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
283 "**>> Block %d needs retiring", flash_block);
284 }
285
286 /* Delete the chunk */
287 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
288 yaffs_skip_rest_of_block(dev);
289}
290
291/*
292 * Verification code
293 */
294
295/*
296 * Simple hash function. Needs to have a reasonable spread
297 */
298
299static inline int yaffs_hash_fn(int n)
300{
301 n = abs(n);
302 return n % YAFFS_NOBJECT_BUCKETS;
303}
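/*
 * Example (assuming YAFFS_NOBJECT_BUCKETS is 256): yaffs_hash_fn(1000) == 232,
 * and yaffs_hash_fn(-1000) == 232 as well, since the id is made positive first.
 */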
304
305/*
306 * Access functions to useful fake objects.
307 * Note that root might have a presence in NAND if permissions are set.
308 */
309
310struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
311{
312 return dev->root_dir;
313}
314
315struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
316{
317 return dev->lost_n_found;
318}
319
320/*
321 * Erased NAND checking functions
322 */
323
324int yaffs_check_ff(u8 * buffer, int n_bytes)
325{
326 /* Horrible, slow implementation */
327 while (n_bytes--) {
328 if (*buffer != 0xFF)
329 return 0;
330 buffer++;
331 }
332 return 1;
333}
334
335static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
336{
337 int retval = YAFFS_OK;
338 u8 *data = yaffs_get_temp_buffer(dev, __LINE__);
339 struct yaffs_ext_tags tags;
340 int result;
341
342 result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
343
344 if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
345 retval = YAFFS_FAIL;
346
347 if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
348 tags.chunk_used) {
349 yaffs_trace(YAFFS_TRACE_NANDACCESS, "Chunk %d not erased", nand_chunk);
350 retval = YAFFS_FAIL;
351 }
352
353 yaffs_release_temp_buffer(dev, data, __LINE__);
354
355 return retval;
356
357}
358
359static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
360 int nand_chunk,
361 const u8 * data,
362 struct yaffs_ext_tags *tags)
363{
364 int retval = YAFFS_OK;
365 struct yaffs_ext_tags temp_tags;
366 u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
367 int result;
368
369 result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
370 if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
371 temp_tags.obj_id != tags->obj_id ||
372 temp_tags.chunk_id != tags->chunk_id ||
373 temp_tags.n_bytes != tags->n_bytes)
374 retval = YAFFS_FAIL;
375
376 yaffs_release_temp_buffer(dev, buffer, __LINE__);
377
378 return retval;
379}
380
381
382int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
383{
384 int reserved_chunks;
385 int reserved_blocks = dev->param.n_reserved_blocks;
386 int checkpt_blocks;
387
388 checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
389
390 reserved_chunks =
391 ((reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block);
392
393 return (dev->n_free_chunks > (reserved_chunks + n_chunks));
394}
395
396static int yaffs_find_alloc_block(struct yaffs_dev *dev)
397{
398 int i;
399
400 struct yaffs_block_info *bi;
401
402 if (dev->n_erased_blocks < 1) {
403 /* Hoosterman we've got a problem.
404 * Can't get space to gc
405 */
406 yaffs_trace(YAFFS_TRACE_ERROR,
407 "yaffs tragedy: no more erased blocks" );
408
409 return -1;
410 }
411
412 /* Find an empty block. */
413
414 for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
415 dev->alloc_block_finder++;
416 if (dev->alloc_block_finder < dev->internal_start_block
417 || dev->alloc_block_finder > dev->internal_end_block) {
418 dev->alloc_block_finder = dev->internal_start_block;
419 }
420
421 bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
422
423 if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
424 bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
425 dev->seq_number++;
426 bi->seq_number = dev->seq_number;
427 dev->n_erased_blocks--;
428 yaffs_trace(YAFFS_TRACE_ALLOCATE,
429 "Allocated block %d, seq %d, %d left" ,
430 dev->alloc_block_finder, dev->seq_number,
431 dev->n_erased_blocks);
432 return dev->alloc_block_finder;
433 }
434 }
435
436 yaffs_trace(YAFFS_TRACE_ALWAYS,
437 "yaffs tragedy: no more erased blocks, but there should have been %d",
438 dev->n_erased_blocks);
439
440 return -1;
441}
442
443static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
444 struct yaffs_block_info **block_ptr)
445{
446 int ret_val;
447 struct yaffs_block_info *bi;
448
449 if (dev->alloc_block < 0) {
450 /* Get next block to allocate off */
451 dev->alloc_block = yaffs_find_alloc_block(dev);
452 dev->alloc_page = 0;
453 }
454
455 if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
456 /* Not enough space to allocate unless we're allowed to use the reserve. */
457 return -1;
458 }
459
460 if (dev->n_erased_blocks < dev->param.n_reserved_blocks
461 && dev->alloc_page == 0)
462 yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
463
464 /* Next page please.... */
465 if (dev->alloc_block >= 0) {
466 bi = yaffs_get_block_info(dev, dev->alloc_block);
467
468 ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
469 dev->alloc_page;
470 bi->pages_in_use++;
471 yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
472
473 dev->alloc_page++;
474
475 dev->n_free_chunks--;
476
477 /* If the block is full set the state to full */
478 if (dev->alloc_page >= dev->param.chunks_per_block) {
479 bi->block_state = YAFFS_BLOCK_STATE_FULL;
480 dev->alloc_block = -1;
481 }
482
483 if (block_ptr)
484 *block_ptr = bi;
485
486 return ret_val;
487 }
488
489 yaffs_trace(YAFFS_TRACE_ERROR, "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" );
490
491 return -1;
492}
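/*
 * The returned value is a flat NAND chunk number: for example, with 64 chunks
 * per block, allocating page 3 of block 10 yields chunk 10 * 64 + 3 == 643,
 * the same scheme yaffs_handle_chunk_wr_error() above uses to recover the
 * block number by dividing by chunks_per_block.
 */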
493
494static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
495{
496 int n;
497
498 n = dev->n_erased_blocks * dev->param.chunks_per_block;
499
500 if (dev->alloc_block > 0)
501 n += (dev->param.chunks_per_block - dev->alloc_page);
502
503 return n;
504
505}
506
507/*
508 * yaffs_skip_rest_of_block() skips over the rest of the allocation block
509 * if we don't want to write to it.
510 */
511void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
512{
513 if (dev->alloc_block > 0) {
514 struct yaffs_block_info *bi =
515 yaffs_get_block_info(dev, dev->alloc_block);
516 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
517 bi->block_state = YAFFS_BLOCK_STATE_FULL;
518 dev->alloc_block = -1;
519 }
520 }
521}
522
523static int yaffs_write_new_chunk(struct yaffs_dev *dev,
524 const u8 * data,
525 struct yaffs_ext_tags *tags, int use_reserver)
526{
527 int attempts = 0;
528 int write_ok = 0;
529 int chunk;
530
531 yaffs2_checkpt_invalidate(dev);
532
533 do {
534 struct yaffs_block_info *bi = 0;
535 int erased_ok = 0;
536
537 chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
538 if (chunk < 0) {
539 /* no space */
540 break;
541 }
542
543 /* First check this chunk is erased, if it needs
544 * checking. The checking policy (unless forced
545 * always on) is as follows:
546 *
547 * Check the first page we try to write in a block.
548 * If the check passes then we don't need to check any
549 * more. If the check fails, we check again...
550 * If the block has been erased, we don't need to check.
551 *
552 * However, if the block has been prioritised for gc,
553 * then we think there might be something odd about
554 * this block and stop using it.
555 *
556 * Rationale: We should only ever see chunks that have
557 * not been erased if there was a partially written
558 * chunk due to power loss. This checking policy should
559 * catch that case with very few checks and thus save a
560 * lot of checks that are most likely not needed.
561 *
562 * Mods to the above
563 * If an erase check fails or the write fails we skip the
564 * rest of the block.
565 */
566
567 /* let's give it a try */
568 attempts++;
569
570 if (dev->param.always_check_erased)
571 bi->skip_erased_check = 0;
572
573 if (!bi->skip_erased_check) {
574 erased_ok = yaffs_check_chunk_erased(dev, chunk);
575 if (erased_ok != YAFFS_OK) {
576 yaffs_trace(YAFFS_TRACE_ERROR,
577 "**>> yaffs chunk %d was not erased",
578 chunk);
579
580 /* If not erased, delete this one,
581 * skip rest of block and
582 * try another chunk */
583 yaffs_chunk_del(dev, chunk, 1, __LINE__);
584 yaffs_skip_rest_of_block(dev);
585 continue;
586 }
587 }
588
589 write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
590
591 if (!bi->skip_erased_check)
592 write_ok =
593 yaffs_verify_chunk_written(dev, chunk, data, tags);
594
595 if (write_ok != YAFFS_OK) {
596 /* Clean up aborted write, skip to next block and
597 * try another chunk */
598 yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
599 continue;
600 }
601
602 bi->skip_erased_check = 1;
603
604 /* Copy the data into the robustification buffer */
605 yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
606
607 } while (write_ok != YAFFS_OK &&
608 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
609
610 if (!write_ok)
611 chunk = -1;
612
613 if (attempts > 1) {
614 yaffs_trace(YAFFS_TRACE_ERROR,
615 "**>> yaffs write required %d attempts",
616 attempts);
617 dev->n_retired_writes += (attempts - 1);
618 }
619
620 return chunk;
621}
622
623/*
624 * Block retiring for handling a broken block.
625 */
626
627static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
628{
629 struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
630
631 yaffs2_checkpt_invalidate(dev);
632
633 yaffs2_clear_oldest_dirty_seq(dev, bi);
634
635 if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
636 if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
637 yaffs_trace(YAFFS_TRACE_ALWAYS,
638 "yaffs: Failed to mark bad and erase block %d",
639 flash_block);
640 } else {
641 struct yaffs_ext_tags tags;
642 int chunk_id =
643 flash_block * dev->param.chunks_per_block;
644
645 u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
646
647 memset(buffer, 0xff, dev->data_bytes_per_chunk);
648 yaffs_init_tags(&tags);
649 tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
650 if (dev->param.write_chunk_tags_fn(dev, chunk_id -
651 dev->chunk_offset,
652 buffer,
653 &tags) != YAFFS_OK)
654 yaffs_trace(YAFFS_TRACE_ALWAYS,
655 "yaffs: Failed to write bad block marker to block %d",
656 flash_block);
657
658 yaffs_release_temp_buffer(dev, buffer, __LINE__);
659 }
660 }
661
662 bi->block_state = YAFFS_BLOCK_STATE_DEAD;
663 bi->gc_prioritise = 0;
664 bi->needs_retiring = 0;
665
666 dev->n_retired_blocks++;
667}
668
669/*---------------- Name handling functions ------------*/
670
671static u16 yaffs_calc_name_sum(const YCHAR * name)
672{
673 u16 sum = 0;
674 u16 i = 1;
675
676 const YUCHAR *bname = (const YUCHAR *)name;
677 if (bname) {
678 while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH / 2))) {
679
680 /* 0x1f mask is case insensitive */
681 sum += ((*bname) & 0x1f) * i;
682 i++;
683 bname++;
684 }
685 }
686 return sum;
687}
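/*
 * Example: for the name "Ab" the sum is ('A' & 0x1f) * 1 + ('b' & 0x1f) * 2
 * == 1 + 4 == 5; "ab" and "AB" give the same value, which is what makes the
 * sum usable as a cheap, case-insensitive match hint.
 */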
688
689void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
690{
691#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
692 memset(obj->short_name, 0, sizeof(obj->short_name));
693 if (name &&
694 strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
695 YAFFS_SHORT_NAME_LENGTH)
696 strcpy(obj->short_name, name);
697 else
698 obj->short_name[0] = _Y('\0');
699#endif
700 obj->sum = yaffs_calc_name_sum(name);
701}
702
703void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
704 const struct yaffs_obj_hdr *oh)
705{
706#ifdef CONFIG_YAFFS_AUTO_UNICODE
707 YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
708 memset(tmp_name, 0, sizeof(tmp_name));
709 yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
710 YAFFS_MAX_NAME_LENGTH + 1);
711 yaffs_set_obj_name(obj, tmp_name);
712#else
713 yaffs_set_obj_name(obj, oh->name);
714#endif
715}
716
717/*-------------------- TNODES -------------------
718
719 * List of spare tnodes
720 * The list is hooked together using the first pointer
721 * in the tnode.
722 */
723
724struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
725{
726 struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
727 if (tn) {
728 memset(tn, 0, dev->tnode_size);
729 dev->n_tnodes++;
730 }
731
732 dev->checkpoint_blocks_required = 0; /* force recalculation */
733
734 return tn;
735}
736
737/* FreeTnode frees up a tnode and puts it back on the free list */
738static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
739{
740 yaffs_free_raw_tnode(dev, tn);
741 dev->n_tnodes--;
742 dev->checkpoint_blocks_required = 0; /* force recalculation */
743}
744
745static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
746{
747 yaffs_deinit_raw_tnodes_and_objs(dev);
748 dev->n_obj = 0;
749 dev->n_tnodes = 0;
750}
751
752void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
753 unsigned pos, unsigned val)
754{
755 u32 *map = (u32 *) tn;
756 u32 bit_in_map;
757 u32 bit_in_word;
758 u32 word_in_map;
759 u32 mask;
760
761 pos &= YAFFS_TNODES_LEVEL0_MASK;
762 val >>= dev->chunk_grp_bits;
763
764 bit_in_map = pos * dev->tnode_width;
765 word_in_map = bit_in_map / 32;
766 bit_in_word = bit_in_map & (32 - 1);
767
768 mask = dev->tnode_mask << bit_in_word;
769
770 map[word_in_map] &= ~mask;
771 map[word_in_map] |= (mask & (val << bit_in_word));
772
773 if (dev->tnode_width > (32 - bit_in_word)) {
774 bit_in_word = (32 - bit_in_word);
 775		word_in_map++;
776 mask =
777 dev->tnode_mask >> ( /*dev->tnode_width - */ bit_in_word);
778 map[word_in_map] &= ~mask;
779 map[word_in_map] |= (mask & (val >> bit_in_word));
780 }
781}
782
783u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
784 unsigned pos)
785{
786 u32 *map = (u32 *) tn;
787 u32 bit_in_map;
788 u32 bit_in_word;
789 u32 word_in_map;
790 u32 val;
791
792 pos &= YAFFS_TNODES_LEVEL0_MASK;
793
794 bit_in_map = pos * dev->tnode_width;
795 word_in_map = bit_in_map / 32;
796 bit_in_word = bit_in_map & (32 - 1);
797
798 val = map[word_in_map] >> bit_in_word;
799
800 if (dev->tnode_width > (32 - bit_in_word)) {
801 bit_in_word = (32 - bit_in_word);
 802		word_in_map++;
803 val |= (map[word_in_map] << bit_in_word);
804 }
805
806 val &= dev->tnode_mask;
807 val <<= dev->chunk_grp_bits;
808
809 return val;
810}
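/*
 * Layout sketch: a level-0 tnode is a packed array of tnode_width-bit fields.
 * For illustration, suppose tnode_width is 18: entry 3 starts at bit 54, i.e.
 * bit 22 of word 1, and since 18 > (32 - 22) the top 8 bits of the value
 * spill into word 2; that is the straddling case both functions above handle.
 */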
811
812/* ------------------- End of individual tnode manipulation -----------------*/
813
814/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
815 * The look up tree is represented by the top tnode and the number of top_level
816 * in the tree. 0 means only the level 0 tnode is in the tree.
817 */
818
819/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
820struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
821 struct yaffs_file_var *file_struct,
822 u32 chunk_id)
823{
824 struct yaffs_tnode *tn = file_struct->top;
825 u32 i;
826 int required_depth;
827 int level = file_struct->top_level;
828
829 dev = dev;
830
831 /* Check sane level and chunk Id */
832 if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
833 return NULL;
834
835 if (chunk_id > YAFFS_MAX_CHUNK_ID)
836 return NULL;
837
838 /* First check we're tall enough (ie enough top_level) */
839
840 i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
841 required_depth = 0;
842 while (i) {
843 i >>= YAFFS_TNODES_INTERNAL_BITS;
844 required_depth++;
845 }
846
847 if (required_depth > file_struct->top_level)
848 return NULL; /* Not tall enough, so we can't find it */
849
850 /* Traverse down to level 0 */
851 while (level > 0 && tn) {
852 tn = tn->internal[(chunk_id >>
853 (YAFFS_TNODES_LEVEL0_BITS +
854 (level - 1) *
855 YAFFS_TNODES_INTERNAL_BITS)) &
856 YAFFS_TNODES_INTERNAL_MASK];
857 level--;
858 }
859
860 return tn;
861}
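/*
 * Depth example (assuming the usual YAFFS_TNODES_LEVEL0_BITS == 4 and
 * YAFFS_TNODES_INTERNAL_BITS == 3): chunk_id 291 gives required_depth == 2,
 * because 291 >> 4 == 18 and two further 3-bit shifts are needed to reach
 * zero; a file whose top_level is only 1 therefore cannot hold it.
 */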
862
863/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
864 * This happens in two steps:
865 * 1. If the tree isn't tall enough, then make it taller.
866 * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
867 *
868 * Used when modifying the tree.
869 *
 870 * If the passed_tn argument is NULL then a fresh tnode will be added, otherwise the specified tnode
 871 * will be plugged into the tree.
872 */
873
874struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
875 struct yaffs_file_var *file_struct,
876 u32 chunk_id,
877 struct yaffs_tnode *passed_tn)
878{
879 int required_depth;
880 int i;
881 int l;
882 struct yaffs_tnode *tn;
883
884 u32 x;
885
886 /* Check sane level and page Id */
887 if (file_struct->top_level < 0
888 || file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
889 return NULL;
890
891 if (chunk_id > YAFFS_MAX_CHUNK_ID)
892 return NULL;
893
894 /* First check we're tall enough (ie enough top_level) */
895
896 x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
897 required_depth = 0;
898 while (x) {
899 x >>= YAFFS_TNODES_INTERNAL_BITS;
900 required_depth++;
901 }
902
903 if (required_depth > file_struct->top_level) {
904 /* Not tall enough, gotta make the tree taller */
905 for (i = file_struct->top_level; i < required_depth; i++) {
906
907 tn = yaffs_get_tnode(dev);
908
909 if (tn) {
910 tn->internal[0] = file_struct->top;
911 file_struct->top = tn;
912 file_struct->top_level++;
913 } else {
914 yaffs_trace(YAFFS_TRACE_ERROR, "yaffs: no more tnodes");
915 return NULL;
916 }
917 }
918 }
919
920 /* Traverse down to level 0, adding anything we need */
921
922 l = file_struct->top_level;
923 tn = file_struct->top;
924
925 if (l > 0) {
926 while (l > 0 && tn) {
927 x = (chunk_id >>
928 (YAFFS_TNODES_LEVEL0_BITS +
929 (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
930 YAFFS_TNODES_INTERNAL_MASK;
931
932 if ((l > 1) && !tn->internal[x]) {
933 /* Add missing non-level-zero tnode */
934 tn->internal[x] = yaffs_get_tnode(dev);
935 if (!tn->internal[x])
936 return NULL;
937 } else if (l == 1) {
938 /* Looking from level 1 at level 0 */
939 if (passed_tn) {
940 /* If we already have one, then release it. */
941 if (tn->internal[x])
942 yaffs_free_tnode(dev,
943 tn->
944 internal[x]);
945 tn->internal[x] = passed_tn;
946
947 } else if (!tn->internal[x]) {
948 /* Don't have one, none passed in */
949 tn->internal[x] = yaffs_get_tnode(dev);
950 if (!tn->internal[x])
951 return NULL;
952 }
953 }
954
955 tn = tn->internal[x];
956 l--;
957 }
958 } else {
959 /* top is level 0 */
960 if (passed_tn) {
961 memcpy(tn, passed_tn,
962 (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
963 yaffs_free_tnode(dev, passed_tn);
964 }
965 }
966
967 return tn;
968}
969
970static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
971 int chunk_obj)
972{
973 return (tags->chunk_id == chunk_obj &&
974 tags->obj_id == obj_id && !tags->is_deleted) ? 1 : 0;
975
976}
977
978static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
979 struct yaffs_ext_tags *tags, int obj_id,
980 int inode_chunk)
981{
982 int j;
983
984 for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
985 if (yaffs_check_chunk_bit
986 (dev, the_chunk / dev->param.chunks_per_block,
987 the_chunk % dev->param.chunks_per_block)) {
988
989 if (dev->chunk_grp_size == 1)
990 return the_chunk;
991 else {
992 yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
993 tags);
994 if (yaffs_tags_match(tags, obj_id, inode_chunk)) {
995 /* found it; */
996 return the_chunk;
997 }
998 }
999 }
1000 the_chunk++;
1001 }
1002 return -1;
1003}
1004
1005static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1006 struct yaffs_ext_tags *tags)
1007{
 1008	/* Get the Tnode, then get the level 0 chunk offset */
1009 struct yaffs_tnode *tn;
1010 int the_chunk = -1;
1011 struct yaffs_ext_tags local_tags;
1012 int ret_val = -1;
1013
1014 struct yaffs_dev *dev = in->my_dev;
1015
1016 if (!tags) {
1017 /* Passed a NULL, so use our own tags space */
1018 tags = &local_tags;
1019 }
1020
1021 tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1022
1023 if (tn) {
1024 the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1025
1026 ret_val =
1027 yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1028 inode_chunk);
1029 }
1030 return ret_val;
1031}
1032
1033static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
1034 struct yaffs_ext_tags *tags)
1035{
 1036	/* Get the Tnode, then get the level 0 chunk offset */
1037 struct yaffs_tnode *tn;
1038 int the_chunk = -1;
1039 struct yaffs_ext_tags local_tags;
1040
1041 struct yaffs_dev *dev = in->my_dev;
1042 int ret_val = -1;
1043
1044 if (!tags) {
1045 /* Passed a NULL, so use our own tags space */
1046 tags = &local_tags;
1047 }
1048
1049 tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1050
1051 if (tn) {
1052
1053 the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1054
1055 ret_val =
1056 yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1057 inode_chunk);
1058
1059 /* Delete the entry in the filestructure (if found) */
1060 if (ret_val != -1)
1061 yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
1062 }
1063
1064 return ret_val;
1065}
1066
1067int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1068 int nand_chunk, int in_scan)
1069{
1070 /* NB in_scan is zero unless scanning.
1071 * For forward scanning, in_scan is > 0;
1072 * for backward scanning in_scan is < 0
1073 *
1074 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
1075 */
1076
1077 struct yaffs_tnode *tn;
1078 struct yaffs_dev *dev = in->my_dev;
1079 int existing_cunk;
1080 struct yaffs_ext_tags existing_tags;
1081 struct yaffs_ext_tags new_tags;
1082 unsigned existing_serial, new_serial;
1083
1084 if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
 1085		/* Just ignore an attempt at putting a chunk into a non-file during scanning.
 1086		 * If it is not during scanning then something went wrong!
1087 */
1088 if (!in_scan) {
1089 yaffs_trace(YAFFS_TRACE_ERROR,
1090 "yaffs tragedy:attempt to put data chunk into a non-file"
1091 );
1092 YBUG();
1093 }
1094
1095 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1096 return YAFFS_OK;
1097 }
1098
1099 tn = yaffs_add_find_tnode_0(dev,
1100 &in->variant.file_variant,
1101 inode_chunk, NULL);
1102 if (!tn)
1103 return YAFFS_FAIL;
1104
1105 if (!nand_chunk)
1106 /* Dummy insert, bail now */
1107 return YAFFS_OK;
1108
1109 existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
1110
1111 if (in_scan != 0) {
1112 /* If we're scanning then we need to test for duplicates
1113 * NB This does not need to be efficient since it should only ever
1114 * happen when the power fails during a write, then only one
1115 * chunk should ever be affected.
1116 *
1117 * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
1118 * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
1119 */
1120
1121 if (existing_cunk > 0) {
1122 /* NB Right now existing chunk will not be real chunk_id if the chunk group size > 1
1123 * thus we have to do a FindChunkInFile to get the real chunk id.
1124 *
1125 * We have a duplicate now we need to decide which one to use:
1126 *
1127 * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
1128 * Forward scanning YAFFS2: The new one is what we use, dump the old one.
1129 * YAFFS1: Get both sets of tags and compare serial numbers.
1130 */
1131
1132 if (in_scan > 0) {
1133 /* Only do this for forward scanning */
1134 yaffs_rd_chunk_tags_nand(dev,
1135 nand_chunk,
1136 NULL, &new_tags);
1137
1138 /* Do a proper find */
1139 existing_cunk =
1140 yaffs_find_chunk_in_file(in, inode_chunk,
1141 &existing_tags);
1142 }
1143
1144 if (existing_cunk <= 0) {
1145 /*Hoosterman - how did this happen? */
1146
1147 yaffs_trace(YAFFS_TRACE_ERROR,
1148 "yaffs tragedy: existing chunk < 0 in scan"
1149 );
1150
1151 }
1152
1153 /* NB The deleted flags should be false, otherwise the chunks will
1154 * not be loaded during a scan
1155 */
1156
1157 if (in_scan > 0) {
1158 new_serial = new_tags.serial_number;
1159 existing_serial = existing_tags.serial_number;
1160 }
1161
1162 if ((in_scan > 0) &&
1163 (existing_cunk <= 0 ||
1164 ((existing_serial + 1) & 3) == new_serial)) {
1165 /* Forward scanning.
1166 * Use new
1167 * Delete the old one and drop through to update the tnode
1168 */
1169 yaffs_chunk_del(dev, existing_cunk, 1,
1170 __LINE__);
1171 } else {
1172 /* Backward scanning or we want to use the existing one
1173 * Use existing.
1174 * Delete the new one and return early so that the tnode isn't changed
1175 */
1176 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1177 return YAFFS_OK;
1178 }
1179 }
1180
1181 }
1182
1183 if (existing_cunk == 0)
1184 in->n_data_chunks++;
1185
1186 yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
1187
1188 return YAFFS_OK;
1189}
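/*
 * The serial-number comparison above is modulo-4 (a YAFFS1 mechanism, per the
 * comment in the function): for example an existing serial of 3 and a new
 * serial of 0 satisfies ((3 + 1) & 3) == 0, so the newly written copy wins
 * and the stale chunk is deleted.
 */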
1190
1191static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
1192{
1193 struct yaffs_block_info *the_block;
1194 unsigned block_no;
1195
1196 yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
1197
1198 block_no = chunk / dev->param.chunks_per_block;
1199 the_block = yaffs_get_block_info(dev, block_no);
1200 if (the_block) {
1201 the_block->soft_del_pages++;
1202 dev->n_free_chunks++;
1203 yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
1204 }
1205}
1206
1207/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
 1208 * All soft deleting does is increment the block's softdelete count and pull the chunk out
1209 * of the tnode.
1210 * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
1211 */
1212
1213static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
1214 u32 level, int chunk_offset)
1215{
1216 int i;
1217 int the_chunk;
1218 int all_done = 1;
1219 struct yaffs_dev *dev = in->my_dev;
1220
1221 if (tn) {
1222 if (level > 0) {
1223
1224 for (i = YAFFS_NTNODES_INTERNAL - 1; all_done && i >= 0;
1225 i--) {
1226 if (tn->internal[i]) {
1227 all_done =
1228 yaffs_soft_del_worker(in,
1229 tn->internal
1230 [i],
1231 level - 1,
1232 (chunk_offset
1233 <<
1234 YAFFS_TNODES_INTERNAL_BITS)
1235 + i);
1236 if (all_done) {
1237 yaffs_free_tnode(dev,
1238 tn->internal
1239 [i]);
1240 tn->internal[i] = NULL;
1241 } else {
1242 /* Hoosterman... how could this happen? */
1243 }
1244 }
1245 }
1246 return (all_done) ? 1 : 0;
1247 } else if (level == 0) {
1248
1249 for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
1250 the_chunk = yaffs_get_group_base(dev, tn, i);
1251 if (the_chunk) {
1252 /* Note this does not find the real chunk, only the chunk group.
1253 * We make an assumption that a chunk group is not larger than
1254 * a block.
1255 */
1256 yaffs_soft_del_chunk(dev, the_chunk);
1257 yaffs_load_tnode_0(dev, tn, i, 0);
1258 }
1259
1260 }
1261 return 1;
1262
1263 }
1264
1265 }
1266
1267 return 1;
1268
1269}
1270
1271static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
1272{
1273 struct yaffs_dev *dev = obj->my_dev;
1274 struct yaffs_obj *parent;
1275
1276 yaffs_verify_obj_in_dir(obj);
1277 parent = obj->parent;
1278
1279 yaffs_verify_dir(parent);
1280
1281 if (dev && dev->param.remove_obj_fn)
1282 dev->param.remove_obj_fn(obj);
1283
1284 list_del_init(&obj->siblings);
1285 obj->parent = NULL;
1286
1287 yaffs_verify_dir(parent);
1288}
1289
1290void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
1291{
1292 if (!directory) {
1293 yaffs_trace(YAFFS_TRACE_ALWAYS,
1294 "tragedy: Trying to add an object to a null pointer directory"
1295 );
1296 YBUG();
1297 return;
1298 }
1299 if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1300 yaffs_trace(YAFFS_TRACE_ALWAYS,
1301 "tragedy: Trying to add an object to a non-directory"
1302 );
1303 YBUG();
1304 }
1305
1306 if (obj->siblings.prev == NULL) {
1307 /* Not initialised */
1308 YBUG();
1309 }
1310
1311 yaffs_verify_dir(directory);
1312
1313 yaffs_remove_obj_from_dir(obj);
1314
1315 /* Now add it */
1316 list_add(&obj->siblings, &directory->variant.dir_variant.children);
1317 obj->parent = directory;
1318
1319 if (directory == obj->my_dev->unlinked_dir
1320 || directory == obj->my_dev->del_dir) {
1321 obj->unlinked = 1;
1322 obj->my_dev->n_unlinked_files++;
1323 obj->rename_allowed = 0;
1324 }
1325
1326 yaffs_verify_dir(directory);
1327 yaffs_verify_obj_in_dir(obj);
1328}
1329
1330static int yaffs_change_obj_name(struct yaffs_obj *obj,
1331 struct yaffs_obj *new_dir,
1332 const YCHAR * new_name, int force, int shadows)
1333{
1334 int unlink_op;
1335 int del_op;
1336
1337 struct yaffs_obj *existing_target;
1338
1339 if (new_dir == NULL)
1340 new_dir = obj->parent; /* use the old directory */
1341
1342 if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1343 yaffs_trace(YAFFS_TRACE_ALWAYS,
1344 "tragedy: yaffs_change_obj_name: new_dir is not a directory"
1345 );
1346 YBUG();
1347 }
1348
1349 /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
1350 if (obj->my_dev->param.is_yaffs2)
1351 unlink_op = (new_dir == obj->my_dev->unlinked_dir);
1352 else
1353 unlink_op = (new_dir == obj->my_dev->unlinked_dir
1354 && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
1355
1356 del_op = (new_dir == obj->my_dev->del_dir);
1357
1358 existing_target = yaffs_find_by_name(new_dir, new_name);
1359
1360 /* If the object is a file going into the unlinked directory,
1361 * then it is OK to just stuff it in since duplicate names are allowed.
1362 * else only proceed if the new name does not exist and if we're putting
1363 * it into a directory.
1364 */
1365 if ((unlink_op ||
1366 del_op ||
1367 force ||
1368 (shadows > 0) ||
1369 !existing_target) &&
1370 new_dir->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
1371 yaffs_set_obj_name(obj, new_name);
1372 obj->dirty = 1;
1373
1374 yaffs_add_obj_to_dir(new_dir, obj);
1375
1376 if (unlink_op)
1377 obj->unlinked = 1;
1378
1379 /* If it is a deletion then we mark it as a shrink for gc purposes. */
1380 if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >=
1381 0)
1382 return YAFFS_OK;
1383 }
1384
1385 return YAFFS_FAIL;
1386}
1387
1388/*------------------------ Short Operations Cache ----------------------------------------
1389 * In many situations where there is no high level buffering a lot of
1390 * reads might be short sequential reads, and a lot of writes may be short
1391 * sequential writes. eg. scanning/writing a jpeg file.
 1392 * In these cases, a short read/write cache can provide a huge performance
1393 * benefit with dumb-as-a-rock code.
1394 * In Linux, the page cache provides read buffering and the short op cache
1395 * provides write buffering.
1396 *
1397 * There are a limited number (~10) of cache chunks per device so that we don't
1398 * need a very intelligent search.
1399 */
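/*
 * Minimal sketch of the intended flow, using only the helpers defined below
 * (the actual read/write call sites appear later in this file):
 *
 *	struct yaffs_cache *c = yaffs_find_chunk_cache(obj, chunk_id);
 *	if (!c)
 *		c = yaffs_grab_chunk_cache(obj->my_dev); // may be NULL if caching is off
 *	... copy the short read/write through c->data ...
 *	yaffs_use_cache(dev, c, is_write);  // bump LRU, mark dirty on write
 *
 * Dirty entries are eventually written back by yaffs_flush_file_cache().
 */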
1400
1401static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
1402{
1403 struct yaffs_dev *dev = obj->my_dev;
1404 int i;
1405 struct yaffs_cache *cache;
1406 int n_caches = obj->my_dev->param.n_caches;
1407
1408 for (i = 0; i < n_caches; i++) {
1409 cache = &dev->cache[i];
1410 if (cache->object == obj && cache->dirty)
1411 return 1;
1412 }
1413
1414 return 0;
1415}
1416
1417static void yaffs_flush_file_cache(struct yaffs_obj *obj)
1418{
1419 struct yaffs_dev *dev = obj->my_dev;
1420 int lowest = -99; /* Stop compiler whining. */
1421 int i;
1422 struct yaffs_cache *cache;
1423 int chunk_written = 0;
1424 int n_caches = obj->my_dev->param.n_caches;
1425
1426 if (n_caches > 0) {
1427 do {
1428 cache = NULL;
1429
1430 /* Find the dirty cache for this object with the lowest chunk id. */
1431 for (i = 0; i < n_caches; i++) {
1432 if (dev->cache[i].object == obj &&
1433 dev->cache[i].dirty) {
1434 if (!cache
1435 || dev->cache[i].chunk_id <
1436 lowest) {
1437 cache = &dev->cache[i];
1438 lowest = cache->chunk_id;
1439 }
1440 }
1441 }
1442
1443 if (cache && !cache->locked) {
1444 /* Write it out and free it up */
1445
1446 chunk_written =
1447 yaffs_wr_data_obj(cache->object,
1448 cache->chunk_id,
1449 cache->data,
1450 cache->n_bytes, 1);
1451 cache->dirty = 0;
1452 cache->object = NULL;
1453 }
1454
1455 } while (cache && chunk_written > 0);
1456
1457 if (cache)
1458 /* Hoosterman, disk full while writing cache out. */
1459 yaffs_trace(YAFFS_TRACE_ERROR,
1460 "yaffs tragedy: no space during cache write");
1461
1462 }
1463
1464}
1465
1466/* yaffs_flush_whole_cache(dev)
 1467 *
 1468 * Flush every dirty short-op cache entry, for all objects on the device.
 1469 */
1470
1471void yaffs_flush_whole_cache(struct yaffs_dev *dev)
1472{
1473 struct yaffs_obj *obj;
1474 int n_caches = dev->param.n_caches;
1475 int i;
1476
1477 /* Find a dirty object in the cache and flush it...
1478 * until there are no further dirty objects.
1479 */
1480 do {
1481 obj = NULL;
1482 for (i = 0; i < n_caches && !obj; i++) {
1483 if (dev->cache[i].object && dev->cache[i].dirty)
1484 obj = dev->cache[i].object;
1485
1486 }
1487 if (obj)
1488 yaffs_flush_file_cache(obj);
1489
1490 } while (obj);
1491
1492}
1493
1494/* Grab us a cache chunk for use.
1495 * First look for an empty one.
1496 * Then look for the least recently used non-dirty one.
1497 * Then look for the least recently used dirty one...., flush and look again.
1498 */
1499static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
1500{
1501 int i;
1502
1503 if (dev->param.n_caches > 0) {
1504 for (i = 0; i < dev->param.n_caches; i++) {
1505 if (!dev->cache[i].object)
1506 return &dev->cache[i];
1507 }
1508 }
1509
1510 return NULL;
1511}
1512
1513static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
1514{
1515 struct yaffs_cache *cache;
1516 struct yaffs_obj *the_obj;
1517 int usage;
1518 int i;
1519 int pushout;
1520
1521 if (dev->param.n_caches > 0) {
1522 /* Try find a non-dirty one... */
1523
1524 cache = yaffs_grab_chunk_worker(dev);
1525
1526 if (!cache) {
 1527			/* They were all dirty, find the least recently used object and flush
 1528			 * its cache, then find again.
 1529			 * NB what's here is not very accurate: we actually flush the object
 1530			 * that holds the least recently used page.
1531 */
1532
1533 /* With locking we can't assume we can use entry zero */
1534
1535 the_obj = NULL;
1536 usage = -1;
1537 cache = NULL;
1538 pushout = -1;
1539
1540 for (i = 0; i < dev->param.n_caches; i++) {
1541 if (dev->cache[i].object &&
1542 !dev->cache[i].locked &&
1543 (dev->cache[i].last_use < usage
1544 || !cache)) {
1545 usage = dev->cache[i].last_use;
1546 the_obj = dev->cache[i].object;
1547 cache = &dev->cache[i];
1548 pushout = i;
1549 }
1550 }
1551
1552 if (!cache || cache->dirty) {
1553 /* Flush and try again */
1554 yaffs_flush_file_cache(the_obj);
1555 cache = yaffs_grab_chunk_worker(dev);
1556 }
1557
1558 }
1559 return cache;
1560 } else {
1561 return NULL;
1562 }
1563}
1564
1565/* Find a cached chunk */
1566static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
1567 int chunk_id)
1568{
1569 struct yaffs_dev *dev = obj->my_dev;
1570 int i;
1571 if (dev->param.n_caches > 0) {
1572 for (i = 0; i < dev->param.n_caches; i++) {
1573 if (dev->cache[i].object == obj &&
1574 dev->cache[i].chunk_id == chunk_id) {
1575 dev->cache_hits++;
1576
1577 return &dev->cache[i];
1578 }
1579 }
1580 }
1581 return NULL;
1582}
1583
1584/* Mark the chunk for the least recently used algorithm */
1585static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
1586 int is_write)
1587{
1588
1589 if (dev->param.n_caches > 0) {
1590 if (dev->cache_last_use < 0 || dev->cache_last_use > 100000000) {
1591 /* Reset the cache usages */
1592 int i;
1593 for (i = 1; i < dev->param.n_caches; i++)
1594 dev->cache[i].last_use = 0;
1595
1596 dev->cache_last_use = 0;
1597 }
1598
1599 dev->cache_last_use++;
1600
1601 cache->last_use = dev->cache_last_use;
1602
1603 if (is_write)
1604 cache->dirty = 1;
1605 }
1606}
1607
1608/* Invalidate a single cache page.
1609 * Do this when a whole page gets written,
1610 * ie the short cache for this page is no longer valid.
1611 */
1612static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
1613{
1614 if (object->my_dev->param.n_caches > 0) {
1615 struct yaffs_cache *cache =
1616 yaffs_find_chunk_cache(object, chunk_id);
1617
1618 if (cache)
1619 cache->object = NULL;
1620 }
1621}
1622
1623/* Invalidate all the cache pages associated with this object
 1624 * Do this whenever the file is deleted or resized.
1625 */
1626static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
1627{
1628 int i;
1629 struct yaffs_dev *dev = in->my_dev;
1630
1631 if (dev->param.n_caches > 0) {
1632 /* Invalidate it. */
1633 for (i = 0; i < dev->param.n_caches; i++) {
1634 if (dev->cache[i].object == in)
1635 dev->cache[i].object = NULL;
1636 }
1637 }
1638}
1639
1640static void yaffs_unhash_obj(struct yaffs_obj *obj)
1641{
1642 int bucket;
1643 struct yaffs_dev *dev = obj->my_dev;
1644
1645 /* If it is still linked into the bucket list, free from the list */
1646 if (!list_empty(&obj->hash_link)) {
1647 list_del_init(&obj->hash_link);
1648 bucket = yaffs_hash_fn(obj->obj_id);
1649 dev->obj_bucket[bucket].count--;
1650 }
1651}
1652
1653/* FreeObject frees up a Object and puts it back on the free list */
1654static void yaffs_free_obj(struct yaffs_obj *obj)
1655{
1656 struct yaffs_dev *dev = obj->my_dev;
1657
1658 yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
1659 obj, obj->my_inode);
1660
1661 if (!obj)
1662 YBUG();
1663 if (obj->parent)
1664 YBUG();
1665 if (!list_empty(&obj->siblings))
1666 YBUG();
1667
1668 if (obj->my_inode) {
1669 /* We're still hooked up to a cached inode.
1670 * Don't delete now, but mark for later deletion
1671 */
1672 obj->defered_free = 1;
1673 return;
1674 }
1675
1676 yaffs_unhash_obj(obj);
1677
1678 yaffs_free_raw_obj(dev, obj);
1679 dev->n_obj--;
1680 dev->checkpoint_blocks_required = 0; /* force recalculation */
1681}
1682
1683void yaffs_handle_defered_free(struct yaffs_obj *obj)
1684{
1685 if (obj->defered_free)
1686 yaffs_free_obj(obj);
1687}
1688
1689static int yaffs_generic_obj_del(struct yaffs_obj *in)
1690{
1691
1692 /* First off, invalidate the file's data in the cache, without flushing. */
1693 yaffs_invalidate_whole_cache(in);
1694
1695 if (in->my_dev->param.is_yaffs2 && (in->parent != in->my_dev->del_dir)) {
1696 /* Move to the unlinked directory so we have a record that it was deleted. */
1697 yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
1698 0);
1699
1700 }
1701
1702 yaffs_remove_obj_from_dir(in);
1703 yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
1704 in->hdr_chunk = 0;
1705
1706 yaffs_free_obj(in);
1707 return YAFFS_OK;
1708
1709}
1710
1711static void yaffs_soft_del_file(struct yaffs_obj *obj)
1712{
1713 if (obj->deleted &&
1714 obj->variant_type == YAFFS_OBJECT_TYPE_FILE && !obj->soft_del) {
1715 if (obj->n_data_chunks <= 0) {
1716 /* Empty file with no duplicate object headers,
1717 * just delete it immediately */
1718 yaffs_free_tnode(obj->my_dev,
1719 obj->variant.file_variant.top);
1720 obj->variant.file_variant.top = NULL;
1721 yaffs_trace(YAFFS_TRACE_TRACING,
1722 "yaffs: Deleting empty file %d",
1723 obj->obj_id);
1724 yaffs_generic_obj_del(obj);
1725 } else {
1726 yaffs_soft_del_worker(obj,
1727 obj->variant.file_variant.top,
1728 obj->variant.
1729 file_variant.top_level, 0);
1730 obj->soft_del = 1;
1731 }
1732 }
1733}
1734
1735/* Pruning removes any part of the file structure tree that is beyond the
1736 * bounds of the file (ie that does not point to chunks).
1737 *
1738 * A file should only get pruned when its size is reduced.
1739 *
1740 * Before pruning, the chunks must be pulled from the tree and the
1741 * level 0 tnode entries must be zeroed out.
1742 * Could also use this for file deletion, but that's probably better handled
1743 * by a special case.
1744 *
1745 * This function is recursive. For levels > 0 the function is called again on
1746 * any sub-tree. For level == 0 we just check if the sub-tree has data.
1747 * If there is no data in a subtree then it is pruned.
1748 */
1749
1750static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
1751 struct yaffs_tnode *tn, u32 level,
1752 int del0)
1753{
1754 int i;
1755 int has_data;
1756
1757 if (tn) {
1758 has_data = 0;
1759
1760 if (level > 0) {
1761 for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
1762 if (tn->internal[i]) {
1763 tn->internal[i] =
1764 yaffs_prune_worker(dev,
1765 tn->internal[i],
1766 level - 1,
1767 (i ==
1768 0) ? del0 : 1);
1769 }
1770
1771 if (tn->internal[i])
1772 has_data++;
1773 }
1774 } else {
1775 int tnode_size_u32 = dev->tnode_size / sizeof(u32);
1776 u32 *map = (u32 *) tn;
1777
1778 for (i = 0; !has_data && i < tnode_size_u32; i++) {
1779 if (map[i])
1780 has_data++;
1781 }
1782 }
1783
1784 if (has_data == 0 && del0) {
1785 /* Free and return NULL */
1786
1787 yaffs_free_tnode(dev, tn);
1788 tn = NULL;
1789 }
1790
1791 }
1792
1793 return tn;
1794
1795}
1796
1797static int yaffs_prune_tree(struct yaffs_dev *dev,
1798 struct yaffs_file_var *file_struct)
1799{
1800 int i;
1801 int has_data;
1802 int done = 0;
1803 struct yaffs_tnode *tn;
1804
1805 if (file_struct->top_level > 0) {
1806 file_struct->top =
1807 yaffs_prune_worker(dev, file_struct->top,
1808 file_struct->top_level, 0);
1809
 1810		/* Now we have a tree with all the empty branches NULLed out, but the height
 1811		 * is the same as it was.
 1812		 * Let's see if we can trim internal tnodes to shorten the tree.
 1813		 * We can do this if only the 0th element in the tnode is in use
 1814		 * (ie all the other elements are NULL)
1815 */
1816
1817 while (file_struct->top_level && !done) {
1818 tn = file_struct->top;
1819
1820 has_data = 0;
1821 for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
1822 if (tn->internal[i])
1823 has_data++;
1824 }
1825
1826 if (!has_data) {
1827 file_struct->top = tn->internal[0];
1828 file_struct->top_level--;
1829 yaffs_free_tnode(dev, tn);
1830 } else {
1831 done = 1;
1832 }
1833 }
1834 }
1835
1836 return YAFFS_OK;
1837}
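/*
 * Example: once every chunk of a file whose top_level was 2 has been pulled
 * out of the tree, the worker above NULLs the empty subtrees; the while loop
 * then sees that only internal[0] remains at each level and keeps demoting
 * the top until top_level reaches 0, the shape a new empty file starts with.
 */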
1838
1839/*-------------------- End of File Structure functions.-------------------*/
1840
1841/* AllocateEmptyObject gets us a clean Object. Tries to allocate more if we run out */
1842static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
1843{
1844 struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
1845
1846 if (obj) {
1847 dev->n_obj++;
1848
1849 /* Now sweeten it up... */
1850
1851 memset(obj, 0, sizeof(struct yaffs_obj));
1852 obj->being_created = 1;
1853
1854 obj->my_dev = dev;
1855 obj->hdr_chunk = 0;
1856 obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
1857 INIT_LIST_HEAD(&(obj->hard_links));
1858 INIT_LIST_HEAD(&(obj->hash_link));
1859 INIT_LIST_HEAD(&obj->siblings);
1860
1861 /* Now make the directory sane */
1862 if (dev->root_dir) {
1863 obj->parent = dev->root_dir;
1864 list_add(&(obj->siblings),
1865 &dev->root_dir->variant.dir_variant.children);
1866 }
1867
1868 /* Add it to the lost and found directory.
1869 * NB Can't put root or lost-n-found in lost-n-found so
1870 * check if lost-n-found exists first
1871 */
1872 if (dev->lost_n_found)
1873 yaffs_add_obj_to_dir(dev->lost_n_found, obj);
1874
1875 obj->being_created = 0;
1876 }
1877
1878 dev->checkpoint_blocks_required = 0; /* force recalculation */
1879
1880 return obj;
1881}
1882
1883static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
1884{
1885 int i;
1886 int l = 999;
1887 int lowest = 999999;
1888
1889 /* Search for the shortest list or one that
1890 * isn't too long.
1891 */
1892
1893 for (i = 0; i < 10 && lowest > 4; i++) {
1894 dev->bucket_finder++;
1895 dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
1896 if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
1897 lowest = dev->obj_bucket[dev->bucket_finder].count;
1898 l = dev->bucket_finder;
1899 }
1900
1901 }
1902
1903 return l;
1904}
1905
1906static int yaffs_new_obj_id(struct yaffs_dev *dev)
1907{
1908 int bucket = yaffs_find_nice_bucket(dev);
1909
1910 /* Now find an object value that has not already been taken
1911 * by scanning the list.
1912 */
1913
1914 int found = 0;
1915 struct list_head *i;
1916
1917 u32 n = (u32) bucket;
1918
1919 /* yaffs_check_obj_hash_sane(); */
1920
1921 while (!found) {
1922 found = 1;
1923 n += YAFFS_NOBJECT_BUCKETS;
1924 if (1 || dev->obj_bucket[bucket].count > 0) {
1925 list_for_each(i, &dev->obj_bucket[bucket].list) {
1926 /* If there is already one in the list */
1927 if (i && list_entry(i, struct yaffs_obj,
1928 hash_link)->obj_id == n) {
1929 found = 0;
1930 }
1931 }
1932 }
1933 }
1934
1935 return n;
1936}
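/*
 * Note on the stride: candidate ids start at the bucket number and advance in
 * steps of YAFFS_NOBJECT_BUCKETS, so every candidate hashes back into the
 * chosen bucket and only that one list needs to be scanned for collisions.
 */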
1937
1938static void yaffs_hash_obj(struct yaffs_obj *in)
1939{
1940 int bucket = yaffs_hash_fn(in->obj_id);
1941 struct yaffs_dev *dev = in->my_dev;
1942
1943 list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
1944 dev->obj_bucket[bucket].count++;
1945}
1946
1947struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
1948{
1949 int bucket = yaffs_hash_fn(number);
1950 struct list_head *i;
1951 struct yaffs_obj *in;
1952
1953 list_for_each(i, &dev->obj_bucket[bucket].list) {
1954 /* Look if it is in the list */
1955 if (i) {
1956 in = list_entry(i, struct yaffs_obj, hash_link);
1957 if (in->obj_id == number) {
1958
1959 /* Don't tell the VFS about this one if it is defered free */
1960 if (in->defered_free)
1961 return NULL;
1962
1963 return in;
1964 }
1965 }
1966 }
1967
1968 return NULL;
1969}
1970
1971struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
1972 enum yaffs_obj_type type)
1973{
1974 struct yaffs_obj *the_obj = NULL;
1975 struct yaffs_tnode *tn = NULL;
1976
1977 if (number < 0)
1978 number = yaffs_new_obj_id(dev);
1979
1980 if (type == YAFFS_OBJECT_TYPE_FILE) {
1981 tn = yaffs_get_tnode(dev);
1982 if (!tn)
1983 return NULL;
1984 }
1985
1986 the_obj = yaffs_alloc_empty_obj(dev);
1987 if (!the_obj) {
1988 if (tn)
1989 yaffs_free_tnode(dev, tn);
1990 return NULL;
1991 }
1992
1993 if (the_obj) {
1994 the_obj->fake = 0;
1995 the_obj->rename_allowed = 1;
1996 the_obj->unlink_allowed = 1;
1997 the_obj->obj_id = number;
1998 yaffs_hash_obj(the_obj);
1999 the_obj->variant_type = type;
2000 yaffs_load_current_time(the_obj, 1, 1);
2001
2002 switch (type) {
2003 case YAFFS_OBJECT_TYPE_FILE:
2004 the_obj->variant.file_variant.file_size = 0;
2005 the_obj->variant.file_variant.scanned_size = 0;
2006 the_obj->variant.file_variant.shrink_size = ~0; /* max */
2007 the_obj->variant.file_variant.top_level = 0;
2008 the_obj->variant.file_variant.top = tn;
2009 break;
2010 case YAFFS_OBJECT_TYPE_DIRECTORY:
2011 INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
2012 INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
2013 break;
2014 case YAFFS_OBJECT_TYPE_SYMLINK:
2015 case YAFFS_OBJECT_TYPE_HARDLINK:
2016 case YAFFS_OBJECT_TYPE_SPECIAL:
2017 /* No action required */
2018 break;
2019 case YAFFS_OBJECT_TYPE_UNKNOWN:
2020 /* todo this should not happen */
2021 break;
2022 }
2023 }
2024
2025 return the_obj;
2026}
2027
2028static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
2029 int number, u32 mode)
2030{
2031
2032 struct yaffs_obj *obj =
2033 yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
2034 if (obj) {
2035 obj->fake = 1; /* it is fake so it might have no NAND presence... */
2036 obj->rename_allowed = 0; /* ... and we're not allowed to rename it... */
2037 obj->unlink_allowed = 0; /* ... or unlink it */
2038 obj->deleted = 0;
2039 obj->unlinked = 0;
2040 obj->yst_mode = mode;
2041 obj->my_dev = dev;
2042 obj->hdr_chunk = 0; /* Not a valid chunk. */
2043 }
2044
2045 return obj;
2046
2047}
2048
2049
2050static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
2051{
2052 int i;
2053
2054 dev->n_obj = 0;
2055 dev->n_tnodes = 0;
2056
2057 yaffs_init_raw_tnodes_and_objs(dev);
2058
2059 for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
2060 INIT_LIST_HEAD(&dev->obj_bucket[i].list);
2061 dev->obj_bucket[i].count = 0;
2062 }
2063}
2064
2065struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
2066 int number,
2067 enum yaffs_obj_type type)
2068{
2069 struct yaffs_obj *the_obj = NULL;
2070
2071 if (number > 0)
2072 the_obj = yaffs_find_by_number(dev, number);
2073
2074 if (!the_obj)
2075 the_obj = yaffs_new_obj(dev, number, type);
2076
2077 return the_obj;
2078
2079}
2080
2081YCHAR *yaffs_clone_str(const YCHAR * str)
2082{
2083 YCHAR *new_str = NULL;
2084 int len;
2085
2086 if (!str)
2087 str = _Y("");
2088
2089 len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
2090 new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
2091 if (new_str) {
2092 strncpy(new_str, str, len);
2093 new_str[len] = 0;
2094 }
2095 return new_str;
2096
2097}
2098/*
 2099 * yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
2100 * link (ie. name) is created or deleted in the directory.
2101 *
2102 * ie.
2103 * create dir/a : update dir's mtime/ctime
2104 * rm dir/a: update dir's mtime/ctime
 2105 * modify dir/a: don't update dir's mtime/ctime
2106 *
 2107 * This can be handled immediately or deferred. Deferring helps reduce the number
2108 * of updates when many files in a directory are changed within a brief period.
2109 *
 2110 * If the directory updating is deferred then yaffs_update_dirty_dirs must be
2111 * called periodically.
2112 */
2113
2114static void yaffs_update_parent(struct yaffs_obj *obj)
2115{
2116 struct yaffs_dev *dev;
2117 if (!obj)
2118 return;
2119 dev = obj->my_dev;
2120 obj->dirty = 1;
2121 yaffs_load_current_time(obj, 0, 1);
2122 if (dev->param.defered_dir_update) {
2123 struct list_head *link = &obj->variant.dir_variant.dirty;
2124
2125 if (list_empty(link)) {
2126 list_add(link, &dev->dirty_dirs);
2127 yaffs_trace(YAFFS_TRACE_BACKGROUND,
2128 "Added object %d to dirty directories",
2129 obj->obj_id);
2130 }
2131
2132 } else {
2133 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2134 }
2135}
2136
2137void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
2138{
2139 struct list_head *link;
2140 struct yaffs_obj *obj;
2141 struct yaffs_dir_var *d_s;
2142 union yaffs_obj_var *o_v;
2143
2144 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
2145
2146 while (!list_empty(&dev->dirty_dirs)) {
2147 link = dev->dirty_dirs.next;
2148 list_del_init(link);
2149
2150 d_s = list_entry(link, struct yaffs_dir_var, dirty);
2151 o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
2152 obj = list_entry(o_v, struct yaffs_obj, variant);
2153
2154 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
2155 obj->obj_id);
2156
2157 if (obj->dirty)
2158 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2159 }
2160}
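
#if 0
/*
 * Illustrative sketch, not from the original yaffs code: a minimal,
 * self-contained illustration of the defer-then-flush pattern used by
 * yaffs_update_parent() and yaffs_update_dirty_dirs() above.  The types
 * and names here are hypothetical simplifications (a hand-rolled list
 * instead of list_head); the point is that a directory is queued at most
 * once, however many times it is dirtied, and its header is written out
 * only when the flush routine runs.
 */
#include <stdio.h>

struct fake_dir {
	int obj_id;
	int dirty;
	int on_dirty_list;
	struct fake_dir *next_dirty;
};

static struct fake_dir *dirty_list;

/* Rough analogue of yaffs_update_parent() with deferral enabled */
static void mark_dir_dirty(struct fake_dir *dir)
{
	dir->dirty = 1;
	if (!dir->on_dirty_list) {	/* cf. the list_empty() check above */
		dir->on_dirty_list = 1;
		dir->next_dirty = dirty_list;
		dirty_list = dir;
	}
}

/* Rough analogue of yaffs_update_dirty_dirs(): one header write per dir */
static void flush_dirty_dirs(void)
{
	while (dirty_list) {
		struct fake_dir *dir = dirty_list;

		dirty_list = dir->next_dirty;
		dir->on_dirty_list = 0;
		dir->next_dirty = NULL;
		if (dir->dirty) {
			printf("update header for dir %d\n", dir->obj_id);
			dir->dirty = 0;
		}
	}
}

int main(void)
{
	struct fake_dir d = { 7, 0, 0, NULL };

	mark_dir_dirty(&d);	/* e.g. create dir/a */
	mark_dir_dirty(&d);	/* e.g. rm dir/a: still queued only once */
	flush_dirty_dirs();	/* a single header update covers both changes */
	return 0;
}
#endif
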
2161
2162/*
2163 * Mknod (create) a new object.
2164 * equiv_obj only has meaning for a hard link;
2165 * alias_str only has meaning for a symlink.
2166 * rdev only has meaning for devices (a subset of special objects)
2167 */
2168
2169static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
2170 struct yaffs_obj *parent,
2171 const YCHAR * name,
2172 u32 mode,
2173 u32 uid,
2174 u32 gid,
2175 struct yaffs_obj *equiv_obj,
2176 const YCHAR * alias_str, u32 rdev)
2177{
2178 struct yaffs_obj *in;
2179 YCHAR *str = NULL;
2180
2181 struct yaffs_dev *dev = parent->my_dev;
2182
2183 /* Check if the entry exists. If it does then fail the call since we don't want a dup. */
2184 if (yaffs_find_by_name(parent, name))
2185 return NULL;
2186
2187 if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
2188 str = yaffs_clone_str(alias_str);
2189 if (!str)
2190 return NULL;
2191 }
2192
2193 in = yaffs_new_obj(dev, -1, type);
2194
2195 if (!in) {
2196 if (str)
2197 kfree(str);
2198 return NULL;
2199 }
2200
2201 if (in) {
2202 in->hdr_chunk = 0;
2203 in->valid = 1;
2204 in->variant_type = type;
2205
2206 in->yst_mode = mode;
2207
2208 yaffs_attribs_init(in, gid, uid, rdev);
2209
2210 in->n_data_chunks = 0;
2211
2212 yaffs_set_obj_name(in, name);
2213 in->dirty = 1;
2214
2215 yaffs_add_obj_to_dir(parent, in);
2216
2217 in->my_dev = parent->my_dev;
2218
2219 switch (type) {
2220 case YAFFS_OBJECT_TYPE_SYMLINK:
2221 in->variant.symlink_variant.alias = str;
2222 break;
2223 case YAFFS_OBJECT_TYPE_HARDLINK:
2224 in->variant.hardlink_variant.equiv_obj = equiv_obj;
2225 in->variant.hardlink_variant.equiv_id =
2226 equiv_obj->obj_id;
2227 list_add(&in->hard_links, &equiv_obj->hard_links);
2228 break;
2229 case YAFFS_OBJECT_TYPE_FILE:
2230 case YAFFS_OBJECT_TYPE_DIRECTORY:
2231 case YAFFS_OBJECT_TYPE_SPECIAL:
2232 case YAFFS_OBJECT_TYPE_UNKNOWN:
2233 /* do nothing */
2234 break;
2235 }
2236
2237 if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
2238 /* Could not create the object header, fail the creation */
2239 yaffs_del_obj(in);
2240 in = NULL;
2241 }
2242
2243 yaffs_update_parent(parent);
2244 }
2245
2246 return in;
2247}
2248
2249struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
2250 const YCHAR * name, u32 mode, u32 uid,
2251 u32 gid)
2252{
2253 return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
2254 uid, gid, NULL, NULL, 0);
2255}
2256
2257struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
2258 u32 mode, u32 uid, u32 gid)
2259{
2260 return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
2261 mode, uid, gid, NULL, NULL, 0);
2262}
2263
2264struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
2265 const YCHAR * name, u32 mode, u32 uid,
2266 u32 gid, u32 rdev)
2267{
2268 return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
2269 uid, gid, NULL, NULL, rdev);
2270}
2271
2272struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
2273 const YCHAR * name, u32 mode, u32 uid,
2274 u32 gid, const YCHAR * alias)
2275{
2276 return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
2277 uid, gid, NULL, alias, 0);
2278}
2279
2280/* yaffs_link_obj returns the equivalent object on success, or NULL on failure. */
2281struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
2282 struct yaffs_obj *equiv_obj)
2283{
2284 /* Get the real object in case we were fed a hard link as an equivalent object */
2285 equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
2286
2287 if (yaffs_create_obj
2288 (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
2289 equiv_obj, NULL, 0)) {
2290 return equiv_obj;
2291 } else {
2292 return NULL;
2293 }
2294
2295}
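
#if 0
/*
 * Illustrative sketch, not from the original yaffs code: one plausible
 * calling sequence for the creation helpers above, assuming a mounted
 * device.  The directory/file names and mode bits are made up for
 * illustration.
 */
static void example_create_objects(struct yaffs_dev *dev)
{
	struct yaffs_obj *dir;
	struct yaffs_obj *file;

	/* Make /docs, then /docs/readme.txt */
	dir = yaffs_create_dir(dev->root_dir, _Y("docs"), 0755, 0, 0);
	if (!dir)
		return;		/* duplicate name or no space for the header */

	file = yaffs_create_file(dir, _Y("readme.txt"), 0644, 0, 0);
	if (!file)
		return;

	/* A symlink to the file, then a hard link to it in the root */
	yaffs_create_symlink(dir, _Y("readme.lnk"), 0777, 0, 0,
			     _Y("readme.txt"));

	/* yaffs_link_obj() hands back the equivalent object on success */
	if (!yaffs_link_obj(dev->root_dir, _Y("readme-hard"), file))
		return;
}
#endif
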
2296
2297
2298
2299/*------------------------- Block Management and Page Allocation ----------------*/
2300
2301static int yaffs_init_blocks(struct yaffs_dev *dev)
2302{
2303 int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
2304
2305 dev->block_info = NULL;
2306 dev->chunk_bits = NULL;
2307
2308 dev->alloc_block = -1; /* force it to get a new one */
2309
2310	/* If the first allocation strategy fails, try the alternate one */
2311 dev->block_info =
2312 kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
2313 if (!dev->block_info) {
2314 dev->block_info =
2315 vmalloc(n_blocks * sizeof(struct yaffs_block_info));
2316 dev->block_info_alt = 1;
2317 } else {
2318 dev->block_info_alt = 0;
2319 }
2320
2321 if (dev->block_info) {
2322 /* Set up dynamic blockinfo stuff. Round up bytes. */
2323 dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
2324 dev->chunk_bits =
2325 kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
2326 if (!dev->chunk_bits) {
2327 dev->chunk_bits =
2328 vmalloc(dev->chunk_bit_stride * n_blocks);
2329 dev->chunk_bits_alt = 1;
2330 } else {
2331 dev->chunk_bits_alt = 0;
2332 }
2333 }
2334
2335 if (dev->block_info && dev->chunk_bits) {
2336 memset(dev->block_info, 0,
2337 n_blocks * sizeof(struct yaffs_block_info));
2338 memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
2339 return YAFFS_OK;
2340 }
2341
2342 return YAFFS_FAIL;
2343}
2344
2345static void yaffs_deinit_blocks(struct yaffs_dev *dev)
2346{
2347 if (dev->block_info_alt && dev->block_info)
2348 vfree(dev->block_info);
2349 else if (dev->block_info)
2350 kfree(dev->block_info);
2351
2352 dev->block_info_alt = 0;
2353
2354 dev->block_info = NULL;
2355
2356 if (dev->chunk_bits_alt && dev->chunk_bits)
2357 vfree(dev->chunk_bits);
2358 else if (dev->chunk_bits)
2359 kfree(dev->chunk_bits);
2360 dev->chunk_bits_alt = 0;
2361 dev->chunk_bits = NULL;
2362}
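
#if 0
/*
 * Illustrative sketch, not from the original yaffs code: the allocation
 * pattern that yaffs_init_blocks()/yaffs_deinit_blocks() use for their
 * large arrays, pulled out on its own.  Try kmalloc first, fall back to
 * vmalloc, and remember which allocator succeeded so the matching free
 * is used later.  The helper names are hypothetical.
 */
static void *alloc_big(size_t n_bytes, int *used_vmalloc)
{
	void *p = kmalloc(n_bytes, GFP_NOFS);

	*used_vmalloc = 0;
	if (!p) {
		p = vmalloc(n_bytes);
		if (p)
			*used_vmalloc = 1;
	}
	return p;
}

static void free_big(void *p, int used_vmalloc)
{
	if (!p)
		return;
	if (used_vmalloc)
		vfree(p);
	else
		kfree(p);
}
#endif
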
2363
2364void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
2365{
2366 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
2367
2368 int erased_ok = 0;
2369
2370 /* If the block is still healthy erase it and mark as clean.
2371 * If the block has had a data failure, then retire it.
2372 */
2373
2374 yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
2375 "yaffs_block_became_dirty block %d state %d %s",
2376 block_no, bi->block_state,
2377 (bi->needs_retiring) ? "needs retiring" : "");
2378
2379 yaffs2_clear_oldest_dirty_seq(dev, bi);
2380
2381 bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
2382
2383 /* If this is the block being garbage collected then stop gc'ing this block */
2384 if (block_no == dev->gc_block)
2385 dev->gc_block = 0;
2386
2387 /* If this block is currently the best candidate for gc then drop as a candidate */
2388 if (block_no == dev->gc_dirtiest) {
2389 dev->gc_dirtiest = 0;
2390 dev->gc_pages_in_use = 0;
2391 }
2392
2393 if (!bi->needs_retiring) {
2394 yaffs2_checkpt_invalidate(dev);
2395 erased_ok = yaffs_erase_block(dev, block_no);
2396 if (!erased_ok) {
2397 dev->n_erase_failures++;
2398 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2399 "**>> Erasure failed %d", block_no);
2400 }
2401 }
2402
2403 if (erased_ok &&
2404 ((yaffs_trace_mask & YAFFS_TRACE_ERASE)
2405 || !yaffs_skip_verification(dev))) {
2406 int i;
2407 for (i = 0; i < dev->param.chunks_per_block; i++) {
2408 if (!yaffs_check_chunk_erased
2409 (dev, block_no * dev->param.chunks_per_block + i)) {
2410 yaffs_trace(YAFFS_TRACE_ERROR,
2411 ">>Block %d erasure supposedly OK, but chunk %d not erased",
2412 block_no, i);
2413 }
2414 }
2415 }
2416
2417 if (erased_ok) {
2418 /* Clean it up... */
2419 bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
2420 bi->seq_number = 0;
2421 dev->n_erased_blocks++;
2422 bi->pages_in_use = 0;
2423 bi->soft_del_pages = 0;
2424 bi->has_shrink_hdr = 0;
2425 bi->skip_erased_check = 1; /* Clean, so no need to check */
2426 bi->gc_prioritise = 0;
2427 yaffs_clear_chunk_bits(dev, block_no);
2428
2429 yaffs_trace(YAFFS_TRACE_ERASE,
2430 "Erased block %d", block_no);
2431 } else {
2432 /* We lost a block of free space */
2433 dev->n_free_chunks -= dev->param.chunks_per_block;
2434 yaffs_retire_block(dev, block_no);
2435 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2436 "**>> Block %d retired", block_no);
2437 }
2438}
2439
2440
2441
2442static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
2443{
2444 int old_chunk;
2445 int new_chunk;
2446 int mark_flash;
2447 int ret_val = YAFFS_OK;
2448 int i;
2449 int is_checkpt_block;
2450 int matching_chunk;
2451 int max_copies;
2452
2453 int chunks_before = yaffs_get_erased_chunks(dev);
2454 int chunks_after;
2455
2456 struct yaffs_ext_tags tags;
2457
2458 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
2459
2460 struct yaffs_obj *object;
2461
2462 is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
2463
2464 yaffs_trace(YAFFS_TRACE_TRACING,
2465 "Collecting block %d, in use %d, shrink %d, whole_block %d",
2466 block, bi->pages_in_use, bi->has_shrink_hdr,
2467 whole_block);
2468
2469 /*yaffs_verify_free_chunks(dev); */
2470
2471 if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
2472 bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
2473
2474	bi->has_shrink_hdr = 0;	/* clear the flag so that the block can be erased */
2475
2476 dev->gc_disable = 1;
2477
2478 if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
2479 yaffs_trace(YAFFS_TRACE_TRACING,
2480 "Collecting block %d that has no chunks in use",
2481 block);
2482 yaffs_block_became_dirty(dev, block);
2483 } else {
2484
2485 u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
2486
2487 yaffs_verify_blk(dev, bi, block);
2488
2489 max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
2490 old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
2491
2492 for ( /* init already done */ ;
2493 ret_val == YAFFS_OK &&
2494 dev->gc_chunk < dev->param.chunks_per_block &&
2495 (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
2496 max_copies > 0; dev->gc_chunk++, old_chunk++) {
2497 if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
2498
2499 /* This page is in use and might need to be copied off */
2500
2501 max_copies--;
2502
2503 mark_flash = 1;
2504
2505 yaffs_init_tags(&tags);
2506
2507 yaffs_rd_chunk_tags_nand(dev, old_chunk,
2508 buffer, &tags);
2509
2510 object = yaffs_find_by_number(dev, tags.obj_id);
2511
2512 yaffs_trace(YAFFS_TRACE_GC_DETAIL,
2513 "Collecting chunk in block %d, %d %d %d ",
2514 dev->gc_chunk, tags.obj_id,
2515 tags.chunk_id, tags.n_bytes);
2516
2517 if (object && !yaffs_skip_verification(dev)) {
2518 if (tags.chunk_id == 0)
2519 matching_chunk =
2520 object->hdr_chunk;
2521 else if (object->soft_del)
2522 matching_chunk = old_chunk; /* Defeat the test */
2523 else
2524 matching_chunk =
2525 yaffs_find_chunk_in_file
2526 (object, tags.chunk_id,
2527 NULL);
2528
2529 if (old_chunk != matching_chunk)
2530 yaffs_trace(YAFFS_TRACE_ERROR,
2531 "gc: page in gc mismatch: %d %d %d %d",
2532 old_chunk,
2533 matching_chunk,
2534 tags.obj_id,
2535 tags.chunk_id);
2536
2537 }
2538
2539 if (!object) {
2540 yaffs_trace(YAFFS_TRACE_ERROR,
2541 "page %d in gc has no object: %d %d %d ",
2542 old_chunk,
2543 tags.obj_id, tags.chunk_id,
2544 tags.n_bytes);
2545 }
2546
2547 if (object &&
2548 object->deleted &&
2549 object->soft_del && tags.chunk_id != 0) {
2550				/* It's a data chunk in a soft deleted file:
2551				 * throw it away.
2552				 * No need to copy this, just forget about it and
2553				 * fix up the object.
2554 */
2555
2556 /* Free chunks already includes softdeleted chunks.
2557				 * However this chunk is soon going to be really deleted,
2558 * which will increment free chunks.
2559 * We have to decrement free chunks so this works out properly.
2560 */
2561 dev->n_free_chunks--;
2562 bi->soft_del_pages--;
2563
2564 object->n_data_chunks--;
2565
2566 if (object->n_data_chunks <= 0) {
2567					/* remember to clean up the object */
2568 dev->gc_cleanup_list[dev->
2569 n_clean_ups]
2570 = tags.obj_id;
2571 dev->n_clean_ups++;
2572 }
2573 mark_flash = 0;
2574 } else if (0) {
2575 /* Todo object && object->deleted && object->n_data_chunks == 0 */
2576 /* Deleted object header with no data chunks.
2577 * Can be discarded and the file deleted.
2578 */
2579 object->hdr_chunk = 0;
2580 yaffs_free_tnode(object->my_dev,
2581 object->
2582 variant.file_variant.
2583 top);
2584 object->variant.file_variant.top = NULL;
2585 yaffs_generic_obj_del(object);
2586
2587 } else if (object) {
2588 /* It's either a data chunk in a live file or
2589 * an ObjectHeader, so we're interested in it.
2590 * NB Need to keep the ObjectHeaders of deleted files
2591 * until the whole file has been deleted off
2592 */
2593 tags.serial_number++;
2594
2595 dev->n_gc_copies++;
2596
2597 if (tags.chunk_id == 0) {
2598				/* It is an object header.
2599				 * We need to nuke the shrink header flag first.
2600				 * Also need to clean up shadowing.
2601 * We no longer want the shrink_header flag since its work is done
2602 * and if it is left in place it will mess up scanning.
2603 */
2604
2605 struct yaffs_obj_hdr *oh;
2606 oh = (struct yaffs_obj_hdr *)
2607 buffer;
2608
2609 oh->is_shrink = 0;
2610 tags.extra_is_shrink = 0;
2611
2612 oh->shadows_obj = 0;
2613 oh->inband_shadowed_obj_id = 0;
2614 tags.extra_shadows = 0;
2615
2616 /* Update file size */
2617 if (object->variant_type ==
2618 YAFFS_OBJECT_TYPE_FILE) {
2619 oh->file_size =
2620 object->variant.
2621 file_variant.
2622 file_size;
2623 tags.extra_length =
2624 oh->file_size;
2625 }
2626
2627 yaffs_verify_oh(object, oh,
2628 &tags, 1);
2629 new_chunk =
2630 yaffs_write_new_chunk(dev,
2631 (u8 *)
2632 oh,
2633 &tags,
2634 1);
2635 } else {
2636 new_chunk =
2637 yaffs_write_new_chunk(dev,
2638 buffer,
2639 &tags,
2640 1);
2641 }
2642
2643 if (new_chunk < 0) {
2644 ret_val = YAFFS_FAIL;
2645 } else {
2646
2647 /* Ok, now fix up the Tnodes etc. */
2648
2649 if (tags.chunk_id == 0) {
2650 /* It's a header */
2651 object->hdr_chunk =
2652 new_chunk;
2653 object->serial =
2654 tags.serial_number;
2655 } else {
2656 /* It's a data chunk */
2657 int ok;
2658 ok = yaffs_put_chunk_in_file(object, tags.chunk_id, new_chunk, 0);
2659 }
2660 }
2661 }
2662
2663 if (ret_val == YAFFS_OK)
2664 yaffs_chunk_del(dev, old_chunk,
2665 mark_flash, __LINE__);
2666
2667 }
2668 }
2669
2670 yaffs_release_temp_buffer(dev, buffer, __LINE__);
2671
2672 }
2673
2674 yaffs_verify_collected_blk(dev, bi, block);
2675
2676 if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2677 /*
2678 * The gc did not complete. Set block state back to FULL
2679 * because checkpointing does not restore gc.
2680 */
2681 bi->block_state = YAFFS_BLOCK_STATE_FULL;
2682 } else {
2683 /* The gc completed. */
2684 /* Do any required cleanups */
2685 for (i = 0; i < dev->n_clean_ups; i++) {
2686 /* Time to delete the file too */
2687 object =
2688 yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
2689 if (object) {
2690 yaffs_free_tnode(dev,
2691 object->variant.
2692 file_variant.top);
2693 object->variant.file_variant.top = NULL;
2694 yaffs_trace(YAFFS_TRACE_GC,
2695 "yaffs: About to finally delete object %d",
2696 object->obj_id);
2697 yaffs_generic_obj_del(object);
2698 object->my_dev->n_deleted_files--;
2699 }
2700
2701 }
2702
2703 chunks_after = yaffs_get_erased_chunks(dev);
2704 if (chunks_before >= chunks_after)
2705 yaffs_trace(YAFFS_TRACE_GC,
2706 "gc did not increase free chunks before %d after %d",
2707 chunks_before, chunks_after);
2708 dev->gc_block = 0;
2709 dev->gc_chunk = 0;
2710 dev->n_clean_ups = 0;
2711 }
2712
2713 dev->gc_disable = 0;
2714
2715 return ret_val;
2716}
2717
2718/*
2719 * yaffs_find_gc_block() is used to select the dirtiest block (or close enough)
2720 * for garbage collection.
2721 */
2722
2723static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
2724 int aggressive, int background)
2725{
2726 int i;
2727 int iterations;
2728 unsigned selected = 0;
2729 int prioritised = 0;
2730 int prioritised_exist = 0;
2731 struct yaffs_block_info *bi;
2732 int threshold;
2733
2734 /* First let's see if we need to grab a prioritised block */
2735 if (dev->has_pending_prioritised_gc && !aggressive) {
2736 dev->gc_dirtiest = 0;
2737 bi = dev->block_info;
2738 for (i = dev->internal_start_block;
2739 i <= dev->internal_end_block && !selected; i++) {
2740
2741 if (bi->gc_prioritise) {
2742 prioritised_exist = 1;
2743 if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2744 yaffs_block_ok_for_gc(dev, bi)) {
2745 selected = i;
2746 prioritised = 1;
2747 }
2748 }
2749 bi++;
2750 }
2751
2752 /*
2753 * If there is a prioritised block and none was selected then
2754 * this happened because there is at least one old dirty block gumming
2755 * up the works. Let's gc the oldest dirty block.
2756 */
2757
2758 if (prioritised_exist &&
2759 !selected && dev->oldest_dirty_block > 0)
2760 selected = dev->oldest_dirty_block;
2761
2762 if (!prioritised_exist) /* None found, so we can clear this */
2763 dev->has_pending_prioritised_gc = 0;
2764 }
2765
2766	/* If we're doing aggressive GC then we are happy to take a less-dirty block and
2767	 * search harder.
2768	 * Otherwise (we're doing a leisurely gc) we only bother to do this if the
2769	 * block has only a few pages in use.
2770 */
2771
2772 if (!selected) {
2773 int pages_used;
2774 int n_blocks =
2775 dev->internal_end_block - dev->internal_start_block + 1;
2776 if (aggressive) {
2777 threshold = dev->param.chunks_per_block;
2778 iterations = n_blocks;
2779 } else {
2780 int max_threshold;
2781
2782 if (background)
2783 max_threshold = dev->param.chunks_per_block / 2;
2784 else
2785 max_threshold = dev->param.chunks_per_block / 8;
2786
2787 if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2788 max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2789
2790 threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
2791 if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2792 threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2793 if (threshold > max_threshold)
2794 threshold = max_threshold;
2795
2796 iterations = n_blocks / 16 + 1;
2797 if (iterations > 100)
2798 iterations = 100;
2799 }
2800
2801 for (i = 0;
2802 i < iterations &&
2803 (dev->gc_dirtiest < 1 ||
2804 dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); i++) {
2805 dev->gc_block_finder++;
2806 if (dev->gc_block_finder < dev->internal_start_block ||
2807 dev->gc_block_finder > dev->internal_end_block)
2808 dev->gc_block_finder =
2809 dev->internal_start_block;
2810
2811 bi = yaffs_get_block_info(dev, dev->gc_block_finder);
2812
2813 pages_used = bi->pages_in_use - bi->soft_del_pages;
2814
2815 if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2816 pages_used < dev->param.chunks_per_block &&
2817 (dev->gc_dirtiest < 1
2818 || pages_used < dev->gc_pages_in_use)
2819 && yaffs_block_ok_for_gc(dev, bi)) {
2820 dev->gc_dirtiest = dev->gc_block_finder;
2821 dev->gc_pages_in_use = pages_used;
2822 }
2823 }
2824
2825 if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
2826 selected = dev->gc_dirtiest;
2827 }
2828
2829 /*
2830	 * If nothing has been selected for a while, try selecting the oldest dirty
2831	 * block because that's gumming up the works.
2832 */
2833
2834 if (!selected && dev->param.is_yaffs2 &&
2835 dev->gc_not_done >= (background ? 10 : 20)) {
2836 yaffs2_find_oldest_dirty_seq(dev);
2837 if (dev->oldest_dirty_block > 0) {
2838 selected = dev->oldest_dirty_block;
2839 dev->gc_dirtiest = selected;
2840 dev->oldest_dirty_gc_count++;
2841 bi = yaffs_get_block_info(dev, selected);
2842 dev->gc_pages_in_use =
2843 bi->pages_in_use - bi->soft_del_pages;
2844 } else {
2845 dev->gc_not_done = 0;
2846 }
2847 }
2848
2849 if (selected) {
2850 yaffs_trace(YAFFS_TRACE_GC,
2851 "GC Selected block %d with %d free, prioritised:%d",
2852 selected,
2853 dev->param.chunks_per_block - dev->gc_pages_in_use,
2854 prioritised);
2855
2856 dev->n_gc_blocks++;
2857 if (background)
2858 dev->bg_gcs++;
2859
2860 dev->gc_dirtiest = 0;
2861 dev->gc_pages_in_use = 0;
2862 dev->gc_not_done = 0;
2863 if (dev->refresh_skip > 0)
2864 dev->refresh_skip--;
2865 } else {
2866 dev->gc_not_done++;
2867 yaffs_trace(YAFFS_TRACE_GC,
2868 "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
2869 dev->gc_block_finder, dev->gc_not_done, threshold,
2870 dev->gc_dirtiest, dev->gc_pages_in_use,
2871 dev->oldest_dirty_block, background ? " bg" : "");
2872 }
2873
2874 return selected;
2875}
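
#if 0
/*
 * Worked example, not from the original yaffs code: the passive-GC
 * threshold arithmetic from yaffs_find_gc_block() above, reproduced
 * standalone for an assumed geometry of 64 chunks per block so the
 * numbers can be checked by hand.
 */
#include <stdio.h>

#define GC_PASSIVE_THRESHOLD	4	/* cf. YAFFS_GC_PASSIVE_THRESHOLD */

static int passive_gc_threshold(int chunks_per_block, int background,
				int gc_not_done)
{
	int max_threshold = background ? chunks_per_block / 2
				       : chunks_per_block / 8;
	int threshold = background ? (gc_not_done + 2) * 2 : 0;

	if (max_threshold < GC_PASSIVE_THRESHOLD)
		max_threshold = GC_PASSIVE_THRESHOLD;
	if (threshold < GC_PASSIVE_THRESHOLD)
		threshold = GC_PASSIVE_THRESHOLD;
	if (threshold > max_threshold)
		threshold = max_threshold;
	return threshold;
}

int main(void)
{
	/* Foreground: max is 64 / 8 = 8 and the raw threshold 0 is clamped up to 4.
	 * Background after 10 fruitless attempts: (10 + 2) * 2 = 24, capped at
	 * 64 / 2 = 32, so blocks with up to 24 pages in use become acceptable.
	 */
	printf("foreground: %d\n", passive_gc_threshold(64, 0, 0));	/* 4 */
	printf("background: %d\n", passive_gc_threshold(64, 1, 10));	/* 24 */
	return 0;
}
#endif
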
2876
2877/* New garbage collector
2878 * If we're very low on erased blocks then we do aggressive garbage collection
2879 * otherwise we do "leisurely" garbage collection.
2880 * Aggressive gc looks further (whole array) and will accept less dirty blocks.
2881 * Passive gc only inspects smaller areas and will only accept dirtier blocks.
2882 *
2883 * The idea is to help clear out space in a more spread-out manner.
2884 * Dunno if it really does anything useful.
2885 */
2886static int yaffs_check_gc(struct yaffs_dev *dev, int background)
2887{
2888 int aggressive = 0;
2889 int gc_ok = YAFFS_OK;
2890 int max_tries = 0;
2891 int min_erased;
2892 int erased_chunks;
2893 int checkpt_block_adjust;
2894
2895 if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
2896 return YAFFS_OK;
2897
2898 if (dev->gc_disable) {
2899 /* Bail out so we don't get recursive gc */
2900 return YAFFS_OK;
2901 }
2902
2903 /* This loop should pass the first time.
2904 * We'll only see looping here if the collection does not increase space.
2905 */
2906
2907 do {
2908 max_tries++;
2909
2910 checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
2911
2912 min_erased =
2913 dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
2914 erased_chunks =
2915 dev->n_erased_blocks * dev->param.chunks_per_block;
2916
2917 /* If we need a block soon then do aggressive gc. */
2918 if (dev->n_erased_blocks < min_erased)
2919 aggressive = 1;
2920 else {
2921 if (!background
2922 && erased_chunks > (dev->n_free_chunks / 4))
2923 break;
2924
2925 if (dev->gc_skip > 20)
2926 dev->gc_skip = 20;
2927 if (erased_chunks < dev->n_free_chunks / 2 ||
2928 dev->gc_skip < 1 || background)
2929 aggressive = 0;
2930 else {
2931 dev->gc_skip--;
2932 break;
2933 }
2934 }
2935
2936 dev->gc_skip = 5;
2937
2938 /* If we don't already have a block being gc'd then see if we should start another */
2939
2940 if (dev->gc_block < 1 && !aggressive) {
2941 dev->gc_block = yaffs2_find_refresh_block(dev);
2942 dev->gc_chunk = 0;
2943 dev->n_clean_ups = 0;
2944 }
2945 if (dev->gc_block < 1) {
2946 dev->gc_block =
2947 yaffs_find_gc_block(dev, aggressive, background);
2948 dev->gc_chunk = 0;
2949 dev->n_clean_ups = 0;
2950 }
2951
2952 if (dev->gc_block > 0) {
2953 dev->all_gcs++;
2954 if (!aggressive)
2955 dev->passive_gc_count++;
2956
2957 yaffs_trace(YAFFS_TRACE_GC,
2958 "yaffs: GC n_erased_blocks %d aggressive %d",
2959 dev->n_erased_blocks, aggressive);
2960
2961 gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
2962 }
2963
2964 if (dev->n_erased_blocks < (dev->param.n_reserved_blocks)
2965 && dev->gc_block > 0) {
2966 yaffs_trace(YAFFS_TRACE_GC,
2967 "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
2968 dev->n_erased_blocks, max_tries,
2969 dev->gc_block);
2970 }
2971 } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
2972 (dev->gc_block > 0) && (max_tries < 2));
2973
2974 return aggressive ? gc_ok : YAFFS_OK;
2975}
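
#if 0
/*
 * Worked example, not from the original yaffs code: the
 * "aggressive or passive?" decision at the top of yaffs_check_gc(),
 * with assumed numbers.  The device is treated as short of space when
 * the erased-block count drops below the reserve plus the blocks a
 * checkpoint would need, plus one.
 */
#include <stdio.h>

int main(void)
{
	int n_reserved_blocks = 5;	/* cf. dev->param.n_reserved_blocks */
	int checkpt_blocks = 2;		/* cf. yaffs_calc_checkpt_blocks_required() */
	int n_erased_blocks = 6;

	int min_erased = n_reserved_blocks + checkpt_blocks + 1;	/* 8 */

	if (n_erased_blocks < min_erased)
		printf("aggressive gc: only %d erased blocks, want %d\n",
		       n_erased_blocks, min_erased);
	else
		printf("passive gc (or no gc at all) will do\n");
	return 0;
}
#endif
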
2976
2977/*
2978 * yaffs_bg_gc()
2979 * Garbage collects. Intended to be called from a background thread.
2980 * Returns non-zero if at least half the free chunks are erased.
2981 */
2982int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
2983{
2984 int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
2985
2986 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
2987
2988 yaffs_check_gc(dev, 1);
2989 return erased_chunks > dev->n_free_chunks / 2;
2990}
2991
2992/*-------------------- Data file manipulation -----------------*/
2993
2994static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
2995{
2996 int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
2997
2998 if (nand_chunk >= 0)
2999 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
3000 buffer, NULL);
3001 else {
3002 yaffs_trace(YAFFS_TRACE_NANDACCESS,
3003 "Chunk %d not found zero instead",
3004 nand_chunk);
3005 /* get sane (zero) data if you read a hole */
3006 memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
3007 return 0;
3008 }
3009
3010}
3011
3012void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
3013 int lyn)
3014{
3015 int block;
3016 int page;
3017 struct yaffs_ext_tags tags;
3018 struct yaffs_block_info *bi;
3019
3020 if (chunk_id <= 0)
3021 return;
3022
3023 dev->n_deletions++;
3024 block = chunk_id / dev->param.chunks_per_block;
3025 page = chunk_id % dev->param.chunks_per_block;
3026
3027 if (!yaffs_check_chunk_bit(dev, block, page))
3028 yaffs_trace(YAFFS_TRACE_VERIFY,
3029 "Deleting invalid chunk %d", chunk_id);
3030
3031 bi = yaffs_get_block_info(dev, block);
3032
3033 yaffs2_update_oldest_dirty_seq(dev, block, bi);
3034
3035 yaffs_trace(YAFFS_TRACE_DELETION,
3036 "line %d delete of chunk %d",
3037 lyn, chunk_id);
3038
3039 if (!dev->param.is_yaffs2 && mark_flash &&
3040 bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
3041
3042 yaffs_init_tags(&tags);
3043
3044 tags.is_deleted = 1;
3045
3046 yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
3047 yaffs_handle_chunk_update(dev, chunk_id, &tags);
3048 } else {
3049 dev->n_unmarked_deletions++;
3050 }
3051
3052 /* Pull out of the management area.
3053 * If the whole block became dirty, this will kick off an erasure.
3054 */
3055 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
3056 bi->block_state == YAFFS_BLOCK_STATE_FULL ||
3057 bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
3058 bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
3059 dev->n_free_chunks++;
3060
3061 yaffs_clear_chunk_bit(dev, block, page);
3062
3063 bi->pages_in_use--;
3064
3065 if (bi->pages_in_use == 0 &&
3066 !bi->has_shrink_hdr &&
3067 bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
3068 bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
3069 yaffs_block_became_dirty(dev, block);
3070 }
3071
3072 }
3073
3074}
3075
3076static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
3077 const u8 * buffer, int n_bytes, int use_reserve)
3078{
3079	/* Find the old chunk. Need to do this to get the serial number.
3080 * Write new one and patch into tree.
3081 * Invalidate old tags.
3082 */
3083
3084 int prev_chunk_id;
3085 struct yaffs_ext_tags prev_tags;
3086
3087 int new_chunk_id;
3088 struct yaffs_ext_tags new_tags;
3089
3090 struct yaffs_dev *dev = in->my_dev;
3091
3092 yaffs_check_gc(dev, 0);
3093
3094 /* Get the previous chunk at this location in the file if it exists.
3095 * If it does not exist then put a zero into the tree. This creates
3096 * the tnode now, rather than later when it is harder to clean up.
3097 */
3098 prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
3099 if (prev_chunk_id < 1 &&
3100 !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
3101 return 0;
3102
3103 /* Set up new tags */
3104 yaffs_init_tags(&new_tags);
3105
3106 new_tags.chunk_id = inode_chunk;
3107 new_tags.obj_id = in->obj_id;
3108 new_tags.serial_number =
3109 (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
3110 new_tags.n_bytes = n_bytes;
3111
3112 if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
3113 yaffs_trace(YAFFS_TRACE_ERROR,
3114 "Writing %d bytes to chunk!!!!!!!!!",
3115 n_bytes);
3116 YBUG();
3117 }
3118
3119 new_chunk_id =
3120 yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
3121
3122 if (new_chunk_id > 0) {
3123 yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
3124
3125 if (prev_chunk_id > 0)
3126 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3127
3128 yaffs_verify_file_sane(in);
3129 }
3130 return new_chunk_id;
3131
3132}
3133
3134
3135
3136static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
3137 const YCHAR * name, const void *value, int size,
3138 int flags)
3139{
3140 struct yaffs_xattr_mod xmod;
3141
3142 int result;
3143
3144 xmod.set = set;
3145 xmod.name = name;
3146 xmod.data = value;
3147 xmod.size = size;
3148 xmod.flags = flags;
3149 xmod.result = -ENOSPC;
3150
3151 result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
3152
3153 if (result > 0)
3154 return xmod.result;
3155 else
3156 return -ENOSPC;
3157}
3158
3159static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
3160 struct yaffs_xattr_mod *xmod)
3161{
3162 int retval = 0;
3163 int x_offs = sizeof(struct yaffs_obj_hdr);
3164 struct yaffs_dev *dev = obj->my_dev;
3165 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3166
3167 char *x_buffer = buffer + x_offs;
3168
3169 if (xmod->set)
3170 retval =
3171 nval_set(x_buffer, x_size, xmod->name, xmod->data,
3172 xmod->size, xmod->flags);
3173 else
3174 retval = nval_del(x_buffer, x_size, xmod->name);
3175
3176 obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3177 obj->xattr_known = 1;
3178
3179 xmod->result = retval;
3180
3181 return retval;
3182}
3183
3184static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR * name,
3185 void *value, int size)
3186{
3187 char *buffer = NULL;
3188 int result;
3189 struct yaffs_ext_tags tags;
3190 struct yaffs_dev *dev = obj->my_dev;
3191 int x_offs = sizeof(struct yaffs_obj_hdr);
3192 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3193
3194 char *x_buffer;
3195
3196 int retval = 0;
3197
3198 if (obj->hdr_chunk < 1)
3199 return -ENODATA;
3200
3201 /* If we know that the object has no xattribs then don't do all the
3202 * reading and parsing.
3203 */
3204 if (obj->xattr_known && !obj->has_xattr) {
3205 if (name)
3206 return -ENODATA;
3207 else
3208 return 0;
3209 }
3210
3211 buffer = (char *)yaffs_get_temp_buffer(dev, __LINE__);
3212 if (!buffer)
3213 return -ENOMEM;
3214
3215 result =
3216 yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
3217
3218 if (result != YAFFS_OK)
3219 retval = -ENOENT;
3220 else {
3221 x_buffer = buffer + x_offs;
3222
3223 if (!obj->xattr_known) {
3224 obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3225 obj->xattr_known = 1;
3226 }
3227
3228 if (name)
3229 retval = nval_get(x_buffer, x_size, name, value, size);
3230 else
3231 retval = nval_list(x_buffer, x_size, value, size);
3232 }
3233 yaffs_release_temp_buffer(dev, (u8 *) buffer, __LINE__);
3234 return retval;
3235}
3236
3237int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
3238 const void *value, int size, int flags)
3239{
3240 return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
3241}
3242
3243int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
3244{
3245 return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
3246}
3247
3248int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
3249 int size)
3250{
3251 return yaffs_do_xattrib_fetch(obj, name, value, size);
3252}
3253
3254int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
3255{
3256 return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
3257}
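
#if 0
/*
 * Illustrative sketch, not from the original yaffs code: one plausible
 * calling sequence for the xattrib helpers above.  The attribute name,
 * value and buffer sizes are made up, and the return-value conventions
 * are only hinted at in the comments.
 */
static void example_xattr_usage(struct yaffs_obj *obj)
{
	char value[16];
	char names[64];
	int result;

	/* Store an attribute (flags as for the usual xattr set calls) */
	result = yaffs_set_xattrib(obj, _Y("user.colour"), "blue", 4, 0);
	if (result < 0)
		return;		/* e.g. no space for a new object header */

	/* Read it back; a negative result such as -ENODATA means absent */
	result = yaffs_get_xattrib(obj, _Y("user.colour"),
				   value, sizeof(value));

	/* Enumerate the attribute names into a buffer */
	result = yaffs_list_xattrib(obj, names, sizeof(names));
	(void)result;

	/* And remove it again */
	yaffs_remove_xattrib(obj, _Y("user.colour"));
}
#endif
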
3258
3259static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
3260{
3261 u8 *chunk_data;
3262 struct yaffs_obj_hdr *oh;
3263 struct yaffs_dev *dev;
3264 struct yaffs_ext_tags tags;
3265 int result;
3266 int alloc_failed = 0;
3267
3268 if (!in)
3269 return;
3270
3271 dev = in->my_dev;
3272
3273 if (in->lazy_loaded && in->hdr_chunk > 0) {
3274 in->lazy_loaded = 0;
3275 chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
3276
3277 result =
3278 yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, chunk_data,
3279 &tags);
3280 oh = (struct yaffs_obj_hdr *)chunk_data;
3281
3282 in->yst_mode = oh->yst_mode;
3283 yaffs_load_attribs(in, oh);
3284 yaffs_set_obj_name_from_oh(in, oh);
3285
3286 if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
3287 in->variant.symlink_variant.alias =
3288 yaffs_clone_str(oh->alias);
3289 if (!in->variant.symlink_variant.alias)
3290 alloc_failed = 1; /* Not returned to caller */
3291 }
3292
3293 yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
3294 }
3295}
3296
3297static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR * name,
3298 const YCHAR * oh_name, int buff_size)
3299{
3300#ifdef CONFIG_YAFFS_AUTO_UNICODE
3301 if (dev->param.auto_unicode) {
3302 if (*oh_name) {
3303 /* It is an ASCII name, do an ASCII to
3304 * unicode conversion */
3305 const char *ascii_oh_name = (const char *)oh_name;
3306 int n = buff_size - 1;
3307 while (n > 0 && *ascii_oh_name) {
3308 *name = *ascii_oh_name;
3309 name++;
3310 ascii_oh_name++;
3311 n--;
3312 }
3313 } else {
3314 strncpy(name, oh_name + 1, buff_size - 1);
3315 }
3316 } else {
3317#else
3318 {
3319#endif
3320 strncpy(name, oh_name, buff_size - 1);
3321 }
3322}
3323
3324static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR * oh_name,
3325 const YCHAR * name)
3326{
3327#ifdef CONFIG_YAFFS_AUTO_UNICODE
3328
3329 int is_ascii;
3330 YCHAR *w;
3331
3332 if (dev->param.auto_unicode) {
3333
3334 is_ascii = 1;
3335 w = name;
3336
3337 /* Figure out if the name will fit in ascii character set */
3338 while (is_ascii && *w) {
3339 if ((*w) & 0xff00)
3340 is_ascii = 0;
3341 w++;
3342 }
3343
3344 if (is_ascii) {
3345 /* It is an ASCII name, so do a unicode to ascii conversion */
3346 char *ascii_oh_name = (char *)oh_name;
3347 int n = YAFFS_MAX_NAME_LENGTH - 1;
3348 while (n > 0 && *name) {
3349 *ascii_oh_name = *name;
3350 name++;
3351 ascii_oh_name++;
3352 n--;
3353 }
3354 } else {
3355 /* It is a unicode name, so save starting at the second YCHAR */
3356 *oh_name = 0;
3357 strncpy(oh_name + 1, name,
3358 YAFFS_MAX_NAME_LENGTH - 2);
3359 }
3360 } else {
3361#else
3362 {
3363#endif
3364 strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
3365 }
3366
3367}
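
#if 0
/*
 * Illustrative sketch, not from the original yaffs code: the auto-unicode
 * packing convention used by yaffs_load_oh_from_name() and
 * yaffs_load_name_from_oh() above, restated with plain wchar_t instead of
 * YCHAR.  An all-ASCII name is stored as narrow characters from slot 0;
 * a genuine unicode name is flagged by a leading 0 and stored from the
 * second wide character onward.  Names and limits here are assumptions.
 */
#include <wchar.h>

static int name_is_ascii(const wchar_t *name)
{
	while (*name) {
		if (*name & 0xff00)	/* cf. the 0xff00 test above */
			return 0;
		name++;
	}
	return 1;
}

static void pack_name(wchar_t *oh_name, int max_name_len,
		      const wchar_t *name)
{
	if (name_is_ascii(name)) {
		/* Store as narrow chars overlaying the wide buffer */
		char *ascii = (char *)oh_name;
		int n = max_name_len - 1;

		while (n > 0 && *name) {
			*ascii++ = (char)*name++;
			n--;
		}
		*ascii = 0;
	} else {
		oh_name[0] = 0;		/* leading 0 marks a unicode name */
		wcsncpy(oh_name + 1, name, max_name_len - 2);
		oh_name[max_name_len - 1] = 0;
	}
}
#endif
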
3368
3369/* yaffs_update_oh() updates the object header on NAND for an object.
3370 * If name is not NULL, then that new name is used.
3371 */
3372int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name, int force,
3373 int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
3374{
3375
3376 struct yaffs_block_info *bi;
3377
3378 struct yaffs_dev *dev = in->my_dev;
3379
3380 int prev_chunk_id;
3381 int ret_val = 0;
3382 int result = 0;
3383
3384 int new_chunk_id;
3385 struct yaffs_ext_tags new_tags;
3386 struct yaffs_ext_tags old_tags;
3387 const YCHAR *alias = NULL;
3388
3389 u8 *buffer = NULL;
3390 YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
3391
3392 struct yaffs_obj_hdr *oh = NULL;
3393
3394 strcpy(old_name, _Y("silly old name"));
3395
3396 if (!in->fake || in == dev->root_dir ||
3397 force || xmod) {
3398
3399 yaffs_check_gc(dev, 0);
3400 yaffs_check_obj_details_loaded(in);
3401
3402 buffer = yaffs_get_temp_buffer(in->my_dev, __LINE__);
3403 oh = (struct yaffs_obj_hdr *)buffer;
3404
3405 prev_chunk_id = in->hdr_chunk;
3406
3407 if (prev_chunk_id > 0) {
3408 result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
3409 buffer, &old_tags);
3410
3411 yaffs_verify_oh(in, oh, &old_tags, 0);
3412
3413 memcpy(old_name, oh->name, sizeof(oh->name));
3414 memset(buffer, 0xFF, sizeof(struct yaffs_obj_hdr));
3415 } else {
3416 memset(buffer, 0xFF, dev->data_bytes_per_chunk);
3417 }
3418
3419 oh->type = in->variant_type;
3420 oh->yst_mode = in->yst_mode;
3421 oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
3422
3423 yaffs_load_attribs_oh(oh, in);
3424
3425 if (in->parent)
3426 oh->parent_obj_id = in->parent->obj_id;
3427 else
3428 oh->parent_obj_id = 0;
3429
3430 if (name && *name) {
3431 memset(oh->name, 0, sizeof(oh->name));
3432 yaffs_load_oh_from_name(dev, oh->name, name);
3433 } else if (prev_chunk_id > 0) {
3434 memcpy(oh->name, old_name, sizeof(oh->name));
3435 } else {
3436 memset(oh->name, 0, sizeof(oh->name));
3437 }
3438
3439 oh->is_shrink = is_shrink;
3440
3441 switch (in->variant_type) {
3442 case YAFFS_OBJECT_TYPE_UNKNOWN:
3443 /* Should not happen */
3444 break;
3445 case YAFFS_OBJECT_TYPE_FILE:
3446 oh->file_size =
3447 (oh->parent_obj_id == YAFFS_OBJECTID_DELETED
3448 || oh->parent_obj_id ==
3449 YAFFS_OBJECTID_UNLINKED) ? 0 : in->
3450 variant.file_variant.file_size;
3451 break;
3452 case YAFFS_OBJECT_TYPE_HARDLINK:
3453 oh->equiv_id = in->variant.hardlink_variant.equiv_id;
3454 break;
3455 case YAFFS_OBJECT_TYPE_SPECIAL:
3456 /* Do nothing */
3457 break;
3458 case YAFFS_OBJECT_TYPE_DIRECTORY:
3459 /* Do nothing */
3460 break;
3461 case YAFFS_OBJECT_TYPE_SYMLINK:
3462 alias = in->variant.symlink_variant.alias;
3463 if (!alias)
3464 alias = _Y("no alias");
3465 strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
3466 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
3467 break;
3468 }
3469
3470 /* process any xattrib modifications */
3471 if (xmod)
3472 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
3473
3474 /* Tags */
3475 yaffs_init_tags(&new_tags);
3476 in->serial++;
3477 new_tags.chunk_id = 0;
3478 new_tags.obj_id = in->obj_id;
3479 new_tags.serial_number = in->serial;
3480
3481 /* Add extra info for file header */
3482
3483 new_tags.extra_available = 1;
3484 new_tags.extra_parent_id = oh->parent_obj_id;
3485 new_tags.extra_length = oh->file_size;
3486 new_tags.extra_is_shrink = oh->is_shrink;
3487 new_tags.extra_equiv_id = oh->equiv_id;
3488 new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
3489 new_tags.extra_obj_type = in->variant_type;
3490
3491 yaffs_verify_oh(in, oh, &new_tags, 1);
3492
3493 /* Create new chunk in NAND */
3494 new_chunk_id =
3495 yaffs_write_new_chunk(dev, buffer, &new_tags,
3496 (prev_chunk_id > 0) ? 1 : 0);
3497
3498 if (new_chunk_id >= 0) {
3499
3500 in->hdr_chunk = new_chunk_id;
3501
3502 if (prev_chunk_id > 0) {
3503 yaffs_chunk_del(dev, prev_chunk_id, 1,
3504 __LINE__);
3505 }
3506
3507 if (!yaffs_obj_cache_dirty(in))
3508 in->dirty = 0;
3509
3510 /* If this was a shrink, then mark the block that the chunk lives on */
3511 if (is_shrink) {
3512 bi = yaffs_get_block_info(in->my_dev,
3513 new_chunk_id /
3514 in->my_dev->param.
3515 chunks_per_block);
3516 bi->has_shrink_hdr = 1;
3517 }
3518
3519 }
3520
3521 ret_val = new_chunk_id;
3522
3523 }
3524
3525 if (buffer)
3526 yaffs_release_temp_buffer(dev, buffer, __LINE__);
3527
3528 return ret_val;
3529}
3530
3531/*--------------------- File read/write ------------------------
3532 * Read and write have very similar structures.
3533 * In general the read/write has three parts to it
3534 * An incomplete chunk to start with (if the read/write is not chunk-aligned)
3535 * Some complete chunks
3536 * An incomplete chunk to end off with
3537 *
3538 * Curve-balls: the first chunk might also be the last chunk.
3539 */
3540
3541int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
3542{
3543
3544 int chunk;
3545 u32 start;
3546 int n_copy;
3547 int n = n_bytes;
3548 int n_done = 0;
3549 struct yaffs_cache *cache;
3550
3551 struct yaffs_dev *dev;
3552
3553 dev = in->my_dev;
3554
3555 while (n > 0) {
3556 /* chunk = offset / dev->data_bytes_per_chunk + 1; */
3557 /* start = offset % dev->data_bytes_per_chunk; */
3558 yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3559 chunk++;
3560
3561 /* OK now check for the curveball where the start and end are in
3562 * the same chunk.
3563 */
3564 if ((start + n) < dev->data_bytes_per_chunk)
3565 n_copy = n;
3566 else
3567 n_copy = dev->data_bytes_per_chunk - start;
3568
3569 cache = yaffs_find_chunk_cache(in, chunk);
3570
3571 /* If the chunk is already in the cache or it is less than a whole chunk
3572 * or we're using inband tags then use the cache (if there is caching)
3573 * else bypass the cache.
3574 */
3575 if (cache || n_copy != dev->data_bytes_per_chunk
3576 || dev->param.inband_tags) {
3577 if (dev->param.n_caches > 0) {
3578
3579 /* If we can't find the data in the cache, then load it up. */
3580
3581 if (!cache) {
3582 cache =
3583 yaffs_grab_chunk_cache(in->my_dev);
3584 cache->object = in;
3585 cache->chunk_id = chunk;
3586 cache->dirty = 0;
3587 cache->locked = 0;
3588 yaffs_rd_data_obj(in, chunk,
3589 cache->data);
3590 cache->n_bytes = 0;
3591 }
3592
3593 yaffs_use_cache(dev, cache, 0);
3594
3595 cache->locked = 1;
3596
3597 memcpy(buffer, &cache->data[start], n_copy);
3598
3599 cache->locked = 0;
3600 } else {
3601 /* Read into the local buffer then copy.. */
3602
3603 u8 *local_buffer =
3604 yaffs_get_temp_buffer(dev, __LINE__);
3605 yaffs_rd_data_obj(in, chunk, local_buffer);
3606
3607 memcpy(buffer, &local_buffer[start], n_copy);
3608
3609 yaffs_release_temp_buffer(dev, local_buffer,
3610 __LINE__);
3611 }
3612
3613 } else {
3614
3615 /* A full chunk. Read directly into the supplied buffer. */
3616 yaffs_rd_data_obj(in, chunk, buffer);
3617
3618 }
3619
3620 n -= n_copy;
3621 offset += n_copy;
3622 buffer += n_copy;
3623 n_done += n_copy;
3624
3625 }
3626
3627 return n_done;
3628}
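
#if 0
/*
 * Worked example, not from the original yaffs code: the chunk/offset
 * split performed by yaffs_file_rd() above, reproduced standalone for
 * an assumed 2048-byte chunk so the "incomplete head, whole chunks,
 * incomplete tail" decomposition can be checked by hand.
 */
#include <stdio.h>

#define CHUNK_BYTES 2048

int main(void)
{
	unsigned long long offset = 3000;	/* read starts mid-chunk */
	int n = 5000;				/* and spans three chunks */

	while (n > 0) {
		int chunk = (int)(offset / CHUNK_BYTES) + 1;	/* 1-based, as in the file code */
		int start = (int)(offset % CHUNK_BYTES);
		int n_copy = (start + n < CHUNK_BYTES) ? n : CHUNK_BYTES - start;

		printf("chunk %d, start %d, copy %d bytes\n", chunk, start, n_copy);

		n -= n_copy;
		offset += n_copy;
	}
	/* Expected output:
	 *   chunk 2, start 952, copy 1096 bytes   (incomplete head)
	 *   chunk 3, start 0, copy 2048 bytes     (whole chunk)
	 *   chunk 4, start 0, copy 1856 bytes     (incomplete tail)
	 */
	return 0;
}
#endif
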
3629
3630int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
3631 int n_bytes, int write_trhrough)
3632{
3633
3634 int chunk;
3635 u32 start;
3636 int n_copy;
3637 int n = n_bytes;
3638 int n_done = 0;
3639 int n_writeback;
3640 int start_write = offset;
3641 int chunk_written = 0;
3642 u32 n_bytes_read;
3643 u32 chunk_start;
3644
3645 struct yaffs_dev *dev;
3646
3647 dev = in->my_dev;
3648
3649 while (n > 0 && chunk_written >= 0) {
3650 yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3651
3652 if (chunk * dev->data_bytes_per_chunk + start != offset ||
3653 start >= dev->data_bytes_per_chunk) {
3654 yaffs_trace(YAFFS_TRACE_ERROR,
3655 "AddrToChunk of offset %d gives chunk %d start %d",
3656 (int)offset, chunk, start);
3657 }
3658 chunk++; /* File pos to chunk in file offset */
3659
3660 /* OK now check for the curveball where the start and end are in
3661 * the same chunk.
3662 */
3663
3664 if ((start + n) < dev->data_bytes_per_chunk) {
3665 n_copy = n;
3666
3667 /* Now folks, to calculate how many bytes to write back....
3668			 * If we're overwriting and not writing to the end of the file then
3669 * we need to write back as much as was there before.
3670 */
3671
3672 chunk_start = ((chunk - 1) * dev->data_bytes_per_chunk);
3673
3674 if (chunk_start > in->variant.file_variant.file_size)
3675 n_bytes_read = 0; /* Past end of file */
3676 else
3677 n_bytes_read =
3678 in->variant.file_variant.file_size -
3679 chunk_start;
3680
3681 if (n_bytes_read > dev->data_bytes_per_chunk)
3682 n_bytes_read = dev->data_bytes_per_chunk;
3683
3684 n_writeback =
3685 (n_bytes_read >
3686 (start + n)) ? n_bytes_read : (start + n);
3687
3688 if (n_writeback < 0
3689 || n_writeback > dev->data_bytes_per_chunk)
3690 YBUG();
3691
3692 } else {
3693 n_copy = dev->data_bytes_per_chunk - start;
3694 n_writeback = dev->data_bytes_per_chunk;
3695 }
3696
3697 if (n_copy != dev->data_bytes_per_chunk
3698 || dev->param.inband_tags) {
3699 /* An incomplete start or end chunk (or maybe both start and end chunk),
3700 * or we're using inband tags, so we want to use the cache buffers.
3701 */
3702 if (dev->param.n_caches > 0) {
3703 struct yaffs_cache *cache;
3704 /* If we can't find the data in the cache, then load the cache */
3705 cache = yaffs_find_chunk_cache(in, chunk);
3706
3707 if (!cache
3708 && yaffs_check_alloc_available(dev, 1)) {
3709 cache = yaffs_grab_chunk_cache(dev);
3710 cache->object = in;
3711 cache->chunk_id = chunk;
3712 cache->dirty = 0;
3713 cache->locked = 0;
3714 yaffs_rd_data_obj(in, chunk,
3715 cache->data);
3716 } else if (cache &&
3717 !cache->dirty &&
3718 !yaffs_check_alloc_available(dev,
3719 1)) {
3720 /* Drop the cache if it was a read cache item and
3721 * no space check has been made for it.
3722 */
3723 cache = NULL;
3724 }
3725
3726 if (cache) {
3727 yaffs_use_cache(dev, cache, 1);
3728 cache->locked = 1;
3729
3730 memcpy(&cache->data[start], buffer,
3731 n_copy);
3732
3733 cache->locked = 0;
3734 cache->n_bytes = n_writeback;
3735
3736 if (write_trhrough) {
3737 chunk_written =
3738 yaffs_wr_data_obj
3739 (cache->object,
3740 cache->chunk_id,
3741 cache->data,
3742 cache->n_bytes, 1);
3743 cache->dirty = 0;
3744 }
3745
3746 } else {
3747 chunk_written = -1; /* fail the write */
3748 }
3749 } else {
3750				/* An incomplete start or end chunk (or maybe both start and end chunk).
3751				 * Read into the local buffer, copy the new data over, then write back.
3752 */
3753
3754 u8 *local_buffer =
3755 yaffs_get_temp_buffer(dev, __LINE__);
3756
3757 yaffs_rd_data_obj(in, chunk, local_buffer);
3758
3759 memcpy(&local_buffer[start], buffer, n_copy);
3760
3761 chunk_written =
3762 yaffs_wr_data_obj(in, chunk,
3763 local_buffer,
3764 n_writeback, 0);
3765
3766 yaffs_release_temp_buffer(dev, local_buffer,
3767 __LINE__);
3768
3769 }
3770
3771 } else {
3772 /* A full chunk. Write directly from the supplied buffer. */
3773
3774 chunk_written =
3775 yaffs_wr_data_obj(in, chunk, buffer,
3776 dev->data_bytes_per_chunk, 0);
3777
3778 /* Since we've overwritten the cached data, we better invalidate it. */
3779 yaffs_invalidate_chunk_cache(in, chunk);
3780 }
3781
3782 if (chunk_written >= 0) {
3783 n -= n_copy;
3784 offset += n_copy;
3785 buffer += n_copy;
3786 n_done += n_copy;
3787 }
3788
3789 }
3790
3791 /* Update file object */
3792
3793 if ((start_write + n_done) > in->variant.file_variant.file_size)
3794 in->variant.file_variant.file_size = (start_write + n_done);
3795
3796 in->dirty = 1;
3797
3798 return n_done;
3799}
3800
3801int yaffs_wr_file(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
3802 int n_bytes, int write_trhrough)
3803{
3804 yaffs2_handle_hole(in, offset);
3805 return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_trhrough);
3806}
3807
3808/* ---------------------- File resizing stuff ------------------ */
3809
3810static void yaffs_prune_chunks(struct yaffs_obj *in, int new_size)
3811{
3812
3813 struct yaffs_dev *dev = in->my_dev;
3814 int old_size = in->variant.file_variant.file_size;
3815
3816 int last_del = 1 + (old_size - 1) / dev->data_bytes_per_chunk;
3817
3818 int start_del = 1 + (new_size + dev->data_bytes_per_chunk - 1) /
3819 dev->data_bytes_per_chunk;
3820 int i;
3821 int chunk_id;
3822
3823 /* Delete backwards so that we don't end up with holes if
3824 * power is lost part-way through the operation.
3825 */
3826 for (i = last_del; i >= start_del; i--) {
3827 /* NB this could be optimised somewhat,
3828 * eg. could retrieve the tags and write them without
3829 * using yaffs_chunk_del
3830 */
3831
3832 chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
3833 if (chunk_id > 0) {
3834 if (chunk_id <
3835 (dev->internal_start_block *
3836 dev->param.chunks_per_block)
3837 || chunk_id >=
3838 ((dev->internal_end_block +
3839 1) * dev->param.chunks_per_block)) {
3840 yaffs_trace(YAFFS_TRACE_ALWAYS,
3841 "Found daft chunk_id %d for %d",
3842 chunk_id, i);
3843 } else {
3844 in->n_data_chunks--;
3845 yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
3846 }
3847 }
3848 }
3849
3850}
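
#if 0
/*
 * Worked example, not from the original yaffs code: the chunk-range
 * arithmetic in yaffs_prune_chunks() above, reproduced standalone for
 * an assumed 2048-byte chunk so the deleted range can be checked by hand.
 */
#include <stdio.h>

#define CHUNK_BYTES 2048

int main(void)
{
	int old_size = 10000;	/* data occupies file chunks 1..5 */
	int new_size = 3000;	/* chunk 2 remains partially in use */

	int last_del = 1 + (old_size - 1) / CHUNK_BYTES;		/* 5 */
	int start_del = 1 + (new_size + CHUNK_BYTES - 1) / CHUNK_BYTES;	/* 3 */
	int i;

	/* Chunks 5, 4 and 3 are deleted (backwards, as above); chunk 2
	 * keeps the partial tail, which yaffs_resize_file_down() then
	 * rewrites zero-padded.
	 */
	for (i = last_del; i >= start_del; i--)
		printf("delete file chunk %d\n", i);
	return 0;
}
#endif
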
3851
3852void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
3853{
3854 int new_full;
3855 u32 new_partial;
3856 struct yaffs_dev *dev = obj->my_dev;
3857
3858 yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
3859
3860 yaffs_prune_chunks(obj, new_size);
3861
3862 if (new_partial != 0) {
3863 int last_chunk = 1 + new_full;
3864 u8 *local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
3865
3866 /* Rewrite the last chunk with its new size and zero pad */
3867 yaffs_rd_data_obj(obj, last_chunk, local_buffer);
3868 memset(local_buffer + new_partial, 0,
3869 dev->data_bytes_per_chunk - new_partial);
3870
3871 yaffs_wr_data_obj(obj, last_chunk, local_buffer,
3872 new_partial, 1);
3873
3874 yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
3875 }
3876
3877 obj->variant.file_variant.file_size = new_size;
3878
3879 yaffs_prune_tree(dev, &obj->variant.file_variant);
3880}
3881
3882int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
3883{
3884 struct yaffs_dev *dev = in->my_dev;
3885 int old_size = in->variant.file_variant.file_size;
3886
3887 yaffs_flush_file_cache(in);
3888 yaffs_invalidate_whole_cache(in);
3889
3890 yaffs_check_gc(dev, 0);
3891
3892 if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
3893 return YAFFS_FAIL;
3894
3895 if (new_size == old_size)
3896 return YAFFS_OK;
3897
3898 if (new_size > old_size) {
3899 yaffs2_handle_hole(in, new_size);
3900 in->variant.file_variant.file_size = new_size;
3901 } else {
3902 /* new_size < old_size */
3903 yaffs_resize_file_down(in, new_size);
3904 }
3905
3906	/* Write a new object header to reflect the resize and
3907	 * show that we've shrunk the file, if need be.
3908 * Do this only if the file is not in the deleted directories
3909 * and is not shadowed.
3910 */
3911 if (in->parent &&
3912 !in->is_shadowed &&
3913 in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
3914 in->parent->obj_id != YAFFS_OBJECTID_DELETED)
3915 yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
3916
3917 return YAFFS_OK;
3918}
3919
3920int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
3921{
3922 int ret_val;
3923 if (in->dirty) {
3924 yaffs_flush_file_cache(in);
3925 if (data_sync) /* Only sync data */
3926 ret_val = YAFFS_OK;
3927 else {
3928 if (update_time)
3929 yaffs_load_current_time(in, 0, 0);
3930
3931 ret_val = (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >=
3932 0) ? YAFFS_OK : YAFFS_FAIL;
3933 }
3934 } else {
3935 ret_val = YAFFS_OK;
3936 }
3937
3938 return ret_val;
3939
3940}
3941
3942
3943/* yaffs_del_file deletes the whole file data
3944 * and the inode associated with the file.
3945 * It does not delete the links associated with the file.
3946 */
3947static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
3948{
3949
3950 int ret_val;
3951 int del_now = 0;
3952 struct yaffs_dev *dev = in->my_dev;
3953
3954 if (!in->my_inode)
3955 del_now = 1;
3956
3957 if (del_now) {
3958 ret_val =
3959 yaffs_change_obj_name(in, in->my_dev->del_dir,
3960 _Y("deleted"), 0, 0);
3961 yaffs_trace(YAFFS_TRACE_TRACING,
3962 "yaffs: immediate deletion of file %d",
3963 in->obj_id);
3964 in->deleted = 1;
3965 in->my_dev->n_deleted_files++;
3966 if (dev->param.disable_soft_del || dev->param.is_yaffs2)
3967 yaffs_resize_file(in, 0);
3968 yaffs_soft_del_file(in);
3969 } else {
3970 ret_val =
3971 yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
3972 _Y("unlinked"), 0, 0);
3973 }
3974
3975 return ret_val;
3976}
3977
3978int yaffs_del_file(struct yaffs_obj *in)
3979{
3980 int ret_val = YAFFS_OK;
3981 int deleted; /* Need to cache value on stack if in is freed */
3982 struct yaffs_dev *dev = in->my_dev;
3983
3984 if (dev->param.disable_soft_del || dev->param.is_yaffs2)
3985 yaffs_resize_file(in, 0);
3986
3987 if (in->n_data_chunks > 0) {
3988 /* Use soft deletion if there is data in the file.
3989 * That won't be the case if it has been resized to zero.
3990 */
3991 if (!in->unlinked)
3992 ret_val = yaffs_unlink_file_if_needed(in);
3993
3994 deleted = in->deleted;
3995
3996 if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
3997 in->deleted = 1;
3998 deleted = 1;
3999 in->my_dev->n_deleted_files++;
4000 yaffs_soft_del_file(in);
4001 }
4002 return deleted ? YAFFS_OK : YAFFS_FAIL;
4003 } else {
4004 /* The file has no data chunks so we toss it immediately */
4005 yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
4006 in->variant.file_variant.top = NULL;
4007 yaffs_generic_obj_del(in);
4008
4009 return YAFFS_OK;
4010 }
4011}
4012
4013int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
4014{
4015 return (obj &&
4016 obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
4017 !(list_empty(&obj->variant.dir_variant.children));
4018}
4019
4020static int yaffs_del_dir(struct yaffs_obj *obj)
4021{
4022 /* First check that the directory is empty. */
4023 if (yaffs_is_non_empty_dir(obj))
4024 return YAFFS_FAIL;
4025
4026 return yaffs_generic_obj_del(obj);
4027}
4028
4029static int yaffs_del_symlink(struct yaffs_obj *in)
4030{
4031 if (in->variant.symlink_variant.alias)
4032 kfree(in->variant.symlink_variant.alias);
4033 in->variant.symlink_variant.alias = NULL;
4034
4035 return yaffs_generic_obj_del(in);
4036}
4037
4038static int yaffs_del_link(struct yaffs_obj *in)
4039{
4040	/* remove this hardlink from the list associated with the equivalent
4041 * object
4042 */
4043 list_del_init(&in->hard_links);
4044 return yaffs_generic_obj_del(in);
4045}
4046
4047int yaffs_del_obj(struct yaffs_obj *obj)
4048{
4049 int ret_val = -1;
4050 switch (obj->variant_type) {
4051 case YAFFS_OBJECT_TYPE_FILE:
4052 ret_val = yaffs_del_file(obj);
4053 break;
4054 case YAFFS_OBJECT_TYPE_DIRECTORY:
4055 if (!list_empty(&obj->variant.dir_variant.dirty)) {
4056 yaffs_trace(YAFFS_TRACE_BACKGROUND,
4057 "Remove object %d from dirty directories",
4058 obj->obj_id);
4059 list_del_init(&obj->variant.dir_variant.dirty);
4060 }
4061 return yaffs_del_dir(obj);
4062 break;
4063 case YAFFS_OBJECT_TYPE_SYMLINK:
4064 ret_val = yaffs_del_symlink(obj);
4065 break;
4066 case YAFFS_OBJECT_TYPE_HARDLINK:
4067 ret_val = yaffs_del_link(obj);
4068 break;
4069 case YAFFS_OBJECT_TYPE_SPECIAL:
4070 ret_val = yaffs_generic_obj_del(obj);
4071 break;
4072 case YAFFS_OBJECT_TYPE_UNKNOWN:
4073 ret_val = 0;
4074 break; /* should not happen. */
4075 }
4076
4077 return ret_val;
4078}
4079
4080static int yaffs_unlink_worker(struct yaffs_obj *obj)
4081{
4082
4083 int del_now = 0;
4084
4085 if (!obj->my_inode)
4086 del_now = 1;
4087
4088 if (obj)
4089 yaffs_update_parent(obj->parent);
4090
4091 if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
4092 return yaffs_del_link(obj);
4093 } else if (!list_empty(&obj->hard_links)) {
4094 /* Curve ball: We're unlinking an object that has a hardlink.
4095 *
4096 * This problem arises because we are not strictly following
4097		 * the Linux link/inode model.
4098 *
4099 * We can't really delete the object.
4100 * Instead, we do the following:
4101 * - Select a hardlink.
4102 * - Unhook it from the hard links
4103 * - Move it from its parent directory (so that the rename can work)
4104 * - Rename the object to the hardlink's name.
4105 * - Delete the hardlink
4106 */
4107
4108 struct yaffs_obj *hl;
4109 struct yaffs_obj *parent;
4110 int ret_val;
4111 YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
4112
4113 hl = list_entry(obj->hard_links.next, struct yaffs_obj,
4114 hard_links);
4115
4116 yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
4117 parent = hl->parent;
4118
4119 list_del_init(&hl->hard_links);
4120
4121 yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
4122
4123 ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
4124
4125 if (ret_val == YAFFS_OK)
4126 ret_val = yaffs_generic_obj_del(hl);
4127
4128 return ret_val;
4129
4130 } else if (del_now) {
4131 switch (obj->variant_type) {
4132 case YAFFS_OBJECT_TYPE_FILE:
4133 return yaffs_del_file(obj);
4134 break;
4135 case YAFFS_OBJECT_TYPE_DIRECTORY:
4136 list_del_init(&obj->variant.dir_variant.dirty);
4137 return yaffs_del_dir(obj);
4138 break;
4139 case YAFFS_OBJECT_TYPE_SYMLINK:
4140 return yaffs_del_symlink(obj);
4141 break;
4142 case YAFFS_OBJECT_TYPE_SPECIAL:
4143 return yaffs_generic_obj_del(obj);
4144 break;
4145 case YAFFS_OBJECT_TYPE_HARDLINK:
4146 case YAFFS_OBJECT_TYPE_UNKNOWN:
4147 default:
4148 return YAFFS_FAIL;
4149 }
4150 } else if (yaffs_is_non_empty_dir(obj)) {
4151 return YAFFS_FAIL;
4152 } else {
4153 return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
4154 _Y("unlinked"), 0, 0);
4155 }
4156}
4157
4158static int yaffs_unlink_obj(struct yaffs_obj *obj)
4159{
4160
4161 if (obj && obj->unlink_allowed)
4162 return yaffs_unlink_worker(obj);
4163
4164 return YAFFS_FAIL;
4165
4166}
4167
4168int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name)
4169{
4170 struct yaffs_obj *obj;
4171
4172 obj = yaffs_find_by_name(dir, name);
4173 return yaffs_unlink_obj(obj);
4174}
4175
4176/* Note:
4177 * If old_name is NULL then we take old_dir as the object to be renamed.
4178 */
4179int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
4180 struct yaffs_obj *new_dir, const YCHAR * new_name)
4181{
4182 struct yaffs_obj *obj = NULL;
4183 struct yaffs_obj *existing_target = NULL;
4184 int force = 0;
4185 int result;
4186 struct yaffs_dev *dev;
4187
4188 if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
4189 YBUG();
4190 if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
4191 YBUG();
4192
4193 dev = old_dir->my_dev;
4194
4195#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
4196	/* Special case for case insensitive systems.
4197 * While look-up is case insensitive, the name isn't.
4198 * Therefore we might want to change x.txt to X.txt
4199 */
4200 if (old_dir == new_dir &&
4201 old_name && new_name &&
4202 strcmp(old_name, new_name) == 0)
4203 force = 1;
4204#endif
4205
4206 if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
4207 YAFFS_MAX_NAME_LENGTH)
4208 /* ENAMETOOLONG */
4209 return YAFFS_FAIL;
4210
4211	if (old_name)
4212		obj = yaffs_find_by_name(old_dir, old_name);
4213	else {
4214 obj = old_dir;
4215 old_dir = obj->parent;
4216 }
4217
4218
4219 if (obj && obj->rename_allowed) {
4220
4221 /* Now do the handling for an existing target, if there is one */
4222
4223 existing_target = yaffs_find_by_name(new_dir, new_name);
4224		if (yaffs_is_non_empty_dir(existing_target)) {
4225 return YAFFS_FAIL; /* ENOTEMPTY */
4226 } else if (existing_target && existing_target != obj) {
4227 /* Nuke the target first, using shadowing,
4228 * but only if it isn't the same object.
4229 *
4230 * Note we must disable gc otherwise it can mess up the shadowing.
4231 *
4232 */
4233 dev->gc_disable = 1;
4234 yaffs_change_obj_name(obj, new_dir, new_name, force,
4235 existing_target->obj_id);
4236 existing_target->is_shadowed = 1;
4237 yaffs_unlink_obj(existing_target);
4238 dev->gc_disable = 0;
4239 }
4240
4241 result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
4242
4243 yaffs_update_parent(old_dir);
4244 if (new_dir != old_dir)
4245 yaffs_update_parent(new_dir);
4246
4247 return result;
4248 }
4249 return YAFFS_FAIL;
4250}
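/*
 * Illustrative usage sketch only (not part of the driver; kept out of the
 * build with #if 0). It shows the two calling conventions of
 * yaffs_rename_obj(); the directory pointers are assumed to have been
 * obtained elsewhere and the names are made up.
 */
#if 0
static int example_rename_usage(struct yaffs_obj *src_dir,
				struct yaffs_obj *dst_dir)
{
	struct yaffs_obj *subdir;
	int result;

	/* Usual case: rename/move "a.txt" in src_dir to "b.txt" in dst_dir. */
	result = yaffs_rename_obj(src_dir, _Y("a.txt"), dst_dir, _Y("b.txt"));

	/* old_name == NULL case: the object passed as old_dir is itself
	 * renamed and its current parent is looked up internally. A
	 * subdirectory is used here, which also satisfies the directory
	 * sanity check at the top of yaffs_rename_obj().
	 */
	subdir = yaffs_find_by_name(dst_dir, _Y("some_subdir"));
	if (subdir)
		result = yaffs_rename_obj(subdir, NULL, dst_dir,
					  _Y("renamed_subdir"));

	return result;
}
#endif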
4251
4252/*----------------------- Initialisation Scanning ---------------------- */
4253
4254void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
4255 int backward_scanning)
4256{
4257 struct yaffs_obj *obj;
4258
4259 if (!backward_scanning) {
4260 /* Handle YAFFS1 forward scanning case
4261 * For YAFFS1 we always do the deletion
4262 */
4263
4264 } else {
4265 /* Handle YAFFS2 case (backward scanning)
4266 * If the shadowed object exists then ignore.
4267 */
4268 obj = yaffs_find_by_number(dev, obj_id);
4269 if (obj)
4270 return;
4271 }
4272
4273	/* Let's create it (if it does not exist), assuming it is a file, so that it can handle shrinking etc.
4274	 * We put it in the unlinked directory to be cleaned up after the scanning.
4275 */
4276 obj =
4277 yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
4278 if (!obj)
4279 return;
4280 obj->is_shadowed = 1;
4281 yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
4282 obj->variant.file_variant.shrink_size = 0;
4283 obj->valid = 1; /* So that we don't read any other info for this file */
4284
4285}
4286
4287void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list)
4288{
4289 struct yaffs_obj *hl;
4290 struct yaffs_obj *in;
4291
4292 while (hard_list) {
4293 hl = hard_list;
4294 hard_list = (struct yaffs_obj *)(hard_list->hard_links.next);
4295
4296 in = yaffs_find_by_number(dev,
4297 hl->variant.
4298 hardlink_variant.equiv_id);
4299
4300 if (in) {
4301 /* Add the hardlink pointers */
4302 hl->variant.hardlink_variant.equiv_obj = in;
4303 list_add(&hl->hard_links, &in->hard_links);
4304 } else {
4305			/* TODO: Need to report/handle this better.
4306			 * We have a problem: a hardlink to a non-existent object.
4307			 */
4308 hl->variant.hardlink_variant.equiv_obj = NULL;
4309 INIT_LIST_HEAD(&hl->hard_links);
4310
4311 }
4312 }
4313}
4314
4315static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
4316{
4317 /*
4318 * Sort out state of unlinked and deleted objects after scanning.
4319 */
4320 struct list_head *i;
4321 struct list_head *n;
4322 struct yaffs_obj *l;
4323
4324 if (dev->read_only)
4325 return;
4326
4327 /* Soft delete all the unlinked files */
4328 list_for_each_safe(i, n,
4329 &dev->unlinked_dir->variant.dir_variant.children) {
4330 if (i) {
4331 l = list_entry(i, struct yaffs_obj, siblings);
4332 yaffs_del_obj(l);
4333 }
4334 }
4335
4336 list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
4337 if (i) {
4338 l = list_entry(i, struct yaffs_obj, siblings);
4339 yaffs_del_obj(l);
4340 }
4341 }
4342
4343}
4344
4345/*
4346 * This code iterates through all the objects making sure that they are rooted.
4347 * Any unrooted objects are re-rooted in lost+found.
4348 * An object needs to be in one of:
4349 * - Directly under the deleted or unlinked directories
4350 * - Directly or indirectly under root.
4351 *
4352 * Note:
4353 * This code assumes that we don't ever change the current relationships between
4354 * directories:
4355 * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
4356 * lost-n-found->parent == root_dir
4357 *
4358 * This fixes the problem where directories might have inadvertently been deleted,
4359 * leaving objects "hanging" without being rooted in the directory tree.
4360 */
4361
4362static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
4363{
4364 return (obj == dev->del_dir ||
4365 obj == dev->unlinked_dir || obj == dev->root_dir);
4366}
4367
4368static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
4369{
4370 struct yaffs_obj *obj;
4371 struct yaffs_obj *parent;
4372 int i;
4373 struct list_head *lh;
4374 struct list_head *n;
4375 int depth_limit;
4376 int hanging;
4377
4378 if (dev->read_only)
4379 return;
4380
4381 /* Iterate through the objects in each hash entry,
4382 * looking at each object.
4383 * Make sure it is rooted.
4384 */
4385
4386 for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
4387 list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
4388 if (lh) {
4389 obj =
4390 list_entry(lh, struct yaffs_obj, hash_link);
4391 parent = obj->parent;
4392
4393 if (yaffs_has_null_parent(dev, obj)) {
4394 /* These directories are not hanging */
4395 hanging = 0;
4396 } else if (!parent
4397 || parent->variant_type !=
4398 YAFFS_OBJECT_TYPE_DIRECTORY) {
4399 hanging = 1;
4400 } else if (yaffs_has_null_parent(dev, parent)) {
4401 hanging = 0;
4402 } else {
4403 /*
4404 * Need to follow the parent chain to see if it is hanging.
4405 */
4406 hanging = 0;
4407 depth_limit = 100;
4408
4409 while (parent != dev->root_dir &&
4410 parent->parent &&
4411 parent->parent->variant_type ==
4412 YAFFS_OBJECT_TYPE_DIRECTORY
4413 && depth_limit > 0) {
4414 parent = parent->parent;
4415 depth_limit--;
4416 }
4417 if (parent != dev->root_dir)
4418 hanging = 1;
4419 }
4420 if (hanging) {
4421 yaffs_trace(YAFFS_TRACE_SCAN,
4422 "Hanging object %d moved to lost and found",
4423 obj->obj_id);
4424 yaffs_add_obj_to_dir(dev->lost_n_found,
4425 obj);
4426 }
4427 }
4428 }
4429 }
4430}
4431
4432/*
4433 * Delete directory contents for cleaning up lost and found.
4434 */
4435static void yaffs_del_dir_contents(struct yaffs_obj *dir)
4436{
4437 struct yaffs_obj *obj;
4438 struct list_head *lh;
4439 struct list_head *n;
4440
4441 if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
4442 YBUG();
4443
4444 list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
4445 if (lh) {
4446 obj = list_entry(lh, struct yaffs_obj, siblings);
4447 if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
4448 yaffs_del_dir_contents(obj);
4449
4450 yaffs_trace(YAFFS_TRACE_SCAN,
4451 "Deleting lost_found object %d",
4452 obj->obj_id);
4453
4454			/* Need to use yaffs_unlink_obj() since yaffs_del_obj()
4455			 * would not handle hardlinked objects correctly.
4456			 */
4457 yaffs_unlink_obj(obj);
4458 }
4459 }
4460
4461}
4462
4463static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
4464{
4465 yaffs_del_dir_contents(dev->lost_n_found);
4466}
4467
4468
4469struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
4470 const YCHAR * name)
4471{
4472 int sum;
4473
4474 struct list_head *i;
4475 YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
4476
4477 struct yaffs_obj *l;
4478
4479 if (!name)
4480 return NULL;
4481
4482 if (!directory) {
4483 yaffs_trace(YAFFS_TRACE_ALWAYS,
4484 "tragedy: yaffs_find_by_name: null pointer directory"
4485 );
4486 YBUG();
4487 return NULL;
4488 }
4489 if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
4490 yaffs_trace(YAFFS_TRACE_ALWAYS,
4491 "tragedy: yaffs_find_by_name: non-directory"
4492 );
4493 YBUG();
4494 }
4495
4496 sum = yaffs_calc_name_sum(name);
4497
4498 list_for_each(i, &directory->variant.dir_variant.children) {
4499 if (i) {
4500 l = list_entry(i, struct yaffs_obj, siblings);
4501
4502 if (l->parent != directory)
4503 YBUG();
4504
4505 yaffs_check_obj_details_loaded(l);
4506
4507 /* Special case for lost-n-found */
4508 if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
4509 if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
4510 return l;
4511 } else if (l->sum == sum
4512 || l->hdr_chunk <= 0) {
4513				/* Either the name sum matches or there is no
4514				 * header chunk to trust, so do a real name check.
4515				 */
4516 yaffs_get_obj_name(l, buffer,
4517 YAFFS_MAX_NAME_LENGTH + 1);
4518 if (strncmp
4519 (name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
4520 return l;
4521 }
4522 }
4523 }
4524
4525 return NULL;
4526}
4527
4528/* GetEquivalentObject dereferences any hard links to get to the
4529 * actual object.
4530 */
4531
4532struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
4533{
4534 if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
4535 /* We want the object id of the equivalent object, not this one */
4536 obj = obj->variant.hardlink_variant.equiv_obj;
4537 yaffs_check_obj_details_loaded(obj);
4538 }
4539 return obj;
4540}
4541
4542/*
4543 * A note or two on object names.
4544 * * If the object name is missing, we make one up in the form objnnn
4545 *
4546 * * ASCII names are stored in the object header's name field from byte zero
4547 * * Unicode names are historically stored starting from byte zero.
4548 *
4549 * Then there are automatic Unicode names...
4550 * The purpose of these is to save names in a way that can be read as
4551 * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
4552 * system to share files.
4553 *
4554 * These automatic Unicode names are stored slightly differently:
4555 * - If the name can fit in the ASCII character space then it is saved as
4556 *   an ASCII name, as per above.
4557 * - If the name needs Unicode then the name is saved in Unicode,
4558 *   starting at oh->name[1].
4559 *
4560 */
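/*
 * Layout illustration only; how the two encodings are told apart lives in
 * yaffs_load_name_from_oh() and is assumed rather than spelled out here:
 *   plain ASCII name:  the ASCII bytes occupy oh->name[0] onwards.
 *   auto-Unicode name: the Unicode string starts at oh->name[1], leaving
 *                      oh->name[0] free to act as a marker byte (assumption).
 */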
4561static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR * name,
4562 int buffer_size)
4563{
4564 /* Create an object name if we could not find one. */
4565 if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
4566 YCHAR local_name[20];
4567 YCHAR num_string[20];
4568 YCHAR *x = &num_string[19];
4569 unsigned v = obj->obj_id;
4570 num_string[19] = 0;
4571 while (v > 0) {
4572 x--;
4573 *x = '0' + (v % 10);
4574 v /= 10;
4575 }
4576 /* make up a name */
4577 strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
4578 strcat(local_name, x);
4579 strncpy(name, local_name, buffer_size - 1);
4580 }
4581}
4582
4583int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size)
4584{
4585 memset(name, 0, buffer_size * sizeof(YCHAR));
4586
4587 yaffs_check_obj_details_loaded(obj);
4588
4589 if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
4590 strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
4591 }
4592#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
4593 else if (obj->short_name[0]) {
4594 strcpy(name, obj->short_name);
4595 }
4596#endif
4597 else if (obj->hdr_chunk > 0) {
4598 int result;
4599 u8 *buffer = yaffs_get_temp_buffer(obj->my_dev, __LINE__);
4600
4601 struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
4602
4603 memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
4604
4605 if (obj->hdr_chunk > 0) {
4606 result = yaffs_rd_chunk_tags_nand(obj->my_dev,
4607 obj->hdr_chunk,
4608 buffer, NULL);
4609 }
4610 yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
4611 buffer_size);
4612
4613 yaffs_release_temp_buffer(obj->my_dev, buffer, __LINE__);
4614 }
4615
4616 yaffs_fix_null_name(obj, name, buffer_size);
4617
4618 return strnlen(name, YAFFS_MAX_NAME_LENGTH);
4619}
4620
4621int yaffs_get_obj_length(struct yaffs_obj *obj)
4622{
4623 /* Dereference any hard linking */
4624 obj = yaffs_get_equivalent_obj(obj);
4625
4626 if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
4627 return obj->variant.file_variant.file_size;
4628 if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
4629 if (!obj->variant.symlink_variant.alias)
4630 return 0;
4631 return strnlen(obj->variant.symlink_variant.alias,
4632 YAFFS_MAX_ALIAS_LENGTH);
4633 } else {
4634 /* Only a directory should drop through to here */
4635 return obj->my_dev->data_bytes_per_chunk;
4636 }
4637}
4638
4639int yaffs_get_obj_link_count(struct yaffs_obj *obj)
4640{
4641 int count = 0;
4642 struct list_head *i;
4643
4644 if (!obj->unlinked)
4645 count++; /* the object itself */
4646
4647 list_for_each(i, &obj->hard_links)
4648		count++;	/* add the hard links */
4649
4650 return count;
4651}
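/*
 * Worked example: a regular file that has two hardlinked names counts one
 * for the object itself plus one per entry on obj->hard_links, so
 * yaffs_get_obj_link_count() returns 3; an unlinked object with no
 * hardlinks returns 0.
 */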
4652
4653int yaffs_get_obj_inode(struct yaffs_obj *obj)
4654{
4655 obj = yaffs_get_equivalent_obj(obj);
4656
4657 return obj->obj_id;
4658}
4659
4660unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
4661{
4662 obj = yaffs_get_equivalent_obj(obj);
4663
4664 switch (obj->variant_type) {
4665 case YAFFS_OBJECT_TYPE_FILE:
4666 return DT_REG;
4667 break;
4668 case YAFFS_OBJECT_TYPE_DIRECTORY:
4669 return DT_DIR;
4670 break;
4671 case YAFFS_OBJECT_TYPE_SYMLINK:
4672 return DT_LNK;
4673 break;
4674 case YAFFS_OBJECT_TYPE_HARDLINK:
4675 return DT_REG;
4676 break;
4677 case YAFFS_OBJECT_TYPE_SPECIAL:
4678 if (S_ISFIFO(obj->yst_mode))
4679 return DT_FIFO;
4680 if (S_ISCHR(obj->yst_mode))
4681 return DT_CHR;
4682 if (S_ISBLK(obj->yst_mode))
4683 return DT_BLK;
4684 if (S_ISSOCK(obj->yst_mode))
4685 return DT_SOCK;
4686 default:
4687 return DT_REG;
4688 break;
4689 }
4690}
4691
4692YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
4693{
4694 obj = yaffs_get_equivalent_obj(obj);
4695 if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
4696 return yaffs_clone_str(obj->variant.symlink_variant.alias);
4697 else
4698 return yaffs_clone_str(_Y(""));
4699}
4700
4701/*--------------------------- Initialisation code -------------------------- */
4702
4703static int yaffs_check_dev_fns(const struct yaffs_dev *dev)
4704{
4705
4706 /* Common functions, gotta have */
4707 if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
4708 return 0;
4709
4710#ifdef CONFIG_YAFFS_YAFFS2
4711
4712 /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
4713 if (dev->param.write_chunk_tags_fn &&
4714 dev->param.read_chunk_tags_fn &&
4715 !dev->param.write_chunk_fn &&
4716 !dev->param.read_chunk_fn &&
4717 dev->param.bad_block_fn && dev->param.query_block_fn)
4718 return 1;
4719#endif
4720
4721 /* Can use the "spare" style interface for yaffs1 */
4722 if (!dev->param.is_yaffs2 &&
4723 !dev->param.write_chunk_tags_fn &&
4724 !dev->param.read_chunk_tags_fn &&
4725 dev->param.write_chunk_fn &&
4726 dev->param.read_chunk_fn &&
4727 !dev->param.bad_block_fn && !dev->param.query_block_fn)
4728 return 1;
4729
4730 return 0; /* bad */
4731}
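/*
 * For reference, the two driver interface mixes accepted above, restated
 * from the checks themselves:
 *
 *   "with tags" style (yaffs1 or yaffs2, CONFIG_YAFFS_YAFFS2 builds):
 *     erase_fn, initialise_flash_fn,
 *     write_chunk_tags_fn, read_chunk_tags_fn,
 *     bad_block_fn, query_block_fn,
 *     and no write_chunk_fn / read_chunk_fn.
 *
 *   "spare" style (yaffs1 only):
 *     erase_fn, initialise_flash_fn,
 *     write_chunk_fn, read_chunk_fn,
 *     and none of the tags / bad-block / query functions.
 */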
4732
4733static int yaffs_create_initial_dir(struct yaffs_dev *dev)
4734{
4735 /* Initialise the unlinked, deleted, root and lost and found directories */
4736
4737 dev->lost_n_found = dev->root_dir = NULL;
4738 dev->unlinked_dir = dev->del_dir = NULL;
4739
4740 dev->unlinked_dir =
4741 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
4742
4743 dev->del_dir =
4744 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
4745
4746 dev->root_dir =
4747 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
4748 YAFFS_ROOT_MODE | S_IFDIR);
4749 dev->lost_n_found =
4750 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
4751 YAFFS_LOSTNFOUND_MODE | S_IFDIR);
4752
4753 if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
4754 && dev->del_dir) {
4755 yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
4756 return YAFFS_OK;
4757 }
4758
4759 return YAFFS_FAIL;
4760}
4761
4762int yaffs_guts_initialise(struct yaffs_dev *dev)
4763{
4764 int init_failed = 0;
4765 unsigned x;
4766 int bits;
4767
4768 yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()" );
4769
4770 /* Check stuff that must be set */
4771
4772 if (!dev) {
4773 yaffs_trace(YAFFS_TRACE_ALWAYS,
4774 "yaffs: Need a device"
4775 );
4776 return YAFFS_FAIL;
4777 }
4778
4779 dev->internal_start_block = dev->param.start_block;
4780 dev->internal_end_block = dev->param.end_block;
4781 dev->block_offset = 0;
4782 dev->chunk_offset = 0;
4783 dev->n_free_chunks = 0;
4784
4785 dev->gc_block = 0;
4786
4787 if (dev->param.start_block == 0) {
4788 dev->internal_start_block = dev->param.start_block + 1;
4789 dev->internal_end_block = dev->param.end_block + 1;
4790 dev->block_offset = 1;
4791 dev->chunk_offset = dev->param.chunks_per_block;
4792 }
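	/*
	 * Worked example of the offsetting above: with start_block == 0,
	 * end_block == 1023 and 64 chunks per block, the internal range
	 * becomes blocks 1..1024, block_offset is 1 and chunk_offset is 64,
	 * i.e. internal numbering sits one block above the physical numbering.
	 */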
4793
4794 /* Check geometry parameters. */
4795
4796 if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
4797 dev->param.total_bytes_per_chunk < 1024) ||
4798 (!dev->param.is_yaffs2 &&
4799 dev->param.total_bytes_per_chunk < 512) ||
4800 (dev->param.inband_tags && !dev->param.is_yaffs2) ||
4801 dev->param.chunks_per_block < 2 ||
4802 dev->param.n_reserved_blocks < 2 ||
4803 dev->internal_start_block <= 0 ||
4804 dev->internal_end_block <= 0 ||
4805 dev->internal_end_block <=
4806 (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
4807 ) {
4808 /* otherwise it is too small */
4809 yaffs_trace(YAFFS_TRACE_ALWAYS,
4810 "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
4811 dev->param.total_bytes_per_chunk,
4812 dev->param.is_yaffs2 ? "2" : "",
4813 dev->param.inband_tags);
4814 return YAFFS_FAIL;
4815 }
4816
4817 if (yaffs_init_nand(dev) != YAFFS_OK) {
4818 yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
4819 return YAFFS_FAIL;
4820 }
4821
4822 /* Sort out space for inband tags, if required */
4823 if (dev->param.inband_tags)
4824 dev->data_bytes_per_chunk =
4825 dev->param.total_bytes_per_chunk -
4826 sizeof(struct yaffs_packed_tags2_tags_only);
4827 else
4828 dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
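	/*
	 * Example (the tag size is illustrative; the real figure is whatever
	 * sizeof(struct yaffs_packed_tags2_tags_only) is on this build): with
	 * 2048-byte chunks and inband tags enabled, 16-byte packed tags would
	 * leave data_bytes_per_chunk at 2032; without inband tags it stays at
	 * the full 2048.
	 */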
4829
4830 /* Got the right mix of functions? */
4831 if (!yaffs_check_dev_fns(dev)) {
4832 /* Function missing */
4833 yaffs_trace(YAFFS_TRACE_ALWAYS,
4834 "device function(s) missing or wrong");
4835
4836 return YAFFS_FAIL;
4837 }
4838
4839 if (dev->is_mounted) {
4840 yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
4841 return YAFFS_FAIL;
4842 }
4843
4844 /* Finished with most checks. One or two more checks happen later on too. */
4845
4846 dev->is_mounted = 1;
4847
4848 /* OK now calculate a few things for the device */
4849
4850 /*
4851 * Calculate all the chunk size manipulation numbers:
4852 */
4853 x = dev->data_bytes_per_chunk;
4854 /* We always use dev->chunk_shift and dev->chunk_div */
4855 dev->chunk_shift = calc_shifts(x);
4856 x >>= dev->chunk_shift;
4857 dev->chunk_div = x;
4858 /* We only use chunk mask if chunk_div is 1 */
4859 dev->chunk_mask = (1 << dev->chunk_shift) - 1;
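	/*
	 * Worked examples of the chunk-size numbers:
	 *   data_bytes_per_chunk = 2048 (a power of 2):
	 *     chunk_shift = 11, chunk_div = 1, chunk_mask = 0x7ff, so
	 *     yaffs_addr_to_chunk() uses pure shift/mask arithmetic.
	 *   data_bytes_per_chunk = 2032 (e.g. with inband tags):
	 *     chunk_shift = 4, chunk_div = 127, and the mask goes unused
	 *     because the non-power-of-2 path divides instead.
	 */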
4860
4861 /*
4862 * Calculate chunk_grp_bits.
4863	 * We need enough bits to address every chunk, i.e. the next power of 2 >= chunks_per_block * (internal_end_block + 1).
4864 */
4865
4866 x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
4867
4868 bits = calc_shifts_ceiling(x);
4869
4870 /* Set up tnode width if wide tnodes are enabled. */
4871 if (!dev->param.wide_tnodes_disabled) {
4872 /* bits must be even so that we end up with 32-bit words */
4873 if (bits & 1)
4874 bits++;
4875 if (bits < 16)
4876 dev->tnode_width = 16;
4877 else
4878 dev->tnode_width = bits;
4879 } else {
4880 dev->tnode_width = 16;
4881 }
4882
4883 dev->tnode_mask = (1 << dev->tnode_width) - 1;
4884
4885	/* Level0 tnodes are 16 bits or wider (if wide tnodes are enabled),
4886	 * so if the bit width of the chunk range we're using
4887	 * is greater than the tnode width we need to split the
4888	 * extra bits out into chunk_grp_bits and chunk_grp_size.
4889	 */
4890
4891 if (bits <= dev->tnode_width)
4892 dev->chunk_grp_bits = 0;
4893 else
4894 dev->chunk_grp_bits = bits - dev->tnode_width;
4895
4896 dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
4897 if (dev->tnode_size < sizeof(struct yaffs_tnode))
4898 dev->tnode_size = sizeof(struct yaffs_tnode);
4899
4900 dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
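	/*
	 * Worked example of the tnode width / chunk group numbers, assuming
	 * 64 chunks per block and internal blocks 1..1024:
	 *   x = 64 * 1025 = 65600, so bits = 17.
	 *   Wide tnodes enabled: bits rounds up to 18, tnode_width = 18,
	 *     chunk_grp_bits = 0 and chunk_grp_size = 1.
	 *   Wide tnodes disabled: tnode_width = 16, chunk_grp_bits = 1 and
	 *     chunk_grp_size = 2, which is still <= 64 chunks per block.
	 */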
4901
4902 if (dev->param.chunks_per_block < dev->chunk_grp_size) {
4903 /* We have a problem because the soft delete won't work if
4904 * the chunk group size > chunks per block.
4905 * This can be remedied by using larger "virtual blocks".
4906 */
4907 yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
4908
4909 return YAFFS_FAIL;
4910 }
4911
4912	/* OK, we've finished verifying the device, let's continue with initialisation */
4913
4914 /* More device initialisation */
4915 dev->all_gcs = 0;
4916 dev->passive_gc_count = 0;
4917 dev->oldest_dirty_gc_count = 0;
4918 dev->bg_gcs = 0;
4919 dev->gc_block_finder = 0;
4920 dev->buffered_block = -1;
4921 dev->doing_buffered_block_rewrite = 0;
4922 dev->n_deleted_files = 0;
4923 dev->n_bg_deletions = 0;
4924 dev->n_unlinked_files = 0;
4925 dev->n_ecc_fixed = 0;
4926 dev->n_ecc_unfixed = 0;
4927 dev->n_tags_ecc_fixed = 0;
4928 dev->n_tags_ecc_unfixed = 0;
4929 dev->n_erase_failures = 0;
4930 dev->n_erased_blocks = 0;
4931 dev->gc_disable = 0;
4932 dev->has_pending_prioritised_gc = 1; /* Assume the worst for now, will get fixed on first GC */
4933 INIT_LIST_HEAD(&dev->dirty_dirs);
4934 dev->oldest_dirty_seq = 0;
4935 dev->oldest_dirty_block = 0;
4936
4937 /* Initialise temporary buffers and caches. */
4938 if (!yaffs_init_tmp_buffers(dev))
4939 init_failed = 1;
4940
4941 dev->cache = NULL;
4942 dev->gc_cleanup_list = NULL;
4943
4944 if (!init_failed && dev->param.n_caches > 0) {
4945 int i;
4946 void *buf;
4947 int cache_bytes =
4948 dev->param.n_caches * sizeof(struct yaffs_cache);
4949
4950 if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
4951 dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
4952
4953 dev->cache = kmalloc(cache_bytes, GFP_NOFS);
4954
4955 buf = (u8 *) dev->cache;
4956
4957 if (dev->cache)
4958 memset(dev->cache, 0, cache_bytes);
4959
4960 for (i = 0; i < dev->param.n_caches && buf; i++) {
4961 dev->cache[i].object = NULL;
4962 dev->cache[i].last_use = 0;
4963 dev->cache[i].dirty = 0;
4964 dev->cache[i].data = buf =
4965 kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
4966 }
4967 if (!buf)
4968 init_failed = 1;
4969
4970 dev->cache_last_use = 0;
4971 }
4972
4973 dev->cache_hits = 0;
4974
4975 if (!init_failed) {
4976 dev->gc_cleanup_list =
4977 kmalloc(dev->param.chunks_per_block * sizeof(u32),
4978 GFP_NOFS);
4979 if (!dev->gc_cleanup_list)
4980 init_failed = 1;
4981 }
4982
4983 if (dev->param.is_yaffs2)
4984 dev->param.use_header_file_size = 1;
4985
4986 if (!init_failed && !yaffs_init_blocks(dev))
4987 init_failed = 1;
4988
4989 yaffs_init_tnodes_and_objs(dev);
4990
4991 if (!init_failed && !yaffs_create_initial_dir(dev))
4992 init_failed = 1;
4993
4994 if (!init_failed) {
4995 /* Now scan the flash. */
4996 if (dev->param.is_yaffs2) {
4997 if (yaffs2_checkpt_restore(dev)) {
4998 yaffs_check_obj_details_loaded(dev->root_dir);
4999 yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
5000 "yaffs: restored from checkpoint"
5001 );
5002 } else {
5003
5004 /* Clean up the mess caused by an aborted checkpoint load
5005 * and scan backwards.
5006 */
5007 yaffs_deinit_blocks(dev);
5008
5009 yaffs_deinit_tnodes_and_objs(dev);
5010
5011 dev->n_erased_blocks = 0;
5012 dev->n_free_chunks = 0;
5013 dev->alloc_block = -1;
5014 dev->alloc_page = -1;
5015 dev->n_deleted_files = 0;
5016 dev->n_unlinked_files = 0;
5017 dev->n_bg_deletions = 0;
5018
5019 if (!init_failed && !yaffs_init_blocks(dev))
5020 init_failed = 1;
5021
5022 yaffs_init_tnodes_and_objs(dev);
5023
5024 if (!init_failed
5025 && !yaffs_create_initial_dir(dev))
5026 init_failed = 1;
5027
5028 if (!init_failed && !yaffs2_scan_backwards(dev))
5029 init_failed = 1;
5030 }
5031 } else if (!yaffs1_scan(dev)) {
5032 init_failed = 1;
5033 }
5034
5035 yaffs_strip_deleted_objs(dev);
5036 yaffs_fix_hanging_objs(dev);
5037 if (dev->param.empty_lost_n_found)
5038 yaffs_empty_l_n_f(dev);
5039 }
5040
5041 if (init_failed) {
5042 /* Clean up the mess */
5043 yaffs_trace(YAFFS_TRACE_TRACING,
5044 "yaffs: yaffs_guts_initialise() aborted.");
5045
5046 yaffs_deinitialise(dev);
5047 return YAFFS_FAIL;
5048 }
5049
5050 /* Zero out stats */
5051 dev->n_page_reads = 0;
5052 dev->n_page_writes = 0;
5053 dev->n_erasures = 0;
5054 dev->n_gc_copies = 0;
5055 dev->n_retired_writes = 0;
5056
5057 dev->n_retired_blocks = 0;
5058
5059 yaffs_verify_free_chunks(dev);
5060 yaffs_verify_blocks(dev);
5061
5062 /* Clean up any aborted checkpoint data */
5063 if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
5064 yaffs2_checkpt_invalidate(dev);
5065
5066 yaffs_trace(YAFFS_TRACE_TRACING,
5067 "yaffs: yaffs_guts_initialise() done.");
5068 return YAFFS_OK;
5069
5070}
5071
5072void yaffs_deinitialise(struct yaffs_dev *dev)
5073{
5074 if (dev->is_mounted) {
5075 int i;
5076
5077 yaffs_deinit_blocks(dev);
5078 yaffs_deinit_tnodes_and_objs(dev);
5079 if (dev->param.n_caches > 0 && dev->cache) {
5080
5081 for (i = 0; i < dev->param.n_caches; i++) {
5082 if (dev->cache[i].data)
5083 kfree(dev->cache[i].data);
5084 dev->cache[i].data = NULL;
5085 }
5086
5087 kfree(dev->cache);
5088 dev->cache = NULL;
5089 }
5090
5091 kfree(dev->gc_cleanup_list);
5092
5093 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
5094 kfree(dev->temp_buffer[i].buffer);
5095
5096 dev->is_mounted = 0;
5097
5098 if (dev->param.deinitialise_flash_fn)
5099 dev->param.deinitialise_flash_fn(dev);
5100 }
5101}
5102
5103int yaffs_count_free_chunks(struct yaffs_dev *dev)
5104{
5105 int n_free = 0;
5106 int b;
5107
5108 struct yaffs_block_info *blk;
5109
5110 blk = dev->block_info;
5111 for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
5112 switch (blk->block_state) {
5113 case YAFFS_BLOCK_STATE_EMPTY:
5114 case YAFFS_BLOCK_STATE_ALLOCATING:
5115 case YAFFS_BLOCK_STATE_COLLECTING:
5116 case YAFFS_BLOCK_STATE_FULL:
5117 n_free +=
5118 (dev->param.chunks_per_block - blk->pages_in_use +
5119 blk->soft_del_pages);
5120 break;
5121 default:
5122 break;
5123 }
5124 blk++;
5125 }
5126
5127 return n_free;
5128}
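/*
 * Worked example for yaffs_count_free_chunks(): a FULL block with 64 chunks
 * per block, 64 pages_in_use and 10 soft-deleted pages contributes
 * 64 - 64 + 10 = 10 reclaimable chunks; an EMPTY block contributes all 64.
 */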
5129
5130int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
5131{
5132 /* This is what we report to the outside world */
5133
5134 int n_free;
5135 int n_dirty_caches;
5136 int blocks_for_checkpt;
5137 int i;
5138
5139 n_free = dev->n_free_chunks;
5140 n_free += dev->n_deleted_files;
5141
5142 /* Now count the number of dirty chunks in the cache and subtract those */
5143
5144 for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
5145 if (dev->cache[i].dirty)
5146 n_dirty_caches++;
5147 }
5148
5149 n_free -= n_dirty_caches;
5150
5151 n_free -=
5152 ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
5153
5154 /* Now we figure out how much to reserve for the checkpoint and report that... */
5155 blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
5156
5157 n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
5158
5159 if (n_free < 0)
5160 n_free = 0;
5161
5162 return n_free;
5163
5164}
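/*
 * Worked example of the figure reported above, with made-up numbers:
 * n_free_chunks = 10000, n_deleted_files = 50, 2 dirty cache entries,
 * n_reserved_blocks = 5, 64 chunks per block and 4 checkpoint blocks give
 *   10000 + 50 - 2 - (5 + 1) * 64 - 4 * 64 = 9408 chunks reported free.
 */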