
RE: What on Earth is JFFS2 GC doing?



After applying the suggested patch: not much difference. I even got a
22-second jitter! However, the # of ticks for the suspect routine has
gone *way* down.

(Note that I don't start the entire test from scratch, but just continue
on the existing ~74%-full fs. Also, I'm now copying a ~120KB binary file
and immediately deleting it to generate some garbage for the GC task to
collect; this file was originally ~3KB, but that took a L-O-N-G time to
fill the fs.)

But basically, the high jitters are still there.

Vipin

*******************************************************************
<SNIP>

 188.680000 ms   88.680000 ms
 195.462000 ms   95.462000 ms
CHIP_ERASE:Erasing_at_address: 0x780000
 9514.488000 ms   9414.488000 ms!<Profile saved in file:JitterTest.profilesnap-0>
 540.090000 ms   440.090000 ms
 99.916000 ms   -0.084000 ms

<SNIP>

 100.009000 ms   0.009000 ms
 171.285000 ms   71.285000 ms
 138.704000 ms   38.704000 ms
CHIP_ERASE:Erasing_at_address: 0x4c0000
 13069.977000 ms   12969.977000 ms!<Profile saved in file:JitterTest.profilesnap-1>
 520.859000 ms   420.859000 ms
 114.043000 ms   14.043000 ms

<SNIP>

 120.085000 ms   20.085000 ms
 109.949000 ms   9.949000 ms
CHIP_ERASE:Erasing_at_address: 0x5c0000
 13759.939000 ms   13659.939000 ms!<Profile saved in file:JitterTest.profilesnap-2>
 550.490000 ms   450.490000 ms
 101.693000 ms   1.693000 ms

<SNIP>

 120.017000 ms   20.017000 ms
 145.338000 ms   45.338000 ms
CHIP_ERASE:Erasing_at_address: 0x500000
 15154.609000 ms   15054.609000 ms!<Profile saved in file:JitterTest.profilesnap-3>
 580.087000 ms   480.087000 ms
 12639.967000 ms   12539.967000 ms
CHIP_ERASE:Erasing_at_address: 0x380000
 153.791000 ms   53.791000 ms
 576.218000 ms   476.218000 ms
 293.423000 ms   193.423000 ms

<SNIP>

 119.432000 ms   19.432000 ms
CHIP_ERASE:Erasing_at_address: 0x300000
 15022.010000 ms   14922.010000 ms
 581.441000 ms   481.441000 ms
 15640.177000 ms   15540.177000 ms!<Profile saved in file:JitterTest.profilesnap-4>
 12339.540000 ms   12239.540000 ms
 106.821000 ms   6.821000 ms
CHIP_ERASE:Erasing_at_address: 0x2c0000

<SNIP>

 120.013000 ms   20.013000 ms
 109.975000 ms   9.975000 ms
 100.000000 ms   0.000000 ms
CHIP_ERASE:Erasing_at_address: 0x5c0000
 22909.981000 ms   22809.981000 ms!<Profile saved in file:JitterTest.profilesnap-5>
 9594.762000 ms   9494.762000 ms
CHIP_ERASE:Erasing_at_address: 0x580000
 218.730000 ms   118.730000 ms
 607.281000 ms   507.281000 ms
CHIP_ERASE:Erasing_at_address: 0x540000
 109.254000 ms   9.254000 ms
 590.691000 ms   490.691000 ms
 129.297000 ms   29.297000 ms
 110.026000 ms   10.026000 ms
 100.045000 ms   0.045000 ms
 105.026000 ms   5.026000 ms
 124.918000 ms   24.918000 ms
 150.001000 ms   50.001000 ms
/dev/mtdblock1            8192      6248      1944  76% /mnt/jffs1
 179.986000 ms   79.986000 ms
 121.242000 ms   21.242000 ms
 128.754000 ms   28.754000 ms

<SNIP>

[root@xxxxxxx.profilesnap-0 -m /boot/linux-2.4.1-JFFS2.System.map
     4 startup_32                                 0.0127
     1 write_profile                              0.0208
     1 ext2_block_to_path                         0.0037
     1 jffs2_free_all_node_refs                   0.0033
     5 cfi_amdstd_erase_onesize                   0.0051
     2 physmap_read32                             0.0625
     2 __delay                                    0.0417
     1 __const_udelay                             0.0312
     4 __generic_copy_to_user                     0.0625
    21 total                                      0.0000
[root@xxxxxxx.profilesnap-1 -m /boot/linux-2.4.1-JFFS2.System.map
     4 startup_32                                 0.0127
     1 printk                                     0.0024
     2 write_profile                              0.0417
     1 ext2_new_block                             0.0005
     1 cfi_amdstd_erase_onesize                   0.0010
     4 physmap_read32                             0.1250
     2 __delay                                    0.0417
     1 __const_udelay                             0.0312
     2 __generic_copy_to_user                     0.0312
     3 __generic_copy_from_user                   0.0469
    21 total                                      0.0000
[root@xxxxxxx.profilesnap-2 -m /boot/linux-2.4.1-JFFS2.System.map
     4 startup_32                                 0.0127
     1 deactivate_page_nolock                     0.0031
     1 getblk                                     0.0035
     1 write_profile                              0.0208
     1 jffs2_free_all_node_refs                   0.0033
     3 cfi_amdstd_erase_onesize                   0.0031
     4 physmap_read32                             0.1250
     3 __const_udelay                             0.0938
     4 __generic_copy_from_user                   0.0625
    22 total                                      0.0000
[root@xxxxxxx.profilesnap-3 -m /boot/linux-2.4.1-JFFS2.System.map
     4 startup_32                                 0.0127
     1 write_profile                              0.0208
     1 ext2_get_block                             0.0008
     1 jffs2_free_all_node_refs                   0.0033
     5 cfi_amdstd_erase_onesize                   0.0051
     1 __loop_delay                               0.0208
     1 __delay                                    0.0208
     1 __const_udelay                             0.0312
     1 __generic_copy_to_user                     0.0156
     1 __generic_copy_from_user                   0.0156
    17 total                                      0.0000
[root@xxxxxxx.profilesnap-4 -m /boot/linux-2.4.1-JFFS2.System.map
     4 startup_32                                 0.0127
     1 restore_sigcontext                         0.0031
     1 lru_cache_add                              0.0045
     1 sys_read                                   0.0048
     1 __remove_from_queues                       0.0156
     1 lookup_hash                                0.0069
     1 open_namei                                 0.0007
     1 do_select                                  0.0018
     1 write_profile                              0.0208
     1 ext2_alloc_block                           0.0069
     1 ext2_get_block                             0.0008
     1 ext2_add_entry                             0.0010
     1 jffs2_mark_erased_blocks                   0.0009
     1 pcnet32_wio_read_csr                       0.0208
     1 pcnet32_rx                                 0.0013
     2 physmap_copy_from                          0.0312
     1 sock_sendmsg                               0.0057
    13 __generic_copy_to_user                     0.2031
    10 __generic_copy_from_user                   0.1562
    44 total                                      0.0000
[root@xxxxxxx.profilesnap-5 -m /boot/linux-2.4.1-JFFS2.System.map
     4 startup_32                                 0.0127
     1 printk                                     0.0024
     1 get_unused_buffer_head                     0.0069
     2 write_profile                              0.0417
     3 cfi_amdstd_erase_onesize                   0.0031
     3 physmap_read32                             0.0938
     2 __generic_copy_to_user                     0.0312
     1 __generic_copy_from_user                   0.0156
    17 total                                      0.0000


-----Original Message-----
From: David Woodhouse
Cc: Vipin Malik; 'Tim Riker '; 'jffs-dev '
Sent: 6/22/01 6:36 PM
Subject: Re: What on Earth is JFFS2 GC doing? 


dwmw2@xxxxxxx.org said:
>  There's one optimisation I'm tempted to try before doing that - at
> least we don't have to walk any given per-inode list more than once,
> if we're being sensible about it. Currently we go round it once for
> every node we want to delete. If there's more than one node to delete
> from a given inode, that's silly.

Try something like this...

Index: erase.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/erase.c,v
retrieving revision 1.19
diff -u -r1.19 erase.c
--- erase.c	2001/03/25 22:36:12	1.19
+++ erase.c	2001/06/22 23:35:14
@@ -164,48 +164,72 @@
 
 /* Hmmm. Maybe we should accept the extra space it takes and make
    this a standard doubly-linked list? */
-static inline void jffs2_remove_node_ref_from_ino_list(struct jffs2_sb_info *sbinfo, struct jffs2_raw_node_ref *ref)
+static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
+			struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
 {
-	struct jffs2_inode_cache *ic;
-	struct jffs2_raw_node_ref **prev, *this;
-	D2(int c=0);
+	struct jffs2_inode_cache *ic = NULL;
+	struct jffs2_raw_node_ref **prev;
 
-	this = ref;
-	while(this->next_in_ino)
-		 this = this->next_in_ino;
+	prev = &ref->next_in_ino;
 
-	ic = (struct jffs2_inode_cache *)this;
+	/* Walk the inode's list once, removing any nodes from this eraseblock */
+	while (1) {
+		if (!(*prev)->next_in_ino) {
+			/* We're looking at the jffs2_inode_cache, which is
+			   at the end of the linked list. Stash it and continue
+			   from the beginning of the list */
+			ic = (struct jffs2_inode_cache *)(*prev);
+			prev = &ic->nodes;
+			continue;
+		} 
 
-	D1(printk(KERN_DEBUG "Removing node at phys 0x%08x from ino #%u\n", ref->flash_offset &~3, ic->ino));
+		if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
+			/* It's in the block we're erasing */
+			struct jffs2_raw_node_ref *this;
 
-	prev = &ic->nodes;
-	if (!*prev) {
-		printk(KERN_WARNING "Eep. ic->nodes == NULL.\n");
-		return;
-	}
-	while (*prev != ref) {
-		if (!(*prev)->next_in_ino) {
-		        printk(KERN_WARNING "Eep. node at phys 0x%08x, mem %p. next_in_ino is NULL.\n", (*prev)->flash_offset &~3,
-			       *prev);
-			return;
+			this = *prev;
+			*prev = this->next_in_ino;
+			this->next_in_ino = NULL;
+
+			if (this == ref)
+				break;
+
+			continue;
 		}
-		prev = &(*prev)->next_in_ino;
+		/* Not to be deleted. Skip */
+		prev = &((*prev)->next_in_ino);
 	}
-	*prev = ref->next_in_ino;
-	this = ic->nodes;
-	D2(printk(KERN_DEBUG "After remove_node_ref_from_ino_list: \n" KERN_DEBUG);
-	while(this) {
-		printk( "0x%08x(%d)->", this->flash_offset & ~3, this->flash_offset &3);
-		if (++c == 5) {
-			printk("\n" KERN_DEBUG);
-			c=0;
-		}
-		this = this->next_in_ino;
+
+	/* PARANOIA */
+	if (!ic) {
+		printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
+		return;
 	}
-	printk("\n"););
+
+	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
+		  jeb->offset, jeb->offset + c->sector_size, ic->ino));
+
+	D2({
+		int i=0;
+		struct jffs2_raw_node_ref *this;
+		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);
+
+		this = ic->nodes;
+	   
+		while(this) {
+			printk( "0x%08x(%d)->", this->flash_offset & ~3, this->flash_offset &3);
+			if (++i == 5) {
+				printk("\n" KERN_DEBUG);
+				i=0;
+			}
+			this = this->next_in_ino;
+		}
+		printk("\n");
+	});
+
 	if (ic->nodes == (void *)ic) {
 		D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
-		jffs2_del_ino_cache(sbinfo, ic);
+		jffs2_del_ino_cache(c, ic);
 		jffs2_free_inode_cache(ic);
 	}
 }
@@ -220,8 +244,8 @@
 		
 		/* Remove from the inode-list */
 		if (ref->next_in_ino)
-			jffs2_remove_node_ref_from_ino_list(c, ref);
-		/* else it was a non-inode node so don't bother */
+			jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
+		/* else it was a non-inode node or already removed, so don't bother */
 
 		jffs2_free_raw_node_ref(ref);
 	}
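
Stripped of the JFFS2 specifics (the per-inode lists that terminate at
their jffs2_inode_cache, the D1/D2 debug output), what the patch
implements is a single-pass, pointer-to-pointer unlink over a singly
linked list. A standalone sketch with hypothetical types, not the real
JFFS2 structures:

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned int offset;
	struct node *next;
};

/* One pass over the list: unlink and free every node whose offset
   lies inside the eraseblock [start, start + len). Walking with a
   pointer-to-pointer avoids a special case for the list head. */
static void remove_nodes_in_block(struct node **head,
				  unsigned int start, unsigned int len)
{
	struct node **prev = head;

	while (*prev) {
		struct node *this = *prev;

		if (this->offset >= start && this->offset < start + len) {
			*prev = this->next;	/* unlink; don't advance */
			free(this);
		} else {
			prev = &this->next;	/* keep it; advance */
		}
	}
}

int main(void)
{
	struct node *head = NULL, **tail = &head;
	unsigned int off;

	/* Nodes at 0x00, 0x10, ..., 0x70; "erase" the block [0x20, 0x40) */
	for (off = 0; off < 0x80; off += 0x10) {
		struct node *n = malloc(sizeof(*n));

		n->offset = off;
		n->next = NULL;
		*tail = n;
		tail = &n->next;
	}
	remove_nodes_in_block(&head, 0x20, 0x20);

	while (head) {
		struct node *n = head;

		printf("0x%02x ", n->offset);
		head = n->next;
		free(n);
	}
	printf("\n");	/* prints: 0x00 0x10 0x40 0x50 0x60 0x70 */
	return 0;
}

The patch's test, (flash_offset & ~(c->sector_size-1)) == jeb->offset,
is the same membership check when sector_size is a power of two. The
extra twist in the real code is that the last node's next_in_ino points
at the jffs2_inode_cache itself rather than NULL, which is why the walk
stashes ic when it reaches it and restarts from ic->nodes.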


--
dwmw2

